diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..02e8202dfef3555b6b365c9be5cb4a7944a24318
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+push.sh
diff --git a/cuda_code/04matrix_transposed.cu b/cuda_code/04matrix_transposed.cu
new file mode 100644
index 0000000000000000000000000000000000000000..916fbf10ad9a95c527cfa486d0bdcae016a85bf9
--- /dev/null
+++ b/cuda_code/04matrix_transposed.cu
@@ -0,0 +1,77 @@
+#include <stdio.h>
+#include <assert.h>
+#include "Error.h"
+
+#define N 4
+
+__global__ void transposedMatrixKernel(int* d_a, int* d_b) {
+    int i = threadIdx.x + blockDim.x * blockIdx.x;
+    int j = threadIdx.y + blockDim.y * blockIdx.y;
+
+    d_b[i * N + j] = d_a[j * N + i];
+}
+
+void onDevice(int h_a[][N], int h_b[][N]) {
+    // declare GPU memory pointers
+    int *d_a, *d_b;
+
+    const int ARRAY_BYTES = N * N * sizeof(int);
+
+    // allocate memory on the GPU
+    HANDLER_ERROR_ERR(cudaMalloc((void**)&d_a, ARRAY_BYTES));
+    HANDLER_ERROR_ERR(cudaMalloc((void**)&d_b, ARRAY_BYTES));
+
+    // copy data from the CPU to the GPU
+    HANDLER_ERROR_ERR(
+        cudaMemcpy(d_a, h_a, ARRAY_BYTES, cudaMemcpyHostToDevice));
+    HANDLER_ERROR_ERR(
+        cudaMemcpy(d_b, h_b, ARRAY_BYTES, cudaMemcpyHostToDevice));
+
+    // execution configuration
+    dim3 GridBlocks(1, 1);
+    dim3 ThreadsBlocks(4, 4);
+
+    // run the kernel
+    transposedMatrixKernel<<<GridBlocks, ThreadsBlocks>>>(d_a, d_b);
+    HANDLER_ERROR_MSG("kernel panic!!!");
+
+    // copy data back from the GPU to the CPU
+    HANDLER_ERROR_ERR(
+        cudaMemcpy(h_b, d_b, ARRAY_BYTES, cudaMemcpyDeviceToHost));
+
+    // free GPU memory
+    HANDLER_ERROR_ERR(cudaFree(d_a));
+    HANDLER_ERROR_ERR(cudaFree(d_b));
+}
+
+void test(int h_a[][N], int h_b[][N]) {
+    // test result
+    for (int i = 0; i < N; i++) {
+        for (int j = 0; j < N; j++) {
+            assert(h_a[j][i] == h_b[i][j]);
+        }
+    }
+
+    printf("-: successful execution :-\n");
+}
+
+void onHost() {
+    int i, j, k = 0;
+    int h_a[N][N], h_b[N][N];
+
+    for (i = 0; i < N; i++) {
+        for (j = 0; j < N; j++) {
+            h_a[i][j] = k;
+            h_b[i][j] = 0;
+            k++;
+        }
+    }
+
+    // call device configuration
+    onDevice(h_a, h_b);
+    test(h_a, h_b);
+}
+
+int main() {
+    onHost();
+}
diff --git a/cuda_code/11-convolution.cu b/cuda_code/11-convolution.cu
new file mode 100644
index 0000000000000000000000000000000000000000..126cc7755b0f2a015b06a672fabc1a9486649fc0
--- /dev/null
+++ b/cuda_code/11-convolution.cu
@@ -0,0 +1,50 @@
+extern "C" {
+    //#define Mask_width 5
+    //#define Mask_radius Mask_width/2
+    #define O_TILE_WIDTH 12
+    #define BLOCK_WIDTH (O_TILE_WIDTH+4)
+    #define clamp(x, start, end) min(max(x, start), end)
+    __global__ void convolution_2D_kernel(float*P,float*N,int height,int width,int channels,const float* __restrict__ M,int Mask_width){
+        int Mask_radius = Mask_width/2;
+        __shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH*3];
+        int tx=threadIdx.x;
+        int ty=threadIdx.y;
+        int row_o=blockIdx.y*O_TILE_WIDTH+ty;
+        int col_o=blockIdx.x*O_TILE_WIDTH+tx;
+
+        int row_i=row_o-2;
+        int col_i=col_o-2;
+
+        int i=0;
+        int j=0;
+        int k=0;
+        if((row_i>=0)&&(row_i<height)&&(col_i>=0)&&(col_i<width)
+#include
+#include
+#include
+#include
+#define N 1024 * 1024 * 128
+#define KN 9
+#define THREADSPERBLOCK 1024
+#define BLOCKSPERGRID 1
+
+float data[N];
+float kernel[KN];
+float output[N-KN+1];
+float output_from_device[N-KN+1];
+
+__global__ void conv( float *data_cuda, float *kernel, float *output ){
+    int tid = threadIdx.x;
+    while (tid < N - KN + 1) {
+        for(int i = 0; i < KN; i++) {
+            output[tid] += data_cuda[tid + i] * kernel[i];
+        }
+        tid += blockDim.x;
+    }
+}
+
+int main(){
+    int cpu = true;
+    int pass = 1;
+    cudaError_t cuError = cudaSuccess;
+
+    double elapsedTimeCPU;
+    struct timespec t_start, t_end;
+
+    float elapsedTime;
+    cudaEvent_t start, stop;
+    cudaEventCreate(&start);
+    cudaEventCreate(&stop);
+
+    // generate dummy data
+    srand(time(NULL));
+    for (int i = 0; i < KN; i++) {
+        kernel[i] = rand() / (float)RAND_MAX;
+    }
+
+    srand(time(NULL));
+    for (int i = 0; i < N; i++) {
+        data[i] = rand() / (float)RAND_MAX;
+    }
+
+    // CPU
+    if (cpu) {
+        clock_gettime( CLOCK_REALTIME, &t_start);
+        for (int i = 0; i < N-KN+1; i++) {
+            output[i] = 0;
+            for (int j = 0; j < KN; j++) {
+                output[i] += kernel[j] * data[i+j];
+            }
+        }
+        clock_gettime( CLOCK_REALTIME, &t_end);
+        elapsedTimeCPU = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
+        elapsedTimeCPU += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
+        printf("CPU elapsedTime: %lf ms\n", elapsedTimeCPU);
+    }
+
+    // GPU
+    float *d_kernel, *d_data, *d_output;
+    if (cudaMalloc( (void**)&d_kernel, KN * sizeof(float) ) != cudaSuccess) return 1;
+    if (cudaMalloc( (void**)&d_data, N * sizeof(float) ) != cudaSuccess) return 1;
+    if (cudaMalloc( (void**)&d_output, (N-KN+1) * sizeof(float) ) != cudaSuccess) return 1;
+    if (cudaMemcpy( d_kernel, kernel, KN * sizeof(float), cudaMemcpyHostToDevice ) != cudaSuccess) return 1;
+    if (cudaMemcpy( d_data, data, N * sizeof(float), cudaMemcpyHostToDevice ) != cudaSuccess) return 1;
+
+    cudaEventRecord(start, 0);
+    conv<<<BLOCKSPERGRID, THREADSPERBLOCK>>>(d_data, d_kernel, d_output);
+    cudaEventRecord(stop, 0);
+    cudaEventSynchronize(stop);
+    cudaEventElapsedTime(&elapsedTime, start, stop);
+    printf("GPU time: %13f msec\n", elapsedTime);
+
+    cudaMemcpy( output_from_device, d_output, (N-KN+1) * sizeof(float), cudaMemcpyDeviceToHost );
+    cudaEventDestroy(start);
+    cudaEventDestroy(stop);
+
+    // capture the launch error code so the message below reports it
+    cuError = cudaGetLastError();
+    if (cuError != cudaSuccess)
+    {
+        printf ("Failed in kernel launch and reason is %s\n", cudaGetErrorString(cuError)) ;
+        return 1 ;
+    }
+
+    //check correctness
+    if (cpu) {
+        for (int i = 0; i < N-KN+1; i++){
+            if((output_from_device[i] - output[i]) > 0.001){
+                printf("CPU:%lf GPU:%lf\n",output[i], output_from_device[i] );
+                pass = 0;
+                break;
+            }
+        }
+        if(pass == 1) {
+            printf("Test pass!\n");
+            printf("GPU / CPU = %f\n", elapsedTimeCPU / elapsedTime);
+        }
+        else
+            printf("Test fail!\n");
+    }
+}
\ No newline at end of file
diff --git a/cuda_code/28-pyrup-pyrdown.cu b/cuda_code/28-pyrup-pyrdown.cu
new file mode 100644
index 0000000000000000000000000000000000000000..b0006734c5cc83bf06a0ae8c0f51f7b91b32db91
--- /dev/null
+++ b/cuda_code/28-pyrup-pyrdown.cu
@@ -0,0 +1,90 @@
+extern "C" {
+
+    __global__ void pyrup_rgb_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
+    {
+        const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
+        const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
+        const int color_tid = (xIndex)* aabhas + (3 * (yIndex));
+        const int color_tid1= (xIndex/2)* colorWidthStep + (3 * (yIndex/2));
+        if(yIndex >=width || xIndex>=height)
+        {
+            return;
+        }
+
+        if(yIndex%2==0 &&xIndex%2==0)
+        {
+            d_out[color_tid]=d_in[color_tid1];
+            d_out[color_tid+1]=d_in[color_tid1+1];
+            d_out[color_tid+2]=d_in[color_tid1+2];
+        }
+        else
+        {
+            d_out[color_tid]=0;
+            d_out[color_tid+1]=0;//d_in[color_tid1+1];
+            d_out[color_tid+2]=0;//d_in[color_tid1+2];
+
+        }
+    }
+
+    __global__ void pyrup_gray_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width)
+    {
+        const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
+        const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
+        const int
color_tid = (xIndex)* aabhas + yIndex; + const int color_tid1= (xIndex/2)* colorWidthStep + yIndex/2; + if(yIndex >=width || xIndex>=height) + { + return; + } + + if(yIndex%2==0 &&xIndex%2==0) + { + d_out[color_tid]=d_in[color_tid1]; + //d_out[color_tid+1]=d_in[color_tid1+1]; + //d_out[color_tid+2]=d_in[color_tid1+2]; + } + else + { + d_out[color_tid]=255; + //d_out[color_tid+1]=0;//d_in[color_tid1+1]; + //d_out[color_tid+2]=0;//d_in[color_tid1+2]; + + } + } + + + __global__ void pyrdown_rgb_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width) + { + const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; + const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; + const int color_tid = (xIndex)* aabhas + (3 * (yIndex)); + const int color_tid1= (2*xIndex)* colorWidthStep + (3 * (2*yIndex)); + if(yIndex >=width || xIndex>=height) + { + + return; + } + + d_out[color_tid]=d_in[color_tid1]; + d_out[color_tid+1]=d_in[color_tid1+1]; + d_out[color_tid+2]=d_in[color_tid1+2]; + } + + __global__ void pyrdown_gray_kernel(unsigned char *d_in,unsigned char *d_out,int colorWidthStep,int aabhas,int height,int width) + { + const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; + const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; + const int color_tid = (xIndex)* aabhas + yIndex; + const int color_tid1= (2*xIndex)* colorWidthStep + 2*yIndex; + if(yIndex >=width || xIndex>=height) + { + + return; + } + + d_out[color_tid]=d_in[color_tid1]; + //d_out[color_tid+1]=d_in[color_tid1+1]; + //d_out[color_tid+2]=d_in[color_tid1+2]; + } + +} \ No newline at end of file diff --git a/cuda_code/2d_xyWENOADV_p.cu b/cuda_code/2d_xyWENOADV_p.cu new file mode 100644 index 0000000000000000000000000000000000000000..b4bf83bbb64755cf8d7f1692d7aa870d338abd23 --- /dev/null +++ b/cuda_code/2d_xyWENOADV_p.cu @@ -0,0 +1,169 @@ +// Andrew Gloster +// November 2018 +// Example of advection in 2D with upwinding WENO + +// Copyright 2018 Andrew Gloster + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ + +// --------------------------------------------------------------------- +// Standard Libraries and Headers +// --------------------------------------------------------------------- + +#include +#include +#include +#include "cuda.h" + +// --------------------------------------------------------------------- +// cuSten - Note the file position is relative +// --------------------------------------------------------------------- + +#include "../../cuSten/cuSten.h" + +// --------------------------------------------------------------------- +// MACROS +// --------------------------------------------------------------------- + +#define BLOCK_X 32 +#define BLOCK_Y 32 + +// --------------------------------------------------------------------- +// Main Program +// --------------------------------------------------------------------- + +int main() +{ + // Set the device number + int deviceNum = 0; + + // Declare Domain Size + int nx = 8192; + int ny = 8192; + + double lx = 2 * M_PI; + double ly = 2 * M_PI; + + // Domain spacings + double dx = lx / (double) (nx); + double dy = ly / (double) (ny); + + // Set the number of tiles per device + int numTiles = 4; + + // Initial Conditions + double* dataInput; + double* dataOutput; + double* u; + double* v; + + // ----------------------------- + // Allocate the memory + // ----------------------------- + + cudaMallocManaged(&dataInput, nx * ny * sizeof(double)); + cudaMallocManaged(&dataOutput, nx * ny * sizeof(double)); + + cudaMallocManaged(&u, nx * ny * sizeof(double)); + cudaMallocManaged(&v, nx * ny * sizeof(double)); + + // ----------------------------- + // Set the initial conditions + // ----------------------------- + + // Indexing + int temp; + int index; + + for (int j = 0; j < ny; j++) + { + temp = j * nx; + + for (int i = 0; i < nx; i++) + { + index = temp + i; + + dataInput[index] = cos(i * dx) * sin(j * dy); + dataOutput[index] = 0.0; + + u[index] = sin(j * dy); + v[index] = - sin(i * dx); + } + } + + // Ensure all the above is completed + cudaDeviceSynchronize(); + + // ----------------------------- + // Set up device + // ----------------------------- + + // Set up the compute device structs + cuSten_t xyWENOCompute; + + // Initialise the instance of the stencil + cuStenCreate2DXYWENOADVp( + &xyWENOCompute, + + deviceNum, + + numTiles, + + nx, + ny, + + BLOCK_X, + BLOCK_Y, + + dx, + dy, + + u, + v, + + dataOutput, + + dataInput + ); + + // Synchronise to ensure everything initialised + cudaDeviceSynchronize(); + + // ----------------------------- + // Compute + // ----------------------------- + + // Run the computation + cuStenCompute2DXYWENOADVp(&xyWENOCompute, HOST); + + // // Synchronise at the end to ensure everything is complete + cudaDeviceSynchronize(); + + // ----------------------------- + // Destroy struct and free memory + // ----------------------------- + + // Destroy struct + cuStenDestroy2DXYWENOADVp(&xyWENOCompute); + + // Free memory at the end + cudaFree(dataInput); + cudaFree(dataOutput); + + cudaFree(u); + cudaFree(v); + + // Return 0 when the program completes + return 0; +} \ No newline at end of file diff --git a/cuda_code/2mm.cu b/cuda_code/2mm.cu new file mode 100644 index 0000000000000000000000000000000000000000..0f3019339e866cbc9fea1d368221fb4c35c8a2ff --- /dev/null +++ b/cuda_code/2mm.cu @@ -0,0 +1,244 @@ +/** + * 2mm.cu: This file is part of the PolyBench/GPU 1.0 test suite. 
+ * + * + * Contact: Scott Grauer-Gray + * Louis-Noel Pouchet + * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../../../common/polybenchUtilFuncts.h" + +//define the error threshold for the results "not matching" +#define PERCENT_DIFF_ERROR_THRESHOLD 0.05 + +#define GPU_DEVICE 0 + +/* Problem size. */ +# define NI 2048 * 4 +# define NJ 2048 * 4 +# define NK 2048 * 4 +# define NL 2048 * 4 + +/* Thread block dimensions */ +#define DIM_THREAD_BLOCK_X 32 +#define DIM_THREAD_BLOCK_Y 8 + +/* Can switch DATA_TYPE between float and double */ +typedef float DATA_TYPE; + + + +void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu) +{ + int i, j; + + for (i = 0; i < NI; i++) + { + for (j = 0; j < NK; j++) + { + A[i*NI + j] = ((DATA_TYPE) i*j) / NI; + A_gpu[i*NI + j] = ((DATA_TYPE) i*j) / NI; + } + } + + for (i = 0; i < NK; i++) + { + for (j = 0; j < NJ; j++) + { + B[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ; + B_gpu[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ; + } + } + + for (i = 0; i < NL; i++) + { + for (j = 0; j < NJ; j++) + { + C[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL; + C_gpu[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL; + } + } + + for (i = 0; i < NI; i++) + { + for (j = 0; j < NL; j++) + { + D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK; + D_gpu[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK; + } + } +} + + +void compareResults(DATA_TYPE *E, DATA_TYPE *E_outputFromGpu) +{ + int i,j,fail; + fail = 0; + + for (i=0; i < NL; i++) + { + for (j=0; j < NI; j++) + { + if (percentDiff(E[i*NI + j], E_outputFromGpu[i*NI + j]) > PERCENT_DIFF_ERROR_THRESHOLD) + { + fail++; + } + } + } + + // print results + printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); +} + + +void GPU_argv_init() +{ + cudaDeviceProp deviceProp; + cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); + printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); + cudaSetDevice( GPU_DEVICE ); +} + + +__global__ void mm2_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) +{ + int j = blockIdx.x * blockDim.x + threadIdx.x; + int i = blockIdx.y * blockDim.y + threadIdx.y; + + if ((i < NI) && (j < NJ)) + { + int k; + for (k = 0; k < NK; k++) + { + C[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; + } + } +} + + +__global__ void mm2_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E) +{ + int j = blockIdx.x * blockDim.x + threadIdx.x; + int i = blockIdx.y * blockDim.y + threadIdx.y; + + if ((i < NI) && (j < NL)) + { + int k; + for (k = 0; k < NJ; k++) + { + E[i * NL + j] += C[i * NJ + k] * D[k * NL + j]; + } + } +} + + +void mm2_cpu(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E) +{ + int i, j, k; + + for (i = 0; i < NI; i++) + { + for (j = 0; j < NJ; j++) + { + C[i*NJ + j] = 0.0; + for (k = 0; k < NK; ++k) + { + C[i*NJ + j] += A[i*NK + k] * B[k*NJ + j]; + } + } + } + + for (i = 0; i < NI; i++) + { + for (j = 0; j < NL; j++) + { + E[i*NL + j] = 0.0; + for (k = 0; k < NJ; ++k) + { + E[i*NL + j] += C[i*NJ + k] * D[k*NL + j]; + } + } + } +} + + +void mm2Cuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu, DATA_TYPE* E_gpu) +{ + double t_start, t_end; + dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); + dim3 grid1((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) ); + dim3 grid2((size_t)ceil( 
((float)NL) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) ); + t_start = rtclock(); + mm2_kernel1<<>>(A_gpu, B_gpu, C_gpu); + cudaDeviceSynchronize(); + mm2_kernel2<<>>(C_gpu, D_gpu, E_gpu); + cudaDeviceSynchronize(); + t_end = rtclock(); + fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); +} + + +int main(int argc, char** argv) +{ + double t_start, t_end; + + DATA_TYPE* C; + DATA_TYPE* A; + DATA_TYPE* B; + DATA_TYPE* D; + DATA_TYPE* E; + + DATA_TYPE *A_gpu; + DATA_TYPE *B_gpu; + DATA_TYPE *C_gpu; + DATA_TYPE *D_gpu; + DATA_TYPE *E_gpu; + + C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); + A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); + B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); + D = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE)); + E = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); + + + cudaMallocManaged(&A_gpu, sizeof(DATA_TYPE) * NI * NK); + cudaMallocManaged(&B_gpu, sizeof(DATA_TYPE) * NK * NJ); + cudaMallocManaged(&C_gpu, sizeof(DATA_TYPE) * NI * NJ); + cudaMallocManaged(&D_gpu, sizeof(DATA_TYPE) * NJ * NL); + cudaMallocManaged(&E_gpu, sizeof(DATA_TYPE) * NI * NL); + + + init_array(A, B, C, D, A_gpu, B_gpu, C_gpu, D_gpu); + GPU_argv_init(); + + mm2Cuda(A_gpu, B_gpu, C_gpu, D_gpu, E_gpu); + + t_start = rtclock(); + mm2_cpu(A, B, C, D, E); + t_end = rtclock(); + fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); + + compareResults(E, E_gpu); + + free(C); + free(A); + free(B); + free(D); + free(E); + cudaFree(A_gpu); + cudaFree(B_gpu); + cudaFree(C_gpu); + cudaFree(D_gpu); + cudaFree(E_gpu); + return 0; +} + diff --git a/cuda_code/ACNetHDNL3.cu b/cuda_code/ACNetHDNL3.cu new file mode 100644 index 0000000000000000000000000000000000000000..3ee0857a0e8d8c2cf29f56ab900d8a39dd1028ae --- /dev/null +++ b/cuda_code/ACNetHDNL3.cu @@ -0,0 +1,1653 @@ +#include "CudaHelper.cuh" +#include "CudaInterface.hpp" + +__device__ __constant__ static const float kernelsL1[9 * 8] = +{ +-0.0461f, 0.1274f, 0.2976f, +-0.0393f, -0.1251f, 0.2527f, + 0.0791f, 0.0600f, -0.0303f, +-0.0520f, -0.5039f, -0.3305f, +-0.0115f, 0.0456f, 0.4370f, + 0.0601f, 0.0780f, 0.3106f, +-0.0017f, -0.0018f, -0.0017f, +-0.0017f, -0.0018f, -0.0018f, +-0.0017f, -0.0017f, -0.0017f, + 0.2666f, 0.1687f, 0.2303f, +-0.1901f, 0.3825f, 0.3024f, + 0.1811f, 0.0581f, 0.2080f, +-0.1246f, 0.0155f, -0.4075f, + 0.1156f, 0.5929f, 0.1449f, +-0.1080f, -0.0171f, -0.0516f, +-0.0817f, 0.2247f, 0.0472f, + 0.0394f, 0.1085f, 0.1435f, +-0.0480f, -0.0135f, -0.0606f, +-0.0083f, 0.2045f, 0.1056f, +-0.2239f, 0.2823f, -0.1926f, + 0.2581f, 0.1362f, -0.1914f, +-0.0833f, 0.0702f, 0.0234f, + 0.3616f, 0.3789f, -0.1840f, + 0.0128f, 0.1347f, -0.0187f +}; +__device__ __constant__ static const float biasL1[8] = +{ +-0.1329f, -0.0431f, -0.0031f, -0.0129f, 0.2294f, -0.2595f, -0.2370f, -0.0499f +}; +__device__ __constant__ static const float kernelsL[8][9 * 8 * 8] = +{ +{ + 1.4090e-01f, -1.8985e-02f, -6.8589e-02f, + 6.6491e-02f, 1.4360e-02f, 8.5223e-02f, + 1.8782e-01f, 9.8042e-02f, -3.4558e-02f, + 2.5606e-01f, 2.2027e-01f, 2.7603e-01f, + 1.9424e-01f, 3.4537e-02f, 9.5975e-02f, + 1.1223e-02f, -4.3377e-01f, -1.4760e-01f, +-3.4293e-40f, -5.5421e-40f, -4.4763e-41f, +-6.3322e-40f, -3.1495e-40f, -7.8264e-41f, +-1.5375e-40f, -3.3656e-40f, 5.2441e-40f, + 1.2413e-01f, 1.5682e-01f, 1.1465e-01f, + 1.6683e-02f, 7.8382e-02f, 1.0110e-01f, + 1.4902e-01f, 1.3608e-01f, 1.1674e-01f, +-6.5160e-02f, 7.7748e-02f, 2.1773e-02f, + 2.0652e-02f, 2.7245e-01f, 1.0297e-01f, +-2.0953e-02f, 6.1685e-02f, 4.4128e-02f, + 6.1538e-02f, -1.9746e-02f, -1.2785e-02f, + 
2.5931e-02f, 1.2740e-01f, 9.0033e-02f, + 8.6448e-02f, 2.0684e-01f, 9.8063e-02f, +-7.8384e-03f, 6.3277e-02f, 7.6751e-03f, + 3.5956e-02f, 1.0555e-01f, 4.2728e-02f, + 7.1578e-02f, 1.3253e-01f, 1.1171e-01f, +-2.7538e-02f, 1.5836e-01f, 1.0014e-01f, +-4.9113e-02f, 1.6911e-01f, 2.7329e-01f, + 7.9170e-03f, 9.5440e-02f, 1.3922e-01f, + 8.0151e-02f, 4.3438e-02f, 5.5314e-02f, + 3.4896e-02f, 1.6816e-01f, -4.5783e-03f, +-1.4579e-03f, 2.0493e-01f, 2.6238e-02f, + 2.6499e-02f, 3.9490e-01f, -1.1582e-02f, + 3.5790e-01f, 1.4317e-01f, -2.1775e-01f, + 4.1794e-03f, -3.2513e-01f, -1.6729e-01f, + 3.4040e-41f, -6.2960e-42f, -1.0067e-40f, + 5.5978e-41f, -1.2353e-40f, -1.1347e-40f, + 5.4572e-40f, -6.4384e-40f, -4.1234e-40f, +-9.3690e-02f, 1.7765e-01f, 1.1275e-01f, + 9.1159e-03f, 1.7375e-01f, 1.1427e-01f, +-7.8385e-02f, 1.5658e-01f, -3.8399e-02f, +-1.0756e-01f, 5.9943e-02f, -6.7273e-02f, +-1.1117e-01f, 1.5267e-01f, 1.1563e-01f, +-1.2964e-01f, -3.8604e-02f, -2.4532e-02f, + 1.6324e-02f, 1.3112e-01f, 6.1679e-03f, +-7.7703e-03f, 2.6311e-01f, 8.9427e-02f, +-2.8948e-02f, 1.9341e-01f, 4.4339e-02f, + 6.4559e-03f, -6.8885e-02f, 1.1481e-01f, +-1.0665e-01f, 3.8613e-02f, 7.0410e-02f, +-6.1680e-02f, -1.7374e-02f, 9.5475e-03f, +-4.0081e-02f, -3.1549e-02f, 2.8311e-01f, +-1.2178e-01f, -1.3848e-01f, 1.7416e-01f, +-8.1756e-02f, -1.7718e-01f, 7.9533e-02f, +-3.1299e-03f, -3.2305e-03f, -3.2094e-03f, +-3.1548e-03f, -3.2553e-03f, -3.2453e-03f, +-3.1459e-03f, -3.2278e-03f, -3.2076e-03f, +-3.6554e-05f, -3.6715e-05f, -3.1284e-05f, +-1.4927e-05f, -1.4357e-05f, -1.2185e-05f, +-1.5771e-09f, -1.1439e-09f, -6.4952e-10f, + 3.7723e-40f, 4.9166e-40f, -2.1946e-40f, +-4.7599e-40f, -4.3356e-40f, -8.3928e-41f, + 2.6127e-40f, 4.8634e-40f, 2.7720e-40f, +-5.4972e-03f, -5.6409e-03f, -5.6919e-03f, +-5.5818e-03f, -5.7079e-03f, -5.7542e-03f, +-5.6338e-03f, -5.7437e-03f, -5.7600e-03f, +-3.7940e-03f, -3.8853e-03f, -3.8693e-03f, +-3.8995e-03f, -3.9616e-03f, -3.8945e-03f, +-3.8438e-03f, -3.9156e-03f, -3.8269e-03f, +-7.2342e-05f, -7.8682e-05f, -4.7701e-05f, +-1.1126e-04f, -1.1918e-04f, -7.8931e-05f, +-1.1644e-04f, -1.2418e-04f, -8.2350e-05f, +-2.3881e-04f, -3.7971e-04f, -3.9448e-04f, +-2.4112e-04f, -3.8395e-04f, -4.0189e-04f, +-2.3451e-04f, -3.7525e-04f, -3.9222e-04f, +-3.9853e-03f, -4.0748e-03f, -4.1134e-03f, +-4.0685e-03f, -4.1456e-03f, -4.1548e-03f, +-4.0547e-03f, -4.1388e-03f, -4.1357e-03f, + 5.3008e-02f, 2.2252e-02f, -7.1158e-02f, +-6.6411e-02f, -3.0015e-02f, -2.2526e-02f, + 1.2259e-01f, -6.2488e-02f, 5.6190e-02f, + 1.5981e-02f, -7.6832e-02f, 1.7908e-02f, + 2.7618e-01f, 5.4054e-02f, 8.7282e-02f, + 1.5212e-02f, -1.1097e-01f, -2.2265e-02f, +-6.8532e-41f, -6.0539e-40f, 4.6269e-40f, +-2.9221e-40f, -3.8468e-40f, -4.6656e-40f, + 6.4572e-40f, -6.1625e-40f, 6.4545e-40f, + 3.5920e-02f, 9.0955e-02f, -1.7626e-02f, + 4.7826e-02f, 1.8832e-01f, -4.4043e-02f, +-3.8405e-02f, 5.9176e-02f, 6.8182e-02f, + 3.7657e-03f, 2.6441e-02f, -2.5585e-01f, + 1.0969e-01f, 2.3914e-01f, 3.5120e-02f, +-1.6252e-01f, 3.4371e-02f, -2.7501e-01f, + 4.9289e-02f, 2.2088e-02f, -1.4588e-02f, + 1.6384e-01f, -8.1421e-03f, -6.9613e-02f, + 1.0820e-01f, 1.1137e-01f, 7.2648e-03f, + 1.5243e-01f, 1.3659e-01f, 2.7553e-02f, + 1.3966e-01f, 1.1019e-01f, 1.9817e-02f, + 1.1420e-01f, -5.1386e-03f, 6.8617e-03f, +-1.3264e-02f, 2.1508e-01f, 4.8430e-02f, + 5.1149e-02f, 2.9165e-01f, 2.8077e-01f, + 2.9288e-03f, 9.0611e-02f, 8.1538e-02f, +-1.1812e-01f, 1.5603e-02f, 1.1571e-01f, +-3.4958e-02f, -1.6688e-03f, -4.6619e-02f, +-1.0417e-02f, -3.1802e-02f, 1.8357e-02f, + 1.1064e-01f, 1.8397e-01f, 4.8449e-02f, +-8.3336e-03f, 
1.6029e-01f, 3.9490e-02f, +-4.0959e-01f, -2.6134e-01f, 2.0766e-02f, + 6.6073e-41f, -6.7490e-40f, -5.1131e-41f, +-4.3320e-41f, -3.7194e-40f, 2.0674e-40f, +-5.2359e-40f, -3.4006e-40f, -4.9257e-40f, +-4.7260e-02f, 2.8518e-03f, -2.7764e-01f, + 6.9182e-03f, 1.3938e-01f, -1.3162e-01f, +-6.0901e-03f, 1.0339e-01f, 6.0419e-02f, +-1.4449e-01f, -3.2043e-02f, -9.1466e-02f, +-1.4022e-02f, 3.1703e-01f, 5.8166e-02f, +-1.5243e-02f, 1.4521e-01f, 2.0790e-04f, +-1.0255e-01f, -7.8766e-02f, -1.2395e-01f, + 7.9894e-03f, 3.7079e-03f, -3.2134e-02f, + 1.1663e-01f, 1.4808e-01f, 2.0431e-01f, + 7.4026e-02f, 6.9632e-02f, 1.7156e-01f, +-3.0385e-02f, 2.3218e-01f, 7.3855e-02f, +-8.8530e-02f, -5.9224e-02f, 2.3431e-02f, + 1.4596e-02f, 3.2442e-02f, -1.1308e-01f, +-6.3734e-02f, 2.5270e-01f, 7.8081e-02f, + 1.0468e-02f, 1.5473e-01f, 3.8676e-02f, +-1.0842e-01f, 8.6778e-03f, 1.4985e-01f, + 8.1757e-03f, -8.2109e-02f, 8.5471e-02f, +-2.1437e-01f, -6.1173e-02f, 4.8163e-02f, + 2.8965e-01f, 1.9748e-01f, 4.2651e-02f, + 1.8196e-01f, 3.3932e-01f, 3.9594e-01f, + 3.9657e-01f, 4.2167e-01f, 2.9290e-01f, + 7.4011e-41f, 6.5220e-40f, -5.9885e-40f, + 7.4011e-41f, 6.2047e-40f, -7.1533e-40f, + 4.1950e-40f, -1.1886e-40f, -5.9922e-40f, + 1.9662e-01f, 2.1402e-01f, 3.1041e-02f, +-1.1079e-01f, 1.3361e-01f, -2.1608e-01f, +-1.7962e-01f, -8.0576e-02f, -3.1277e-01f, + 1.0620e-02f, 2.4024e-01f, 1.0657e-01f, +-7.9906e-05f, 2.8760e-01f, 4.1231e-02f, +-1.3261e-02f, -1.0868e-01f, -1.1267e-01f, +-1.0659e-02f, -2.6051e-02f, -4.5389e-02f, + 5.8261e-02f, 4.0288e-02f, 6.7050e-02f, +-2.6462e-01f, -1.7846e-01f, -1.0002e-01f, +-6.2904e-02f, 1.5275e-01f, 4.4282e-03f, + 1.4446e-01f, 1.1814e-01f, -8.0349e-02f, + 2.0331e-02f, 3.3014e-02f, 1.2710e-01f, + 1.6084e-01f, 3.8819e-01f, 1.0854e-01f, +-6.8126e-03f, 3.5673e-01f, 1.8938e-01f, +-1.1660e-01f, -5.7694e-02f, -2.9194e-01f, + 1.2775e-02f, -3.2769e-02f, 1.7228e-02f, + 1.8324e-01f, 1.1983e-01f, -1.6944e-02f, + 1.0593e-01f, 1.3451e-01f, 5.2536e-02f, + 1.9147e-01f, 1.3875e-01f, 1.0298e-01f, +-2.0871e-01f, -1.7197e-01f, 1.1342e-01f, +-1.7581e-01f, 4.0972e-02f, 2.9796e-01f, + 3.2588e-40f, -4.3663e-40f, -2.6518e-40f, + 3.2588e-40f, -4.3663e-40f, -2.6518e-40f, + 4.1600e-40f, -4.4350e-40f, -4.8744e-41f, + 3.7289e-02f, 8.1769e-03f, 1.7059e-02f, + 3.7735e-02f, 6.6571e-02f, -6.6137e-02f, +-5.8890e-02f, -7.7019e-03f, -6.2128e-02f, +-4.0751e-02f, 1.1710e-01f, -1.1586e-01f, +-1.2999e-01f, -1.6384e-02f, -2.1858e-01f, +-2.8028e-01f, -6.0443e-02f, -1.1880e-01f, + 1.8152e-01f, 1.5364e-01f, 1.1781e-01f, + 2.9010e-01f, 2.4612e-01f, 1.3170e-01f, + 1.9022e-01f, 1.8117e-01f, 1.6483e-01f, + 9.3342e-02f, 2.6607e-01f, 1.4679e-01f, + 1.6729e-01f, 2.5374e-01f, 1.1954e-01f, + 6.3258e-02f, 1.0557e-01f, 6.7221e-02f, +-5.2017e-02f, 1.9628e-01f, 1.7243e-01f, +-3.2667e-02f, 1.5756e-01f, 1.9347e-01f, +-9.5252e-02f, -3.7525e-02f, -3.4543e-04f, +-4.9759e-02f, 4.0383e-02f, -2.0231e-02f, +-1.1776e-01f, 3.4182e-02f, 3.6720e-02f, +-1.4822e-02f, -4.1658e-02f, -1.3729e-02f, +-1.9215e-02f, 2.4427e-02f, -9.0638e-02f, +-1.4438e-01f, -2.1785e-01f, -5.1789e-02f, +-2.0279e-01f, -3.3918e-01f, -1.6871e-01f, + 6.1262e-41f, 2.4066e-40f, 6.6851e-40f, + 5.3430e-40f, -3.2335e-40f, -3.7400e-40f, +-6.3256e-40f, -4.7491e-40f, 2.2854e-40f, +-6.8701e-03f, -1.4849e-02f, 8.6332e-02f, + 1.1686e-01f, 1.8346e-01f, 1.8797e-01f, +-2.3251e-02f, 7.3973e-02f, 1.0532e-01f, +-6.1838e-02f, 5.6667e-02f, 8.1584e-02f, +-3.8900e-02f, 7.0927e-02f, 9.5606e-02f, +-4.5098e-02f, -1.0829e-01f, -1.2224e-01f, + 3.5047e-03f, 3.2898e-02f, 3.5622e-02f, + 1.6170e-02f, 4.3721e-02f, 9.7496e-02f, + 2.3445e-03f, 
6.0417e-02f, 1.3482e-01f, + 6.0570e-02f, -5.7139e-03f, -1.0883e-03f, + 2.2701e-02f, -2.9113e-02f, 7.9178e-03f, + 8.1214e-02f, -4.1408e-02f, 1.3616e-02f, +-4.7985e-02f, 1.0304e-02f, -3.3236e-02f, +-1.6334e-02f, -8.1538e-02f, 1.8629e-02f, +-9.3720e-02f, -1.2920e-01f, -4.0836e-02f +} +, +{ + 1.0443e-01f, 1.5461e-01f, -1.4743e-01f, + 1.6716e-01f, 1.0532e-01f, -2.3088e-01f, + 1.0218e-01f, 1.2393e-01f, -9.6646e-02f, + 1.7659e-01f, -7.3279e-02f, 1.9627e-02f, + 1.7721e-01f, -1.4329e-01f, -1.2533e-01f, + 1.6551e-01f, -3.4616e-01f, 9.5618e-02f, + 4.5827e-09f, 9.3413e-09f, 1.7015e-08f, + 1.2245e-08f, 9.9727e-09f, 6.7108e-09f, + 1.9612e-07f, 3.9479e-08f, 1.1537e-09f, + 2.2127e-02f, 9.2715e-02f, -1.2150e-01f, + 7.5652e-02f, 1.1548e-01f, -1.2420e-01f, +-1.0693e-03f, -7.2839e-02f, -1.9664e-01f, + 1.4466e-01f, -1.8552e-03f, -1.3575e-01f, + 2.0699e-01f, 8.0396e-02f, -1.9651e-01f, +-4.7075e-02f, -5.1259e-02f, -8.2593e-02f, +-2.2385e-01f, 3.0066e-03f, -2.2659e-02f, + 6.1827e-02f, 2.5331e-02f, -5.3898e-02f, + 2.7091e-01f, 1.0991e-01f, -3.3600e-01f, +-8.9499e-02f, -9.3821e-03f, 2.2675e-02f, + 1.1213e-01f, 1.3276e-01f, 2.0368e-02f, + 6.5408e-02f, 4.1598e-02f, -4.7917e-02f, + 6.0740e-03f, 1.2236e-04f, -1.0659e-01f, +-1.8072e-02f, -9.1082e-02f, -9.0414e-02f, + 4.9052e-02f, -1.4298e-01f, -3.9721e-02f, + 1.1840e-01f, 2.2503e-01f, 2.4587e-02f, + 9.3023e-02f, 6.9650e-02f, 1.6798e-01f, +-1.5640e-03f, 1.6300e-02f, 6.3585e-02f, + 1.4431e-01f, 3.7885e-02f, 1.6692e-02f, + 1.7345e-01f, 7.2315e-02f, 1.8942e-02f, + 1.1081e-01f, 8.2973e-02f, -9.7717e-02f, +-5.2264e-03f, -5.2641e-03f, -5.2727e-03f, +-5.2809e-03f, -5.3125e-03f, -5.3153e-03f, +-5.2915e-03f, -5.3251e-03f, -5.3231e-03f, + 6.0008e-02f, 2.0268e-01f, 1.3396e-01f, +-2.5202e-03f, -1.7750e-02f, -1.2019e-02f, + 1.1806e-01f, -2.2306e-02f, 3.6464e-02f, + 7.9324e-02f, 3.1883e-02f, 1.5483e-02f, +-4.3537e-02f, 1.2204e-02f, 1.8905e-02f, +-8.1581e-02f, -1.1307e-01f, -6.0718e-02f, +-2.4865e-01f, -1.0199e-01f, 1.9886e-02f, +-1.0519e-02f, 6.9972e-02f, 4.8012e-02f, +-1.5282e-02f, 1.1979e-01f, 8.7968e-02f, +-3.6752e-02f, 1.9523e-02f, 7.1321e-02f, +-5.8295e-02f, 5.3242e-02f, 1.2773e-01f, +-7.9671e-02f, 8.3249e-04f, 7.4904e-02f, + 1.1792e-01f, 2.2135e-03f, -9.0963e-03f, +-2.8356e-03f, -4.2661e-02f, 6.9497e-02f, + 9.3561e-02f, 1.0475e-01f, 5.4745e-02f, +-8.5901e-02f, -2.1969e-01f, -1.5572e-01f, + 3.6473e-02f, 1.1097e-01f, -2.6830e-02f, + 1.2199e-02f, 1.8917e-01f, 1.1906e-01f, + 1.0664e-01f, -2.7005e-01f, 1.5492e-01f, +-4.1771e-02f, -1.6580e-01f, 2.9234e-02f, +-1.9854e-02f, 2.1436e-01f, -1.1100e-01f, + 4.5382e-04f, 4.2085e-04f, 5.6852e-04f, + 3.4951e-04f, 3.7354e-04f, 3.2786e-04f, + 2.0790e-04f, 2.8606e-04f, 3.2415e-04f, +-1.5500e-02f, 2.2865e-02f, -3.0070e-01f, + 1.8467e-01f, 2.4899e-01f, 1.4812e-02f, +-1.2318e-01f, 2.3175e-01f, 7.2244e-02f, + 1.6713e-01f, 1.9089e-02f, -2.7494e-01f, + 1.0202e-01f, 2.9200e-01f, -3.6055e-03f, + 1.3265e-01f, 2.2551e-01f, 1.9897e-01f, +-3.9474e-02f, 1.6262e-01f, 1.6726e-01f, +-8.6222e-02f, 2.0573e-01f, -7.3247e-01f, +-9.5391e-02f, 3.8933e-01f, 1.5861e-01f, +-1.2202e-01f, -6.4735e-02f, -1.1762e-01f, +-2.2427e-02f, -1.9171e-01f, -1.6092e-01f, + 3.2356e-01f, -2.2234e-01f, -1.3743e-01f, +-1.1493e-01f, -2.4936e-02f, 2.9212e-02f, +-9.8112e-02f, -1.8021e-02f, -1.0507e-01f, +-1.0168e-01f, 1.1759e-01f, -9.8203e-02f, +-2.8871e-02f, 1.3249e-01f, 7.8378e-02f, +-1.1012e-01f, -4.0596e-02f, 5.4202e-02f, + 4.9022e-02f, -1.1744e-01f, 9.8888e-02f, + 1.3343e-02f, 1.4358e-01f, -8.7142e-02f, + 1.9952e-01f, 3.3708e-02f, 2.0721e-02f, + 2.6527e-02f, -2.3822e-01f, 2.4706e-01f, 
+-3.2750e-04f, -2.8475e-04f, -6.3494e-05f, +-2.2378e-04f, -1.8046e-04f, -1.9242e-05f, +-4.2124e-05f, -2.2062e-05f, 4.5500e-07f, + 1.1692e-01f, 4.0366e-01f, -1.8709e-02f, + 8.2700e-02f, 1.7884e-01f, -1.3520e-01f, + 3.7758e-02f, 3.7048e-02f, -2.8109e-01f, +-2.3438e-01f, 5.9423e-02f, -1.7300e-01f, + 1.0343e-02f, 7.2307e-02f, -4.3852e-01f, +-5.7429e-02f, -4.9136e-02f, -8.0327e-02f, + 8.1094e-02f, 2.9118e-02f, 1.6677e-01f, + 1.2155e-01f, 6.5358e-01f, 2.4544e-01f, + 3.1163e-02f, 3.7463e-02f, -2.6613e-01f, + 1.2723e-01f, 1.2541e-01f, 1.4319e-02f, + 1.9055e-01f, -5.7441e-02f, 1.1146e-01f, +-1.0690e-02f, -1.7567e-01f, -1.2238e-01f, +-2.0879e-01f, -6.5278e-02f, -7.9327e-02f, +-1.6564e-01f, -1.3659e-01f, -2.6231e-01f, +-3.1916e-01f, -2.6553e-01f, -9.8647e-02f, +-1.0617e-01f, 1.2782e-01f, -2.1053e-02f, +-1.2329e-01f, 1.4952e-01f, -1.7466e-02f, +-1.6969e-01f, 3.6980e-02f, -6.7732e-02f, +-3.1220e-02f, 4.0615e-02f, -1.5251e-01f, +-2.0017e-01f, 2.2421e-01f, -2.5682e-02f, +-6.5873e-02f, 1.8346e-01f, 1.2982e-02f, + 1.4021e-06f, -1.6929e-05f, -8.4696e-05f, + 1.9580e-05f, 2.9943e-06f, 3.0084e-06f, + 2.0769e-04f, 1.4661e-05f, 2.9503e-06f, +-1.4485e-01f, 1.8841e-01f, -1.7954e-01f, + 2.1551e-01f, 2.2601e-01f, -8.6689e-03f, + 8.6926e-02f, -6.8989e-02f, -1.2683e-01f, +-8.7712e-02f, 6.3176e-02f, 1.1983e-01f, + 1.0790e-01f, 6.6418e-02f, 6.5849e-02f, + 1.2483e-01f, 1.2428e-01f, 4.4994e-02f, + 1.5139e-01f, -1.2116e-01f, -3.5497e-01f, +-6.1889e-02f, 3.4088e-01f, 1.3148e-01f, +-1.6478e-01f, 4.4477e-02f, -1.1979e-01f, + 3.8343e-02f, 1.7992e-01f, 3.6790e-01f, + 3.0426e-01f, 1.1235e-01f, 4.9815e-01f, + 2.6290e-01f, 1.9703e-01f, 1.5881e-01f, +-6.4678e-03f, 2.4401e-01f, 1.9266e-01f, +-1.4089e-01f, 1.2323e-01f, 4.4340e-02f, +-8.8856e-02f, 8.4036e-02f, -9.8488e-02f, +-1.7377e-03f, -1.7654e-03f, -1.7223e-03f, +-1.7651e-03f, -1.7919e-03f, -1.7491e-03f, +-1.7172e-03f, -1.7446e-03f, -1.7041e-03f, +-3.0384e-04f, -2.9297e-04f, -2.4838e-04f, +-3.2961e-04f, -3.1678e-04f, -2.7009e-04f, +-3.1665e-04f, -3.0492e-04f, -2.6122e-04f, + 3.7109e-40f, -3.7915e-40f, -5.2536e-40f, + 5.8286e-41f, -5.6108e-40f, 4.3331e-40f, +-3.0184e-42f, -4.8987e-40f, -5.1788e-40f, +-4.0457e-04f, -4.3257e-04f, -4.1616e-04f, +-4.2268e-04f, -4.5118e-04f, -4.3407e-04f, +-3.9446e-04f, -4.2199e-04f, -4.0650e-04f, +-1.1253e-16f, -1.1328e-14f, -2.0489e-14f, +-3.0346e-19f, -1.7189e-16f, -4.5141e-16f, +-2.4957e-30f, -1.8191e-23f, -3.5882e-22f, +-3.1610e-36f, -1.7544e-24f, -2.2187e-21f, +-4.2887e-19f, -1.5526e-15f, -1.5160e-14f, +-1.7750e-16f, -6.8066e-14f, -3.3764e-13f, +-6.9570e-24f, -5.1139e-23f, -2.9335e-23f, +-1.9091e-22f, -1.0323e-21f, -4.5931e-22f, +-2.0010e-22f, -9.3710e-22f, -3.5622e-22f, +-2.9470e-04f, -2.9081e-04f, -2.5958e-04f, +-3.2290e-04f, -3.1810e-04f, -2.8461e-04f, +-3.1795e-04f, -3.1356e-04f, -2.8121e-04f, + 6.1623e-02f, 1.7057e-01f, 8.0478e-02f, + 1.2624e-01f, 1.8468e-01f, 2.1901e-02f, + 7.6033e-02f, 1.3455e-01f, 8.4037e-02f, + 8.4434e-02f, -1.7069e-02f, -7.8318e-02f, + 4.9244e-02f, 4.4782e-02f, -6.9747e-02f, + 1.2915e-01f, 1.1453e-01f, -6.5243e-02f, +-5.0985e-03f, -5.1407e-03f, -5.1687e-03f, +-5.1185e-03f, -5.1511e-03f, -5.1712e-03f, +-5.0986e-03f, -5.1272e-03f, -5.1409e-03f, +-1.8186e-02f, 6.2680e-02f, 3.3235e-02f, + 1.3398e-02f, 1.6497e-01f, 4.3523e-02f, +-2.4101e-02f, 1.3316e-01f, 1.8373e-02f, +-6.2677e-04f, 6.5026e-03f, 2.5948e-02f, + 6.6542e-02f, 1.2352e-01f, 1.5155e-02f, +-8.6237e-02f, -2.0907e-02f, 1.0237e-02f, +-1.7807e-01f, -8.6196e-02f, -3.2408e-02f, +-8.1946e-03f, -1.3957e-02f, -1.6733e-01f, + 2.6269e-02f, 1.6817e-01f, 9.4029e-02f, + 
3.4005e-02f, -1.2833e-02f, -1.2038e-01f, +-4.8950e-02f, 3.9857e-02f, 1.4048e-02f, +-6.4758e-02f, 9.9603e-02f, 1.0748e-01f, +-1.0850e-02f, 9.8875e-02f, -4.4439e-02f, + 9.1219e-02f, 6.6400e-02f, -6.7693e-02f, + 5.3318e-02f, 1.1838e-02f, -1.5164e-01f, +-5.8568e-02f, 1.1249e-01f, -3.8286e-02f, +-7.1122e-02f, 9.5799e-02f, 3.8521e-02f, +-1.3846e-01f, 1.4167e-01f, -3.5500e-03f, +-1.0343e-01f, -3.3025e-02f, 3.7186e-02f, +-2.0769e-03f, 1.3558e-01f, -1.3009e-01f, + 1.0167e-02f, 1.5358e-02f, -9.8009e-02f, + 2.4123e-05f, -1.1800e-05f, -1.4180e-04f, + 3.5217e-05f, -6.3838e-06f, -1.2243e-04f, + 8.5525e-05f, 2.1599e-06f, -5.3290e-05f, +-1.4471e-01f, 2.0111e-02f, -1.2449e-01f, + 5.3368e-02f, 3.2918e-01f, 1.4034e-01f, +-1.1833e-01f, -1.9225e-02f, -1.2658e-01f, +-2.6966e-01f, 1.1751e-01f, 9.7072e-02f, +-1.9929e-01f, 9.7986e-02f, -5.1240e-02f, +-9.5073e-02f, -6.8070e-02f, -2.1318e-01f, + 9.5305e-02f, -4.0551e-02f, -1.0936e-01f, + 5.2687e-02f, 4.5340e-01f, 2.3531e-01f, +-1.3385e-02f, 1.5922e-01f, -1.8371e-01f, +-1.2203e-01f, -7.2567e-02f, -3.0000e-01f, +-3.4356e-02f, -1.3471e-01f, -9.0995e-02f, +-2.5230e-01f, -2.4846e-01f, -1.8529e-01f, +-1.6962e-01f, 1.0905e-01f, 1.1557e-01f, +-1.4405e-01f, 8.9191e-02f, 1.1715e-01f, +-1.3237e-01f, 5.2092e-02f, -1.2227e-01f +} +, +{ + 2.0013e-01f, 2.2105e-01f, 1.9196e-01f, + 6.8158e-02f, 1.7154e-01f, -8.6677e-02f, + 9.2652e-02f, 1.0789e-01f, 1.6745e-01f, +-2.9254e-01f, -7.6815e-02f, 5.8812e-02f, +-4.6466e-02f, 1.3941e-02f, 2.3353e-01f, +-1.5033e-01f, 7.5167e-02f, 1.4433e-01f, + 2.8008e-02f, 3.1625e-01f, 3.2877e-02f, +-5.8835e-02f, -1.7305e-01f, -6.1558e-02f, +-1.2227e-01f, 3.9931e-02f, 3.0300e-02f, + 2.3004e-01f, 4.1834e-02f, -5.7790e-02f, +-2.2861e-01f, 2.9314e-01f, 1.6884e-01f, +-2.8009e-02f, 4.7550e-02f, -4.4542e-02f, +-2.4674e-01f, -1.5483e-01f, 3.2653e-02f, +-2.1574e-01f, 3.1083e-01f, -1.4025e-03f, + 1.7354e-02f, 5.6417e-02f, 1.0844e-01f, +-4.2681e-40f, 4.5893e-42f, -7.4234e-40f, + 1.7665e-40f, 4.0151e-40f, 4.6269e-40f, + 2.5452e-40f, -7.0179e-40f, -1.2338e-40f, +-1.4957e-01f, -1.9087e-02f, 7.1170e-02f, +-1.4435e-01f, 8.9560e-02f, 1.3879e-01f, +-3.6992e-02f, 5.9822e-02f, 1.9241e-02f, +-2.4402e-03f, 1.5097e-01f, 6.3958e-02f, +-1.7630e-01f, 3.6009e-01f, -2.0383e-01f, +-8.5106e-03f, 4.0863e-03f, -2.7575e-02f, + 7.8942e-02f, -1.8640e-01f, -6.7715e-02f, + 7.2777e-02f, -1.3804e-01f, -7.0332e-02f, + 1.5185e-01f, -4.3530e-02f, 1.4502e-01f, +-3.2928e-02f, -3.0583e-02f, 9.2061e-02f, + 1.2493e-01f, 1.0400e-01f, 1.3780e-01f, + 1.4438e-01f, 8.2051e-02f, 1.6159e-02f, + 2.7478e-02f, 1.7768e-01f, 2.5945e-01f, +-3.4662e-01f, 2.0330e-03f, 8.8118e-02f, +-2.9628e-01f, -1.3212e-01f, -1.8145e-02f, +-1.9330e-01f, 3.9238e-02f, -4.6944e-02f, +-1.5668e-01f, -5.7104e-02f, 1.9558e-01f, + 6.5305e-02f, 5.9933e-02f, 7.7337e-02f, +-2.4906e-02f, -1.1235e-01f, 1.3822e-02f, +-3.9988e-02f, -9.1882e-03f, 1.9204e-02f, + 1.0504e-01f, 4.6820e-03f, -2.1836e-02f, +-2.6953e-40f, 2.5334e-40f, -1.3028e-40f, + 1.4110e-41f, 5.6841e-40f, 3.6368e-40f, +-1.1746e-41f, -7.0658e-41f, -3.9413e-40f, + 1.5025e-02f, 7.4419e-02f, 9.5652e-02f, + 5.0297e-02f, 6.6704e-02f, 5.7316e-02f, + 2.5102e-02f, 1.1985e-01f, 2.6043e-02f, + 3.3297e-02f, -7.7374e-02f, -1.1114e-01f, +-7.5586e-02f, -1.9338e-02f, -1.3739e-02f, + 4.5616e-02f, -6.4946e-02f, -6.9372e-02f, +-7.5874e-03f, -1.1141e-01f, -2.9135e-02f, +-6.9436e-03f, -1.4418e-02f, 1.6436e-03f, +-1.3051e-01f, -1.3324e-01f, -9.3934e-02f, + 1.2184e-01f, 1.9386e-01f, 1.7995e-01f, +-2.7452e-02f, 9.9736e-02f, 1.0020e-01f, +-6.3290e-02f, -2.1447e-02f, -1.7005e-01f, + 1.3857e-01f, 2.3338e-01f, 
2.5410e-01f, + 2.3002e-01f, 1.9551e-01f, 1.4452e-01f, + 4.7040e-01f, 2.2647e-01f, 1.5215e-01f, + 2.6927e-02f, -2.1304e-01f, -1.4762e-01f, +-5.6998e-02f, 2.9064e-01f, 1.8085e-01f, + 8.9393e-02f, -1.7463e-01f, -2.7095e-01f, + 3.8434e-02f, 1.7198e-01f, -1.8122e-02f, +-1.3857e-01f, 1.9418e-01f, 1.5019e-01f, +-5.6337e-02f, -5.3265e-01f, 3.2122e-01f, +-2.4484e-40f, -5.3707e-40f, 1.5854e-41f, + 5.1791e-40f, -4.1875e-41f, 5.6732e-40f, + 1.3048e-40f, 1.6452e-40f, -4.5028e-40f, +-3.0692e-02f, 1.8569e-01f, 2.0327e-01f, +-7.4756e-02f, -5.1765e-02f, 4.2475e-02f, +-9.0675e-02f, -3.0438e-01f, -3.5088e-01f, +-1.9129e-02f, -1.5663e-03f, 4.9895e-02f, +-1.9441e-02f, 9.3237e-02f, 1.2910e-01f, +-2.3919e-02f, -4.0539e-01f, 2.8167e-02f, + 2.0203e-01f, 3.3424e-02f, 1.7927e-02f, + 4.1923e-02f, -1.6967e-01f, 2.5656e-02f, +-1.5869e-01f, -1.8727e-01f, 2.7860e-03f, +-4.0276e-02f, -6.7792e-03f, 3.3699e-02f, +-6.7044e-03f, 1.7686e-02f, 2.9786e-02f, +-1.5623e-02f, 3.7904e-02f, 2.4737e-02f, +-1.2282e-01f, -3.6563e-02f, 4.1976e-02f, +-9.9622e-03f, 8.8981e-02f, 2.1364e-02f, +-8.5668e-02f, -1.6803e-01f, -4.4974e-02f, + 1.3164e-01f, 4.1294e-01f, 1.8897e-01f, + 2.1991e-01f, 1.6247e-02f, 1.1569e-01f, +-3.0142e-02f, 1.4069e-02f, 3.6646e-02f, +-2.6816e-02f, -3.9767e-02f, 1.4061e-01f, +-1.3603e-01f, -2.0649e-01f, 7.5837e-02f, +-1.6984e-02f, -8.3800e-03f, 2.3652e-04f, + 1.5049e-40f, 4.6504e-40f, 1.3625e-40f, +-7.5358e-40f, -3.4257e-40f, 9.9763e-41f, + 4.7243e-40f, 7.4890e-40f, -7.9440e-42f, +-5.9692e-02f, -2.8047e-02f, 2.3795e-02f, +-3.5284e-02f, 1.1448e-02f, 5.0302e-04f, +-3.5066e-02f, 4.6185e-02f, 1.2167e-02f, + 3.7583e-02f, -3.6598e-02f, 1.0206e-01f, +-9.6229e-02f, -1.5977e-01f, 4.9157e-02f, + 3.7293e-02f, 5.8766e-02f, 1.0448e-02f, + 1.1490e-01f, 1.4459e-01f, 8.6936e-02f, + 2.8609e-01f, -4.8108e-02f, 9.0023e-02f, + 6.7941e-02f, -5.7148e-03f, 1.0021e-01f, + 7.3816e-02f, 7.3794e-02f, 8.0970e-03f, + 2.8307e-02f, 3.6635e-03f, -1.1769e-01f, + 4.1374e-02f, 3.9933e-02f, -4.4292e-02f, + 5.9423e-02f, 1.9009e-01f, -2.3735e-01f, +-2.6670e-01f, 5.8789e-01f, -2.0048e-01f, +-3.7082e-01f, 1.8045e-01f, 5.4820e-02f, +-6.3567e-01f, 2.0098e-01f, 1.0653e-01f, +-2.5056e-01f, 6.5065e-01f, -4.0471e-01f, + 5.4715e-02f, 2.4375e-01f, -2.7402e-01f, + 1.5982e-01f, 1.0923e-01f, 2.1566e-01f, + 2.0239e-01f, -9.0221e-02f, -4.4606e-01f, + 1.0550e-01f, 5.4666e-02f, -2.7134e-01f, +-4.6424e-40f, 2.9137e-40f, 7.4968e-41f, + 1.2376e-41f, -5.6213e-40f, -6.3457e-40f, + 2.5404e-40f, 2.0013e-40f, 3.5611e-40f, + 5.5423e-02f, 3.9843e-02f, -1.7509e-01f, + 5.4480e-02f, 5.0331e-02f, -1.6793e-01f, + 6.6093e-02f, 3.0163e-02f, -8.2023e-02f, +-1.5490e-01f, 1.7457e-01f, 2.7832e-01f, + 1.1482e-01f, 2.5759e-01f, -2.4199e-01f, +-9.3891e-02f, 9.1921e-02f, -6.4480e-03f, + 1.9266e-01f, 5.2907e-02f, 7.0289e-02f, + 1.3582e-01f, 6.4246e-02f, 1.4989e-01f, + 6.2013e-03f, -6.8884e-02f, 6.8734e-02f, +-1.0483e-01f, -7.7134e-02f, -3.6204e-02f, + 1.7590e-02f, 5.0844e-02f, 1.4234e-01f, + 7.2913e-02f, 6.0726e-02f, 6.4414e-02f, +-8.5021e-02f, -1.0621e-03f, 5.5851e-02f, + 2.4666e-01f, 6.5652e-02f, -1.8180e-02f, + 1.5225e-01f, 1.2928e-01f, 3.1578e-03f, + 1.1468e-01f, 1.9544e-01f, 6.6637e-02f, + 6.3430e-02f, 2.0542e-01f, 7.0876e-02f, + 3.4779e-02f, 1.0037e-02f, -2.2134e-02f, +-6.9304e-02f, 1.1184e-01f, -3.7015e-02f, +-1.7634e-01f, 1.2475e-01f, 9.1947e-02f, +-6.0550e-02f, -1.3904e-01f, 7.5192e-02f, +-2.2871e-40f, 4.7367e-41f, -1.0711e-40f, +-2.8662e-40f, 4.0542e-41f, 3.3067e-40f, +-4.4395e-41f, -7.2684e-41f, 1.8695e-40f, +-1.6702e-01f, -2.6654e-01f, 8.7902e-03f, +-2.0108e-01f, -3.8093e-01f, 
-8.3700e-02f, +-7.5433e-02f, -2.0689e-01f, 2.7951e-02f, + 2.9938e-03f, 1.1378e-01f, 7.1598e-02f, +-1.6031e-01f, 1.3475e-01f, 1.5800e-01f, +-7.2019e-02f, -1.1663e-01f, 8.0692e-02f, + 1.0610e-01f, 1.1163e-02f, -1.4959e-01f, +-1.1576e-01f, -8.5645e-02f, 4.0414e-02f, + 5.6245e-02f, 1.7056e-01f, 2.5734e-01f, +-6.1086e-02f, -7.0851e-02f, 7.6851e-02f, +-2.7595e-02f, -6.0890e-02f, 4.7472e-02f, + 7.1059e-03f, 6.0942e-05f, 7.4915e-02f, + 1.9350e-01f, -1.8458e-02f, -2.3040e-02f, + 6.3477e-02f, 1.1923e-01f, 9.9319e-02f, + 6.4839e-02f, 2.7973e-01f, 1.2902e-01f, +-1.7829e-01f, 5.7083e-03f, -6.1680e-03f, +-1.1256e-01f, -2.7951e-02f, -2.1544e-01f, +-2.1614e-02f, -7.1468e-02f, -2.2054e-02f, +-8.7543e-02f, -1.2982e-01f, 1.9386e-01f, +-5.7157e-03f, -1.0108e-01f, 1.4467e-01f, +-6.5742e-02f, -7.2054e-02f, 1.7924e-01f, + 7.5418e-40f, 6.3043e-40f, 4.9815e-40f, +-1.0952e-40f, 3.0327e-40f, -2.3848e-40f, + 4.1302e-40f, 2.0150e-40f, -1.6509e-40f, +-1.3985e-02f, -1.0550e-01f, 5.8772e-02f, +-1.7108e-02f, -7.3644e-02f, 3.3014e-02f, +-1.8224e-03f, 2.8931e-03f, 9.2762e-02f, + 4.1531e-02f, -1.5139e-01f, -1.7773e-01f, + 9.6548e-02f, -1.1914e-01f, -4.6536e-02f, + 8.6754e-02f, -4.0057e-03f, 1.8983e-01f, + 1.6545e-01f, -4.7311e-02f, -7.2455e-03f, + 3.7567e-01f, 1.8883e-01f, -7.4325e-02f, +-5.8252e-02f, -1.3811e-02f, -7.0470e-02f, +-3.2943e-02f, -7.0770e-02f, -1.4700e-01f, + 1.7043e-02f, 9.4331e-02f, 4.2857e-03f, + 4.1247e-03f, 1.6690e-01f, 4.2146e-02f, + 1.1420e-01f, -7.4456e-02f, -3.8763e-02f, + 1.6807e-01f, 9.3636e-03f, -1.1796e-01f, + 1.7703e-01f, 1.1386e-03f, -6.8707e-02f, + 1.0259e-01f, -1.8918e-02f, 6.5902e-03f, + 1.2421e-02f, -7.8960e-02f, 2.1766e-02f, + 1.3062e-01f, 4.6001e-02f, 2.4199e-01f, +-1.2955e-02f, -1.9329e-01f, 5.2074e-03f, + 5.9446e-02f, 1.8832e-01f, 2.2094e-01f, +-1.0954e-01f, -8.1867e-02f, -4.3324e-02f, +-3.9596e-41f, 2.8677e-40f, -6.5843e-40f, + 4.2812e-41f, -3.5323e-40f, 4.8298e-40f, + 7.6351e-40f, -2.4759e-40f, 7.3030e-40f, +-1.1284e-01f, -8.4171e-02f, -1.5935e-01f, +-3.2299e-02f, 1.5427e-01f, 8.9029e-02f, +-3.8815e-02f, 1.3098e-01f, -4.3065e-02f, +-2.5276e-01f, -1.7018e-01f, 9.7901e-02f, + 1.4218e-01f, 3.1236e-01f, 2.9636e-01f, +-2.3613e-02f, -5.5258e-02f, -2.0550e-01f +} +, +{ + 0.0333f, 0.1145f, -0.0922f, + 0.1185f, 0.4533f, -0.2015f, +-0.0774f, 0.1759f, -0.0496f, + 0.0954f, -0.0499f, 0.0824f, + 0.1059f, 0.0173f, -0.0586f, +-0.0666f, -0.0287f, -0.0652f, +-0.0558f, -0.1362f, 0.0015f, + 0.1277f, 0.1020f, -0.1369f, + 0.0020f, -0.0103f, -0.0804f, + 0.0507f, 0.1404f, -0.0241f, + 0.0520f, 0.1239f, 0.0633f, +-0.0268f, 0.0335f, 0.0883f, +-0.0549f, -0.1022f, -0.0515f, +-0.0163f, -0.1167f, -0.0442f, + 0.0858f, -0.0804f, -0.0014f, + 0.0354f, -0.0666f, -0.2105f, +-0.0950f, 0.1578f, -0.0920f, +-0.1303f, 0.0299f, -0.0195f, +-0.0281f, -0.1993f, -0.0154f, + 0.0796f, 0.0503f, 0.0954f, + 0.0540f, 0.0212f, 0.0389f, +-0.1387f, 0.1091f, -0.1212f, + 0.1556f, 0.3573f, 0.0976f, +-0.0587f, -0.2070f, 0.2067f, + 0.0138f, 0.0051f, -0.1008f, + 0.2877f, 0.1079f, -0.0681f, + 0.0953f, -0.0739f, -0.2349f, + 0.1482f, 0.0657f, 0.0480f, + 0.1590f, -0.0009f, 0.1402f, + 0.0700f, 0.0435f, 0.1190f, + 0.0957f, 0.0117f, -0.1010f, + 0.1790f, -0.0200f, -0.0765f, + 0.0797f, 0.1455f, -0.0340f, + 0.0008f, -0.0267f, 0.0089f, + 0.0644f, 0.0647f, 0.0397f, + 0.0463f, -0.0116f, -0.0771f, + 0.2237f, 0.0324f, 0.0192f, +-0.0082f, -0.0345f, 0.0294f, + 0.0719f, -0.0185f, 0.1008f, +-0.0307f, 0.0134f, -0.0747f, + 0.0776f, -0.1485f, 0.0135f, + 0.0965f, -0.0665f, -0.1263f, +-0.0101f, -0.0097f, -0.0144f, +-0.0022f, -0.0083f, 0.0277f, + 0.0136f, -0.0076f, 
0.0314f, +-0.0008f, 0.0722f, -0.0704f, + 0.0053f, 0.0767f, 0.0368f, +-0.0189f, -0.1354f, 0.0231f, +-0.1416f, 0.1945f, -0.1756f, + 0.2058f, 0.0401f, -0.1348f, +-0.0945f, -0.2530f, -0.3082f, +-0.0096f, 0.0871f, 0.0699f, +-0.0092f, 0.0423f, 0.0995f, +-0.0914f, -0.0570f, -0.0718f, +-0.0739f, -0.2749f, -0.2320f, + 0.1488f, -0.2698f, -0.1977f, + 0.1445f, -0.1655f, -0.0758f, + 0.2035f, -0.0138f, 0.0332f, + 0.0282f, -0.2247f, -0.0945f, +-0.0614f, -0.2484f, -0.0595f, +-0.1174f, -0.1252f, 0.1969f, +-0.1101f, -0.2950f, -0.2164f, +-0.0348f, -0.0891f, 0.1250f, + 0.0195f, 0.0050f, 0.0300f, +-0.0508f, -0.0316f, -0.0194f, + 0.0199f, 0.0345f, 0.0444f, +-0.0022f, -0.0529f, 0.1604f, + 0.0756f, -0.2015f, -0.2117f, +-0.0837f, -0.1270f, 0.1330f, + 0.0286f, 0.0952f, 0.1082f, + 0.0724f, -0.0446f, -0.1156f, + 0.0545f, 0.0444f, -0.0291f, + 0.0759f, 0.1110f, 0.0944f, + 0.1615f, 0.4302f, -0.1060f, + 0.0418f, -0.0281f, -0.1378f, +-0.0757f, -0.0527f, -0.1578f, + 0.0123f, -0.0427f, 0.1504f, + 0.0694f, 0.0690f, 0.0203f, + 0.2132f, -0.3449f, 0.0936f, + 0.2491f, 0.0279f, -0.0884f, +-0.0447f, 0.1589f, -0.0054f, +-0.0246f, 0.1247f, 0.0403f, + 0.0513f, -0.0541f, -0.1141f, + 0.0712f, -0.1174f, -0.0051f, + 0.2304f, 0.2431f, -0.0517f, +-0.1548f, -0.0401f, 0.2032f, +-0.0087f, -0.1676f, -0.0600f, + 0.1094f, -0.0329f, 0.0530f, +-0.0580f, 0.1499f, -0.0806f, +-0.0086f, -0.1400f, -0.0636f, + 0.0708f, -0.1003f, -0.1113f, +-0.0732f, -0.1199f, 0.0060f, +-0.0534f, -0.0011f, 0.0965f, +-0.0268f, 0.0116f, -0.1161f, + 0.0787f, 0.3925f, -0.0819f, +-0.0041f, -0.0892f, -0.2063f, +-0.1296f, 0.0924f, -0.0079f, + 0.5625f, 0.4013f, 0.1645f, +-0.0137f, -0.1935f, 0.2714f, + 0.0980f, 0.0016f, -0.1461f, + 0.1576f, 0.0305f, -0.1450f, + 0.1503f, -0.0303f, -0.1403f, + 0.0262f, -0.0077f, 0.0459f, + 0.2718f, 0.0754f, 0.2404f, + 0.1381f, -0.1499f, 0.0016f, + 0.1454f, -0.1278f, -0.0085f, + 0.1674f, -0.0834f, 0.1993f, + 0.0874f, -0.0598f, -0.0188f, + 0.2003f, 0.3296f, 0.0153f, +-0.0154f, 0.5550f, -0.0945f, + 0.0489f, 0.0415f, -0.0940f, + 0.0164f, 0.0791f, 0.1077f, +-0.0893f, 0.1231f, 0.0473f, +-0.0319f, 0.1444f, 0.1690f, +-0.0518f, -0.1404f, -0.1778f, +-0.0170f, 0.1395f, -0.0234f, + 0.0128f, -0.0112f, -0.0472f, + 0.1039f, 0.1982f, -0.0272f, + 0.0282f, -0.1199f, -0.2622f, +-0.0449f, 0.0239f, -0.1030f, +-0.0840f, -0.1044f, -0.0646f, + 0.0588f, 0.1937f, -0.2494f, + 0.0180f, 0.0747f, 0.1530f, + 0.0500f, 0.1756f, 0.0491f, +-0.1113f, -0.0079f, 0.0854f, +-0.1493f, -0.0559f, -0.0373f, + 0.1972f, -0.3158f, -0.0500f, + 0.1932f, 0.3177f, -0.0018f, +-0.0516f, -0.1144f, 0.0686f, + 0.0175f, 0.0598f, 0.0345f, +-0.0667f, -0.1078f, 0.0384f, + 0.0897f, 0.2198f, -0.0531f, +-0.2596f, -0.1997f, 0.0195f, + 0.0332f, 0.4098f, 0.1381f, + 0.1985f, -0.0669f, -0.1275f, +-0.0751f, -0.2388f, -0.0672f, + 0.0090f, 0.0891f, -0.0362f, + 0.1392f, -0.0518f, 0.2039f, + 0.2079f, -0.1202f, 0.0707f, + 0.0498f, -0.1237f, -0.0665f, +-0.0398f, -0.1557f, -0.0928f, + 0.0505f, 0.1220f, 0.0352f, +-0.0674f, -0.1159f, 0.0724f, +-0.0331f, -0.1751f, 0.0766f, + 0.0992f, -0.0763f, 0.0090f, +-0.1223f, 0.2621f, -0.2029f, + 0.0509f, -0.0279f, -0.1061f, + 0.0598f, 0.0353f, -0.1610f, + 0.0165f, 0.0835f, 0.0704f, +-0.0079f, -0.0982f, 0.0187f, + 0.2331f, -0.1929f, 0.0684f, +-0.0507f, 0.1476f, -0.0886f, +-0.0275f, 0.1658f, 0.0697f, +-0.1123f, -0.0069f, -0.0851f, +-0.0377f, -0.0917f, -0.0629f, +-0.0420f, 0.0506f, 0.1111f, + 0.1086f, 0.1351f, -0.0851f, + 0.0466f, 0.2750f, 0.0185f, +-0.0208f, 0.2090f, 0.0271f, + 0.0217f, -0.0548f, 0.0078f, +-0.0609f, 0.1029f, -0.1641f, + 0.1392f, 0.0115f, 0.0317f, +-0.0570f, 0.1060f, 
0.1814f, +-0.2015f, -0.1301f, 0.1082f, + 0.2452f, -0.1815f, -0.0046f, + 0.0103f, -0.0466f, -0.0895f, + 0.0158f, -0.0594f, -0.1386f, +-0.0073f, -0.0719f, -0.0716f, + 0.1308f, -0.0206f, 0.0511f, +-0.0437f, -0.0763f, 0.0287f, + 0.0493f, -0.1239f, 0.0219f, +-0.0041f, 0.0373f, 0.0262f, + 0.0078f, -0.0249f, -0.0284f, + 0.0598f, -0.0205f, -0.0276f, + 0.0115f, -0.1778f, -0.0395f, + 0.1673f, -0.0036f, 0.2334f, + 0.0706f, -0.0694f, 0.0177f, + 0.1123f, -0.0043f, 0.0716f, +-0.0894f, -0.1609f, 0.0334f, +-0.0046f, -0.2006f, -0.0977f, +-0.0127f, 0.1198f, -0.0339f, +-0.0283f, 0.1354f, 0.1637f, +-0.1696f, 0.0187f, -0.2621f, + 0.0496f, 0.2834f, 0.0423f, + 0.1126f, 0.3962f, 0.1660f, +-0.0750f, 0.1955f, 0.0590f, +-0.1088f, -0.1146f, -0.1219f, + 0.1360f, 0.1524f, 0.0498f, +-0.1151f, 0.0219f, -0.0063f, +-0.0821f, 0.0247f, -0.1065f, + 0.1153f, 0.2085f, 0.0618f, +-0.0383f, 0.0527f, -0.2067f +} +, +{ + 1.8014e-01f, 2.1908e-01f, -2.1088e-03f, + 1.7345e-01f, 2.7654e-01f, 1.3607e-02f, + 1.1363e-01f, 9.9105e-02f, -6.5730e-02f, +-3.5679e-02f, 9.6072e-03f, 4.0721e-02f, +-1.8771e-02f, -2.3484e-04f, -1.0230e-02f, + 1.6965e-02f, -1.3032e-02f, -6.3906e-02f, +-4.5686e-02f, -3.6733e-02f, -4.8873e-02f, + 4.0752e-02f, 2.1615e-02f, -1.4822e-02f, + 1.1689e-01f, 3.0153e-02f, -5.0163e-04f, +-7.0394e-03f, -1.2387e-01f, -8.9243e-02f, +-1.8312e-01f, -1.3868e-01f, -6.2618e-02f, +-8.1627e-02f, -2.0480e-01f, -3.0740e-01f, + 4.4296e-02f, 3.8572e-02f, 4.3754e-02f, + 1.7538e-01f, 5.3284e-02f, -7.5663e-03f, + 1.9670e-01f, -1.2397e-01f, -1.6266e-01f, + 1.4575e-01f, -5.7771e-02f, 2.7619e-02f, + 2.2757e-02f, -4.8910e-01f, -2.6201e-01f, + 3.6513e-02f, -2.0704e-01f, -1.3225e-01f, +-6.7533e-02f, 1.1289e-02f, 7.1316e-02f, +-7.6847e-02f, 6.8128e-02f, 7.4717e-02f, + 1.1269e-01f, 2.9978e-02f, 3.2132e-02f, +-5.4557e-02f, -4.4599e-02f, 4.1835e-02f, + 5.7964e-02f, -2.1246e-03f, 1.5007e-01f, + 1.8432e-01f, 1.1463e-01f, 2.2691e-01f, + 9.6166e-02f, 4.7887e-02f, -3.8399e-02f, + 5.8153e-02f, -2.0255e-02f, -1.1362e-01f, + 2.6402e-02f, 2.5562e-02f, 1.9096e-02f, + 1.1588e-01f, 1.4540e-01f, 1.1948e-01f, + 1.0360e-01f, 5.9083e-02f, 1.9263e-01f, + 1.6953e-01f, 2.7390e-02f, 9.7883e-02f, + 1.5059e-01f, 6.7593e-02f, -4.5843e-03f, + 8.7031e-02f, -2.0926e-03f, -6.3056e-02f, +-6.6960e-02f, -5.2056e-02f, -7.3570e-02f, + 1.4361e-02f, 1.1059e-01f, -4.9720e-02f, + 4.4270e-02f, 3.9995e-02f, 4.3101e-03f, +-1.1042e-01f, 4.5028e-02f, -8.9124e-02f, +-1.2906e-01f, -7.6972e-02f, -6.5449e-03f, +-1.9269e-01f, 2.8349e-01f, 1.1573e-01f, +-1.7983e-01f, 9.7615e-02f, 9.4003e-03f, +-4.7802e-02f, -1.5889e-01f, -1.2693e-01f, + 7.4717e-02f, 2.8655e-01f, -7.2637e-02f, + 1.5837e-02f, 8.7125e-02f, -1.2198e-01f, +-1.7754e-02f, -5.6443e-02f, -9.8661e-03f, + 6.3040e-02f, 2.0249e-02f, -3.5368e-02f, + 9.7756e-03f, 2.6760e-02f, -5.5172e-02f, +-1.0406e-02f, 4.8313e-02f, 2.4717e-02f, +-5.2851e-02f, 6.8496e-02f, -2.5933e-02f, + 4.5932e-02f, 5.9892e-02f, 1.9200e-02f, +-5.1316e-40f, -5.1811e-40f, -1.5144e-40f, +-6.7758e-38f, -5.4608e-40f, -3.9680e-40f, +-1.9155e-39f, 2.0423e-41f, 1.5256e-41f, +-2.5559e-08f, -3.2461e-08f, -2.6821e-08f, +-3.6885e-08f, -4.6896e-08f, -3.9086e-08f, +-3.4305e-08f, -4.4160e-08f, -3.7187e-08f, +-3.7416e-40f, 3.6550e-40f, 5.0727e-40f, +-1.6722e-40f, 3.9228e-40f, 5.4548e-40f, +-5.7512e-40f, -2.8156e-40f, 9.4571e-41f, +-4.7040e-40f, -1.6974e-40f, 6.3849e-40f, +-3.7322e-40f, 2.6014e-40f, 2.3080e-40f, +-2.8395e-40f, -3.7116e-40f, 4.4393e-40f, + 1.1597e-40f, 4.3291e-40f, 3.8219e-40f, + 3.3393e-40f, 3.1747e-40f, -1.8400e-36f, +-5.5215e-40f, 1.7648e-40f, -1.6540e-35f, +-3.0953e-40f, 
5.3063e-40f, -1.6454e-40f, + 2.1341e-40f, 2.0790e-40f, -3.0226e-40f, +-2.6807e-40f, -1.6601e-40f, 5.1829e-40f, +-1.8897e-40f, -4.5956e-41f, 5.3784e-40f, +-2.5661e-40f, -2.1726e-40f, 1.2010e-40f, + 1.8263e-41f, 1.1214e-40f, -3.7693e-40f, +-4.2596e-40f, 1.8854e-40f, 5.5010e-40f, +-6.6262e-40f, -4.8808e-40f, 3.3123e-40f, + 5.9379e-41f, 2.3249e-40f, 4.4504e-40f, +-8.4836e-04f, -8.4397e-04f, -5.8640e-04f, +-8.3506e-04f, -8.0192e-04f, -5.3901e-04f, +-8.3539e-04f, -7.8069e-04f, -4.8720e-04f, +-3.4706e-04f, -4.4640e-04f, -5.2353e-04f, +-4.4518e-04f, -5.3374e-04f, -5.2734e-04f, +-5.8780e-04f, -5.8730e-04f, -5.4362e-04f, +-5.2452e-04f, -5.4578e-04f, -5.6266e-04f, +-4.2387e-04f, -4.4643e-04f, -4.8936e-04f, +-3.5880e-04f, -3.7886e-04f, -4.1998e-04f, +-2.4479e-04f, -4.0736e-04f, -3.1189e-04f, +-3.4922e-04f, -4.0173e-04f, -2.5042e-04f, +-5.7091e-04f, -5.2665e-04f, -2.3293e-04f, +-2.8505e-04f, 9.7283e-05f, 3.1209e-04f, +-2.7463e-04f, 1.8704e-04f, 4.4351e-04f, +-9.1436e-05f, 3.2602e-04f, 5.7573e-04f, +-4.0112e-04f, -4.2566e-04f, -2.4300e-04f, +-9.9362e-05f, -6.5499e-05f, 3.2872e-05f, + 1.1584e-04f, 2.3417e-04f, 3.4427e-04f, +-7.5767e-05f, 3.9768e-06f, 6.2201e-05f, + 2.3151e-05f, 2.5595e-04f, 3.4038e-04f, +-1.3871e-05f, 3.0295e-04f, 4.4170e-04f, +-1.7802e-04f, -4.5376e-04f, -5.1847e-04f, +-5.0687e-04f, -5.5837e-04f, -2.5917e-04f, +-5.3992e-04f, -7.1375e-04f, -4.8728e-04f, +-1.7543e-01f, -3.4151e-01f, -3.2619e-02f, +-1.9701e-02f, -1.5494e-01f, -1.6534e-01f, + 3.5632e-02f, -1.0897e-01f, -3.8379e-02f, +-6.1420e-02f, -1.0735e-01f, 1.4730e-01f, + 7.4386e-02f, -1.0487e-01f, 7.9646e-02f, + 1.7130e-02f, 4.4391e-02f, -5.1959e-03f, + 4.5682e-02f, -1.1543e-01f, 9.4035e-03f, +-3.4376e-01f, -1.1961e-01f, 1.0099e-01f, + 1.1335e-01f, 7.5840e-02f, 1.0675e-01f, + 4.9539e-02f, 8.7406e-02f, 4.4951e-02f, + 1.8111e-01f, 2.6406e-01f, -1.5924e-02f, +-1.1464e-01f, 8.4579e-04f, -6.6811e-02f, +-8.9635e-03f, 1.8236e-03f, 3.6561e-02f, +-7.0281e-02f, 2.9717e-01f, 3.1836e-02f, +-1.3647e-01f, -6.5627e-02f, 9.3063e-02f, +-2.1851e-01f, -6.0226e-02f, -1.0326e-01f, + 5.3441e-02f, 1.9103e-01f, -5.7999e-02f, +-3.3512e-02f, 1.5496e-01f, -1.1111e-01f, + 2.3256e-03f, -1.5004e-01f, -9.1248e-02f, +-9.7706e-02f, 1.9549e-01f, -1.5403e-01f, +-1.5327e-01f, 8.3335e-02f, 5.6111e-03f, +-1.5707e-01f, 8.0277e-03f, -7.3955e-02f, +-1.4111e-01f, -1.3548e-01f, -1.0563e-01f, + 2.3054e-01f, -2.1822e-02f, -6.6938e-03f, +-1.0259e-01f, 4.3577e-02f, -1.7630e-01f, + 1.6484e-01f, 4.2413e-01f, 6.9475e-02f, +-2.4705e-01f, 2.5757e-01f, -9.5611e-02f, + 1.0236e-01f, -3.4820e-02f, -6.8818e-03f, +-1.1434e-01f, -3.1800e-01f, 2.1337e-02f, +-1.9939e-01f, -2.6532e-01f, 7.3361e-02f, + 6.5939e-02f, 9.5812e-02f, -7.0156e-02f, +-1.6249e-02f, -1.5927e-02f, -1.1189e-01f, +-9.3936e-03f, -1.0933e-01f, -2.9399e-02f, +-2.8752e-02f, -4.5613e-02f, -1.2718e-02f, + 3.8781e-01f, 2.6776e-01f, -1.0373e-02f, +-2.3927e-02f, -6.4398e-02f, 9.9117e-02f, +-6.0732e-02f, -5.5917e-03f, 5.1716e-02f, +-1.4168e-01f, 1.7661e-01f, -5.5893e-02f, +-3.0419e-01f, -3.5537e-01f, 2.1978e-01f, +-1.8610e-01f, -5.7743e-03f, 3.2649e-02f, + 1.9975e-01f, 1.6508e-01f, 1.3808e-02f, + 1.0733e-01f, 1.4722e-01f, 5.8671e-02f, + 6.4940e-02f, 1.6114e-01f, 3.9697e-02f, + 1.1530e-01f, 2.4021e-01f, -2.1669e-01f, + 6.0220e-02f, 2.0257e-01f, -1.5227e-01f, +-6.1096e-02f, 6.6511e-02f, -1.3858e-01f, +-6.5275e-02f, 1.0891e-01f, 8.2048e-02f, +-6.7907e-02f, 2.2863e-02f, -1.0322e-01f, + 1.6542e-01f, -1.4436e-01f, 6.4125e-02f, +-1.0378e-01f, -3.2346e-01f, -1.5123e-02f, + 3.8758e-03f, 1.1006e-01f, -4.4325e-02f, +-1.0102e-01f, -3.7699e-02f, 
9.2472e-02f, +-6.8972e-02f, -1.2308e-02f, 1.6478e-01f, + 3.4351e-02f, -1.7461e-02f, 1.0301e-01f, +-2.7125e-01f, -5.6730e-02f, -2.5989e-01f, +-3.0163e-01f, -1.4826e-01f, -3.4955e-01f, +-1.6259e-01f, -1.6708e-01f, -2.7964e-01f, +-6.7134e-02f, -2.2385e-01f, 2.1776e-01f, +-1.1351e-02f, -3.7861e-01f, 1.8687e-01f, + 4.0551e-02f, 8.1943e-02f, 1.0866e-01f, + 1.0273e-01f, 1.1844e-01f, -1.1852e-01f, + 2.6758e-02f, -8.5806e-02f, 5.9444e-02f, +-5.1627e-02f, 7.1636e-02f, 2.2841e-01f, +-3.7242e-03f, 2.9723e-01f, 1.1918e-01f, + 8.4994e-02f, -3.5747e-01f, 3.6148e-02f, + 9.9705e-02f, -1.3736e-01f, -6.0080e-02f, + 1.2370e-01f, 5.0668e-02f, -6.0246e-02f, + 6.0562e-02f, -3.5068e-01f, -3.2645e-01f, + 9.1020e-04f, 6.6203e-02f, -1.0770e-01f, + 1.9434e-02f, 3.0018e-01f, 2.8018e-01f, + 1.4021e-01f, 2.7481e-01f, 2.2868e-01f, + 4.8540e-02f, 1.7719e-01f, -4.5834e-02f, +-9.6349e-02f, -2.3008e-02f, -1.4497e-01f, + 4.3053e-02f, -1.0161e-01f, 2.8750e-02f, +-1.2594e-01f, -1.0388e-02f, -4.3966e-02f, + 7.5993e-02f, -7.1609e-02f, 1.4624e-02f, + 4.1110e-02f, 7.1258e-02f, -2.9109e-02f, +-5.8698e-03f, 1.2389e-01f, 4.7648e-02f, +-6.1585e-04f, -4.4556e-02f, -2.3373e-02f, +-4.4883e-02f, -7.7722e-02f, -7.3635e-02f, +-2.7750e-02f, -1.5117e-03f, -8.7368e-02f, + 2.5113e-02f, 7.7490e-02f, 2.9024e-02f, + 1.5426e-01f, 2.5472e-01f, 4.8057e-02f, +-1.1969e-01f, -1.1487e-01f, -1.1802e-01f, +-4.7392e-02f, -4.2226e-02f, 3.1968e-02f, +-2.6717e-01f, -5.0206e-02f, 8.1946e-04f, +-4.0426e-02f, 1.4373e-01f, -3.3121e-03f, +-4.5292e-02f, -2.4538e-02f, 1.0377e-01f, +-1.7780e-02f, 2.0058e-01f, -2.4343e-02f, +-1.1714e-02f, 1.5984e-01f, -1.2638e-01f, + 6.4655e-02f, 3.7703e-02f, 3.7970e-02f, + 9.1864e-03f, 1.1468e-01f, -6.2760e-04f, +-1.4812e-01f, 6.5670e-03f, 1.0765e-01f, + 1.5023e-01f, -7.0594e-02f, -1.3924e-01f, + 3.6016e-02f, -3.9078e-02f, -3.8950e-02f, + 1.8735e-02f, -1.5573e-01f, -1.2456e-01f +} +, +{ + 4.8634e-02f, -1.3617e-01f, 6.1231e-02f, +-7.0235e-02f, -6.4110e-01f, 1.5985e-01f, + 8.6151e-02f, 1.1847e-01f, 1.3819e-01f, +-3.6017e-04f, -3.2273e-02f, -8.5485e-02f, +-7.0804e-03f, 2.1751e-01f, 7.2575e-03f, +-8.3606e-02f, -1.4885e-01f, -1.2702e-01f, + 4.0848e-41f, 8.0934e-40f, -1.8889e-40f, +-3.9103e-40f, -7.4709e-40f, 3.8377e-40f, +-2.4159e-40f, -4.7610e-40f, 7.7359e-40f, +-8.6217e-05f, -5.9763e-05f, -4.0558e-05f, +-7.4966e-05f, -4.7074e-05f, -3.1656e-05f, +-9.8390e-05f, -6.6833e-05f, -4.7669e-05f, + 3.5375e-02f, 2.8660e-02f, 4.1277e-02f, + 1.6289e-01f, -3.2199e-01f, -1.7845e-02f, + 2.4659e-01f, -3.9618e-02f, 4.1065e-03f, + 2.7267e-02f, 8.6819e-02f, 9.5070e-02f, +-7.2700e-02f, -2.8826e-01f, 1.1750e-03f, + 2.5259e-02f, 2.4681e-03f, 6.4737e-02f, + 7.3023e-03f, 2.9631e-02f, 1.0820e-02f, +-2.1400e-02f, 5.4244e-01f, 1.5639e-01f, +-1.7561e-01f, 4.8947e-01f, -8.8305e-02f, + 6.5073e-02f, 3.4922e-01f, 1.3483e-01f, + 1.4506e-01f, -2.5472e-01f, -7.2894e-02f, + 4.5945e-02f, 1.4040e-01f, 1.2148e-01f, +-2.6932e-01f, -1.1518e-01f, -9.3158e-03f, +-2.3961e-01f, -1.2479e-01f, -8.9796e-02f, + 1.8688e-02f, -4.9267e-02f, 7.7189e-02f, +-7.3691e-02f, 7.8186e-03f, 1.3761e-02f, +-1.5689e-01f, 3.1138e-02f, 3.9231e-02f, +-4.3607e-03f, 2.0813e-01f, 5.5635e-02f, +-6.7000e-41f, 9.8995e-41f, 3.0043e-40f, + 6.7190e-40f, 4.0827e-40f, 7.6057e-40f, + 4.2208e-40f, 8.1141e-40f, -3.3569e-40f, + 1.0179e-03f, 5.1543e-04f, 3.8076e-04f, + 7.3507e-04f, 4.5432e-04f, 3.7410e-04f, + 9.3014e-04f, 6.7365e-04f, 6.0051e-04f, +-5.1998e-02f, 6.5768e-02f, 3.1603e-02f, +-3.0198e-02f, -3.1692e-02f, -6.9299e-02f, + 1.7672e-02f, 2.3766e-01f, 5.7877e-02f, +-5.7944e-02f, 1.2624e-01f, -1.4396e-01f, 
+-4.1542e-02f, 6.5110e-01f, 1.0942e-01f, +-1.3133e-01f, 5.0538e-02f, -2.7371e-02f, +-3.7515e-02f, 2.8703e-02f, 1.2382e-03f, + 3.8542e-01f, -2.2754e-02f, 3.4459e-02f, + 3.0545e-01f, -5.3817e-01f, -2.1389e-03f, + 1.3888e-02f, -2.2775e-01f, -6.3692e-02f, +-1.8430e-01f, 5.8452e-02f, 4.5764e-02f, +-8.5045e-02f, -1.7060e-01f, -1.8565e-02f, +-2.0384e-02f, -3.3018e-02f, -5.1135e-02f, +-4.5789e-02f, -1.8105e-01f, 3.5419e-02f, +-5.0081e-02f, 8.7719e-02f, 1.0373e-01f, +-1.0033e-02f, 7.0530e-02f, -7.8012e-03f, + 8.4042e-02f, 1.1982e-01f, -9.6046e-02f, +-6.4009e-02f, -1.0711e-01f, -1.3523e-01f, + 1.8868e-41f, -7.0039e-40f, -7.2568e-40f, + 1.7408e-40f, -7.8143e-40f, -6.8130e-40f, +-6.3142e-40f, -6.2560e-40f, -7.4238e-40f, + 2.6297e-04f, 7.0014e-05f, -4.0981e-04f, + 2.6263e-04f, 4.2811e-05f, -4.9950e-04f, + 3.9795e-04f, 1.2615e-04f, -4.7660e-04f, + 7.5933e-02f, 2.6295e-02f, 2.7984e-02f, +-5.5914e-03f, -8.7981e-02f, -9.2618e-02f, + 4.2725e-02f, -3.1210e-01f, 1.3412e-01f, + 5.2683e-02f, 3.9891e-01f, 2.9150e-02f, +-6.6090e-02f, 2.9455e-01f, -1.9710e-01f, + 1.4546e-02f, -2.5572e-02f, 8.1125e-02f, + 1.2271e-01f, 1.6097e-01f, 4.5644e-02f, + 3.6101e-02f, -1.7174e-02f, 6.6110e-02f, + 1.5078e-01f, 4.5180e-01f, 7.7154e-02f, +-5.9725e-02f, 1.0185e-01f, 1.1363e-03f, + 6.7791e-02f, 1.7696e-02f, 5.2638e-02f, + 3.3051e-02f, -8.4049e-02f, 1.4380e-01f, + 1.8744e-02f, -2.0940e-01f, -2.1424e-01f, +-2.1329e-01f, -1.3154e-01f, -3.2572e-01f, + 1.1292e-01f, 1.2361e-02f, -1.5506e-01f, +-1.0362e-02f, 1.9955e-02f, 4.2639e-02f, +-2.1952e-02f, -2.4682e-02f, -2.4453e-02f, +-2.5606e-02f, -3.3580e-02f, -3.6340e-02f, +-5.0830e-40f, 6.3797e-40f, -5.2775e-40f, +-7.7988e-40f, -7.4579e-40f, -5.1901e-40f, +-3.8275e-41f, -5.7607e-40f, -1.3656e-40f, + 2.7164e-04f, 5.9977e-04f, 8.6886e-04f, + 3.0116e-04f, 7.0106e-04f, 1.0248e-03f, + 2.9177e-04f, 6.4748e-04f, 9.4825e-04f, + 6.6310e-02f, 1.5240e-02f, -5.3044e-02f, + 1.2545e-01f, 5.0582e-02f, 2.7358e-02f, + 1.9338e-01f, 1.1377e-01f, 4.6110e-02f, +-3.1997e-02f, 1.5171e-02f, -4.9372e-02f, + 5.4615e-04f, 1.7262e-01f, -2.2081e-01f, + 8.4871e-02f, 1.7824e-02f, -3.6429e-02f, + 4.2821e-02f, -1.0055e-01f, 4.8927e-02f, + 1.2524e-01f, 5.8859e-02f, -2.0980e-02f, + 2.2897e-01f, 1.7594e-01f, 3.4239e-02f, + 1.0915e-01f, 1.2088e-01f, 1.0151e-01f, + 6.8449e-03f, -1.5546e-01f, 1.2024e-01f, + 4.9036e-02f, -1.2245e-01f, 4.6713e-02f, + 7.5083e-03f, -4.8084e-02f, 9.7731e-03f, + 4.8779e-02f, 3.1848e-02f, -9.3517e-02f, + 6.4595e-02f, 3.9337e-02f, -7.2343e-02f, + 3.9519e-02f, 4.1867e-02f, -5.0485e-02f, + 2.5257e-02f, 1.4071e-01f, 1.3606e-01f, + 1.7481e-01f, 2.0210e-01f, 1.7241e-01f, +-7.6295e-40f, -7.8460e-40f, -4.1806e-41f, +-7.9994e-40f, -7.3271e-40f, -6.2665e-40f, +-7.9602e-40f, -7.0226e-40f, -7.4131e-40f, +-4.5544e-04f, -5.2379e-04f, -7.0755e-04f, +-3.3807e-04f, -3.8123e-04f, -5.3222e-04f, +-3.1771e-04f, -3.4586e-04f, -4.8784e-04f, +-3.5257e-02f, -1.1866e-02f, 1.9717e-02f, +-6.0777e-02f, -7.3127e-03f, -3.2825e-02f, +-1.4952e-01f, 3.2117e-01f, -6.3786e-02f, +-1.0255e-02f, 1.2961e-01f, -8.6823e-02f, + 1.6994e-01f, 4.7491e-01f, 2.7135e-01f, + 2.8538e-03f, 1.5572e-01f, -3.3736e-02f, + 8.5996e-02f, -1.0176e-02f, 2.6629e-02f, + 7.3362e-02f, -7.7525e-03f, 5.6261e-02f, + 1.0819e-01f, -2.5863e-01f, -5.7146e-03f, +-7.1781e-02f, 2.8376e-03f, 7.8298e-02f, + 1.3183e-01f, 2.7149e-02f, -9.9786e-02f, + 9.0491e-02f, 8.7938e-02f, -2.1882e-02f, + 4.1396e-03f, -4.5816e-02f, -7.8892e-02f, +-6.3855e-03f, 1.7502e-01f, 1.2053e-01f, + 1.2492e-01f, 6.1258e-02f, -4.0516e-02f, +-4.5409e-02f, -4.5877e-02f, -7.6414e-02f, +-1.0573e-02f, 
-1.2517e-01f, -4.3991e-02f, +-2.6447e-02f, -9.5478e-02f, -2.4735e-02f, +-4.6548e-41f, -1.6443e-40f, -3.1221e-40f, +-3.2675e-40f, -2.7265e-40f, -3.1190e-40f, +-2.2065e-40f, -2.5407e-40f, -6.9511e-40f, +-1.2727e-04f, -2.6585e-04f, -3.5516e-04f, + 3.4272e-05f, -1.6810e-04f, -3.1677e-04f, +-5.5355e-05f, -2.9924e-04f, -4.3692e-04f, +-5.6428e-02f, 1.0771e-01f, 1.0185e-01f, + 2.2948e-01f, -7.8744e-02f, 6.0768e-04f, +-2.2355e-03f, -2.0128e-03f, -5.7317e-03f, +-7.1232e-03f, 1.0297e-01f, 1.6872e-01f, + 1.9194e-01f, -1.1578e-01f, 1.0732e-01f, +-8.6952e-02f, 3.2901e-02f, -6.6658e-03f, + 7.3979e-02f, 8.3875e-02f, -7.6372e-03f, + 1.9577e-01f, 2.7391e-01f, 4.5275e-02f, + 1.5610e-01f, 2.3802e-01f, 1.6555e-02f, + 1.3814e-01f, 1.2870e-01f, 9.1626e-02f, +-4.6890e-02f, -8.8734e-02f, 7.8866e-02f, + 1.0027e-01f, 2.2139e-01f, 1.0050e-01f, +-6.5845e-02f, -1.0990e-01f, -6.9896e-02f, + 4.1687e-02f, 3.0631e-02f, -8.8441e-02f, +-1.1868e-01f, 1.0836e-02f, 2.5873e-02f, +-1.7114e-02f, 7.6295e-02f, 1.5439e-02f, +-2.4271e-02f, 5.8538e-02f, 9.8190e-02f, + 4.9742e-02f, 8.7807e-02f, 6.5871e-02f, +-7.2669e-40f, -7.5936e-41f, -7.4975e-40f, +-1.6984e-42f, -1.7334e-40f, -8.4954e-41f, +-2.1556e-41f, -1.5374e-40f, -1.5515e-40f, +-6.2626e-04f, -7.2727e-04f, -8.1665e-04f, +-5.6584e-04f, -6.1190e-04f, -6.9584e-04f, +-5.6278e-04f, -5.8554e-04f, -6.3554e-04f, + 8.1550e-02f, -4.1817e-03f, 1.2301e-02f, +-4.5800e-02f, 4.6708e-02f, -8.7972e-02f, +-2.9880e-01f, 2.6456e-01f, 3.9363e-03f, +-3.0939e-02f, -1.9921e-01f, -3.8689e-03f, +-8.6803e-02f, 3.4857e-01f, -1.0201e-01f, + 2.1597e-02f, 1.4380e-02f, 4.3448e-02f, + 7.1195e-02f, 1.4980e-01f, 3.8079e-02f, +-1.2678e-01f, -8.1274e-02f, -4.3445e-02f, + 5.2482e-02f, -1.8763e-01f, 1.1557e-01f, +-9.4614e-02f, 5.4415e-02f, -3.1485e-02f, +-3.6451e-02f, 1.4379e-01f, 5.2291e-02f, +-9.2069e-02f, 9.5675e-02f, -5.8433e-02f, + 7.5768e-03f, -7.1280e-02f, -1.4576e-01f, +-1.4671e-01f, -1.2446e-01f, -1.5207e-01f, +-5.4368e-02f, 3.8303e-02f, -8.1794e-02f, + 2.0492e-02f, 4.0910e-02f, 1.1379e-02f, + 3.1582e-02f, 3.6039e-02f, -4.4040e-03f, + 1.7540e-02f, 1.4097e-04f, -6.4367e-02f, +-7.9553e-40f, -5.3941e-40f, -7.1912e-40f, +-5.8099e-40f, -6.8315e-40f, -6.6012e-40f, +-7.6242e-40f, -5.4784e-40f, -7.0267e-40f, +-2.9197e-04f, -2.1994e-04f, -1.9501e-04f, +-2.6516e-05f, -1.2642e-05f, -8.4345e-05f, + 1.6763e-04f, 1.1268e-04f, -5.4516e-05f, +-3.8007e-03f, -6.8765e-02f, -9.5716e-02f, + 6.3091e-02f, -8.1971e-02f, -9.2895e-02f, +-6.8353e-03f, 7.3639e-02f, 1.3505e-01f, + 9.0083e-02f, 2.4352e-01f, 3.9708e-02f, +-5.4051e-02f, -6.8748e-02f, -1.8937e-01f, +-1.9808e-03f, -7.1337e-02f, -2.8316e-02f, + 8.1504e-02f, 8.3226e-03f, 6.9013e-03f, + 9.4393e-02f, 5.9322e-02f, 5.5023e-02f, + 1.0236e-01f, -4.0205e-02f, 3.5172e-02f, + 6.5381e-02f, 4.9075e-02f, -5.3931e-02f, + 4.3961e-02f, 9.0223e-03f, -4.1678e-02f, +-6.4262e-02f, -5.0304e-02f, -9.3597e-02f +} +, +{ + 3.8496e-01f, 1.4287e-01f, 3.4530e-02f, +-5.5398e-01f, -6.0381e-02f, 1.2078e-02f, + 7.9983e-02f, 2.1478e-01f, -5.7915e-02f, +-1.4020e-01f, -2.6914e-02f, 1.5915e-02f, + 1.2371e-01f, 2.5496e-01f, -2.9867e-02f, + 1.3269e-02f, -9.9596e-02f, -2.3173e-01f, + 5.1471e-02f, -4.5507e-01f, -7.7620e-02f, +-5.1328e-02f, -1.9808e-02f, -4.7051e-02f, + 3.0573e-02f, 7.8762e-02f, -7.2627e-02f, + 6.8690e-02f, -4.0125e-02f, 5.6657e-02f, + 8.0208e-02f, -2.0075e-02f, 1.4019e-01f, +-5.7959e-02f, -7.3152e-02f, 2.0202e-02f, +-8.8702e-02f, -1.9911e-01f, -1.5570e-01f, + 2.8401e-02f, 5.8802e-02f, 1.3050e-01f, + 2.1905e-02f, -3.4298e-02f, 4.0447e-02f, + 1.0184e-01f, -9.0101e-02f, -9.2770e-02f, + 1.1713e-02f, 
-3.2514e-01f, 1.9393e-01f, +-9.4227e-02f, 2.7053e-01f, -9.7233e-02f, +-1.0478e-01f, 6.0652e-02f, 8.3399e-02f, + 1.1104e-01f, 2.9008e-01f, 4.9208e-02f, +-1.5414e-02f, 3.1718e-02f, -7.9083e-02f, +-5.2358e-03f, 9.0101e-02f, 5.2973e-02f, + 5.5527e-02f, -1.6599e-02f, -8.5167e-02f, +-5.1018e-02f, 7.2243e-03f, -9.5684e-02f, +-5.0608e-02f, -6.7864e-02f, -8.9496e-02f, +-2.4348e-01f, 2.7477e-01f, -1.7588e-01f, + 1.3927e-01f, 5.5502e-02f, -1.3370e-02f, +-4.3509e-02f, -2.1511e-01f, -5.9070e-02f, + 1.0293e-01f, 4.2678e-01f, -8.7527e-02f, +-6.8546e-02f, -5.6296e-02f, -8.7962e-02f, +-8.6130e-02f, 9.2069e-02f, 7.2303e-02f, + 2.4365e-02f, 2.1988e-01f, -7.9408e-03f, +-3.0063e-02f, 1.1554e-01f, -5.0311e-02f, + 1.0605e-02f, 5.4598e-02f, 1.3826e-02f, +-1.4342e-02f, 1.5353e-01f, -5.3974e-03f, + 1.5583e-01f, -6.0889e-02f, -1.5772e-02f, +-2.5956e-02f, -3.5285e-01f, -2.0338e-01f, + 2.6011e-01f, 2.2737e-01f, -1.4693e-01f, +-7.7964e-02f, 1.0053e-01f, -5.4278e-02f, +-3.0668e-02f, 3.4556e-02f, -3.4321e-02f, + 7.8695e-02f, -2.2357e-01f, 9.5733e-02f, + 1.7483e-01f, -1.5153e-01f, -1.8262e-03f, + 4.7605e-02f, -2.2834e-01f, 4.6383e-02f, + 1.5701e-01f, 3.2264e-01f, 1.0334e-02f, + 6.3351e-02f, 1.1340e-01f, 8.3478e-02f, + 6.4196e-02f, 3.3460e-02f, 8.8473e-02f, + 5.4663e-02f, -1.7665e-03f, -4.1935e-02f, +-6.1346e-03f, -5.4463e-02f, -6.2960e-02f, + 2.8159e-02f, 2.9903e-02f, 9.2429e-03f, +-3.0041e-02f, -9.7783e-02f, -4.9500e-02f, + 9.5350e-02f, -7.9143e-02f, -1.3244e-01f, +-6.5129e-02f, 1.4568e-01f, 6.6843e-02f, + 1.5241e-01f, -7.8736e-02f, 1.0721e-01f, +-5.9015e-02f, 1.5320e-01f, 3.0796e-01f, +-5.4266e-03f, -6.0804e-02f, 3.7326e-02f, + 7.4844e-02f, 4.8340e-02f, 1.5251e-01f, + 3.8158e-02f, 1.2087e-01f, -8.9003e-02f, +-5.8369e-02f, -7.3813e-02f, 1.2240e-02f, +-4.5106e-03f, 7.4580e-02f, 1.2042e-01f, + 4.1959e-02f, 1.4529e-01f, 5.3636e-03f, +-4.9708e-03f, -1.0775e-02f, -5.9374e-02f, + 1.5358e-02f, 1.7277e-02f, -1.5412e-01f, + 8.1647e-02f, 3.3503e-02f, -8.1934e-02f, +-1.5807e-02f, -1.0001e-02f, -1.0059e-02f, +-9.0493e-03f, -7.8954e-02f, 4.3891e-02f, +-9.3815e-03f, 3.2241e-02f, 4.7962e-02f, +-7.2252e-03f, 7.9324e-02f, 2.0662e-02f, +-5.7710e-02f, -5.1142e-02f, -1.4296e-01f, + 2.1501e-02f, -1.9518e-02f, -2.7658e-02f, + 1.4983e-01f, 8.5447e-02f, 7.2092e-04f, + 1.1275e-01f, 6.1131e-02f, 5.7955e-02f, + 1.5624e-02f, 2.7225e-01f, 1.1716e-01f, +-1.6322e-04f, -1.3368e-04f, -1.5575e-04f, +-1.0525e-04f, -1.0765e-04f, -1.5306e-04f, +-8.9692e-05f, -1.0857e-04f, -1.7316e-04f, +-1.8015e-03f, -1.3733e-03f, -3.9154e-04f, +-1.8453e-03f, -1.4238e-03f, -4.4163e-04f, +-1.5511e-03f, -1.1131e-03f, -2.0087e-04f, +-2.4082e-03f, -2.2576e-03f, -1.9231e-03f, +-2.4913e-03f, -2.4136e-03f, -2.1678e-03f, +-2.5057e-03f, -2.4650e-03f, -2.2732e-03f, +-2.3901e-05f, -1.5870e-05f, -5.8255e-06f, +-1.5163e-05f, -1.2370e-05f, -6.0712e-06f, +-1.3098e-05f, -1.1132e-05f, -5.7866e-06f, +-5.9760e-03f, -5.9998e-03f, -6.0295e-03f, +-5.9962e-03f, -6.0100e-03f, -6.0277e-03f, +-6.0003e-03f, -6.0059e-03f, -6.0148e-03f, +-3.2764e-05f, -2.9574e-05f, -2.8001e-05f, +-1.0846e-05f, -1.1569e-05f, -1.4282e-05f, +-1.6255e-06f, -2.5666e-06f, -4.7808e-06f, +-5.1999e-03f, -5.2334e-03f, -5.2847e-03f, +-5.2057e-03f, -5.2283e-03f, -5.2713e-03f, +-5.2195e-03f, -5.2321e-03f, -5.2633e-03f, +-3.0782e-06f, -9.2118e-06f, -1.6177e-05f, +-1.6382e-06f, -6.9559e-06f, -1.4245e-05f, +-1.1471e-06f, -6.5984e-06f, -1.4903e-05f, + 7.7574e-02f, -1.2866e-02f, 4.1348e-03f, +-6.7298e-02f, -1.3691e-01f, 6.4079e-02f, + 3.7962e-02f, 8.7737e-02f, -4.1046e-02f, +-2.8471e-02f, 1.7647e-01f, 6.4232e-02f, + 1.2316e-01f, 
3.6800e-01f, -1.5740e-01f, +-6.0839e-02f, 1.5449e-02f, -1.0761e-01f, +-6.6869e-02f, -1.2867e-01f, -4.0195e-02f, +-4.9651e-02f, -5.5500e-02f, -2.5879e-02f, + 2.0179e-02f, 6.8467e-02f, 2.6575e-02f, +-6.7728e-04f, -7.6269e-02f, 2.3470e-02f, + 7.1869e-02f, -1.1855e-01f, -2.1067e-02f, + 1.3263e-01f, -3.2957e-02f, -3.4365e-03f, + 8.1936e-02f, 1.3073e-01f, 1.1477e-01f, + 1.2429e-01f, 1.6129e-01f, 1.6251e-01f, + 1.5476e-02f, 3.2862e-02f, 2.1999e-02f, +-2.9189e-02f, -3.3615e-02f, 5.5616e-04f, +-2.4059e-02f, -9.6181e-03f, -4.1175e-02f, +-6.3680e-04f, -9.6559e-02f, -9.1448e-02f, + 3.0238e-02f, 1.2534e-01f, 1.5256e-02f, +-4.2118e-02f, 1.5723e-01f, 2.6929e-03f, + 1.9873e-02f, 5.3050e-02f, -1.0153e-03f, + 2.0634e-02f, 9.2825e-03f, -6.8027e-03f, + 3.1335e-03f, -7.7443e-03f, -1.8307e-02f, + 7.9974e-03f, -1.0283e-03f, -6.2520e-03f, + 4.5050e-02f, 9.9504e-02f, -1.3404e-01f, +-6.7271e-01f, -5.7290e-02f, 2.6919e-02f, + 2.3673e-01f, 2.4688e-02f, -2.0227e-02f, + 5.1389e-02f, -3.9810e-02f, -8.9700e-02f, + 2.8445e-02f, 3.9136e-01f, -1.1508e-01f, +-1.0449e-01f, -6.2005e-02f, 6.5721e-02f, +-1.9123e-01f, -4.2613e-02f, 3.5371e-02f, + 1.9207e-01f, 8.7916e-02f, 4.8089e-02f, +-5.7912e-02f, 1.0014e-01f, -9.4659e-02f, + 1.1240e-02f, -6.2254e-03f, 1.3399e-01f, + 1.6483e-01f, -3.5079e-01f, 1.1612e-02f, + 2.9215e-01f, 5.6875e-02f, 6.9505e-02f, + 1.3721e-02f, 1.2607e-01f, 2.6426e-02f, +-2.0529e-01f, 2.1768e-01f, 2.1232e-01f, +-6.3574e-02f, 2.3504e-02f, -1.0811e-01f, +-1.3470e-02f, -3.6446e-02f, -5.4379e-02f, +-1.3257e-01f, -8.3412e-02f, 3.7745e-02f, + 5.8778e-02f, -2.6060e-01f, 3.8262e-02f, +-4.3689e-03f, -6.6703e-02f, -2.2025e-01f, +-9.0961e-02f, 1.3855e-01f, 3.4573e-04f, +-2.9613e-01f, -3.6138e-02f, -1.3827e-01f, + 4.5896e-02f, -5.3871e-02f, -1.0037e-01f, + 1.8457e-01f, 1.0338e-01f, -5.7306e-02f, + 5.5510e-02f, -9.4938e-02f, -5.6527e-05f, + 1.6372e-01f, -3.3854e-02f, 5.6332e-02f, +-4.0251e-01f, -5.9428e-02f, -9.1470e-02f, +-1.5921e-02f, -5.7948e-02f, 8.1682e-03f, +-3.7833e-03f, 1.6293e-01f, 5.3784e-02f, + 1.1053e-01f, -1.3867e-01f, 2.6772e-02f, +-1.3133e-02f, 3.7614e-01f, 3.6361e-03f, +-1.4205e-01f, 3.1312e-02f, -9.9928e-02f, +-1.5755e-01f, 4.2016e-01f, 9.4065e-02f, + 2.7536e-02f, 1.2620e-01f, -1.4894e-01f, +-4.2137e-02f, -9.8700e-02f, -1.7479e-01f, + 4.5836e-02f, 5.3893e-02f, -1.0138e-01f, + 8.3609e-02f, 2.1849e-02f, -1.0648e-01f, + 7.4801e-02f, -1.2671e-01f, -1.5007e-02f, + 2.7440e-01f, -3.1351e-01f, 6.5787e-02f, +-6.7820e-02f, 1.6312e-01f, -1.3254e-02f, +-2.5770e-02f, -2.0041e-02f, 5.8243e-02f, + 1.6055e-02f, 1.1971e-02f, -4.6112e-02f, +-1.6276e-01f, -1.5313e-02f, -7.9826e-03f, + 9.1668e-02f, 9.7722e-02f, 1.3754e-01f, +-7.4817e-02f, -4.1923e-01f, -1.2337e-01f, + 1.3472e-01f, -4.0745e-02f, -5.4055e-02f, +-1.2943e-02f, 4.8796e-02f, 4.2007e-02f, + 9.4668e-02f, 8.6149e-02f, 1.2362e-01f, + 7.0637e-02f, 2.3565e-01f, 1.4582e-01f, + 5.6904e-02f, -8.2166e-02f, 1.0563e-01f, + 9.3969e-02f, -2.2909e-01f, 4.6537e-02f, + 6.5257e-02f, 1.4804e-01f, -6.2092e-02f, +-1.5699e-02f, -1.5303e-02f, 1.6671e-01f, +-6.1947e-03f, 2.5749e-01f, 1.5257e-01f, + 3.2908e-02f, -5.9907e-02f, 1.1502e-01f, + 7.5876e-02f, -2.6699e-01f, -1.5891e-02f, +-8.0426e-02f, 1.3406e-01f, -1.9881e-02f, + 3.5472e-02f, -8.2140e-02f, 1.6509e-02f, + 8.3390e-03f, -7.8291e-02f, -2.0754e-01f, + 3.4490e-02f, 2.7913e-01f, 5.9566e-02f, + 2.5288e-02f, 1.1725e-01f, -1.0356e-01f, +-5.0955e-02f, 9.2093e-02f, -5.8477e-02f, + 4.4325e-02f, 3.2973e-02f, -1.9477e-01f, + 3.9582e-02f, -8.6877e-02f, -1.1753e-01f, + 3.0401e-02f, -2.8757e-02f, -2.5563e-02f, + 5.0741e-02f, -3.5056e-01f, 
-2.5584e-01f, + 9.1709e-02f, -4.0932e-02f, 2.3812e-01f, + 5.0945e-02f, 4.9246e-02f, 1.2738e-01f, + 5.1440e-03f, 1.5703e-01f, 5.5743e-02f, +-3.9492e-02f, 1.2114e-01f, 2.0531e-02f, + 8.0800e-02f, 2.6680e-03f, -1.6660e-02f, + 1.0684e-01f, 1.2308e-01f, 1.7882e-02f, + 1.8280e-02f, 1.0972e-01f, -5.2912e-03f +} +, +{ +-1.3812e-02f, -4.6271e-02f, 7.3790e-02f, +-6.3801e-02f, -3.6817e-01f, -1.7880e-02f, + 5.2986e-02f, 1.8626e-01f, 1.5645e-03f, + 1.2367e-02f, -6.2923e-02f, 3.0844e-02f, + 9.3623e-02f, 1.9527e-01f, -2.6366e-02f, +-2.0837e-02f, -3.4424e-02f, 4.0256e-02f, + 4.1482e-02f, 6.1795e-02f, -1.1293e-02f, +-8.9944e-02f, -1.3608e-01f, 1.8067e-02f, + 3.6974e-02f, 5.2530e-03f, -2.7474e-02f, + 1.1872e-05f, 1.9000e-05f, 2.0729e-05f, + 1.0139e-05f, 1.6832e-05f, 1.9392e-05f, + 6.5445e-06f, 1.0973e-05f, 1.3521e-05f, +-5.3340e-02f, 1.3108e-03f, 4.0436e-02f, + 5.7068e-02f, -2.7923e-02f, -5.4781e-02f, +-2.9293e-02f, 2.7145e-02f, 2.7340e-02f, + 5.3520e-03f, 1.8766e-02f, 4.0297e-01f, + 2.6473e-02f, -3.4675e-02f, -1.1783e-01f, +-2.5038e-02f, -1.7702e-02f, -3.4908e-02f, + 1.4847e-02f, 2.3237e-01f, -6.3687e-02f, +-6.5672e-02f, -2.1888e-01f, -1.7233e-02f, + 4.0608e-02f, -6.9580e-02f, -2.2200e-02f, + 5.8163e-02f, 1.3695e-01f, -2.6257e-02f, +-1.3328e-01f, -3.5730e-01f, 2.4507e-02f, +-4.5611e-03f, 2.0424e-01f, -3.9821e-02f, + 5.5300e-02f, -1.6006e-01f, 1.1717e-01f, +-2.6107e-02f, -8.6995e-02f, 8.3720e-02f, + 7.5494e-02f, 3.2189e-01f, 1.5527e-01f, +-6.6869e-02f, 1.4469e-01f, 5.1805e-02f, + 9.8760e-02f, -1.6759e-01f, -1.2350e-01f, + 5.7005e-02f, 8.4904e-02f, 8.9713e-02f, +-1.4263e-02f, 2.8914e-02f, 3.2239e-02f, +-2.4871e-02f, 5.6014e-02f, -4.4469e-02f, + 3.1209e-02f, 1.3677e-02f, -2.1052e-02f, +-1.6548e-03f, -1.8796e-03f, -1.9883e-03f, +-1.6186e-03f, -1.8494e-03f, -1.9670e-03f, +-1.5841e-03f, -1.8173e-03f, -1.9345e-03f, + 3.5726e-02f, 1.8013e-01f, 1.6913e-02f, +-1.2168e-01f, -6.3848e-02f, 3.0555e-02f, + 3.0269e-02f, -1.0260e-01f, -1.5259e-02f, +-4.7375e-03f, 5.5115e-02f, 6.2642e-01f, + 9.9776e-03f, -2.1988e-01f, -2.0984e-01f, + 7.0470e-03f, 6.3178e-02f, -1.3607e-02f, + 1.1918e-01f, -2.4081e-01f, 1.7889e-01f, +-1.0514e-01f, 2.9220e-01f, -1.3263e-01f, + 5.6091e-03f, -4.1623e-02f, 2.5589e-02f, +-1.8496e-01f, 2.7698e-02f, -6.5768e-02f, + 2.9677e-01f, 4.4163e-02f, 5.8530e-02f, +-1.1010e-01f, -7.6787e-02f, 3.9844e-02f, + 5.2113e-03f, -1.8202e-02f, 1.4129e-03f, +-6.1402e-03f, -2.7222e-01f, 7.4690e-02f, + 1.9131e-02f, 2.2753e-01f, 1.9587e-02f, +-2.7391e-02f, 6.7917e-03f, 2.0496e-03f, + 6.7333e-02f, 7.8262e-02f, 2.1110e-03f, +-5.4519e-02f, 3.0763e-02f, 1.5628e-02f, + 9.5055e-02f, 3.8855e-02f, 1.2446e-02f, +-1.5152e-01f, 7.8124e-02f, -1.2616e-02f, + 9.3100e-03f, -1.6528e-02f, -1.2873e-02f, +-1.8377e-03f, -1.9231e-03f, -1.8930e-03f, +-1.8058e-03f, -1.8841e-03f, -1.8678e-03f, +-1.7387e-03f, -1.7966e-03f, -1.7781e-03f, +-4.5122e-02f, 1.7027e-03f, -3.5534e-03f, + 8.5222e-03f, 1.0130e-01f, 4.7893e-02f, + 6.5574e-02f, 7.2150e-03f, -2.1820e-03f, +-5.5105e-03f, -1.8990e-01f, 2.6527e-02f, + 6.6140e-03f, 2.1537e-01f, -2.2183e-02f, +-8.0628e-03f, 6.8398e-03f, 9.4474e-03f, + 1.2239e-01f, -1.3337e-01f, 7.3391e-02f, +-1.2205e-01f, 1.3145e-01f, -2.0063e-02f, + 2.2168e-02f, 3.6097e-03f, 2.7146e-02f, + 4.6717e-02f, 2.1122e-02f, 1.5491e-02f, +-1.3077e-01f, 1.1635e-01f, 1.0849e-02f, + 8.0113e-02f, -8.4028e-02f, 1.2863e-03f, +-2.9796e-02f, -8.4537e-02f, -2.6766e-03f, +-7.7771e-03f, -2.4274e-03f, 8.6274e-02f, +-2.0354e-02f, 4.1245e-02f, 8.4227e-02f, + 5.5894e-02f, 1.0706e-01f, 5.2965e-02f, +-7.8731e-03f, 5.5825e-01f, 1.0373e-01f, +-1.1975e-01f, 
-2.0071e-02f, -2.5286e-02f, +-7.7477e-02f, 5.3589e-02f, -1.5710e-03f, +-1.2753e-01f, 2.5166e-01f, 8.2205e-03f, +-9.8349e-02f, -4.9539e-02f, -5.4941e-02f, +-4.9916e-03f, -4.9986e-03f, -5.0660e-03f, +-4.9770e-03f, -4.9840e-03f, -5.0543e-03f, +-4.9997e-03f, -5.0114e-03f, -5.0809e-03f, + 6.1819e-02f, 1.5061e-01f, 1.1984e-02f, + 1.2905e-01f, 2.5921e-01f, 1.4768e-01f, + 4.5548e-02f, 1.4902e-01f, -4.8961e-03f, +-1.3605e-02f, 8.2896e-02f, -4.1931e-01f, +-2.2657e-02f, 2.4768e-01f, 2.6528e-01f, +-1.1566e-02f, -8.7819e-03f, 4.3618e-02f, +-3.4332e-02f, -1.8392e-01f, 4.4471e-02f, +-3.7073e-02f, -5.4620e-02f, 1.0899e-01f, + 3.7891e-02f, 9.9487e-02f, 3.2383e-02f, +-6.3628e-02f, -5.0303e-03f, 5.4617e-02f, +-8.7802e-02f, 2.1977e-01f, -6.0249e-03f, + 6.3554e-02f, -5.4291e-02f, -2.6709e-02f, +-1.5505e-02f, -6.7104e-02f, 3.8607e-02f, +-1.1427e-01f, -3.2524e-01f, 4.0077e-02f, +-6.5144e-03f, 1.2313e-01f, -2.7924e-02f, + 1.4265e-02f, -3.8338e-02f, 8.6780e-02f, + 1.5341e-01f, 1.2174e-01f, -7.3160e-02f, + 2.6326e-04f, 7.3690e-02f, 5.2187e-02f, +-3.3114e-02f, -3.6588e-02f, 1.1635e-02f, +-3.3521e-02f, 1.0767e-01f, -8.9125e-03f, +-2.2431e-02f, -4.5655e-03f, 7.5531e-03f, + 6.7227e-04f, 7.2856e-04f, 7.3907e-04f, + 6.5335e-04f, 7.0702e-04f, 7.1233e-04f, + 6.1540e-04f, 6.7286e-04f, 6.7797e-04f, +-3.1496e-02f, 6.0514e-02f, 4.2013e-02f, +-2.8617e-02f, 1.4846e-02f, 4.0016e-03f, + 4.7006e-03f, -4.0017e-02f, -3.0411e-02f, +-9.6037e-03f, 8.8522e-02f, 9.8616e-02f, + 4.1297e-02f, -3.2645e-01f, -7.6144e-03f, +-1.0711e-02f, 3.9324e-02f, 4.0144e-02f, + 5.2899e-02f, -7.8668e-02f, -5.4798e-02f, +-2.0428e-01f, 5.7238e-02f, -3.6937e-02f, +-3.6103e-02f, -8.2683e-02f, -2.8101e-02f, + 8.2479e-02f, 5.7766e-02f, -1.2019e-01f, +-3.8373e-01f, 6.8272e-02f, -1.1758e-02f, + 5.1129e-02f, -2.7931e-01f, 4.5608e-02f, +-2.5151e-02f, -5.0816e-02f, 1.7231e-02f, +-3.6376e-02f, 1.5916e-01f, 2.9192e-02f, +-4.1947e-02f, 5.3183e-02f, -9.7289e-02f, + 4.6138e-02f, 7.0842e-02f, 1.6673e-02f, +-1.7243e-03f, 2.7203e-01f, 3.8262e-02f, +-1.4000e-01f, -7.3793e-02f, -2.0050e-02f, +-1.8750e-02f, -8.5319e-02f, -3.0858e-02f, +-5.9981e-02f, 1.2729e-01f, 1.4094e-02f, +-5.4088e-02f, -2.3694e-02f, -9.7485e-03f, +-4.7840e-03f, -4.8359e-03f, -4.8727e-03f, +-4.7882e-03f, -4.8380e-03f, -4.8755e-03f, +-4.7859e-03f, -4.8321e-03f, -4.8633e-03f, + 4.9511e-02f, 1.0935e-01f, -3.7430e-03f, + 1.1834e-01f, 7.7243e-02f, 4.3074e-02f, + 6.7446e-02f, 2.9734e-02f, -1.1276e-02f, +-2.0080e-02f, 1.3561e-01f, -1.3455e-01f, +-1.4505e-02f, 2.2100e-01f, 4.9635e-02f, +-1.0040e-02f, 3.4560e-02f, -7.4607e-03f, +-6.8873e-02f, -5.6221e-02f, 1.2255e-02f, +-2.9198e-02f, 7.1612e-02f, 2.9402e-02f, + 4.1036e-02f, 4.6417e-02f, 6.0284e-03f, +-6.5261e-02f, 2.1426e-03f, 2.4192e-02f, +-1.6073e-03f, -6.2222e-03f, -1.8295e-02f, + 2.4952e-04f, -2.0623e-02f, -3.3064e-03f, + 5.9188e-02f, -4.8839e-02f, 7.9840e-02f, +-6.7952e-02f, -4.7191e-01f, 1.5117e-01f, + 1.5668e-01f, 2.4733e-01f, 1.1354e-01f, + 1.7742e-02f, -4.4059e-02f, 9.5374e-03f, + 3.2049e-01f, -1.3779e-01f, 9.6608e-02f, + 8.4580e-02f, 1.4293e-01f, 6.1574e-02f, + 2.8777e-03f, 7.8795e-02f, -5.1902e-02f, + 1.2212e-01f, 1.0321e-01f, 3.2360e-02f, +-9.6617e-02f, 7.8941e-03f, -7.0876e-02f, + 3.5869e-03f, 3.5891e-03f, 3.5923e-03f, + 3.5746e-03f, 3.5840e-03f, 3.5967e-03f, + 3.5785e-03f, 3.5932e-03f, 3.6080e-03f, + 1.5454e-03f, 3.0582e-03f, 4.3737e-02f, +-5.9833e-02f, -1.1247e-01f, 4.4380e-02f, +-1.3206e-01f, 8.2778e-03f, 4.7963e-02f, +-4.3720e-02f, -7.5722e-03f, 2.0510e-01f, + 3.0133e-02f, -4.0506e-01f, 2.7867e-01f, + 5.5586e-02f, 2.8926e-02f, 1.3360e-03f, + 
1.9490e-05f, 3.3326e-01f, -7.7241e-02f, +-1.5648e-01f, 1.5195e-01f, -1.3995e-01f, + 8.6519e-02f, 1.0447e-01f, -4.1413e-02f, +-3.8667e-03f, 1.6159e-01f, 1.1627e-01f, +-2.2646e-01f, -3.4758e-02f, -6.7956e-03f, +-3.2689e-01f, 1.9606e-01f, -9.1523e-02f, + 1.1238e-02f, 1.5084e-03f, 4.2113e-02f, +-1.1154e-02f, -3.6596e-01f, -7.2252e-02f, + 6.6621e-02f, 1.0188e-01f, 4.1032e-01f, + 3.5892e-02f, -4.8304e-02f, 6.6142e-03f, + 1.3374e-01f, 2.2720e-01f, -7.1224e-02f, + 6.8952e-02f, 2.0467e-01f, 5.0251e-02f, +-6.2016e-02f, 2.2175e-01f, -1.7764e-02f, + 2.7542e-02f, 1.4905e-01f, 3.6637e-02f, +-7.2231e-02f, 5.0271e-03f, -7.1823e-02f, + 3.5760e-03f, 3.5540e-03f, 3.5692e-03f, + 3.5664e-03f, 3.5490e-03f, 3.5689e-03f, + 3.5671e-03f, 3.5619e-03f, 3.5864e-03f, + 2.7470e-02f, -3.9752e-02f, 4.1063e-02f, +-2.4985e-02f, -1.7969e-01f, 8.2186e-02f, +-5.4251e-02f, -5.9651e-03f, 2.5079e-02f, +-2.1197e-02f, 2.5426e-02f, 1.3585e-01f, +-1.3460e-02f, -1.1377e-01f, 1.2278e-01f, + 3.6533e-02f, 1.2843e-02f, 5.6219e-02f, + 5.8141e-04f, 2.8354e-01f, -6.2016e-02f, +-1.0289e-01f, 1.8724e-01f, -9.9475e-02f, + 5.1193e-02f, 7.5986e-02f, -1.2951e-03f, +-8.2587e-02f, 1.8498e-01f, 1.0891e-01f, + 1.3538e-01f, -4.7728e-01f, 1.0868e-01f, +-8.6415e-02f, -1.7061e-01f, 1.0457e-02f +} +}; +__device__ __constant__ static const float biasL[8][8] = +{ +{ +-0.1175f, -0.0258f, -0.0053f, -0.0437f, -0.0563f, -0.1047f, -0.3449f, 0.0568f +} +, +{ + 0.0339f, -0.1738f, 0.0061f, 0.1565f, -0.0316f, -0.0016f, -0.0032f, -0.0554f +} +, +{ +-0.0508f, -0.0609f, 0.0347f, -0.0802f, -0.0438f, 0.2512f, -0.0491f, -0.0259f +} +, +{ + 0.0655f, 0.0255f, 0.0228f, -0.0027f, -0.0155f, -0.0163f, -0.0174f, -0.1095f +} +, +{ + 4.9947e-03f, 5.3372e-03f, -4.5286e-09f, -1.3756e-03f, 3.8858e-03f, -4.4197e-02f, 3.3970e-02f, 2.8411e-02f +} +, +{ +-0.0396f, 0.0007f, 0.1735f, 0.0109f, 0.1177f, 0.0919f, 0.0567f, -0.0005f +} +, +{ + 0.0127f, -0.0688f, 0.1102f, -0.0052f, 0.1602f, -0.0191f, -0.0322f, 0.0311f +} +, +{ + 0.0063f, 0.0093f, 0.0729f, 0.3734f, 0.0006f, 0.1915f, 0.3186f, 0.2636f +} +}; +__device__ __constant__ static const float kernelsL10[4 * 8] = +{ +-0.0967f, -0.3094f, + 0.3537f, 0.5705f, + 0.2547f, 0.3360f, +-0.0718f, -0.0700f, +-0.3013f, -0.1602f, + 0.4520f, 0.0495f, + 0.1564f, 0.3773f, +-0.0216f, 0.4367f, +-0.4855f, -0.1972f, +-0.2026f, -0.4390f, + 0.3743f, -0.1156f, + 0.4408f, -0.3123f, +-0.3577f, 0.0753f, +-0.3396f, 0.0336f, + 0.1052f, -0.4180f, + 0.0799f, -0.3587f +}; + +#include "ACNetCommon.cuh" + +DECLARE_ACNET_HDN_INTERFACE_FUNCTION(3) diff --git a/cuda_code/Activation_27.cu b/cuda_code/Activation_27.cu new file mode 100644 index 0000000000000000000000000000000000000000..4ecf7fe00d7127fe0f77c9b76eb00c080c8650f1 --- /dev/null +++ b/cuda_code/Activation_27.cu @@ -0,0 +1,587 @@ +#define _USE_MATH_DEFINES + +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +// ----------------------------------- +// prelu forward +// ----------------------------------- +template +void prelu_cuda_kernel_share_weights( + const Tensor& input, + Tensor& result, + const scalar_t* weight_data) +{ + auto iter = TensorIterator::unary_op(result, input); + + at::native::gpu_kernel(iter, + [weight_data] GPU_LAMBDA (scalar_t input_val) { + return (input_val > 0) ? 
input_val : *weight_data * input_val; + }); +} + +template +__global__ void prelu_cuda_kernel_multi_weights( + scalar_t* result_data, + const scalar_t* input_data, + const scalar_t* weight_data, + int64_t input_stride0, + int64_t input_stride1, + int64_t input_numel) { + + int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x; + if (linearId >= input_numel) return; + + // multiply values at each channel with weight[channel_index] + int64_t channel = (linearId % input_stride0) / input_stride1; + scalar_t input_data_val = input_data[linearId]; + result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val; +} + +Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) { + TORCH_CHECK(self.is_cuda()); + TORCH_CHECK(weight_.is_cuda()); + + auto input = self.contiguous(); + auto weight = weight_.contiguous(); + + TORCH_CHECK(input.is_contiguous()); + TORCH_CHECK(weight.is_contiguous()); + + int64_t weight_num = weight.numel(); + Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + auto strides = input.strides(); + + // case1: shared weight for all channels + if (weight_num == 1) { + AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] { + prelu_cuda_kernel_share_weights( + input, + result, + weight.data_ptr()); + }); + } + else { // case2: multiple weights, one for each channel + int64_t input_ndim = input.dim(); + TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor."); + + int64_t channel_size = 1; // channel_size default to 1 + int64_t input_stride0 = 1, input_stride1 = 1; + + if (input_ndim > 1) { + channel_size = input.size(1); // channel is the 2nd dim of input + input_stride0 = strides[0]; + input_stride1 = strides[1]; + } + TORCH_CHECK(channel_size == weight_num, + "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num, + " and channel size = ", channel_size, "."); + + // config to run cuda kernel + int64_t input_numel = input.numel(); + const dim3 block = dim3(std::min(static_cast(cuda::getApplyBlock().x), input_numel)); + dim3 grid; + int curDevice = -1; + cudaGetDevice(&curDevice); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); + TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions"); + + AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] { + prelu_cuda_kernel_multi_weights + <<>>( + result.data_ptr(), + input.data_ptr(), + weight.data_ptr(), + input_stride0, + input_stride1, + input_numel); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + } + return result; +} + +// ----------------------------------- +// prelu backward +// ----------------------------------- +template +void prelu_cuda_backward_kernel_share_weights( + const Tensor& input, + const Tensor& grad_out, + Tensor& input_grad, + Tensor& weight_grad_collector, + const scalar_t* weight_data) { + at::TensorIterator iter = TensorIteratorConfig() + .add_borrowed_output(input_grad) + .add_borrowed_output(weight_grad_collector) + .add_borrowed_input(input) + .add_borrowed_input(grad_out) + .build(); + + // N.B. `std::tuple` does not support `::operator=` on device code. + gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple { + scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out; + scalar_t weight_grad_collector = input > 0 ? 
scalar_t(0) : input * grad_out; + return {input_grad, weight_grad_collector}; + }); +} + +template +__global__ void prelu_cuda_backward_kernel_multi_weights( + const scalar_t* input_data, + const scalar_t* weight_data, + const scalar_t* grad_out_data, + scalar_t* input_grad_data, + scalar_t* weight_grad_collector, + int64_t input_stride0, + int64_t input_stride1, + int64_t input_numel) { + + int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x; + if (linearId >= input_numel) return; + int64_t channel = (linearId % input_stride0) / input_stride1; + scalar_t input_data_val = input_data[linearId]; + scalar_t grad_out_data_val = grad_out_data[linearId]; + input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val; + weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val; +} + +std::tuple prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) { + TORCH_CHECK(grad_out_.is_cuda()); + TORCH_CHECK(self.is_cuda()); + TORCH_CHECK(weight_.is_cuda()); + + auto input = self.contiguous(); + auto grad_out = grad_out_.contiguous(); + auto weight = weight_.contiguous(); + + TORCH_CHECK(input.is_contiguous()); + TORCH_CHECK(weight.is_contiguous()); + TORCH_CHECK(grad_out.is_contiguous()); + + int64_t weight_num = weight.numel(); + auto strides = input.strides(); + auto dims = input.dim(); + Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + // case1: shared parameter for all channels + if (weight_num == 1) { + AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] { + prelu_cuda_backward_kernel_share_weights( + input, + grad_out, + input_grad, + weight_grad_collector, + weight.data_ptr()); + }); + weight_grad.fill_(weight_grad_collector.sum()); + } + else { // case2: multiple parameters, one for each channel + int64_t input_ndim = input.dim(); + TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor."); + + int64_t channel_size = 1; // channel_size default to 1 + int64_t input_stride0 = 1, input_stride1 = 1; + + if (input_ndim > 1) { + channel_size = input.size(1); // channel is the 2nd dim of input + input_stride0 = strides[0]; + input_stride1 = strides[1]; + } + TORCH_CHECK(channel_size == weight_num, + "Mismatch of parameter numbers and input channel size. 
Found parameter numbers = ", weight_num, + " and channel size = ", channel_size, "."); + + // config to run cuda kernel + int64_t input_numel = input.numel(); + const dim3 block = dim3(std::min(static_cast(cuda::getApplyBlock().x), input_numel)); + dim3 grid; + int curDevice = -1; + cudaGetDevice(&curDevice); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); + TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions"); + + AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] { + prelu_cuda_backward_kernel_multi_weights + <<>>( + input.data_ptr(), + weight.data_ptr(), + grad_out.data_ptr(), + input_grad.data_ptr(), + weight_grad_collector.data_ptr(), + input_stride0, + input_stride1, + input_numel); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + // update weight_grad + std::vector reduce_dims; + reduce_dims.push_back(0); + if (dims > 2) { + for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i); + } + weight_grad = weight_grad_collector.sum(reduce_dims); + } + return std::tuple{input_grad, weight_grad}; +} + +// ----------------------------------- +// hardshrink +// ----------------------------------- +void hardshrink_kernel(TensorIterator& iter, const Scalar& value) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() { + auto lambd = value.to(); + gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t { + return (a >= -lambd && a <= lambd) ? scalar_t(0) : a; + }); + }); +} + +void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() { + auto lambd = value.to(); + gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t { + return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0)); + }); + }); +} + +void shrink_backward_kernel(TensorIterator& iter, const Scalar& value) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() { + auto lambd = value.to(); + gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t { + return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val; + }); + }); +} + +void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) { + AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() { + auto min_val = min.to(); + auto max_val = max.to(); + gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { + return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a; + }); + }); +} + +void softplus_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() { + auto beta = beta_.to(); + auto threshold = threshold_.to(); + gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t { + return (a * beta) > threshold ? 
a : static_cast(::log1p(std::exp(a * beta))) / beta; + }); + }); +} + +void softplus_backward_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() { + auto beta = beta_.to(); + auto threshold = threshold_.to(); + gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { + scalar_t z = std::exp(b * beta); + return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.)); + }); + }); +} + +template +void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) { + gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t { + return x <= threshold ? value : other; + }); +} + +static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) { + AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] { + threshold_kernel_impl(iter, threshold.to(), value.to()); + }); +} + +void elu_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() { + auto negcoef = alpha.to() * scale.to(); + auto poscoef = scale.to(); + auto negiptcoef = input_scale.to(); + gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t { + return a > scalar_t(0) ? a * poscoef : (static_cast(std::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef; + }); + }); +} + +void elu_backward_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() { + auto negcoef = alpha.to() * scale.to(); + auto poscoef = scale.to(); + auto negiptcoef = input_scale.to(); + gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { + if (is_result) { + return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef; + } else { + return b <= scalar_t(0) ? 
a * negiptcoef * negcoef * (static_cast(std::exp(b * negiptcoef))) : a * poscoef; + } + }); + }); +} + +namespace { + +void GeluCUDAKernelImpl(TensorIterator& it) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() { + using T_ACC = acc_type; + gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t { + return static_cast(x) * + c10::cuda::compat::normcdf(static_cast(x)); + }); + }); +} + +void GeluBackwardCUDAKernelImpl(TensorIterator& it) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, + it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() { + using T_ACC = acc_type; + gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { + constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5); + const T_ACC cdf = c10::cuda::compat::normcdf(static_cast(x)); + const T_ACC pdf = + c10::cuda::compat::exp( + T_ACC(-0.5) * static_cast(x) * static_cast(x)) * + kBeta; + return static_cast(dy) * (cdf + static_cast(x) * pdf); + }); + }); +} + +void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() { + auto negval = negval_.to(); + gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t { + return a > scalar_t(0) ? a : a * negval; + }); + }); +} + +void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() { + auto negval = negval_.to(); + gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { + return a > scalar_t(0) ? b : b * negval; + }); + }); +} + +void hardswish_kernel(TensorIterator& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() { + using T_ACC = acc_type; + const T_ACC zero(0.0f); + const T_ACC one_sixth(1.0f / 6.0f); + const T_ACC three(3.0f); + const T_ACC six(6.0f); + gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t { + T_ACC x = static_cast(self_val); + return x * std::min(std::max(x + three, zero), six) * one_sixth; + }); + }); +} + +void hardswish_backward_kernel(TensorIterator& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() { + using T_ACC = acc_type; + const T_ACC zero(0.0f); + const T_ACC three(3.0f); + const T_ACC neg_three(-3.0f); + const T_ACC one_half(0.5f); + gpu_kernel( + iter, + [zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t { + T_ACC grad_val = static_cast(grad_val_); + T_ACC self_val = static_cast(self_val_); + if (self_val < neg_three) { + return zero; + } else if (self_val <= three) { + return grad_val * ((self_val / three) + one_half); + } else { + return grad_val; + } + }); + }); +} + +void hardsigmoid_kernel(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() { + using T_ACC = acc_type; + const T_ACC zero(0.0f); + const T_ACC one_sixth(1.0f / 6.0f); + const T_ACC three(3.0f); + const T_ACC six(6.0f); + gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t { + T_ACC x = static_cast(self_val); + return std::min(std::max(x + three, zero), six) * one_sixth; + }); + }); +} + 
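+// For reference: the forward kernel above computes hardsigmoid(x) = min(max(x + 3, 0), 6) / 6
+// in the accumulation type T_ACC, and the backward kernel below applies its derivative,
+// which is 1/6 on the open interval (-3, 3) and 0 elsewhere.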
+void hardsigmoid_backward_kernel(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() { + using T_ACC = acc_type; + const T_ACC zero(0.0f); + const T_ACC three(3.0f); + const T_ACC neg_three(-3.0f); + const T_ACC one_sixth(1.0f / 6.0f); + gpu_kernel( + iter, + [zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t { + T_ACC grad_val = static_cast(grad_val_); + T_ACC self_val = static_cast(self_val_); + return (self_val > neg_three && self_val < three) + ? grad_val * one_sixth + : zero; + }); + }); +} + +void silu_kernel(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + iter.dtype(), + "silu_cuda", + [&]() { + gpu_kernel( + iter, + [] GPU_LAMBDA(scalar_t x) -> scalar_t { + using T_ACC = acc_type; + const T_ACC x_acc = static_cast(x); + return x_acc / (T_ACC(1) + c10::cuda::compat::exp(-x_acc)); + }); + }); +} + +void silu_backward_kernel(TensorIterator& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + iter.dtype(), + "silu_backward_cuda", + [&]() { + gpu_kernel( + iter, + [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { + using T_ACC = acc_type; + const T_ACC dy_acc = static_cast(dy); + const T_ACC x_acc = static_cast(x); + const T_ACC s_acc = + T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc)); + return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc)); + }); + }); +} + +void mish_kernel(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + iter.dtype(), + "mish_cuda", + [&]() { + gpu_kernel( + iter, + [] GPU_LAMBDA(scalar_t x) -> scalar_t { + using T_ACC = acc_type; + const T_ACC x_acc = static_cast(x); + return x_acc * c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc))); + }); + }); +} + +void mish_backward_kernel(TensorIterator& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + iter.dtype(), + "mish_backward_cuda", + [&]() { + gpu_kernel( + iter, + [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t { + using T_ACC = acc_type; + const T_ACC dy_acc = static_cast(dy); + const T_ACC x_acc = static_cast(x); + const T_ACC s_acc = + T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc)); + const T_ACC t_acc = + c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc))); + return dy_acc * (t_acc + x_acc * s_acc * (T_ACC(1) - t_acc * t_acc)); + }); + }); +} + +} // namespace + +Tensor gelu_cuda(const Tensor& self) { + Tensor Y = at::native::empty_like( + self, + c10::nullopt /* dtype */, + c10::nullopt /* layout */, + c10::nullopt /* device */, + c10::nullopt /* pin_memory */, + LEGACY_CONTIGUOUS_MEMORY_FORMAT); + auto it = TensorIterator::unary_op(Y, self); + GeluCUDAKernelImpl(it); + return Y; +} + +Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) { + Tensor dX = at::native::empty_like( + self, + c10::nullopt /* dtype */, + c10::nullopt /* layout */, + c10::nullopt /* device */, + c10::nullopt /* pin_memory */, + LEGACY_CONTIGUOUS_MEMORY_FORMAT); + auto it = TensorIterator::borrowing_binary_op(dX, grad, self); + GeluBackwardCUDAKernelImpl(it); + return dX; +} + +REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel); +REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel); +REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel); 
+REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel); +REGISTER_DISPATCH(elu_stub, &elu_kernel); +REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel); +REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel); +REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel); +REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel); +REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel); +REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel); +REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel); +REGISTER_DISPATCH(softplus_stub, &softplus_kernel); +REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel); +REGISTER_DISPATCH(silu_stub, &silu_kernel); +REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel); +REGISTER_DISPATCH(mish_stub, &mish_kernel); +REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel); +REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda); + +} // namespace native +} // namespace at diff --git a/cuda_code/AimetOpUtilsGpu_4.cu b/cuda_code/AimetOpUtilsGpu_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..930eec94d7f48d69427d198605136e7fbbd60b4b --- /dev/null +++ b/cuda_code/AimetOpUtilsGpu_4.cu @@ -0,0 +1,76 @@ +//============================================================================== +// +// @@-COPYRIGHT-START-@@ +// +// Copyright (c) 2020, Qualcomm Innovation Center, Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// SPDX-License-Identifier: BSD-3-Clause +// +// @@-COPYRIGHT-END-@@ +// +//============================================================================== + +#ifdef GOOGLE_CUDA + +#define EIGEN_USE_GPU +#define EIGEN_USE_THREADS + +#include "AimetOpUtils.h" + +using namespace tensorflow; + +#define EIGEN_USE_GPU +typedef Eigen::GpuDevice GPUDevice; + + +// GPU specialization of actual computations. 
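+// The helpers below use plain cudaMemcpy: copyInputTensorsToOutputTensors copies
+// count * sizeof(float) bytes device-to-device (which matches the intended size only
+// when sizeof(T) == sizeof(float), as in the float instantiation provided further down),
+// while copyLiteralToHost copies a single T value from device memory back to the host.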
+template +void copyInputTensorsToOutputTensors(const GPUDevice& d, const T* inTensor, size_t count, T* outTensor) +{ + // copy input_tensor to output_tensor + cudaMemcpy(outTensor, inTensor, count * sizeof(float), cudaMemcpyDeviceToDevice); +} + +template +T copyLiteralToHost(const GPUDevice& d, const T* deviceValue) +{ + T hostValue; + cudaMemcpy(&hostValue, deviceValue, sizeof(T), cudaMemcpyDeviceToHost); + + return hostValue; +} + +template void copyInputTensorsToOutputTensors(const GPUDevice& d, const float* inTensor, size_t count, float* outTensor); +template int8 copyLiteralToHost(const GPUDevice&, const int8* deviceValue); +template int32 copyLiteralToHost(const GPUDevice&, const int32* deviceValue); +template uint64 copyLiteralToHost(const GPUDevice&, const uint64* deviceValue); +template double copyLiteralToHost(const GPUDevice&, const double* deviceValue); +template bool copyLiteralToHost(const GPUDevice&, const bool* deviceValue); + +#endif // GOOGLE_CUDA \ No newline at end of file diff --git a/cuda_code/ArrayManip_1.cu b/cuda_code/ArrayManip_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..44086ac547c1d32a6c8dd45af59574eaa1edf8b9 --- /dev/null +++ b/cuda_code/ArrayManip_1.cu @@ -0,0 +1,70 @@ +#include "AbstractAPI.h" +#include "interfaces/cuda/Internals.h" +#include +#include + +namespace device { +template __global__ void kernel_scaleArray(T *array, const T scalar, const size_t numElements) { + size_t index = threadIdx.x + blockIdx.x * blockDim.x; + if (index < numElements) { + array[index] *= scalar; + } +} + +template void Algorithms::scaleArray(T *devArray, + T scalar, + const size_t numElements, + void* streamPtr) { + dim3 block(64, 1, 1); + dim3 grid = internals::computeGrid1D(block, numElements); + auto stream = reinterpret_cast(streamPtr); + kernel_scaleArray<<>>(devArray, scalar, numElements); + CHECK_ERR; +} +template void Algorithms::scaleArray(real *devArray, real scalar, const size_t numElements, void* streamPtr); +template void Algorithms::scaleArray(int *devArray, int scalar, const size_t numElements, void* streamPtr); +template void Algorithms::scaleArray(char *devArray, char scalar, const size_t numElements, void* streamPtr); + +//-------------------------------------------------------------------------------------------------- +template __global__ void kernel_fillArray(T *array, T scalar, const size_t numElements) { + size_t index = threadIdx.x + blockIdx.x * blockDim.x; + if (index < numElements) { + array[index] = scalar; + } +} + +template void Algorithms::fillArray(T *devArray, const T scalar, const size_t numElements, void* streamPtr) { + dim3 block(64, 1, 1); + dim3 grid = internals::computeGrid1D(block, numElements); + auto stream = reinterpret_cast(streamPtr); + kernel_fillArray<<>>(devArray, scalar, numElements); + CHECK_ERR; +} +template void Algorithms::fillArray(real *devArray, real scalar, const size_t numElements, void* streamPtr); +template void Algorithms::fillArray(int *devArray, int scalar, const size_t numElements, void* streamPtr); +template void Algorithms::fillArray(char *devArray, char scalar, const size_t numElements, void* streamPtr); + +//-------------------------------------------------------------------------------------------------- +__global__ void kernel_touchMemory(real *ptr, size_t size, bool clean) { + int id = threadIdx.x + blockIdx.x * blockDim.x; + if (id < size) { + if (clean) { + ptr[id] = 0; + } else { + real value = ptr[id]; + // Do something dummy here. 
We just need to check the pointers point to valid memory locations. + // Avoid compiler optimization. Possibly, implement a dummy code with asm. + value += 1; + value -= 1; + } + } +} + +void Algorithms::touchMemory(real *ptr, size_t size, bool clean, void* streamPtr) { + dim3 block(256, 1, 1); + dim3 grid = internals::computeGrid1D(block, size); + auto stream = reinterpret_cast(streamPtr); + kernel_touchMemory<<>>(ptr, size, clean); + CHECK_ERR; +} +} // namespace device diff --git a/cuda_code/BE_L1D_HIT.cu b/cuda_code/BE_L1D_HIT.cu new file mode 100644 index 0000000000000000000000000000000000000000..f37c23b29118f1422d46cd6ecafa82eb15efb87d --- /dev/null +++ b/cuda_code/BE_L1D_HIT.cu @@ -0,0 +1,179 @@ +#include +#include +#include +#include +// Includes +#include +#include "../include/ContAcq-IntClk.h" + +// includes, project +#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples +//#include +//#include + +// includes CUDA +#include + +#define THREADS_PER_BLOCK 256 +#define NUM_OF_BLOCKS 60 +#define ITERATIONS REPLACE_ITERATIONS + +#define LINE_SIZE 128 +#define SETS 64 +#define ASSOC 6 +#define SIMD_WIDTH 32 + +// Variables +int* h_A; +int* h_B; +int* h_C; +int* d_A; +int* d_B; +int* d_C; +bool noprompt = false; +unsigned int my_timer; + +// Functions +void CleanupResources(void); +void RandomInit(int*, int); +void ParseArguments(int, char**); + +//////////////////////////////////////////////////////////////////////////////// +// These are CUDA Helper functions + +// This will output the proper CUDA error strings in the event that a CUDA host call returns an error +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + +inline void __checkCudaErrors(cudaError err, const char *file, const int line ){ + if(cudaSuccess != err){ + fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// This will output the proper error string when calling cudaGetLastError +#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) + +inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ + cudaError_t err = cudaGetLastError(); + if (cudaSuccess != err){ + fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// end of CUDA Helper Functions + + + + +// Device code +__global__ void PowerKernal(int* A, int* C, int N){ + int tid = blockDim.x * blockIdx.x + threadIdx.x; + //Do Some Computation + + int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int); + unsigned j=0, k=0; + int m_sum=0; + // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs + for(k=0; k>>(d_A, d_B, d_C, N); + dim3 dimGrid(NUM_OF_BLOCKS,1); + dim3 dimBlock(THREADS_PER_BLOCK,1); + + CUT_SAFE_CALL(cutCreateTimer(&my_timer)); + TaskHandle taskhandle = LaunchDAQ(); + CUT_SAFE_CALL(cutStartTimer(my_timer)); + + PowerKernal<<>>(d_A, d_C, N); + + CUDA_SAFE_CALL( cudaThreadSynchronize() ); + printf("execution time = %f\n", cutGetTimerValue(my_timer)); + TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); + CUT_SAFE_CALL(cutStopTimer(my_timer)); + CUT_SAFE_CALL(cutDeleteTimer(my_timer)); + + getLastCudaError("kernel launch failure"); + + #ifdef _DEBUG + checkCudaErrors( cudaDeviceSynchronize() ); + #endif + + // Copy result from device memory to host memory + // h_C contains the result in host memory + checkCudaErrors( cudaMemcpy(h_C, d_C, size, 
cudaMemcpyDeviceToHost) ); + + CleanupResources(); + + return 0; +} + +void CleanupResources(void){ + // Free device memory + if (d_A) + cudaFree(d_A); + //if (d_B) +// cudaFree(d_B); + if (d_C) + cudaFree(d_C); + + // Free host memory + if (h_A) + free(h_A); + // if (h_B) +// free(h_B); + if (h_C) + free(h_C); + +} + +// Allocates an array with random float entries. +void RandomInit(int* data, int n){ + for (int i = 0; i < n; ++i) + data[i] = (int)(rand() / RAND_MAX); +} + + + + + + diff --git a/cuda_code/BE_L1D_MISS_L2D_HIT_13.cu b/cuda_code/BE_L1D_MISS_L2D_HIT_13.cu new file mode 100644 index 0000000000000000000000000000000000000000..536757f910fe6a5591e832bbe90a5ed8c20303de --- /dev/null +++ b/cuda_code/BE_L1D_MISS_L2D_HIT_13.cu @@ -0,0 +1,189 @@ +#include +#include +#include +#include +// Includes +#include +#include "../include/ContAcq-IntClk.h" +//#include "REPEATL.h" +#include "../include/REPEATW.h" +// includes, project +#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples +//#include +//#include + +// includes CUDA +#include + +#define THREADS_PER_BLOCK 256 +#define NUM_OF_BLOCKS 60 +#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS +#define LINE_SIZE 32 +#define SETS 64 +#define ASSOC 6 +#define SIMD_WIDTH 32 +#define ITERATIONS REPLACE_ITERATIONS +// Variables +int* h_A; +int* h_B; +int* h_C; +int* d_A; +int* d_B; +int* d_C; +bool noprompt = false; +unsigned int my_timer; + +// Functions +void CleanupResources(void); +void RandomInit(int*, int); +void ParseArguments(int, char**); + +//////////////////////////////////////////////////////////////////////////////// +// These are CUDA Helper functions + +// This will output the proper CUDA error strings in the event that a CUDA host call returns an error +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + +inline void __checkCudaErrors(cudaError err, const char *file, const int line ){ + if(cudaSuccess != err){ + fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// This will output the proper error string when calling cudaGetLastError +#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) + +inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ + cudaError_t err = cudaGetLastError(); + if (cudaSuccess != err){ + fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// end of CUDA Helper Functions + + + + +// Device code +__global__ void PowerKernal(int* A, int* C, int N){ + int tid = blockDim.x * blockIdx.x + threadIdx.x; + //Do Some Computation + + int size = (400*max_tid*LINE_SIZE)/sizeof(int); + unsigned j=0, k=0; + + int sum=0; + + // Fill the L1 cache, Miss on every iteration + for (int i=0; i>>(d_A, d_B, d_C, N); + dim3 dimGrid(NUM_OF_BLOCKS,1); + dim3 dimBlock(THREADS_PER_BLOCK,1); + CUT_SAFE_CALL(cutCreateTimer(&my_timer)); + TaskHandle taskhandle = LaunchDAQ(); + CUT_SAFE_CALL(cutStartTimer(my_timer)); + PowerKernal<<>>(d_A, d_C, N); + CUDA_SAFE_CALL( cudaThreadSynchronize() ); + printf("execution time = %f\n", cutGetTimerValue(my_timer)); + TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); + CUT_SAFE_CALL(cutStopTimer(my_timer)); + CUT_SAFE_CALL(cutDeleteTimer(my_timer)); + + getLastCudaError("kernel launch failure"); + + #ifdef _DEBUG + checkCudaErrors( cudaDeviceSynchronize() ); + #endif + + // Copy result from 
device memory to host memory + // h_C contains the result in host memory + checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); + + CleanupResources(); + + return 0; +} + +void CleanupResources(void){ + // Free device memory + if (d_A) + cudaFree(d_A); + //if (d_B) +// cudaFree(d_B); + if (d_C) + cudaFree(d_C); + + // Free host memory + if (h_A) + free(h_A); + // if (h_B) +// free(h_B); + if (h_C) + free(h_C); + +} + +// Allocates an array with random float entries. +void RandomInit(int* data, int n){ + for (int i = 0; i < n; ++i) + data[i] = (int)(rand() / RAND_MAX); +} + + + + + + diff --git a/cuda_code/BE_L1D_MISS_L2D_HIT_19.cu b/cuda_code/BE_L1D_MISS_L2D_HIT_19.cu new file mode 100644 index 0000000000000000000000000000000000000000..2c2bb52b04086cb3f1c996c44abc7a323e474716 --- /dev/null +++ b/cuda_code/BE_L1D_MISS_L2D_HIT_19.cu @@ -0,0 +1,171 @@ +// Includes +#include +#include + + +// includes CUDA +#include + +// includes, project +#include "../include/REPEATW.h" + + +#define THREADS_PER_BLOCK 256 +#define NUM_OF_BLOCKS 640 +#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS +#define LINE_SIZE 12 +// Variables +int* h_A; +int* h_B; +int* h_C; +int* d_A; +int* d_B; +int* d_C; +bool noprompt = false; +unsigned int my_timer; + +// Functions +void CleanupResources(void); +void RandomInit(int*, int); + +//////////////////////////////////////////////////////////////////////////////// +// These are CUDA Helper functions + +// This will output the proper CUDA error strings in the event that a CUDA host call returns an error +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + +inline void __checkCudaErrors(cudaError err, const char *file, const int line ){ + if(cudaSuccess != err){ + fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// This will output the proper error string when calling cudaGetLastError +#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) + +inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ + cudaError_t err = cudaGetLastError(); + if (cudaSuccess != err){ + fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// end of CUDA Helper Functions + + + + +// Device code +__global__ void PowerKernal(int* A, int* C, int iterations){ + int tid = blockDim.x * blockIdx.x + threadIdx.x; + //Do Some Computation + + int sum=0; + + // Fill the L1 cache, Miss on every iteration + for (int i=0; i>>(d_A, d_C, iterations); + checkCudaErrors(cudaEventRecord(stop)); + + checkCudaErrors(cudaEventSynchronize(stop)); + checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); + printf("gpu execution time = %.2f s\n", elapsedTime/1000); + + getLastCudaError("kernel launch failure"); + cudaThreadSynchronize(); + + // Copy result from device memory to host memory + // h_C contains the result in host memory + checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); + + checkCudaErrors(cudaEventDestroy(start)); + checkCudaErrors(cudaEventDestroy(stop)); + CleanupResources(); + + return 0; +} + +void CleanupResources(void){ + // Free device memory + if (d_A) + cudaFree(d_A); + //if (d_B) +// cudaFree(d_B); + if (d_C) + cudaFree(d_C); + + // Free host memory + if (h_A) + free(h_A); + // if (h_B) +// free(h_B); + if (h_C) + free(h_C); +} + +// Allocates an array with random float entries. 
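// NOTE on RandomInit() below (annotation, not part of the original patch):
// rand() and RAND_MAX are both ints, so rand() / RAND_MAX truncates to 0 for
// every draw except the rare case rand() == RAND_MAX, leaving the array
// essentially all zeros (and the entries are ints, not floats, despite the
// comment above). A minimal sketch of a non-degenerate initializer; the name
// RandomInitBounded is hypothetical and the helper is purely illustrative:
static void RandomInitBounded(int* data, int n) {
    for (int i = 0; i < n; ++i)
        data[i] = rand() % 1024;   // bounded random integers in [0, 1023]
}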
+void RandomInit(int* data, int n){ + for (int i = 0; i < n; ++i) + data[i] = (int)(rand() / RAND_MAX); +} \ No newline at end of file diff --git a/cuda_code/BE_MEM_SHRD_Acss.cu b/cuda_code/BE_MEM_SHRD_Acss.cu new file mode 100644 index 0000000000000000000000000000000000000000..60da8970e466836c56c4356135e9f725e3193974 --- /dev/null +++ b/cuda_code/BE_MEM_SHRD_Acss.cu @@ -0,0 +1,174 @@ +#include +#include +#include +// Includes +#include +#include "../include/ContAcq-IntClk.h" + +// includes, project +#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples +//#include +//#include + +// includes CUDA +#include + +#define THREADS_PER_BLOCK 256 +#define NUM_OF_BLOCKS 60 +#define ITERATIONS REPLACE_ITERATIONS + +// Variables +unsigned* h_C1; +float* h_C2; +unsigned* d_C1; +float* d_C2; +bool noprompt = false; +unsigned int my_timer; + +// Functions +void CleanupResources(void); +void RandomInit(unsigned*, int); +void ParseArguments(int, char**); + +//////////////////////////////////////////////////////////////////////////////// +// These are CUDA Helper functions + +// This will output the proper CUDA error strings in the event that a CUDA host call returns an error +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + +inline void __checkCudaErrors(cudaError err, const char *file, const int line ) +{ + if(cudaSuccess != err){ + fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// This will output the proper error string when calling cudaGetLastError +#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) + +inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) +{ + cudaError_t err = cudaGetLastError(); + if (cudaSuccess != err){ + fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// end of CUDA Helper Functions + + + + +// Device code +__global__ void PowerKernal(unsigned* C1, float* C2, int N) +{ + int i = threadIdx.x; + //Do Some Computation + __device__ __shared__ unsigned I1[THREADS_PER_BLOCK]; + __device__ __shared__ unsigned I2[THREADS_PER_BLOCK]; + __device__ __shared__ float I3[THREADS_PER_BLOCK]; + __device__ __shared__ float I4[THREADS_PER_BLOCK]; + + I1[i]=i*2; + I2[i]=i; + I3[i]=i/2; + I4[i]=i; + + __syncthreads(); + + for(unsigned k=0; k>>(d_A, d_B, d_C, N); + dim3 dimGrid(NUM_OF_BLOCKS,1); + dim3 dimBlock(THREADS_PER_BLOCK,1); + + CUT_SAFE_CALL(cutCreateTimer(&my_timer)); + TaskHandle taskhandle = LaunchDAQ(); + CUT_SAFE_CALL(cutStartTimer(my_timer)); + + PowerKernal<<>>(d_C1, d_C2, N); + + CUDA_SAFE_CALL( cudaThreadSynchronize() ); + printf("execution time = %f\n", cutGetTimerValue(my_timer)); + TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); + CUT_SAFE_CALL(cutStopTimer(my_timer)); + CUT_SAFE_CALL(cutDeleteTimer(my_timer)); + + getLastCudaError("kernel launch failure"); + +#ifdef _DEBUG + checkCudaErrors( cudaDeviceSynchronize() ); +#endif + + // Copy result from device memory to host memory + // h_C contains the result in host memory + checkCudaErrors( cudaMemcpy(h_C1, d_C1, size1, cudaMemcpyDeviceToHost) ); + checkCudaErrors( cudaMemcpy(h_C2, d_C2, size2, cudaMemcpyDeviceToHost) ); + + CleanupResources(); + + return 0; +} + +void CleanupResources(void) +{ + // Free device memory + if (d_C1) + cudaFree(d_C1); + if (d_C2) + cudaFree(d_C2); + + + // Free host memory + 
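  // (annotation, not part of the original patch) The two statements below look
  // like a copy-paste slip: they call cudaFree(d_C2) a second time instead of
  // releasing the host buffer, so h_C2 is leaked and d_C2 is freed twice.
  // The intended code is presumably:
  //   if (h_C2)
  //     free(h_C2);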
if (h_C1) + free(h_C1); + if (d_C2) + cudaFree(d_C2); + +} + +// Allocates an array with random float entries. +void RandomInit(unsigned* data, int n) +{ + for (int i = 0; i < n; ++i){ + srand((unsigned)time(0)); + data[i] = rand() / RAND_MAX; + } +} + + + + + + diff --git a/cuda_code/BE_SP_FP_DIV_1.cu b/cuda_code/BE_SP_FP_DIV_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..1e2f647f23646ab72191b213af8967cb2b652bc2 --- /dev/null +++ b/cuda_code/BE_SP_FP_DIV_1.cu @@ -0,0 +1,220 @@ +#include +#include +//#include +// Includes +//#include + +// includes, project +//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples +//#include +//#include + +// includes CUDA +#include + +#define THREADS_PER_BLOCK 256 +#define NUM_OF_BLOCKS 640 +//#define ITERATIONS 40 +//#include "../include/ContAcq-IntClk.h" + +// Variables +float* h_A; +float* h_B; +float* h_C; +float* d_A; +float* d_B; +float* d_C; +//bool noprompt = false; +//unsigned int my_timer; + +// Functions +void CleanupResources(void); +void RandomInit(float*, int); +//void ParseArguments(int, char**); + +//////////////////////////////////////////////////////////////////////////////// +// These are CUDA Helper functions + +// This will output the proper CUDA error strings in the event that a CUDA host call returns an error +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + +inline void __checkCudaErrors(cudaError err, const char *file, const int line ) +{ + if(cudaSuccess != err){ + fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// This will output the proper error string when calling cudaGetLastError +#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) + +inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) +{ + cudaError_t err = cudaGetLastError(); + if (cudaSuccess != err){ + fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// end of CUDA Helper Functions + + + + +__global__ void PowerKernal3(const float* A, const float* B, float* C, int N) +{ + int i = blockDim.x * blockIdx.x + threadIdx.x; + //Do Some Computation + float Value1; + float Value2 = 999999; + float Value3; + float Value; + float I1=A[i]; + float I2=B[i]; + + + __syncthreads(); + #pragma unroll 100 + // Excessive Division Operations + for(unsigned k=0; k>>(d_A, d_B, d_C, N); + dim3 dimGrid(NUM_OF_BLOCKS,1); + dim3 dimBlock(THREADS_PER_BLOCK,1); + dim3 dimGrid2(1,1); + dim3 dimBlock2(1,1); + + checkCudaErrors(cudaEventRecord(start)); + PowerKernal3<<>>(d_A, d_B, d_C, iterations); + checkCudaErrors(cudaEventRecord(stop)); + + checkCudaErrors(cudaEventSynchronize(stop)); + checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); + printf("execution time = %.2f s\n", elapsedTime/1000); + getLastCudaError("kernel launch failure"); + cudaThreadSynchronize(); + +/*CUT_SAFE_CALL(cutCreateTimer(&my_timer)); +TaskHandle taskhandle = LaunchDAQ(); +CUT_SAFE_CALL(cutStartTimer(my_timer)); +printf("execution time = %f\n", cutGetTimerValue(my_timer)); + + + +PowerKernal3<<>>(d_A, d_B, d_C, N); +CUDA_SAFE_CALL( cudaThreadSynchronize() ); +printf("execution time = %f\n", cutGetTimerValue(my_timer)); + + +getLastCudaError("kernel launch failure"); +CUDA_SAFE_CALL( cudaThreadSynchronize() ); +CUT_SAFE_CALL(cutStopTimer(my_timer)); 
+TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); +printf("execution time = %f\n", cutGetTimerValue(my_timer)); +CUT_SAFE_CALL(cutDeleteTimer(my_timer)); + +#ifdef _DEBUG + checkCudaErrors( cudaDeviceSynchronize() ); +#endif*/ + + // Copy result from device memory to host memory + // h_C contains the result in host memory + checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); + checkCudaErrors(cudaEventDestroy(start)); + checkCudaErrors(cudaEventDestroy(stop)); + CleanupResources(); + + return 0; +} + +void CleanupResources(void) +{ + // Free device memory + if (d_A) + cudaFree(d_A); + if (d_B) + cudaFree(d_B); + if (d_C) + cudaFree(d_C); + + // Free host memory + if (h_A) + free(h_A); + if (h_B) + free(h_B); + if (h_C) + free(h_C); + +} + +// Allocates an array with random float entries. +void RandomInit(float* data, int n) +{ + for (int i = 0; i < n; ++i){ + data[i] = rand() / RAND_MAX; + } +} + + + + + + diff --git a/cuda_code/BE_SP_INT_ADD_l32_1.cu b/cuda_code/BE_SP_INT_ADD_l32_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..a63b6c7816e85c6794bf18953d49740fe417d1e3 --- /dev/null +++ b/cuda_code/BE_SP_INT_ADD_l32_1.cu @@ -0,0 +1,184 @@ +// Includes +#include +#include +// includes from project + + +// includes from CUDA +#include +//#include + +#define THREADS_PER_BLOCK 256 +#define NUM_OF_BLOCKS 640 + + +// Variables +unsigned* h_A; +unsigned* h_B; +unsigned* h_C; +unsigned* d_A; +unsigned* d_B; +unsigned* d_C; + +// Functions +void CleanupResources(void); +void RandomInit(unsigned*, int); + +//////////////////////////////////////////////////////////////////////////////// +// These are CUDA Helper functions + +// This will output the proper CUDA error strings in the event that a CUDA host call returns an error +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + +inline void __checkCudaErrors(cudaError err, const char *file, const int line ) +{ + if(cudaSuccess != err){ + fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// This will output the proper error string when calling cudaGetLastError +#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) + +inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) +{ + cudaError_t err = cudaGetLastError(); + if (cudaSuccess != err){ + fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} +// end of CUDA Helper Functions + +__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) +{ + int i = blockDim.x * blockIdx.x + threadIdx.x; + //Do Some Computation + unsigned Value1=0; + unsigned Value2=0; + unsigned Value3=0; + unsigned Value=0; + unsigned I1=A[i]; + unsigned I2=B[i]; + + // Excessive INT addition access + if((i%32)<=31){ + #pragma unroll 100 + for(unsigned k=0; k>>(d_A, d_B, d_C, iterations); + checkCudaErrors(cudaEventRecord(stop)); + + checkCudaErrors(cudaEventSynchronize(stop)); + checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); + printf("gpu execution time = %.2f s\n", elapsedTime/1000); + + getLastCudaError("kernel launch failure"); + cudaThreadSynchronize(); + + // Copy result from device memory to host memory + // h_C contains the result in host memory + checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); + + 
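  // (annotation, not part of the original patch) Optional host-side sanity
  // check: a cheap checksum makes it obvious when the copied buffer came back
  // all zeros (e.g. if the kernel launch silently failed). Purely illustrative.
  {
    unsigned checksum = 0;
    for (int i = 0; i < N; ++i)
      checksum ^= h_C[i];
    printf("h_C checksum = %u\n", checksum);
  }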
checkCudaErrors(cudaEventDestroy(start)); + checkCudaErrors(cudaEventDestroy(stop)); + CleanupResources(); + + return 0; +} + +void CleanupResources(void) +{ + // Free device memory + if (d_A) + cudaFree(d_A); + if (d_B) + cudaFree(d_B); + if (d_C) + cudaFree(d_C); + + // Free host memory + if (h_A) + free(h_A); + if (h_B) + free(h_B); + if (h_C) + free(h_C); + +} + +// Allocates an array with random unsigned entries. +void RandomInit(unsigned* data, int n) +{ + for (int i = 0; i < n; ++i){ + srand((unsigned)time(0)); + data[i] = rand() / RAND_MAX; + } +} \ No newline at end of file diff --git a/cuda_code/BE_SP_INT_MUL_3.cu b/cuda_code/BE_SP_INT_MUL_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..67e8dfc17a8230a902b99c263e4eb4b09c5d393f --- /dev/null +++ b/cuda_code/BE_SP_INT_MUL_3.cu @@ -0,0 +1,225 @@ +#include +#include +//#include +// Includes +//#include +//#include "../include/ContAcq-IntClk.h" + +// includes, project +//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples +//#include +//#include + +// includes CUDA +#include + +#define THREADS_PER_BLOCK 256 +#define NUM_OF_BLOCKS 640 +//#define ITERATIONS 40 + +// Variables +unsigned* h_A; +unsigned* h_B; +unsigned* h_C; +unsigned* d_A; +unsigned* d_B; +unsigned* d_C; +//bool noprompt = false; +//unsigned int my_timer; + +// Functions +void CleanupResources(void); +void RandomInit(unsigned*, int); +//void ParseArguments(int, char**); + +//////////////////////////////////////////////////////////////////////////////// +// These are CUDA Helper functions + +// This will output the proper CUDA error strings in the event that a CUDA host call returns an error +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + +inline void __checkCudaErrors(cudaError err, const char *file, const int line ) +{ + if(cudaSuccess != err){ + fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// This will output the proper error string when calling cudaGetLastError +#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) + +inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) +{ + cudaError_t err = cudaGetLastError(); + if (cudaSuccess != err){ + fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// end of CUDA Helper Functions + + + + +// Device code + + +__global__ void PowerKernal3(const unsigned* A, const unsigned* B, unsigned* C, int N) +{ + int i = blockDim.x * blockIdx.x + threadIdx.x; + //Do Some Computation + unsigned Value1; + unsigned Value2 = 999999; + unsigned Value3; + unsigned Value; + unsigned I1=A[i]; + unsigned I2=B[i]; + + +#pragma unroll 100 + // Excessive Multiplication + for(unsigned k=0; k>>(d_A, d_B, d_C, N); + dim3 dimGrid(NUM_OF_BLOCKS,1); + dim3 dimBlock(THREADS_PER_BLOCK,1); + dim3 dimGrid2(1,1); + dim3 dimBlock2(1,1); + + checkCudaErrors(cudaEventRecord(start)); + PowerKernal3<<>>(d_A, d_B, d_C, iterations); + checkCudaErrors(cudaEventRecord(stop)); + + checkCudaErrors(cudaEventSynchronize(stop)); + checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); + printf("execution time = %.2f s\n", elapsedTime/1000); + getLastCudaError("kernel launch failure"); + cudaThreadSynchronize(); + + /*CUT_SAFE_CALL(cutCreateTimer(&my_timer)); + TaskHandle taskhandle = LaunchDAQ(); + 
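(annotation, not part of the original patch: this surrounding commented-out
block is the older cutil/DAQ timing path; the active code above already times
the kernel with cudaEvent_t, so the block appears to be retained for reference
only.)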
CUT_SAFE_CALL(cutStartTimer(my_timer)); + printf("execution time = %f\n", cutGetTimerValue(my_timer)); + + + +PowerKernal3<<>>(d_A, d_B, d_C, N); +CUDA_SAFE_CALL( cudaThreadSynchronize() ); +printf("execution time = %f\n", cutGetTimerValue(my_timer)); + + +getLastCudaError("kernel launch failure"); +CUDA_SAFE_CALL( cudaThreadSynchronize() ); +CUT_SAFE_CALL(cutStopTimer(my_timer)); +TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); +printf("execution time = %f\n", cutGetTimerValue(my_timer)); +CUT_SAFE_CALL(cutDeleteTimer(my_timer)); + +#ifdef _DEBUG + checkCudaErrors( cudaDeviceSynchronize() ); +#endif*/ + + // Copy result from device memory to host memory + // h_C contains the result in host memory + checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); + checkCudaErrors(cudaEventDestroy(start)); + checkCudaErrors(cudaEventDestroy(stop)); + CleanupResources(); + + return 0; +} + +void CleanupResources(void) +{ + // Free device memory + if (d_A) + cudaFree(d_A); + if (d_B) + cudaFree(d_B); + if (d_C) + cudaFree(d_C); + + // Free host memory + if (h_A) + free(h_A); + if (h_B) + free(h_B); + if (h_C) + free(h_C); + +} + +// Allocates an array with random float entries. +void RandomInit(unsigned* data, int n) +{ + for (int i = 0; i < n; ++i){ + srand((unsigned)time(0)); + data[i] = rand() / RAND_MAX; + } +} + + + + + + diff --git a/cuda_code/BK5_1.cu b/cuda_code/BK5_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..0bd90d8969d868e9372a1833d0d70fe69acb210f --- /dev/null +++ b/cuda_code/BK5_1.cu @@ -0,0 +1,812 @@ +/* + +See LICENSE file. + +*/ + +#include +#include +#include +#include +#include "meshBasis.hpp" + +void matrixPrint(int Nrows, int Ncols, dfloat *A, const char *mess){ +#if 0 + printf("%s = [\n", mess); + for(int i=0;i + __forceinline__ __device__ + void BK5Device(const int numElements, + const int element, + const dfloat lambda, + const dfloat * __restrict__ op, + const dfloat * __restrict__ DofToDofD, + const dfloat * __restrict__ oddDofToDofD, + const dfloat * __restrict__ evenDofToDofD, + dfloat * __restrict__ r_p, + dfloat * __restrict__ r_Ap){ + + __shared__ dfloat s_p[p_Nblock][NUM_DOFS_1D][NUM_DOFS_1D]; + __shared__ dfloat s_Gpr[p_Nblock][NUM_DOFS_1D][NUM_DOFS_1D]; + __shared__ dfloat s_Gps[p_Nblock][NUM_DOFS_1D][NUM_DOFS_1D]; + + // assumes NUM_DOFS_2D threads + int t = threadIdx.x; + int blk = threadIdx.y; + + int i = t%NUM_DOFS_1D; + int j = t/NUM_DOFS_1D; + + for(int k = 0; k < NUM_DOFS_1D; k++) { + r_Ap[k] = 0.f; // zero the accumulator + } + + // Layer by layer +#pragma unroll + for(int k = 0; k < NUM_DOFS_1D; k++) { + + // share r_p[k] + __syncthreads(); + + s_p[blk][j][i] = r_p[k]; + + __syncthreads(); + + dfloat G00 = 0, G01 =0, G02 =0, G11 =0, G12 =0, G22 =0, GWJ =0; + + // prefetch geometric factors + const int gbase = element*p_Nggeo*NUM_DOFS_3D + ijkN(i,j,k,NUM_DOFS_1D); + + if(element +__global__ void BK5ConstantKernel(const int numElements, + const dfloat lambda, + const dfloat * __restrict__ op, + const dfloat * __restrict__ DofToDofD, + const dfloat * __restrict__ oddDofToDofD, + const dfloat * __restrict__ evenDofToDofD, + const dfloat * __restrict__ solIn, + dfloat * __restrict__ solOut){ + + __shared__ dfloat s_DofToDofD[NUM_DOFS_2D]; + + dfloat r_q[NUM_DOFS_1D]; + dfloat r_Aq[NUM_DOFS_1D]; + + const unsigned int t = threadIdx.x; + const int blk = threadIdx.y; + + const int element = blockIdx.x*p_Nblock + blk; + + const unsigned int a = t%NUM_DOFS_1D; + const unsigned int b = t/NUM_DOFS_1D; + + s_DofToDofD[t] = 
DofToDofD[t]; + + if(element < numElements){ + for(int c=0;c + (numElements, element, lambda, op, s_DofToDofD, const_oddDofToDofD, const_evenDofToDofD, r_q, r_Aq); + + if(element + __forceinline__ __device__ + dfloat BK5CubeDevice(const int numElements, + const int element, + const dfloat lambda, + const dfloat * __restrict__ op, + const dfloat * __restrict__ DofToDofD, + dfloat r_p){ + + __shared__ dfloat s_p[NUM_DOFS_1D][NUM_DOFS_1D][NUM_DOFS_1D]; + + // assumes NUM_DOFS_2D threads + int i = threadIdx.x; + int j = threadIdx.y; + int k = threadIdx.z; + + dfloat r_Ap = 0; // zero the accumulator + + s_p[k][j][i] = r_p; + + __syncthreads(); + + dfloat G00 = 0, G01 =0, G02 =0, G11 =0, G12 =0, G22 =0, GWJ =0; + + // prefetch geometric factors + const int gbase = element*p_Nggeo*NUM_DOFS_3D + ijkN(i,j,k,NUM_DOFS_1D); + + if(element +__global__ void BK5CubeKernel(const int numElements, + const dfloat lambda, + const dfloat * __restrict__ op, + const dfloat * __restrict__ DofToDofD, + const dfloat * __restrict__ solIn, + dfloat * __restrict__ solOut){ + + __shared__ dfloat s_DofToDofD[NUM_DOFS_2D]; + + const int element = blockIdx.x; + + int a = threadIdx.x; + int b = threadIdx.y; + int c = threadIdx.z; + + if(c==0) + s_DofToDofD[b*NUM_DOFS_1D+a] = DofToDofD[b*NUM_DOFS_1D+a]; + + int id = ijklN(a,b,c,element,NUM_DOFS_1D); + + dfloat r_p = solIn[id]; + + __syncthreads(); + + dfloat r_Ap = BK5CubeDevice + (numElements, element, lambda, op, s_DofToDofD, r_p); + + solOut[id] = r_Ap; + +} + + + +double bandwidthTest(cudaStream_t stream, int Ntests, size_t bwNtotal){ + + cudaEvent_t start, end; + cudaEventCreate(&start); + cudaEventCreate(&end); + + dfloat *h_bwTest1, *c_bwTest1; + dfloat *h_bwTest2, *c_bwTest2; + + randAlloc(bwNtotal/2, &h_bwTest1, &c_bwTest1); + randAlloc(bwNtotal/2, &h_bwTest2, &c_bwTest2); + + cudaDeviceSynchronize(); + cudaEventRecord(start, stream); + + for(int test=0;test=(NUM_ROWS_OP/2)){ + cubX[n*NUM_ROWS_OP + NUM_ROWS_OP-1-n] = +1; + cubInvX[n*NUM_ROWS_OP + NUM_ROWS_OP-1-n] = -0.5; + } + } + + for(int n=0;n=NUM_COLS_OP/2){ + X[n*NUM_COLS_OP + NUM_COLS_OP-1-n] = -1; + invX[n*NUM_COLS_OP + NUM_COLS_OP-1-n] = 0.5; + } + } + + if(NUM_COLS_OP%2) X[(NUM_COLS_OP)*(NUM_COLS_OP)/2] = 1; + if(NUM_COLS_OP%2) invX[(NUM_COLS_OP)*(NUM_COLS_OP)/2] = 1; + + if(NUM_ROWS_OP%2) cubX[(NUM_ROWS_OP)*(NUM_ROWS_OP)/2] = 1; + if(NUM_ROWS_OP%2) cubInvX[(NUM_ROWS_OP)*(NUM_ROWS_OP)/2] = 1; + + // if(NUM_COLS_OP%2) invX[(NUM_COLS_OP)*(NUM_COLS_OP)/2] = 1; + // if(NUM_ROWS_OP%2) cubInvX[(NUM_ROWS_OP+1)*(NUM_ROWS_OP+1)/2] = 1; + + dfloat *IinvX = (dfloat*) calloc(NUM_COLS_OP*NUM_ROWS_OP, sizeof(dfloat)); + dfloat *cubInvXIinvX = (dfloat*) calloc(NUM_COLS_OP*NUM_ROWS_OP, sizeof(dfloat)); + + // post multiply by invX + for(int i=0;i [ A[0][0] B[0][0] A[0][1] B[0][1] .. A[0][HALF_DOFS_1D-1] B[0][HALF_DOFS_1D-1] .. 
+ // [ 0 B ] + + dfloat *oddOP = (dfloat*) calloc(NUM_ROWS_OP*HALF_ROWS_OP, sizeof(dfloat)); + dfloat *evenOP = (dfloat*) calloc(NUM_ROWS_OP*HALF_ROWS_OP, sizeof(dfloat)); + + for(int i=0;i <<< G, B, 0, stream >>> \ + (numElements, lambda, c_op, c_DofToDofD, c_oddDofToDofD,c_evenDofToDofD, c_solIn, c_solOut); \ + } \ + else{ \ + dim3 G(numElements,1,1); \ + dim3 B(Nq, Nq, Nq); \ + BK5CubeKernel <<< G, B, 0, stream >>> \ + (numElements, lambda, c_op, c_DofToDofD, c_solIn, c_solOut); \ + } \ +} + + +#define ERR printf("massMatrixMultiplyRegister with Nq=%d not available", Nq); exit(-1) + + if(Nq==2){ + BK5Kernel(2,16); + return; + } + + if(Nq==3){ + BK5Kernel(3,7); + return; + } + + if(Nq==4){ + BK5Kernel(4,4); + return; + } + + if(Nq==5){ + BK5Kernel(5,5); + return; + } + + if(Nq==6){ + BK5Kernel(6,3); + return; + } + + if(Nq==7){ + BK5Kernel(7,2); + return; + } + + if(Nq==8){ + BK5Kernel(8,1); + return; + } + + if(Nq==9){ + BK5Kernel(9,1); + return; + } + + if(Nq==10){ + BK5Kernel(10,1); + return; + } + + if(Nq==11){ + BK5Kernel(11,1); + return; + } + + if(Nq==12){ + BK5Kernel(12,1); + return; + } + + if(Nq==13){ + BK5Kernel(13,1); + return; + } + + ERR; +} + + +dfloat nothingTest(cudaStream_t stream, int Ntests){ + + cudaEvent_t start, end; + cudaEventCreate(&start); + cudaEventCreate(&end); + + cudaDeviceSynchronize(); + + float nothingElapsed = 0; + { + + // time kernel that does nothing + +#if USE_GRAPH==1 + // cuda stream capture sequence for nothingKernel + cudaGraph_t nothingGraph; + + cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal); + + for(int test=0;test>> (); + } + + cudaStreamEndCapture(stream, ¬hingGraph); + + // time graph sequence for nothing + cudaGraphExec_t nothingInstance; + cudaGraphInstantiate(¬hingInstance, nothingGraph, NULL, NULL, 0); + + cudaEventRecord(start, stream); + + cudaGraphLaunch(nothingInstance, stream); + + cudaEventRecord(end, stream); +#else + + cudaEventRecord(start, stream); + + for(int test=0;test>> (); + + cudaEventRecord(end, stream); + +#endif + + cudaDeviceSynchronize(); + + cudaEventElapsedTime(¬hingElapsed, start, end); + nothingElapsed /= 1000.; + nothingElapsed /= (double) Ntests; + + } + + return nothingElapsed; +} + + +int main(int argc, char **argv){ + + cudaStream_t stream; + cudaStreamCreate(&stream); + + if(argc!=4){ + printf("Usage: ./BK5 Nq numElements mode\n"); + exit(-1); + } + + // read number of elements + int Nq = atoi(argv[1]); + int numElements = atoi(argv[2]); + int mode = atoi(argv[3]); + + dfloat lambda = 0; + + printf("Running: NUM_DOFS_1D=%d, numElements=%d\n", Nq, numElements); + + int Np = Nq*Nq*Nq; + int halfNq = ((Nq+1)/2); + + int Ntotal = numElements*Np; + + int Ntests = 10; + + double estimatedActualDeviceBandwidth = bandwidthTest(stream, Ntests, (Ntotal*2+7*Ntotal)*sizeof(dfloat)); + + dfloat *h_op, *c_op; + dfloat *h_solOut, *c_solOut; + dfloat *h_solIn, *c_solIn; + + dfloat *h_DofToDofD, *c_DofToDofD; + dfloat *c_oddDofToDofD, *c_evenDofToDofD; + + // float fields + randAlloc(Ntotal*p_Nggeo, &h_op, &c_op); + + randAlloc(Ntotal, &h_solIn, &c_solIn); + randAlloc(Ntotal, &h_solOut, &c_solOut); + + randAlloc(Nq*Nq, &h_DofToDofD, &c_DofToDofD); + + // give D the correct symmetry + for(int i=0;imaxDiff) ? 
diff:maxDiff; + } + } + printf("|| Mq_{host} - Mq_{device} ||_linf = %lg\n", maxDiff); + + cudaEventDestroy(start); + cudaEventDestroy(end); + + return 0; + +} diff --git a/cuda_code/BatchNormalization_8.cu b/cuda_code/BatchNormalization_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..cbe99f3cac2e7dfc62701c71c2d8ad27e4b1210c --- /dev/null +++ b/cuda_code/BatchNormalization_8.cu @@ -0,0 +1,99 @@ +#ifndef THC_GENERIC_FILE +#define THC_GENERIC_FILE "generic/BatchNormalization.cu" +#else + +#define DeviceTensor3 THCDeviceTensor +#define DeviceTensor1 THCDeviceTensor + +template +static THCDeviceTensor devicetensor(THCState *state, THCTensor *t) { + if (!t) { + return THCDeviceTensor(); + } + + int inDim = THCTensor_(nDimension)(state, t); + if (inDim == Dim) { + return toDeviceTensor(state, t); + } + + // View in which the last dimensions are collapsed or expanded as needed + THAssert(THCTensor_(isContiguous)(state, t)); + int size[Dim]; + for (int i = 0; i < Dim || i < inDim; ++i) { + if (i < Dim && i < inDim) { + size[i] = t->size[i]; + } else if (i < Dim) { + size[i] = 1; + } else { + size[Dim - 1] *= t->size[i]; + } + } + return THCDeviceTensor(THCTensor_(data)(state, t), size); +} + +void THNN_(BatchNormalization_updateOutput)( + THCState *state, THCTensor *input_, THCTensor *output_, + THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_, + THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_, + bool train, double momentum, double eps) { + + THCTensor_(resizeAs)(state, output_, input_); + DeviceTensor3 input = devicetensor<3>(state, input_); + DeviceTensor3 output = devicetensor<3>(state, output_); + DeviceTensor1 weight = devicetensor<1>(state, weight_); + DeviceTensor1 bias = devicetensor<1>(state, bias_); + DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_); + DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_); + DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_); + DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_); + + cudaStream_t s = THCState_getCurrentStream(state); + cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state); + + if (!train) { + dim3 blocks(input.getSize(1)); + dim3 threads(getNumThreads(input.getSize(2))); + BatchNormalizationUpdateOutputInference_kernel <<>>( + input, output, runningMean, runningVar, weight, bias, eps); + } else { + dim3 blocks(input.getSize(1)); + dim3 threads(getNumThreads(input.getSize(2))); + BatchNormalizationUpdateOutput_kernel <<>>( + input, output, weight, bias, eps, momentum, runningMean, runningVar, + saveMean, saveStd); + } + THCudaCheck(cudaGetLastError()); +} + +void THNN_(BatchNormalization_backward)( + THCState *state, THCTensor *input_, THCTensor *gradOutput_, + THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_, + THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_, + THCTensor *saveMean_, THCTensor *saveStd_, bool train, float scale, double eps) { + + THCUNN_check_shape(state, input_, gradOutput_); + DeviceTensor3 input = devicetensor<3>(state, input_); + DeviceTensor3 gradOutput = devicetensor<3>(state, gradOutput_); + DeviceTensor3 gradInput = devicetensor<3>(state, gradInput_); + DeviceTensor1 gradWeight = devicetensor<1>(state, gradWeight_); + DeviceTensor1 gradBias = devicetensor<1>(state, gradBias_); + DeviceTensor1 weight = devicetensor<1>(state, weight_); + DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_); + DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_); + 
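  // (annotation, not part of the original patch) devicetensor<Dim>() above
  // either pads missing trailing dimensions with size 1 or collapses any extra
  // trailing dimensions into the last one, so e.g. a contiguous (N, C, H, W)
  // input is viewed as the 3D tensor (N, C, H*W) that these batch-norm kernels
  // expect.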
DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_); + DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_); + + cudaStream_t s = THCState_getCurrentStream(state); + + dim3 blocks(gradOutput.getSize(1)); + dim3 threads(getNumThreads(gradOutput.getSize(2))); + BatchNormalizationBackward_kernel <<>>( + input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar, + saveMean, saveStd, train, scale, eps); + THCudaCheck(cudaGetLastError()); +} + +#undef DeviceTensor3 +#undef DeviceTensor1 + +#endif diff --git a/cuda_code/BlockSelectFloat_1.cu b/cuda_code/BlockSelectFloat_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..ec35a951fa7c2a1d954dd9c65013d4e895b7a638 --- /dev/null +++ b/cuda_code/BlockSelectFloat_1.cu @@ -0,0 +1,122 @@ +/** + * Copyright (c) 2015-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD+Patents license found in the + * LICENSE file in the root directory of this source tree. + */ + +// Copyright 2004-present Facebook. All Rights Reserved. +#include "blockselect/BlockSelectImpl.cuh" + +namespace faiss { namespace gpu { + +// warp Q to thread Q: +// 1, 1 +// 32, 2 +// 64, 3 +// 128, 3 +// 256, 4 +// 512, 8 +// 1024, 8 + +BLOCK_SELECT_DECL(float, true, 1); +BLOCK_SELECT_DECL(float, true, 32); +BLOCK_SELECT_DECL(float, true, 64); +BLOCK_SELECT_DECL(float, true, 128); +BLOCK_SELECT_DECL(float, true, 256); +BLOCK_SELECT_DECL(float, true, 512); +BLOCK_SELECT_DECL(float, true, 1024); + +BLOCK_SELECT_DECL(float, false, 1); +BLOCK_SELECT_DECL(float, false, 32); +BLOCK_SELECT_DECL(float, false, 64); +BLOCK_SELECT_DECL(float, false, 128); +BLOCK_SELECT_DECL(float, false, 256); +BLOCK_SELECT_DECL(float, false, 512); +BLOCK_SELECT_DECL(float, false, 1024); + +void runBlockSelect(Tensor& in, + Tensor& outK, + Tensor& outV, + bool dir, int k, cudaStream_t stream) { + FAISS_ASSERT(k <= 1024); + + if (dir) { + if (k == 1) { + BLOCK_SELECT_CALL(float, true, 1); + } else if (k <= 32) { + BLOCK_SELECT_CALL(float, true, 32); + } else if (k <= 64) { + BLOCK_SELECT_CALL(float, true, 64); + } else if (k <= 128) { + BLOCK_SELECT_CALL(float, true, 128); + } else if (k <= 256) { + BLOCK_SELECT_CALL(float, true, 256); + } else if (k <= 512) { + BLOCK_SELECT_CALL(float, true, 512); + } else if (k <= 1024) { + BLOCK_SELECT_CALL(float, true, 1024); + } + } else { + if (k == 1) { + BLOCK_SELECT_CALL(float, false, 1); + } else if (k <= 32) { + BLOCK_SELECT_CALL(float, false, 32); + } else if (k <= 64) { + BLOCK_SELECT_CALL(float, false, 64); + } else if (k <= 128) { + BLOCK_SELECT_CALL(float, false, 128); + } else if (k <= 256) { + BLOCK_SELECT_CALL(float, false, 256); + } else if (k <= 512) { + BLOCK_SELECT_CALL(float, false, 512); + } else if (k <= 1024) { + BLOCK_SELECT_CALL(float, false, 1024); + } + } +} + +void runBlockSelectPair(Tensor& inK, + Tensor& inV, + Tensor& outK, + Tensor& outV, + bool dir, int k, cudaStream_t stream) { + FAISS_ASSERT(k <= 1024); + + if (dir) { + if (k == 1) { + BLOCK_SELECT_PAIR_CALL(float, true, 1); + } else if (k <= 32) { + BLOCK_SELECT_PAIR_CALL(float, true, 32); + } else if (k <= 64) { + BLOCK_SELECT_PAIR_CALL(float, true, 64); + } else if (k <= 128) { + BLOCK_SELECT_PAIR_CALL(float, true, 128); + } else if (k <= 256) { + BLOCK_SELECT_PAIR_CALL(float, true, 256); + } else if (k <= 512) { + BLOCK_SELECT_PAIR_CALL(float, true, 512); + } else if (k <= 1024) { + BLOCK_SELECT_PAIR_CALL(float, true, 1024); + } + } else { + if (k == 1) { + BLOCK_SELECT_PAIR_CALL(float, 
false, 1); + } else if (k <= 32) { + BLOCK_SELECT_PAIR_CALL(float, false, 32); + } else if (k <= 64) { + BLOCK_SELECT_PAIR_CALL(float, false, 64); + } else if (k <= 128) { + BLOCK_SELECT_PAIR_CALL(float, false, 128); + } else if (k <= 256) { + BLOCK_SELECT_PAIR_CALL(float, false, 256); + } else if (k <= 512) { + BLOCK_SELECT_PAIR_CALL(float, false, 512); + } else if (k <= 1024) { + BLOCK_SELECT_PAIR_CALL(float, false, 1024); + } + } +} + +} } // namespace diff --git a/cuda_code/BounceBackNVEGPU_6.cu b/cuda_code/BounceBackNVEGPU_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..2ded1e5549a4cd1343e406e36439fd98329f3248 --- /dev/null +++ b/cuda_code/BounceBackNVEGPU_6.cu @@ -0,0 +1,103 @@ +// Copyright (c) 2018-2020, Michael P. Howard +// Copyright (c) 2021, Auburn University +// This file is part of the azplugins project, released under the Modified BSD License. + +/*! + * \file BounceBackNVEGPU.cu + * \brief Template specialization of CUDA kernels for BounceBackNVEGPU geometries. Each instance of the + * nve_bounce_step_one must be templated explicitly for each geometry. + */ + +#include "BounceBackNVEGPU.cuh" +#include "BounceBackGeometry.h" + +namespace azplugins +{ +namespace gpu +{ + +//! Template instantiation of slit geometry streaming +template cudaError_t nve_bounce_step_one + (const bounce_args_t& args, const mpcd::detail::SlitGeometry& geom); + +namespace kernel +{ +//! Kernel for applying second step of velocity Verlet algorithm with bounce back +/*! + * \param d_vel Particle velocities + * \param d_accel Particle accelerations + * \param d_net_force Net force on each particle + * \param d_group Indexes in particle group + * \param dt Timestep + * \param N Number of particles in group + * + * \b Implementation: + * Using one thread per particle, the particle velocities are updated according to the second step of the velocity Verlet + * algorithm. This is the standard update as in MD, and is only reimplemented here in case future modifications are necessary. + */ +__global__ void nve_bounce_step_two(Scalar4 *d_vel, + Scalar3 *d_accel, + const Scalar4 *d_net_force, + const unsigned int *d_group, + const Scalar dt, + const unsigned int N) + { + // one thread per particle + unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= N) + return; + const unsigned int pid = d_group[idx]; + + const Scalar4 net_force = d_net_force[pid]; + Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z); + Scalar4 vel = d_vel[pid]; + accel.x /= vel.w; + accel.y /= vel.w; + accel.z /= vel.w; + + // then, update the velocity + vel.x += Scalar(0.5) * accel.x * dt; + vel.y += Scalar(0.5) * accel.y * dt; + vel.z += Scalar(0.5) * accel.z * dt; + + d_vel[pid] = vel; + d_accel[pid] = accel; + } +} // end namespace kernel + +/*! 
+ * \param d_vel Particle velocities + * \param d_accel Particle accelerations + * \param d_net_force Net force on each particle + * \param d_group Indexes in particle group + * \param dt Timestep + * \param N Number of particles in group + * \param block_size Number of threads per block + * + * \sa kernel::nve_bounce_step_two + */ +cudaError_t nve_bounce_step_two(Scalar4 *d_vel, + Scalar3 *d_accel, + const Scalar4 *d_net_force, + const unsigned int *d_group, + const Scalar dt, + const unsigned int N, + const unsigned int block_size) + { + static unsigned int max_block_size = UINT_MAX; + if (max_block_size == UINT_MAX) + { + cudaFuncAttributes attr; + cudaFuncGetAttributes(&attr, (const void*)kernel::nve_bounce_step_two); + max_block_size = attr.maxThreadsPerBlock; + } + + unsigned int run_block_size = min(block_size, max_block_size); + dim3 grid(N / run_block_size + 1); + kernel::nve_bounce_step_two<<>>(d_vel, d_accel, d_net_force, d_group, dt, N); + + return cudaSuccess; + } + +} // end namespace gpu +} // end namespace azplugins diff --git a/cuda_code/COOtoCSR_2.cu b/cuda_code/COOtoCSR_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..787872742e9824fcc1bfc55d44b330a8328febd3 --- /dev/null +++ b/cuda_code/COOtoCSR_2.cu @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "COOtoCSR.cuh" + +namespace cugraph { + +// Explicit instantiation for uint32_t + float +template std::unique_ptr> coo_to_csr( + GraphCOOView const &graph, rmm::mr::device_memory_resource *); + +// Explicit instantiation for uint32_t + double +template std::unique_ptr> +coo_to_csr(GraphCOOView const &graph, + rmm::mr::device_memory_resource *); + +// Explicit instantiation for int + float +template std::unique_ptr> coo_to_csr( + GraphCOOView const &graph, rmm::mr::device_memory_resource *); + +// Explicit instantiation for int + double +template std::unique_ptr> coo_to_csr( + GraphCOOView const &graph, rmm::mr::device_memory_resource *); + +// Explicit instantiation for int64_t + float +template std::unique_ptr> coo_to_csr( + GraphCOOView const &graph, rmm::mr::device_memory_resource *); + +// Explicit instantiation for int64_t + double +template std::unique_ptr> coo_to_csr( + GraphCOOView const &graph, rmm::mr::device_memory_resource *); + +// in-place versions: +// +// Explicit instantiation for uint32_t + float +template void coo_to_csr_inplace( + GraphCOOView &graph, GraphCSRView &result); + +// Explicit instantiation for uint32_t + double +template void coo_to_csr_inplace( + GraphCOOView &graph, + GraphCSRView &result); + +// Explicit instantiation for int + float +template void coo_to_csr_inplace( + GraphCOOView &graph, GraphCSRView &result); + +// Explicit instantiation for int + double +template void coo_to_csr_inplace( + GraphCOOView &graph, GraphCSRView &result); + +// Explicit instantiation for int64_t + float +template void coo_to_csr_inplace( + GraphCOOView &graph, GraphCSRView &result); + +// Explicit instantiation for int64_t + double +template void coo_to_csr_inplace( + GraphCOOView &graph, GraphCSRView &result); + +} // namespace cugraph diff --git a/cuda_code/CPU_GPU_time.cu b/cuda_code/CPU_GPU_time.cu new file mode 100644 index 0000000000000000000000000000000000000000..f5271adb04d862ac66fced33a043db08441b5653 --- /dev/null +++ b/cuda_code/CPU_GPU_time.cu @@ -0,0 +1,110 @@ +/* + * ===================================================================================== + * + * Filename: testLironPaper.cu + * + * Description: This tries to match computation results with those in + * Liron's paper. + * + * Version: 1.0 + * Created: 04/04/2014 07:58:39 AM + * Revision: none + * Compiler: gcc + * + * Author: Hoang-Ngan Nguyen (), zhoangngan-gmail + * Organization: + * + * ===================================================================================== + */ + +#include "periodicStokes.h" +#include /* srand, rand */ +#include /* time */ +#include +#include +#include +#include +using namespace std; +using namespace Eigen; + +int main( int argc, char *argv[] ) { + MatrixOnHost L(3, 1, 1); + + //open binary file to save GPU and CPU times + //char timeName[80]; + //sprintf(timeName, "CPU_GPU_time", filename); + //ofstream outTime(timeName, ios::binary); + + //if (!outTime) { + //std::cout << "Error: Could not open file \"" << timeName << "\"" + //<< ". 
Error occurs on line " << __LINE__ + //<< " in source file \"" << __FILE__ << "\"" << std::endl;; + //exit(1); + //} + MatrixOnHost timeM(2, 4); + + int maxShell = 4; + double d = 1.0/sqrt(atan(1)*4); + double e = 0; + + clock_t start, end; + cudaEvent_t eStart, eStop; + HANDLE_ERROR( cudaEventCreate( &eStart ) ); + HANDLE_ERROR( cudaEventCreate( &eStop ) ); + + MatrixOnHost newM(2, 1), oldM(2, 1); + float elapsedTime; + size_t size = 10; + size_t maxCol = 10; + for (int i = 0; i < 4; i++) { + cout << "Round " << i << endl; + MatrixOnHost x(3, size), x0(3, maxCol); + x.setRandom(); + x0 = x; + //MatrixOnDevice dx = x, dx0 = x0; + //MatrixOnDevice dA(3*dx.columns(), 3*dx0.columns()); + + // record GPU time + //for (int j = 0; j * maxCol < size; j++) { + //for (int l = 0; l < 3; l++) { + //for (int k = 0; k < maxCol; k++) { + //x0(l, k) = x(l, j * maxCol + k); + //} + //} + //dx0 = x0; + //HANDLE_ERROR( cudaEventRecord( eStart, 0) ); + //imageStokeslet( dA, dx, dx0, d, e, maxShell, maxShell-1, L(0), L(1) ); + //HANDLE_ERROR( cudaEventRecord( eStop , 0) ); + //HANDLE_ERROR( cudaEventSynchronize( eStop ) ); + //HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, eStart, eStop ) ); + //timeM(0, i) = elapsedTime; + //} + + //record CPU time + MatrixOnHost A(3*x.columns(), 3*x.columns()); + MatrixOnHost absA = A, refSol = A; + start = clock(); + newM(0) = newM(1) = oldM(0) = oldM(1) = 0; + realShells ( refSol, absA, x, x, newM, oldM, L, d, e ); + fourierShells(A, absA, x, x, newM, oldM, L, d, e); + refSol = refSol + A; + newM(0) = newM(1) = maxShell; + realShells ( refSol, absA, x, x, newM, oldM, L, d, e ); + newM(0) = newM(1) = maxShell-1; + fourierShells(refSol, absA, x, x, newM, oldM, L, d, e); + end = clock(); + timeM(1, i) = 1000.0 * ((double) (end - start)) / CLOCKS_PER_SEC ; + size *= 8; + cout << "maxShell = " << maxShell << endl; + cout << "time is " << timeM(1, i) << "ms" << endl; + } + timeM.write("CPU_GPU_time"); + HANDLE_ERROR( cudaEventDestroy( eStart ) ); + HANDLE_ERROR( cudaEventDestroy( eStop ) ); + //outTime.close(); + + cout << "Done!!!!!!!!!!!!!!" << endl; + + return EXIT_SUCCESS; +} // ---------- end of function main ---------- + diff --git a/cuda_code/CUAPI_Asyn_PoissonGravitySolver_8.cu b/cuda_code/CUAPI_Asyn_PoissonGravitySolver_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..77a7b8136b45c1d62afe8234708581d76c537906 --- /dev/null +++ b/cuda_code/CUAPI_Asyn_PoissonGravitySolver_8.cu @@ -0,0 +1,473 @@ +#include "CUAPI.h" +#include "CUPOT.h" + +#if ( defined GPU && defined GRAVITY ) + + + +// Poisson solver prototypes +#if ( POT_SCHEME == SOR ) +#ifdef USE_PSOLVER_10TO14 +__global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], + const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], + real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], + const int Min_Iter, const int Max_Iter, const real Omega_6, + const real Const, const IntScheme_t IntScheme ); +#else +__global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], + const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], + real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], + const int Min_Iter, const int Max_Iter, const real Omega_6, + const real Const, const IntScheme_t IntScheme ); +#endif // #ifdef USE_PSOLVER_10TO14 ... else ... 
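// (annotation, not part of the original patch) For orientation only: the
// constants the host wrapper below precomputes for these SOR kernels
// (SOR_Omega_6 = SOR_Omega/6, Poi_Const = Poi_Coeff*dh*dh) correspond to the
// usual over-relaxed update of the 7-point discrete Poisson equation;
// schematically, for an interior cell:
//
//   pot_new = (1 - omega) * pot_old
//           + (omega/6) * ( sum of the 6 face neighbours - Poi_Const * rho )
//
// The exact in-kernel form (red-black ordering, shared-memory tiling, etc.)
// may differ; this is only a sketch of the scheme the constants imply.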
+ +#elif ( POT_SCHEME == MG ) +__global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ], + const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ], + real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ], + const real dh_Min, const int Max_Iter, const int NPre_Smooth, + const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff, + const IntScheme_t IntScheme ); +#endif // POT_SCHEME + + +// Gravity solver prototypes +#if ( MODEL == HYDRO ) +__global__ +void CUPOT_HydroGravitySolver( + real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ], + const real g_Pot_Array_New[][ CUBE(GRA_NXT) ], + const double g_Corner_Array [][3], + const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ], + const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ], + char g_DE_Array [][ CUBE(PS1) ], + const real g_EngyB_Array [][ CUBE(PS1) ], + const real dt, const real dh, const bool P5_Gradient, + const OptGravityType_t GravityType, + const double TimeNew, const double TimeOld, const real MinEint ); + +#elif ( MODEL == ELBDM ) +__global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ], + const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ], + const double g_Corner_Array[][3], + const real EtaDt, const real dh, const real Lambda, const bool ExtPot, + const double TimeNew ); + +#else +#error : ERROR : unsupported MODEL !! +#endif // MODEL + + +// declare all device pointers +extern real (*d_Rho_Array_P )[ CUBE(RHO_NXT) ]; +extern real (*d_Pot_Array_P_In )[ CUBE(POT_NXT) ]; +extern real (*d_Pot_Array_P_Out)[ CUBE(GRA_NXT) ]; +extern real (*d_Flu_Array_G )[GRA_NIN][ CUBE(PS1)]; +extern double (*d_Corner_Array_G)[3]; +#if ( MODEL == HYDRO ) +#ifdef UNSPLIT_GRAVITY +extern real (*d_Pot_Array_USG_G)[ CUBE(USG_NXT_G) ]; +extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ CUBE(PS1) ]; +#else +static real (*d_Pot_Array_USG_G)[ CUBE(USG_NXT_G) ] = NULL; +static real (*d_Flu_Array_USG_G)[GRA_NIN-1][ CUBE(PS1) ] = NULL; +#endif +#ifdef DUAL_ENERGY +extern char (*d_DE_Array_G)[ CUBE(PS1) ]; +#else +static char (*d_DE_Array_G)[ CUBE(PS1) ] = NULL; +#endif +#ifdef MHD +extern real (*d_EngyB_Array_G)[ CUBE(PS1) ]; +#else +static real (*d_EngyB_Array_G)[ CUBE(PS1) ] = NULL; +#endif +#endif // #if ( MODEL == HYDRO ) + +extern cudaStream_t *Stream; + + + + +//------------------------------------------------------------------------------------------------------- +// Function : CUAPI_Asyn_PoissonGravitySolver +// Description : Invoke the CUPOT_PoissonSolver_XXtoXXcube and/or CUPOT_GravitySolver kernel(s) to evaluate +// the gravitational potential and/or advance the fluid variables by the gravitational +// acceleration for a group of patches +// +// *********************************************************** +// ** Asynchronous Function ** +// ** ** +// ** will return before the execution in GPU is complete ** +// *********************************************************** +// +// Note : a. Use streams for the asychronous memory copy between device and host +// b. 
Prefix "d" : for pointers pointing to the "Device" memory space +// Prefix "h" : for pointers pointing to the "Host" memory space +// +// Parameter : h_Rho_Array : Host array storing the input density +// h_Pot_Array_In : Host array storing the input "coarse-grid" potential for interpolation +// h_Pot_Array_Out : Host array to store the output potential +// h_Flu_Array : Host array to store the fluid variables for the Gravity solver +// h_Corner_Array : Host array storing the physical corner coordinates of each patch +// h_Pot_Array_USG : Host array storing the prepared potential for UNSPLIT_GRAVITY +// h_Flu_Array_USG : Host array storing the prepared density + momentum for UNSPLIT_GRAVITY +// h_DE_Array : Host array storing the dual-energy status (for both input and output) +// h_EngyB_Array : Host array storing the cell-centered magnetic energy (MHD only) +// NPatchGroup : Number of patch groups evaluated simultaneously by GPU +// dt : Time interval to advance solution +// dh : Grid size +// SOR_Min_Iter : Minimum # of iterations for SOR +// SOR_Max_Iter : Maximum # of iterations for SOR +// SOR_Omega : Over-relaxation parameter +// MG_Max_Iter : Maximum number of iterations for multigrid +// MG_NPre_Smooth : Number of pre-smoothing steps for multigrid +// MG_NPos_tSmooth : Number of post-smoothing steps for multigrid +// MG_Tolerated_Error : Maximum tolerated error for multigrid +// Poi_Coeff : Coefficient in front of density in the Poisson equation (4*Pi*Newton_G*a) +// IntScheme : Interpolation scheme for potential +// --> currently supported schemes include +// INT_CQUAD : conservative quadratic interpolation +// INT_QUAD : quadratic interpolation +// P5_Gradient : Use 5-points stencil to evaluate the potential gradient +// ELBDM_Eta : Particle mass / Planck constant in ELBDM +// ELBDM_Lambda : Quartic self-interaction coefficient in ELBDM +// Poisson : true --> invoke the Poisson solver +// GraAcc : true --> invoke the Gravity solver +// GPU_NStream : Number of CUDA streams for the asynchronous memory copy +// GravityType : Types of gravity --> self-gravity, external gravity, both +// TimeNew : Physical time at the current step (for the external gravity solver) +// TimeOld : Physical time at the previous step (for the external gravity solver in UNSPLIT_GRAVITY) +// ExtPot : Add the external potential +// MinEint : Minimum allowed internal energy (== MIN_PRES / (GAMMA-1)) +// +// Useless parameters in HYDRO : ELBDM_Eta, ELBDM_Lambda +// Useless parameters in ELBDM : P5_Gradient +//------------------------------------------------------------------------------------------------------- +void CUAPI_Asyn_PoissonGravitySolver( const real h_Rho_Array [][RHO_NXT][RHO_NXT][RHO_NXT], + const real h_Pot_Array_In [][POT_NXT][POT_NXT][POT_NXT], + real h_Pot_Array_Out[][GRA_NXT][GRA_NXT][GRA_NXT], + real h_Flu_Array [][GRA_NIN][PS1][PS1][PS1], + const double h_Corner_Array[][3], + const real h_Pot_Array_USG[][USG_NXT_G][USG_NXT_G][USG_NXT_G], + const real h_Flu_Array_USG[][GRA_NIN-1][PS1][PS1][PS1], + char h_DE_Array [][PS1][PS1][PS1], + const real h_EngyB_Array [][PS1][PS1][PS1], + const int NPatchGroup, const real dt, const real dh, const int SOR_Min_Iter, + const int SOR_Max_Iter, const real SOR_Omega, const int MG_Max_Iter, + const int MG_NPre_Smooth, const int MG_NPost_Smooth, + const real MG_Tolerated_Error, const real Poi_Coeff, + const IntScheme_t IntScheme, const bool P5_Gradient, const real ELBDM_Eta, + const real ELBDM_Lambda, const bool Poisson, const bool GraAcc, const int 
GPU_NStream, + const OptGravityType_t GravityType, const double TimeNew, const double TimeOld, + const bool ExtPot, const real MinEint ) +{ + +// model-independent constants +# if ( POT_SCHEME == SOR ) + const dim3 Poi_Block_Dim( RHO_NXT/2, RHO_NXT, POT_BLOCK_SIZE_Z ); +# elif ( POT_SCHEME == MG ) + const dim3 Poi_Block_Dim( POT_BLOCK_SIZE_X, 1, 1 ); +# endif + const dim3 Gra_Block_Dim( GRA_BLOCK_SIZE ); + const int NPatch = NPatchGroup*8; +# if ( POT_SCHEME == SOR ) + const real Poi_Const = Poi_Coeff*dh*dh; + const real SOR_Omega_6 = SOR_Omega/6.0; +# endif + +// model-dependent constants +# if ( MODEL == HYDRO ) + +# elif ( MODEL == ELBDM ) + const real ELBDM_EtaDt = ELBDM_Eta*dt; + +# else +# error : ERROR : unsupported MODEL !! +# endif + + +// check +# if ( MODEL == ELBDM && !defined STORE_POT_GHOST && GRA_GHOST_SIZE != 0 ) +# warning : WARNING : GRA_GHOST_SIZE != 0 in ELBDM (without STORE_POT_GHOST) !! +# endif + +# ifdef GAMER_DEBUG + const int Poi_NThread = Poi_Block_Dim.x * Poi_Block_Dim.y * Poi_Block_Dim.z; + +// minimum number of threads for spatial interpolation + if ( Poisson && Poi_NThread < (POT_NXT-2)*(POT_NXT-2) ) + Aux_Error( ERROR_INFO, "Poi_NThread (%d) < (POT_NXT-2)*(POT_NXT-2) (%d) !!\n", + Poi_NThread, (POT_NXT-2)*(POT_NXT-2) ); + +// constraint due to the reduction operation in "CUPOT_Poisson_10to14cube" and "CUPOT_PoissonSolver_MG" +# if ( ( POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 ) || POT_SCHEME == MG ) + if ( Poisson && Poi_NThread < 64 ) + Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must >= 64) !!\n", "Poi_NThread", Poi_NThread ); +# endif + +// constraint in "CUPOT_PoissonSolver_SOR_16to18cube" +# if ( POT_SCHEME == SOR && !defined USE_PSOLVER_10TO14 ) + if ( Poisson && Poi_NThread != RHO_NXT*RHO_NXT/2 ) + Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must == %d) !!\n", "Poi_NThread", Poi_NThread, + RHO_NXT*RHO_NXT/2 ); +# endif + + if ( GraAcc ) + { + if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot ) + { + if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" ); + if ( d_Corner_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_G == NULL !!\n" ); + } + +# ifdef UNSPLIT_GRAVITY + if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) + { + if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" ); + if ( d_Pot_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_G == NULL !!\n" ); + } + + if ( h_Flu_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Flu_Array_USG == NULL !!\n" ); + if ( d_Flu_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Flu_Array_USG_G == NULL !!\n" ); +# endif + +# ifdef DUAL_ENERGY + if ( h_DE_Array == NULL ) Aux_Error( ERROR_INFO, "h_DE_Array == NULL !!\n" ); + if ( d_DE_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_DE_Array_G == NULL !!\n" ); +# endif + +# ifdef MHD + if ( h_EngyB_Array == NULL ) Aux_Error( ERROR_INFO, "h_EngyB_Array == NULL !!\n" ); + if ( d_EngyB_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_EngyB_Array_G == NULL !!\n" ); +# endif + } +# endif // #ifdef GAMER_DEBUG + + if ( Poisson && ( IntScheme != INT_CQUAD && IntScheme != INT_QUAD ) ) + Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "IntScheme", IntScheme ); + + + int *NPatch_per_Stream = new int [GPU_NStream]; + int *Rho_MemSize = new int [GPU_NStream]; + int *Pot_MemSize_In = new int [GPU_NStream]; + int *Pot_MemSize_Out = new int [GPU_NStream]; + int *Flu_MemSize = new int [GPU_NStream]; + int *Corner_MemSize = new int 
[GPU_NStream]; + int *UsedPatch = new int [GPU_NStream]; +# ifdef UNSPLIT_GRAVITY + int *Pot_USG_MemSize = new int [GPU_NStream]; + int *Flu_USG_MemSize = new int [GPU_NStream]; +# endif +# ifdef DUAL_ENERGY + int *DE_MemSize = new int [GPU_NStream]; +# endif +# ifdef MHD + int *EngyB_MemSize = new int [GPU_NStream]; +# endif + + +// set the number of patches in each stream + UsedPatch[0] = 0; + + if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatch; + else + { + for (int s=0; s>> + ( d_Rho_Array_P + UsedPatch[s], + d_Pot_Array_P_In + UsedPatch[s], + d_Pot_Array_P_Out + UsedPatch[s], + SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme ); +# else + CUPOT_PoissonSolver_SOR_16to18cube <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>> + ( d_Rho_Array_P + UsedPatch[s], + d_Pot_Array_P_In + UsedPatch[s], + d_Pot_Array_P_Out + UsedPatch[s], + SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme ); +# endif // #ifdef USE_PSOLVER_10TO14 ... else ... + +# elif ( POT_SCHEME == MG ) + + CUPOT_PoissonSolver_MG <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>> + ( d_Rho_Array_P + UsedPatch[s], + d_Pot_Array_P_In + UsedPatch[s], + d_Pot_Array_P_Out + UsedPatch[s], + dh, MG_Max_Iter, MG_NPre_Smooth, MG_NPost_Smooth, MG_Tolerated_Error, + Poi_Coeff, IntScheme ); + +# else + +# error : unsupported GPU Poisson solver + +# endif // POT_SCHEME + } // if ( Poisson ) + + +// b2. Gravity solver + if ( GraAcc ) + { +# if ( MODEL == HYDRO ) + CUPOT_HydroGravitySolver <<< NPatch_per_Stream[s], Gra_Block_Dim, 0, Stream[s] >>> + ( d_Flu_Array_G + UsedPatch[s], + d_Pot_Array_P_Out + UsedPatch[s], + d_Corner_Array_G + UsedPatch[s], + d_Pot_Array_USG_G + UsedPatch[s], + d_Flu_Array_USG_G + UsedPatch[s], + d_DE_Array_G + UsedPatch[s], + d_EngyB_Array_G + UsedPatch[s], + dt, dh, P5_Gradient, GravityType, TimeNew, TimeOld, MinEint ); + +# elif ( MODEL == ELBDM ) + CUPOT_ELBDMGravitySolver <<< NPatch_per_Stream[s], Gra_Block_Dim, 0, Stream[s] >>> + ( d_Flu_Array_G + UsedPatch[s], + d_Pot_Array_P_Out + UsedPatch[s], + d_Corner_Array_G + UsedPatch[s], + ELBDM_EtaDt, dh, ELBDM_Lambda, ExtPot, TimeNew ); + +# else +# error : ERROR : unsupported MODEL !! +# endif // MODEL + } // if ( GraAcc ) + + CUDA_CHECK_ERROR( cudaGetLastError() ); + } // for (int s=0; s 0 ) +# include "CUFLU_Shared_FluUtility.cu" +#endif + +#ifdef DUAL_ENERGY +# include "CUFLU_Shared_DualEnergy.cu" +#endif + +#endif // #ifdef __CUDACC__ + + + + +//------------------------------------------------------------------------------------------------------- +// Function : Hydro_FullStepUpdate +// Description : Evaluate the full-step solution +// +// Note : 1. This function is shared by MHM, MHM_RP, and CTU schemes +// 2. 
Invoke dual-energy check if DualEnergySwitch is on +// +// Parameter : g_Input : Array storing the input fluid data +// g_Output : Array to store the updated fluid data +// g_DE_Status : Array to store the dual-energy status +// g_FC_B : Array storing the updated face-centered B field +// --> For the dual-energy formalism only +// g_Flux : Array storing the input face-centered fluxes +// --> Accessed with the array stride N_FL_FLUX even thought its actually +// allocated size is N_FC_FLUX^3 +// dt : Time interval to advance solution +// dh : Cell size +// MinDens/Eint : Density and internal energy floors +// DualEnergySwitch : Use the dual-energy formalism if E_int/E_kin < DualEnergySwitch +// NormPassive : true --> normalize passive scalars so that the sum of their mass density +// is equal to the gas mass density +// NNorm : Number of passive scalars to be normalized +// --> Should be set to the global variable "PassiveNorm_NVar" +// NormIdx : Target variable indices to be normalized +// --> Should be set to the global variable "PassiveNorm_VarIdx" +// EoS : EoS object +// --> Only for obtaining Gamma used by the dual-energy formalism +//------------------------------------------------------------------------------------------------------- +GPU_DEVICE +void Hydro_FullStepUpdate( const real g_Input[][ CUBE(FLU_NXT) ], real g_Output[][ CUBE(PS2) ], char g_DE_Status[], + const real g_FC_B[][ PS2P1*SQR(PS2) ], const real g_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ], + const real dt, const real dh, const real MinDens, const real MinEint, + const real DualEnergySwitch, const bool NormPassive, const int NNorm, const int NormIdx[], + const EoS_t *EoS ) +{ + + const int didx_flux[3] = { 1, N_FL_FLUX, SQR(N_FL_FLUX) }; + const real dt_dh = dt/dh; + + real dFlux[3][NCOMP_TOTAL], Output_1Cell[NCOMP_TOTAL], Emag; + + + const int size_ij = SQR(PS2); + CGPU_LOOP( idx_out, CUBE(PS2) ) + { + const int i_out = idx_out % PS2; + const int j_out = idx_out % size_ij / PS2; + const int k_out = idx_out / size_ij; + +// for MHD, one additional flux is evaluated along each transverse direction for computing the CT electric field +# ifdef MHD + const int i_flux = i_out + 1; + const int j_flux = j_out + 1; + const int k_flux = k_out + 1; +# else + const int i_flux = i_out; + const int j_flux = j_out; + const int k_flux = k_out; +# endif + const int idx_flux = IDX321( i_flux, j_flux, k_flux, N_FL_FLUX, N_FL_FLUX ); + + const int i_in = i_out + FLU_GHOST_SIZE; + const int j_in = j_out + FLU_GHOST_SIZE; + const int k_in = k_out + FLU_GHOST_SIZE; + const int idx_in = IDX321( i_in, j_in, k_in, FLU_NXT, FLU_NXT ); + + +// 1. 
calculate flux difference to update the fluid data + for (int d=0; d<3; d++) + for (int v=0; v these checks have been moved to Flu_Close()->CorrectUnphysical() +// because we want to apply 1st-order-flux correction BEFORE setting a minimum density and pressure +// --> this consideration holds even when DUAL_ENERGY is adopted (e.g., when density is negative, +// even when DUAL_ENERGY is on, we still want to try the 1st-order-flux correction before setting a floor value) +// --> but for barotropic EoS, we apply Eint floor here to avoid any false alarm caused by Eint<0 +# ifdef BAROTROPIC_EOS +# ifdef MHD + Emag = MHD_GetCellCenteredBEnergy( g_FC_B[MAGX], g_FC_B[MAGY], g_FC_B[MAGZ], + PS2, PS2, PS2, i_out, j_out, k_out ); +# else + Emag = NULL_REAL; +# endif +// Output_1Cell[DENS] = FMAX( Output_1Cell[DENS], MinDens ); + Output_1Cell[ENGY] = Hydro_CheckMinEintInEngy( Output_1Cell[DENS], Output_1Cell[MOMX], + Output_1Cell[MOMY], Output_1Cell[MOMZ], + Output_1Cell[ENGY], MinEint, Emag ); +# endif // #ifdef BAROTROPIC_EOS + + +// 2. floor and normalize passive scalars +# if ( NCOMP_PASSIVE > 0 ) + for (int v=NCOMP_FLUID; v currently, even when UNSPLIT_GRAVITY is on (which would update the internal energy), we still invoke +// Hydro_DualEnergyFix() here and will fix the internal energy in the gravity solver for cells updated +// by the dual-energy formalism (i.e., for cells with their dual-energy status marked as DE_UPDATED_BY_DUAL) +// --> this feature might be modified in the future +# ifdef DUAL_ENERGY +// B field must be updated in advance +# ifdef MHD + Emag = MHD_GetCellCenteredBEnergy( g_FC_B[MAGX], g_FC_B[MAGY], g_FC_B[MAGZ], + PS2, PS2, PS2, i_out, j_out, k_out ); +# else + Emag = NULL_REAL; +# endif +// we no longer apply density and pressure floors here since we want to enable 1st-order-flux correction for that + const bool CheckMinPres_No = false; +// Output_1Cell[DENS] = FMAX( Output_1Cell[DENS], MinDens ); + + Hydro_DualEnergyFix( Output_1Cell[DENS], Output_1Cell[MOMX], Output_1Cell[MOMY], Output_1Cell[MOMZ], + Output_1Cell[ENGY], Output_1Cell[ENPY], g_DE_Status[idx_out], + EoS->AuxArrayDevPtr_Flt[1], EoS->AuxArrayDevPtr_Flt[2], CheckMinPres_No, NULL_REAL, + DualEnergySwitch, Emag ); +# endif // #ifdef DUAL_ENERGY + + +// 4. 
store results to the output array + for (int v=0; v, line <%d>, function <%s>\n", + Output_1Cell[DENS], __FILE__, __LINE__, __FUNCTION__ ); + + if ( Hydro_CheckNegative(Output_1Cell[ENGY]) ) + printf( "WARNING : invalid energy (%14.7e) at file <%s>, line <%d>, function <%s>\n", + Output_1Cell[ENGY], __FILE__, __LINE__, __FUNCTION__ ); +# endif + + } // CGPU_LOOP( idx_out, CUBE(PS2) ) + +} // FUNCTION : Hydro_FullStepUpdate + + + +#endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) + + + +#endif // #ifndef __CUFLU_FULLSTEPUPDATE__ diff --git a/cuda_code/Col2Im_9.cu b/cuda_code/Col2Im_9.cu new file mode 100644 index 0000000000000000000000000000000000000000..ee4b36c96d79d9dfa7f964e6a02c4c06dd9bced0 --- /dev/null +++ b/cuda_code/Col2Im_9.cu @@ -0,0 +1,207 @@ +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +namespace at { +namespace native { +namespace { + +void col2im_out_cuda_template( + Tensor& output, + const Tensor& input_, + IntArrayRef output_size, + IntArrayRef kernel_size, + IntArrayRef dilation, + IntArrayRef padding, + IntArrayRef stride) { + TensorArg input_arg{input_, "input", 1}; + TensorArg output_arg{output, "output", 2}; + checkAllSameGPU("col2im_out_cuda", {input_arg, output_arg}); + + TORCH_CHECK( + output_size.size() == 2, + "It is expected output_size equals to 2, but got size ", + output_size.size()); + + TORCH_CHECK( + kernel_size.size() == 2, + "It is expected kernel_size equals to 2, but got size ", + kernel_size.size()); + + TORCH_CHECK( + dilation.size() == 2, + "It is expected dilation equals to 2, but got size ", + dilation.size()); + + TORCH_CHECK( + padding.size() == 2, + "It is expected padding equals to 2, but got size ", + padding.size()); + + TORCH_CHECK( + stride.size() == 2, + "It is expected stride equals to 2, but got size ", + stride.size()); + + int64_t output_height = output_size[0]; + int64_t output_width = output_size[1]; + int64_t kernel_height = kernel_size[0]; + int64_t kernel_width = kernel_size[1]; + int64_t dilation_height = dilation[0]; + int64_t dilation_width = dilation[1]; + int64_t pad_height = padding[0]; + int64_t pad_width = padding[1]; + int64_t stride_height = stride[0]; + int64_t stride_width = stride[1]; + + col2im_shape_check( + input_, + Tensor(), + output_height, + output_width, + kernel_height, + kernel_width, + dilation_height, + dilation_width, + pad_height, + pad_width, + stride_height, + stride_width); + + Tensor input = input_.contiguous(); + + bool batched_input = true; + if (input.dim() == 2) { + // Force batch + batched_input = false; + input.resize_({1, input.size(0), input.size(1)}); + } + + int64_t batch_size = input.size(0); + int64_t n_input_plane = input.size(1); + int64_t n_output_plane = n_input_plane / (kernel_width * kernel_height); + + output.resize_({batch_size, n_output_plane, output_height, output_width}); + output.zero_(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "col2im_out_cuda", [&] { + using accscalar_t = at::acc_type; + + Tensor input_n; + Tensor output_n; + + int64_t height_col = (output_height + 2 * pad_height - + (dilation_height * (kernel_height - 1) + 1)) / + stride_height + + 1; + int64_t width_col = (output_width + 2 * pad_width - + (dilation_width * (kernel_width - 1) + 1)) / + stride_width + + 1; + + for (int64_t elt = 0; elt < batch_size; elt++) { + input_n = input.select(0, elt); + output_n = output.select(0, elt); + + col2im( + at::cuda::getCurrentCUDAStream(), + input_n.data_ptr(), + 
n_output_plane, + output_height, + output_width, + height_col, + width_col, + kernel_height, + kernel_width, + pad_height, + pad_width, + stride_height, + stride_width, + dilation_height, + dilation_width, + output_n.data_ptr()); + } + + if (!batched_input) { + output.resize_({n_output_plane, output_height, output_width}); + } + }); +} + +void col2im_backward_out_cuda_template( + Tensor& grad_input, + const Tensor& grad_output, + IntArrayRef kernel_size, + IntArrayRef dilation, + IntArrayRef padding, + IntArrayRef stride) { + // im2col_out_cuda checks size of kernel_size, dilation, padding and stride + im2col_out_cuda( + grad_input, grad_output, kernel_size, dilation, padding, stride); +} + +} // namespace + +Tensor& col2im_out_cuda( + Tensor& output, + const Tensor& input, + IntArrayRef output_size, + IntArrayRef kernel_size, + IntArrayRef dilation, + IntArrayRef padding, + IntArrayRef stride) { + col2im_out_cuda_template( + output, input, output_size, kernel_size, dilation, padding, stride); + return output; +} + +Tensor col2im_cuda( + const Tensor& input, + IntArrayRef output_size, + IntArrayRef kernel_size, + IntArrayRef dilation, + IntArrayRef padding, + IntArrayRef stride) { + Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + + col2im_out_cuda_template( + output, input, output_size, kernel_size, dilation, padding, stride); + return output; +} + +Tensor& col2im_backward_out_cuda( + Tensor& grad_input, + const Tensor& grad_output, + IntArrayRef kernel_size, + IntArrayRef dilation, + IntArrayRef padding, + IntArrayRef stride) { + col2im_backward_out_cuda_template( + grad_input, grad_output, kernel_size, dilation, padding, stride); + return grad_input; +} + +Tensor col2im_backward_cuda( + const Tensor& grad_output, + IntArrayRef kernel_size, + IntArrayRef dilation, + IntArrayRef padding, + IntArrayRef stride) { + Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + + col2im_backward_out_cuda_template( + grad_input, grad_output, kernel_size, dilation, padding, stride); + return grad_input; +} + +} // namespace native +} // namespace at diff --git a/cuda_code/CommunicatorGridGPU_1.cu b/cuda_code/CommunicatorGridGPU_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..facd1be8b8c8b47b58a9e89ce901344b75c8039c --- /dev/null +++ b/cuda_code/CommunicatorGridGPU_1.cu @@ -0,0 +1,191 @@ +// Copyright (c) 2009-2022 The Regents of the University of Michigan. +// Part of HOOMD-blue, released under the BSD 3-Clause License. + +#include "hip/hip_runtime.h" + +#ifdef __HIP_PLATFORM_HCC__ +#include +#else +#include +typedef cufftComplex hipfftComplex; +#endif + +#include "CommunicatorGridGPU.cuh" +//! 
Define plus operator for complex data type (needed by CommunicatorMesh) +__device__ inline hipfftComplex operator+(hipfftComplex& lhs, const hipfftComplex& rhs) + { + hipfftComplex res; + res.x = lhs.x + rhs.x; + res.y = lhs.y + rhs.y; + return res; + } + +namespace hoomd + { +namespace md + { +namespace kernel + { +template +__global__ void gpu_gridcomm_scatter_send_cells_kernel(unsigned int n_send_cells, + unsigned int* d_send_idx, + const T* d_grid, + T* d_send_buf) + { + unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; + + if (idx >= n_send_cells) + return; + d_send_buf[idx] = d_grid[d_send_idx[idx]]; + } + +template +__global__ void gpu_gridcomm_scatter_add_recv_cells_kernel(unsigned int n_unique_recv_cells, + const T* d_recv_buf, + T* d_grid, + const unsigned int* d_cell_recv, + const unsigned int* d_cell_recv_begin, + const unsigned int* d_cell_recv_end, + const unsigned int* d_recv_idx) + { + unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= n_unique_recv_cells) + return; + + unsigned int begin = d_cell_recv_begin[idx]; + unsigned int end = d_cell_recv_end[idx]; + + T val = d_recv_buf[d_cell_recv[begin]]; + + // add together multiple received cells + for (unsigned int i = begin + 1; i < end; i++) + val = val + d_recv_buf[d_cell_recv[i]]; + + unsigned int recv_cell = d_recv_idx[d_cell_recv[begin]]; + if (add_outer) + { + // add to grid + d_grid[recv_cell] = d_grid[recv_cell] + val; + } + else + { + // write out to grid + d_grid[recv_cell] = val; + } + } + +template +void gpu_gridcomm_scatter_send_cells(unsigned int n_send_cells, + unsigned int* d_send_idx, + const T* d_grid, + T* d_send_buf) + { + unsigned int block_size = 256; + unsigned int n_blocks = n_send_cells / block_size + 1; + + hipLaunchKernelGGL((gpu_gridcomm_scatter_send_cells_kernel), + dim3(n_blocks), + dim3(block_size), + 0, + 0, + n_send_cells, + d_send_idx, + d_grid, + d_send_buf); + } + +template +void gpu_gridcomm_scatter_add_recv_cells(unsigned int n_unique_recv_cells, + const T* d_recv_buf, + T* d_grid, + const unsigned int* d_cell_recv, + const unsigned int* d_cell_recv_begin, + const unsigned int* d_cell_recv_end, + const unsigned int* d_recv_idx, + bool add_outer) + { + unsigned int block_size = 256; + unsigned int n_blocks = n_unique_recv_cells / block_size + 1; + + if (add_outer) + { + hipLaunchKernelGGL((gpu_gridcomm_scatter_add_recv_cells_kernel), + dim3(n_blocks), + dim3(block_size), + 0, + 0, + n_unique_recv_cells, + d_recv_buf, + d_grid, + d_cell_recv, + d_cell_recv_begin, + d_cell_recv_end, + d_recv_idx); + } + else + { + hipLaunchKernelGGL((gpu_gridcomm_scatter_add_recv_cells_kernel), + dim3(n_blocks), + dim3(block_size), + 0, + 0, + n_unique_recv_cells, + d_recv_buf, + d_grid, + d_cell_recv, + d_cell_recv_begin, + d_cell_recv_end, + d_recv_idx); + } + } + +//! Template instantiation for hipfftComplex +template void gpu_gridcomm_scatter_send_cells(unsigned int n_send_cells, + unsigned int* d_send_idx, + const hipfftComplex* d_grid, + hipfftComplex* d_send_buf); + +template void +gpu_gridcomm_scatter_add_recv_cells(unsigned int n_unique_recv_cells, + const hipfftComplex* d_recv_buf, + hipfftComplex* d_grid, + const unsigned int* d_cell_recv, + const unsigned int* d_cell_recv_begin, + const unsigned int* d_cell_recv_end, + const unsigned int* d_recv_idx, + bool add_outer); + +//! 
Template instantiation for Scalar +template void gpu_gridcomm_scatter_send_cells(unsigned int n_send_cells, + unsigned int* d_send_idx, + const Scalar* d_grid, + Scalar* d_send_buf); + +template void gpu_gridcomm_scatter_add_recv_cells(unsigned int n_unique_recv_cells, + const Scalar* d_recv_buf, + Scalar* d_grid, + const unsigned int* d_cell_recv, + const unsigned int* d_cell_recv_begin, + const unsigned int* d_cell_recv_end, + const unsigned int* d_recv_idx, + bool add_outer); + +//! Template instantiation for unsigned int +template void gpu_gridcomm_scatter_send_cells(unsigned int n_send_cells, + unsigned int* d_send_idx, + const unsigned int* d_grid, + unsigned int* d_send_buf); + +template void +gpu_gridcomm_scatter_add_recv_cells(unsigned int n_unique_recv_cells, + const unsigned int* d_recv_buf, + unsigned int* d_grid, + const unsigned int* d_cell_recv, + const unsigned int* d_cell_recv_begin, + const unsigned int* d_cell_recv_end, + const unsigned int* d_recv_idx, + bool add_outer); + + } // end namespace kernel + } // end namespace md + } // end namespace hoomd diff --git a/cuda_code/CompareEQKernel.cu b/cuda_code/CompareEQKernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..21f66eefd6a4d9ce43bca3b761d8484363f510bf --- /dev/null +++ b/cuda_code/CompareEQKernel.cu @@ -0,0 +1,28 @@ +#include +#include +#include +#include +#include + + +// NOTE: CUDA on Windows requires that the enclosing function +// of a __device__ lambda not have internal linkage. + +namespace at { namespace native { + +template +struct CompareEqFunctor { + __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const { + return a == b; + } +}; + +void eq_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "eq_cuda", [&]() { + gpu_kernel_with_scalars(iter, CompareEqFunctor()); + }); +} + +REGISTER_DISPATCH(eq_stub, &eq_kernel_cuda); + +}} // namespace at::native diff --git a/cuda_code/CompareGEKernel_3.cu b/cuda_code/CompareGEKernel_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..45c331591f7e38cec6d1a7607d66c7b3fcf2574c --- /dev/null +++ b/cuda_code/CompareGEKernel_3.cu @@ -0,0 +1,29 @@ +#define TORCH_ASSERT_NO_OPERATORS +#include +#include +#include +#include +#include + + +// NOTE: CUDA on Windows requires that the enclosing function +// of a __device__ lambda not have internal linkage. + +namespace at { namespace native { + +template +struct CompareGEFunctor { + __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const { + return a >= b; + } +}; + +void ge_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "ge_cuda", [&]() { + gpu_kernel_with_scalars(iter, CompareGEFunctor()); + }); +} + +REGISTER_DISPATCH(ge_stub, &ge_kernel_cuda); + +}} // namespace at::native diff --git a/cuda_code/CompareGTKernel.cu b/cuda_code/CompareGTKernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..cbd189ed1b6db9e833388af37db6f015b88c87c9 --- /dev/null +++ b/cuda_code/CompareGTKernel.cu @@ -0,0 +1,28 @@ +#include +#include +#include +#include +#include + + +// NOTE: CUDA on Windows requires that the enclosing function +// of a __device__ lambda not have internal linkage. 
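// The eq/ge/gt kernels in this patch all follow the same three-step pattern: a
// trivially-copyable comparison functor templated on scalar_t, an AT_DISPATCH_*
// macro that instantiates it for every supported dtype and hands it to
// gpu_kernel_with_scalars(), and a REGISTER_DISPATCH() call that wires the CUDA
// kernel into the corresponding dispatch stub. A minimal sketch of the same
// pattern for a hypothetical less-than kernel is shown here; the names
// CompareLTExampleFunctor/lt_example_kernel_cuda are illustrative only and no
// stub is registered for them.

namespace at { namespace native {

template <typename scalar_t>
struct CompareLTExampleFunctor {
  __device__ __forceinline__ bool operator()(scalar_t a, scalar_t b) const {
    return a < b;
  }
};

// Mirrors gt_kernel_cuda() below; a real kernel would additionally call
// REGISTER_DISPATCH(<its stub>, &lt_example_kernel_cuda).
void lt_example_kernel_cuda(TensorIteratorBase& iter) {
  AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "lt_example_cuda", [&]() {
    gpu_kernel_with_scalars(iter, CompareLTExampleFunctor<scalar_t>());
  });
}

}} // namespace at::native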
+ +namespace at { namespace native { + +template +struct CompareGTFunctor { + __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const { + return a > b; + } +}; + +void gt_kernel_cuda(TensorIterator& iter) { + AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "gt_cuda", [&]() { + gpu_kernel_with_scalars(iter, CompareGTFunctor()); + }); +} + +REGISTER_DISPATCH(gt_stub, >_kernel_cuda); + +}} // namespace at::native diff --git a/cuda_code/CompressKernel_8.cu b/cuda_code/CompressKernel_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..21eca4faa0a231080f0184eac11171f6d0ca0f40 --- /dev/null +++ b/cuda_code/CompressKernel_8.cu @@ -0,0 +1,2022 @@ +// Copyright (c) 2009-2011 Ignacio Castano +// Copyright (c) 2007-2009 NVIDIA Corporation -- Ignacio Castano +// +// Permission is hereby granted, free of charge, to any person +// obtaining a copy of this software and associated documentation +// files (the "Software"), to deal in the Software without +// restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following +// conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +// OTHER DEALINGS IN THE SOFTWARE. + +#include +#include // FLT_MAX + +#include "CudaMath.h" + + +#define NUM_THREADS 64 // Number of threads per block. + +typedef unsigned char uchar; +typedef unsigned short ushort; +typedef unsigned int uint; + +template +__device__ inline void swap(T & a, T & b) +{ + T tmp = a; + a = b; + b = tmp; +} + +__constant__ uchar OMatch5[256][2]; +__constant__ uchar OMatch6[256][2]; + +__constant__ float3 kColorMetric = { 1.0f, 1.0f, 1.0f }; +__constant__ float3 kColorMetricSqr = { 1.0f, 1.0f, 1.0f }; + +// Some kernels read the input through texture. 
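// (The texture reference declared below is sampled with tex2D() in the
// loadColorBlockTex() helpers, so the host code is expected to bind the input
// image to it -- e.g. with the legacy cudaBindTexture2D()/cudaBindTextureToArray()
// API -- before launching compressDXT1() and the other texture-based kernels.
// That binding code is not part of this file.)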
+texture tex; + + +//////////////////////////////////////////////////////////////////////////////// +// Color helpers +//////////////////////////////////////////////////////////////////////////////// + +__device__ inline uint float_to_u8(float value) +{ + return min(max(__float2int_rn((255 * value + 0.5f) / (1.0f + 1.0f/255.0f)), 0), 255); +} + +__device__ inline uint float_to_u6(float value) +{ + return min(max(__float2int_rn((63 * value + 0.5f) / (1.0f + 1.0f/63.0f)), 0), 63); +} + +__device__ inline uint float_to_u5(float value) +{ + return min(max(__float2int_rn((31 * value + 0.5f) / (1.0f + 1.0f/31.0f)), 0), 31); +} + +__device__ inline float u8_to_float(uint value) +{ + return __saturatef(__uint2float_rn(value) / 255.0f); + //return (value) / 255.0f; +} + +__device__ float3 color32ToFloat3(uint c) +{ + float3 color; + color.z = u8_to_float((c >> 0) & 0xFF); + color.y = u8_to_float((c >> 8) & 0xFF); + color.x = u8_to_float((c >> 16) & 0xFF); + return color; +} + +__device__ int3 color16ToInt3(ushort c) +{ + int3 color; + + color.z = ((c >> 0) & 0x1F); + color.z = (color.z << 3) | (color.z >> 2); + + color.y = ((c >> 5) & 0x3F); + color.y = (color.y << 2) | (color.y >> 4); + + color.x = ((c >> 11) & 0x1F); + color.x = (color.x << 3) | (color.x >> 2); + + return color; +} + +__device__ float3 color16ToFloat3(ushort c) +{ + int3 color = color16ToInt3(c); + return make_float3(color.x, color.y, color.z) * (1.0f / 255.0f); +} + +__device__ int3 float3ToInt3(float3 c) +{ + return make_int3(c.x * 255, c.y * 255, c.z * 255); +} + +__device__ float3 int3ToFloat3(int3 c) +{ + return make_float3(float_to_u8(c.x), float_to_u8(c.y), float_to_u8(c.z)); +} + + +__device__ int colorDistance(int3 c0, int3 c1) +{ + int dx = c0.x-c1.x; + int dy = c0.y-c1.y; + int dz = c0.z-c1.z; + return __mul24(dx, dx) + __mul24(dy, dy) + __mul24(dz, dz); +} + + +//////////////////////////////////////////////////////////////////////////////// +// Round color to RGB565 and expand +//////////////////////////////////////////////////////////////////////////////// + + +#if 0 +__device__ inline uint float_to_u8(float value) +{ + //uint result; + //asm("cvt.sat.rni.u8.f32 %0, %1;" : "=r" (result) : "f" (value)); + //return result; + //return __float2uint_rn(__saturatef(value) * 255.0f); + + int result = __float2int_rn((255 * value + 0.5f) / (1.0f + 1.0f/255.0f)); + result = max(result, 0); + result = min(result, 255); + return result; +} + +__device__ inline float u8_to_float(uint value) +{ + //float result; + //asm("cvt.sat.rn.f32.u8 %0, %1;" : "=f" (result) : "r" (value)); // this is wrong! 
+ //return result; + return __saturatef(__uint2float_rn(value) / 255.0f); +} + +inline __device__ float3 roundAndExpand565(float3 v, ushort * w) +{ + uint x = float_to_u8(v.x) >> 3; + uint y = float_to_u8(v.y) >> 2; + uint z = float_to_u8(v.z) >> 3; + *w = (x << 11) | (y << 5) | z; + v.x = u8_to_float((x << 3) | (x >> 2)); + v.y = u8_to_float((y << 2) | (y >> 4)); + v.z = u8_to_float((z << 3) | (z >> 2)); +// v.x = u8_to_float(x) * 255.0f / 31.0f; +// v.y = u8_to_float(y) * 255.0f / 63.0f; +// v.z = u8_to_float(z) * 255.0f / 31.0f; + return v; +} +#else + +inline __device__ float3 roundAndExpand565(float3 v, ushort * w) +{ + uint x = __float2uint_rn(__saturatef(v.x) * 31.0f); + uint y = __float2uint_rn(__saturatef(v.y) * 63.0f); + uint z = __float2uint_rn(__saturatef(v.z) * 31.0f); + + //uint x = float_to_u5(v.x); + //uint y = float_to_u6(v.y); + //uint z = float_to_u5(v.z); + + *w = (x << 11) | (y << 5) | z; + + v.x = __uint2float_rn(x) * 1.0f / 31.0f; + v.y = __uint2float_rn(y) * 1.0f / 63.0f; + v.z = __uint2float_rn(z) * 1.0f / 31.0f; + + //v.x = u8_to_float((x << 3) | (x >> 2)); + //v.y = u8_to_float((y << 2) | (y >> 4)); + //v.z = u8_to_float((z << 3) | (z >> 2)); + + return v; +} +#endif +inline __device__ float2 roundAndExpand56(float2 v, ushort * w) +{ + uint x = __float2uint_rn(__saturatef(v.x) * 31.0f); + uint y = __float2uint_rn(__saturatef(v.y) * 63.0f); + *w = (x << 11) | (y << 5); + v.x = __uint2float_rn(x) * 1.0f / 31.0f; + v.y = __uint2float_rn(y) * 1.0f / 63.0f; + return v; +} + +inline __device__ float2 roundAndExpand88(float2 v, ushort * w) +{ + uint x = __float2uint_rn(__saturatef(v.x) * 255.0f); + uint y = __float2uint_rn(__saturatef(v.y) * 255.0f); + *w = (x << 8) | y; + v.x = __uint2float_rn(x) * 1.0f / 255.0f; + v.y = __uint2float_rn(y) * 1.0f / 255.0f; + return v; +} + + +//////////////////////////////////////////////////////////////////////////////// +// Block errors +//////////////////////////////////////////////////////////////////////////////// + +__device__ float3 blockError4(const float3 * colors, uint permutation, float3 a, float3 b) +{ + float3 error = make_float3(0.0f, 0.0f, 0.0f); + + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + float beta = (bits & 1); + if (bits & 2) beta = (1 + beta) / 3.0f; + float alpha = 1.0f - beta; + + float3 diff = colors[i] - (a*alpha + b*beta); + + error += diff*diff; + } + + return error; +} + +__device__ float3 blockError4(const float3 * colors, uint permutation, ushort c0, ushort c1) +{ + float3 error = make_float3(0.0f, 0.0f, 0.0f); + + int3 color0 = color16ToInt3(c0); + int3 color1 = color16ToInt3(c1); + + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + int beta = (bits & 1); + if (bits & 2) beta = (1 + beta); + float alpha = 3 - beta; + + int3 color; + color.x = (color0.x * alpha + color1.x * beta) / 3; + color.y = (color0.y * alpha + color1.y * beta) / 3; + color.z = (color0.z * alpha + color1.z * beta) / 3; + + float3 diff = colors[i] - int3ToFloat3(color); + + error += diff*diff; + } + + return error; +} + + +__device__ float3 blockError3(const float3 * colors, uint permutation, float3 a, float3 b) +{ + float3 error = make_float3(0.0f, 0.0f, 0.0f); + + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + float beta = (bits & 1); + if (bits & 2) beta = 0.5f; + float alpha = 1.0f - beta; + + float3 diff = colors[i] - (a*alpha + b*beta); + + error += diff*diff; + } + + return error; +} + + 
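// For reference, the kernels below emit standard DXT1/BC1 blocks: one 32-bit word
// holding the two RGB565 endpoints (the "end" endpoint in the high half and
// "start" in the low half, as written by the saveBlockDXT1* routines) and one
// 32-bit word holding sixteen 2-bit palette indices, texel 0 in the least
// significant bits. The following host-side decoder is an illustrative sketch of
// that layout only; it is not part of the original file and decodeBlockDXT1 is an
// assumed name.
inline void decodeBlockDXT1(const uint2 & block, unsigned char rgb[16][3])
{
    const ushort c0 = block.x & 0xFFFF;     // low half  = "start" endpoint (color0)
    const ushort c1 = block.x >> 16;        // high half = "end" endpoint (color1)
    int pal[4][3];

    // Expand both RGB565 endpoints to 8 bits per channel.
    for (int e = 0; e < 2; e++)
    {
        const ushort c = (e == 0) ? c0 : c1;
        const int r = (c >> 11) & 0x1F, g = (c >> 5) & 0x3F, b = c & 0x1F;
        pal[e][0] = (r << 3) | (r >> 2);
        pal[e][1] = (g << 2) | (g >> 4);
        pal[e][2] = (b << 3) | (b >> 2);
    }

    // The remaining two palette entries depend on the endpoint ordering.
    for (int ch = 0; ch < 3; ch++)
    {
        if (c0 > c1)    // 4-color mode: interpolants at 1/3 and 2/3
        {
            pal[2][ch] = (2 * pal[0][ch] + pal[1][ch]) / 3;
            pal[3][ch] = (pal[0][ch] + 2 * pal[1][ch]) / 3;
        }
        else            // 3-color mode: midpoint, entry 3 is black (or transparent)
        {
            pal[2][ch] = (pal[0][ch] + pal[1][ch]) / 2;
            pal[3][ch] = 0;
        }
    }

    // Look up each texel's 2-bit palette index.
    for (int i = 0; i < 16; i++)
    {
        const uint idx = (block.y >> (2 * i)) & 3;
        for (int ch = 0; ch < 3; ch++)
            rgb[i][ch] = (unsigned char)pal[idx][ch];
    }
}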
+//////////////////////////////////////////////////////////////////////////////// +// Sort colors +//////////////////////////////////////////////////////////////////////////////// + +// @@ Experimental code to avoid duplicate colors for faster compression. +// We could first sort along the best fit line and only compare colors that have the same projection. +// The hardest part is to maintain the indices to map packed/sorted colors to the input colors. +// We also need to update several functions that assume the number of colors is fixed to 16. +// And compute different bit maps for the different color counts. +// This is a fairly high amount of work. +__device__ int packColors(float3 * values, float * weights, int * ranks) +{ + const int tid = threadIdx.x; + + __shared__ int count; + count = 0; + + bool alive = true; + + // Append this + for (int i = 0; i < 16; i++) + { + // One thread leads on each iteration. + if (tid == i) { + + // If thread alive, then append element. + if (alive) { + values[count] = values[i]; + weights[count] = weights[i]; + count++; + } + + // Otherwise update weight. + else { + weights[ranks[i]] += weights[i]; + } + } + + // Kill all threads that have the same element and record rank. + if (values[i] == values[tid]) { + alive = false; + ranks[tid] = count - 1; + } + } + + return count; +} + + +__device__ void sortColors(const float * values, int * ranks) +{ + const int tid = threadIdx.x; + + int rank = 0; + + #pragma unroll + for (int i = 0; i < 16; i++) + { + rank += (values[i] < values[tid]); + } + + ranks[tid] = rank; + + // Resolve elements with the same index. + #pragma unroll + for (int i = 0; i < 15; i++) + { + if ((tid > i) & (ranks[tid] == ranks[i])) ++ranks[tid]; + } +} + +__device__ void sortColors(const float * values, int * ranks, int count) +{ + const int tid = threadIdx.x; + + int rank = 0; + + #pragma unroll + for (int i = 0; i < count; i++) + { + rank += (values[i] < values[tid]); + } + + ranks[tid] = rank; + + // Resolve elements with the same index. + #pragma unroll + for (int i = 0; i < count-1; i++) + { + if ((tid > i) & (ranks[tid] == ranks[i])) ++ranks[tid]; + } +} + + + +//////////////////////////////////////////////////////////////////////////////// +// Load color block to shared mem +//////////////////////////////////////////////////////////////////////////////// + +__device__ void loadColorBlockTex(uint firstBlock, uint blockWidth, float3 colors[16], float3 sums[16], int xrefs[16], int * sameColor) +{ + const int bid = blockIdx.x; + const int idx = threadIdx.x; + + __shared__ float dps[16]; + + if (idx < 16) + { + float x = 4 * ((firstBlock + bid) % blockWidth) + idx % 4; // @@ Avoid mod and div by using 2D grid? + float y = 4 * ((firstBlock + bid) / blockWidth) + idx / 4; + + // Read color and copy to shared mem. + float4 c = tex2D(tex, x, y); + + colors[idx].x = c.z; + colors[idx].y = c.y; + colors[idx].z = c.x; + + // Sort colors along the best fit line. 
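// (Each of the 16 threads projects its color onto the principal axis returned by
// bestFitLine(); sortColors() ranks those projections and xrefs[] records the
// original-to-sorted mapping, which the saveBlock* routines use later to
// un-permute the 2-bit indices when the block is written out.)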
+ colorSums(colors, sums); + float3 axis = bestFitLine(colors, sums[0], kColorMetric); + + *sameColor = (axis == make_float3(0, 0, 0)); + + dps[idx] = dot(colors[idx], axis); + + sortColors(dps, xrefs); + + float3 tmp = colors[idx]; + colors[xrefs[idx]] = tmp; + } +} + +/* +__device__ void loadColorBlockTex(uint firstBlock, uint w, float3 colors[16], float3 sums[16], float weights[16], int xrefs[16], int * sameColor) +{ + const int bid = blockIdx.x; + const int idx = threadIdx.x; + + __shared__ float dps[16]; + + if (idx < 16) + { + float x = 4 * ((firstBlock + bid) % w) + idx % 4; // @@ Avoid mod and div by using 2D grid? + float y = 4 * ((firstBlock + bid) / w) + idx / 4; + + // Read color and copy to shared mem. + float4 c = tex2D(tex, x, y); + + colors[idx].x = c.z; + colors[idx].y = c.y; + colors[idx].z = c.x; + weights[idx] = 1; + + int count = packColors(colors, weights); + if (idx < count) + { + // Sort colors along the best fit line. + colorSums(colors, sums); + float3 axis = bestFitLine(colors, sums[0], kColorMetric); + + *sameColor = (axis == make_float3(0, 0, 0)); + + dps[idx] = dot(colors[idx], axis); + + sortColors(dps, xrefs); + + float3 tmp = colors[idx]; + colors[xrefs[idx]] = tmp; + } + } +} +*/ + +__device__ void loadColorBlockTex(uint firstBlock, uint width, float3 colors[16], float3 sums[16], float weights[16], int xrefs[16], int * sameColor) +{ + const int bid = blockIdx.x; + const int idx = threadIdx.x; + + __shared__ float3 rawColors[16]; + __shared__ float dps[16]; + + if (idx < 16) + { + float x = 4 * ((firstBlock + bid) % width) + idx % 4; // @@ Avoid mod and div by using 2D grid? + float y = 4 * ((firstBlock + bid) / width) + idx / 4; + + // Read color and copy to shared mem. + float4 c = tex2D(tex, x, y); + + rawColors[idx].x = c.z; + rawColors[idx].y = c.y; + rawColors[idx].z = c.x; + weights[idx] = c.w; + + colors[idx] = rawColors[idx] * weights[idx]; + + // Sort colors along the best fit line. + colorSums(colors, sums); + float3 axis = bestFitLine(colors, sums[0], kColorMetric); + + *sameColor = (axis == make_float3(0, 0, 0)); + + // Single color compressor needs unweighted colors. + if (*sameColor) colors[idx] = rawColors[idx]; + + dps[idx] = dot(colors[idx], axis); + + sortColors(dps, xrefs); + + float3 tmp = colors[idx]; + float w = weights[idx]; + colors[xrefs[idx]] = tmp; + weights[xrefs[idx]] = w; + } +} + +__device__ void loadColorBlock(const uint * image, float2 colors[16], float2 sums[16], int xrefs[16], int * sameColor) +{ + const int bid = blockIdx.x; + const int idx = threadIdx.x; + + __shared__ float dps[16]; + + if (idx < 16) + { + // Read color and copy to shared mem. + uint c = image[(bid) * 16 + idx]; + + colors[idx].y = ((c >> 8) & 0xFF) * (1.0f / 255.0f); + colors[idx].x = ((c >> 16) & 0xFF) * (1.0f / 255.0f); + + // Sort colors along the best fit line. + colorSums(colors, sums); + float2 axis = bestFitLine(colors, sums[0]); + + *sameColor = (axis == make_float2(0, 0)); + + dps[idx] = dot(colors[idx], axis); + + sortColors(dps, xrefs); + + float2 tmp = colors[idx]; + colors[xrefs[idx]] = tmp; + } +} + + +//////////////////////////////////////////////////////////////////////////////// +// Evaluate permutations +//////////////////////////////////////////////////////////////////////////////// +__device__ float evalPermutation4(const float3 * colors, uint permutation, ushort * start, ushort * end) +{ + // Compute endpoints using least squares. 
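// (For a fixed permutation each texel i gets interpolation weights alpha_i and
// beta_i = 1 - alpha_i, and the block error is
//     E(a,b) = sum_i | alpha_i*a + beta_i*b - x_i |^2 .
// Setting dE/da = dE/db = 0 gives the 2x2 normal equations
//     [ sum alpha_i^2       sum alpha_i*beta_i ] [a]   [ sum alpha_i*x_i ]
//     [ sum alpha_i*beta_i  sum beta_i^2       ] [b] = [ sum beta_i*x_i  ]
// which are solved per channel below with Cramer's rule ("factor" is the inverse
// of the determinant). The error evaluated at the end equals E(a,b) minus
// sum_i |x_i|^2; that term is identical for every permutation, so dropping it
// does not change which permutation wins.)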
+ float alpha2_sum = 0.0f; + float beta2_sum = 0.0f; + float alphabeta_sum = 0.0f; + float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); + float3 betax_sum = make_float3(0.0f, 0.0f, 0.0f); + + // Compute alpha & beta for this permutation. + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + float beta = (bits & 1); + if (bits & 2) beta = (1 + beta) / 3.0f; + float alpha = 1.0f - beta; + + alpha2_sum += alpha * alpha; + beta2_sum += beta * beta; + alphabeta_sum += alpha * beta; + alphax_sum += alpha * colors[i]; + betax_sum += beta * colors[i]; + } + + const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); + + float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; + float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; + + // Round a, b to the closest 5-6-5 color and expand... + a = roundAndExpand565(a, start); + b = roundAndExpand565(b, end); + + // compute the error + float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); + + return dot(e, kColorMetricSqr); +} + +__device__ float evalPermutation3(const float3 * colors, uint permutation, ushort * start, ushort * end) +{ + // Compute endpoints using least squares. + float alpha2_sum = 0.0f; + float beta2_sum = 0.0f; + float alphabeta_sum = 0.0f; + float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); + float3 betax_sum = make_float3(0.0f, 0.0f, 0.0f); + + // Compute alpha & beta for this permutation. + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + float beta = (bits & 1); + if (bits & 2) beta = 0.5f; + float alpha = 1.0f - beta; + + alpha2_sum += alpha * alpha; + beta2_sum += beta * beta; + alphabeta_sum += alpha * beta; + alphax_sum += alpha * colors[i]; + betax_sum += beta * colors[i]; + } + + const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); + + float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; + float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; + + // Round a, b to the closest 5-6-5 color and expand... + a = roundAndExpand565(a, start); + b = roundAndExpand565(b, end); + + // compute the error + float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); + + return dot(e, kColorMetricSqr); +} + +__constant__ const float alphaTable4[4] = { 9.0f, 0.0f, 6.0f, 3.0f }; +__constant__ const float alphaTable3[4] = { 4.0f, 0.0f, 2.0f, 2.0f }; +__constant__ const uint prods4[4] = { 0x090000,0x000900,0x040102,0x010402 }; +__constant__ const uint prods3[4] = { 0x040000,0x000400,0x040101,0x010401 }; + +__device__ float evalPermutation4(const float3 * colors, float3 color_sum, uint permutation, ushort * start, ushort * end) +{ + // Compute endpoints using least squares. + float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); + uint akku = 0; + + // Compute alpha & beta for this permutation. 
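// (Integer-weight trick: scaling the weights by 3 turns alpha and beta into the
// integers {3,0,2,1} and {0,3,1,2} for the four index codes. alphaTable4[] stores
// 3*(3*alpha) = 9*alpha, and prods4[] packs (3*alpha)^2, (3*beta)^2 and
// (3*alpha)*(3*beta) into separate bytes of one 32-bit word, so all three
// quadratic sums are accumulated together in "akku"; 16 texels contribute at most
// 16*9 = 144 per byte, so the packed fields cannot overflow. The common factor of
// 9 cancels in a and b and is divided out of the error by the final 1.0f/9.0f.)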
+ #pragma unroll + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + alphax_sum += alphaTable4[bits & 3] * colors[i]; + akku += prods4[bits & 3]; + } + + float alpha2_sum = float(akku >> 16); + float beta2_sum = float((akku >> 8) & 0xff); + float alphabeta_sum = float(akku & 0xff); + float3 betax_sum = 9.0f * color_sum - alphax_sum; + + const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); + + float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; + float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; + + // Round a, b to the closest 5-6-5 color and expand... + a = roundAndExpand565(a, start); + b = roundAndExpand565(b, end); + + // compute the error + float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); + + //float3 e = blockError4(colors, permutation, *start, *end); + + return (1.0f / 9.0f) * dot(e, kColorMetricSqr); +} + +__device__ float evalPermutation3(const float3 * colors, float3 color_sum, uint permutation, ushort * start, ushort * end) +{ + // Compute endpoints using least squares. + float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); + uint akku = 0; + + // Compute alpha & beta for this permutation. + #pragma unroll + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + alphax_sum += alphaTable3[bits & 3] * colors[i]; + akku += prods3[bits & 3]; + } + + float alpha2_sum = float(akku >> 16); + float beta2_sum = float((akku >> 8) & 0xff); + float alphabeta_sum = float(akku & 0xff); + float3 betax_sum = 4.0f * color_sum - alphax_sum; + + const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); + + float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; + float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; + + // Round a, b to the closest 5-6-5 color and expand... + a = roundAndExpand565(a, start); + b = roundAndExpand565(b, end); + + // compute the error + float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); + + //float3 e = blockError3(colors, permutation, a, b); + + return (1.0f / 4.0f) * dot(e, kColorMetricSqr); +} + +__device__ float evalPermutation4(const float3 * colors, const float * weights, float3 color_sum, uint permutation, ushort * start, ushort * end) +{ + // Compute endpoints using least squares. + float alpha2_sum = 0.0f; + float beta2_sum = 0.0f; + float alphabeta_sum = 0.0f; + float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); + + // Compute alpha & beta for this permutation. + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + float beta = (bits & 1); + if (bits & 2) beta = (1 + beta) / 3.0f; + float alpha = 1.0f - beta; + + alpha2_sum += alpha * alpha * weights[i]; + beta2_sum += beta * beta * weights[i]; + alphabeta_sum += alpha * beta * weights[i]; + alphax_sum += alpha * colors[i]; + } + + float3 betax_sum = color_sum - alphax_sum; + + const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); + + float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; + float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; + + // Round a, b to the closest 5-6-5 color and expand... 
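// (roundAndExpand565() snaps the real-valued least-squares endpoints to RGB565,
// returns the packed 16-bit values through start/end, and hands back the
// dequantized endpoints, so the error computed below is measured against the
// colors a decoder will actually reconstruct.)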
+ a = roundAndExpand565(a, start); + b = roundAndExpand565(b, end); + + // compute the error + float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); + + return dot(e, kColorMetricSqr); +} + +/* +__device__ float evalPermutation3(const float3 * colors, const float * weights, uint permutation, ushort * start, ushort * end) +{ + // Compute endpoints using least squares. + float alpha2_sum = 0.0f; + float beta2_sum = 0.0f; + float alphabeta_sum = 0.0f; + float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); + + // Compute alpha & beta for this permutation. + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + float beta = (bits & 1); + if (bits & 2) beta = 0.5f; + float alpha = 1.0f - beta; + + alpha2_sum += alpha * alpha * weights[i]; + beta2_sum += beta * beta * weights[i]; + alphabeta_sum += alpha * beta * weights[i]; + alphax_sum += alpha * colors[i]; + } + + float3 betax_sum = color_sum - alphax_sum; + + const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); + + float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; + float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; + + // Round a, b to the closest 5-6-5 color and expand... + a = roundAndExpand565(a, start); + b = roundAndExpand565(b, end); + + // compute the error + float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); + + return dot(e, kColorMetricSqr); +} +*/ + +__device__ float evalPermutation4(const float2 * colors, float2 color_sum, uint permutation, ushort * start, ushort * end) +{ + // Compute endpoints using least squares. + float2 alphax_sum = make_float2(0.0f, 0.0f); + uint akku = 0; + + // Compute alpha & beta for this permutation. + #pragma unroll + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + alphax_sum += alphaTable4[bits & 3] * colors[i]; + akku += prods4[bits & 3]; + } + + float alpha2_sum = float(akku >> 16); + float beta2_sum = float((akku >> 8) & 0xff); + float alphabeta_sum = float(akku & 0xff); + float2 betax_sum = 9.0f * color_sum - alphax_sum; + + const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); + + float2 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; + float2 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; + + // Round a, b to the closest 5-6 color and expand... + a = roundAndExpand56(a, start); + b = roundAndExpand56(b, end); + + // compute the error + float2 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); + + return (1.0f / 9.0f) * (e.x + e.y); +} + +__device__ float evalPermutation3(const float2 * colors, float2 color_sum, uint permutation, ushort * start, ushort * end) +{ + // Compute endpoints using least squares. + float2 alphax_sum = make_float2(0.0f, 0.0f); + uint akku = 0; + + // Compute alpha & beta for this permutation. 
+ #pragma unroll + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + alphax_sum += alphaTable3[bits & 3] * colors[i]; + akku += prods3[bits & 3]; + } + + float alpha2_sum = float(akku >> 16); + float beta2_sum = float((akku >> 8) & 0xff); + float alphabeta_sum = float(akku & 0xff); + float2 betax_sum = 4.0f * color_sum - alphax_sum; + + const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); + + float2 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; + float2 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; + + // Round a, b to the closest 5-6 color and expand... + a = roundAndExpand56(a, start); + b = roundAndExpand56(b, end); + + // compute the error + float2 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); + + return (1.0f / 4.0f) * (e.x + e.y); +} + +__device__ float evalPermutationCTX(const float2 * colors, float2 color_sum, uint permutation, ushort * start, ushort * end) +{ + // Compute endpoints using least squares. + float2 alphax_sum = make_float2(0.0f, 0.0f); + uint akku = 0; + + // Compute alpha & beta for this permutation. + #pragma unroll + for (int i = 0; i < 16; i++) + { + const uint bits = permutation >> (2*i); + + alphax_sum += alphaTable4[bits & 3] * colors[i]; + akku += prods4[bits & 3]; + } + + float alpha2_sum = float(akku >> 16); + float beta2_sum = float((akku >> 8) & 0xff); + float alphabeta_sum = float(akku & 0xff); + float2 betax_sum = 9.0f * color_sum - alphax_sum; + + const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); + + float2 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; + float2 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; + + // Round a, b to the closest 8-8 color and expand... + a = roundAndExpand88(a, start); + b = roundAndExpand88(b, end); + + // compute the error + float2 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); + + return (1.0f / 9.0f) * (e.x + e.y); +} + + +//////////////////////////////////////////////////////////////////////////////// +// Evaluate all permutations +//////////////////////////////////////////////////////////////////////////////// +__device__ void evalAllPermutations(const float3 * colors, float3 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors) +{ + const int idx = threadIdx.x; + + float bestError = FLT_MAX; + + __shared__ uint s_permutations[160]; + + for(int i = 0; i < 16; i++) + { + int pidx = idx + NUM_THREADS * i; + if (pidx >= 992) break; + + ushort start, end; + uint permutation = permutations[pidx]; + if (pidx < 160) s_permutations[pidx] = permutation; + + float error = evalPermutation4(colors, colorSum, permutation, &start, &end); + + if (error < bestError) + { + bestError = error; + bestPermutation = permutation; + bestStart = start; + bestEnd = end; + } + } + + if (bestStart < bestEnd) + { + swap(bestEnd, bestStart); + bestPermutation ^= 0x55555555; // Flip indices. 
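// (DXT1 selects 4-color mode only when color0 > color1, so if the fitted
// endpoints come out in the other order they are swapped and the low bit of every
// 2-bit index is flipped (00<->01, 10<->11), which mirrors the palette and leaves
// the block error unchanged. The 3-color search below uses a different flip
// because its two interpolated codes both map to the midpoint.)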
+ } + + for(int i = 0; i < 3; i++) + { + int pidx = idx + NUM_THREADS * i; + if (pidx >= 160) break; + + ushort start, end; + uint permutation = s_permutations[pidx]; + float error = evalPermutation3(colors, colorSum, permutation, &start, &end); + + if (error < bestError) + { + bestError = error; + bestPermutation = permutation; + bestStart = start; + bestEnd = end; + + if (bestStart > bestEnd) + { + swap(bestEnd, bestStart); + bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices. + } + } + } + + errors[idx] = bestError; +} + +/* +__device__ void evalAllPermutations(const float3 * colors, const float * weights, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors) +{ + const int idx = threadIdx.x; + + float bestError = FLT_MAX; + + __shared__ uint s_permutations[160]; + + for(int i = 0; i < 16; i++) + { + int pidx = idx + NUM_THREADS * i; + if (pidx >= 992) break; + + ushort start, end; + uint permutation = permutations[pidx]; + if (pidx < 160) s_permutations[pidx] = permutation; + + float error = evalPermutation4(colors, weights, permutation, &start, &end); + + if (error < bestError) + { + bestError = error; + bestPermutation = permutation; + bestStart = start; + bestEnd = end; + } + } + + if (bestStart < bestEnd) + { + swap(bestEnd, bestStart); + bestPermutation ^= 0x55555555; // Flip indices. + } + + for(int i = 0; i < 3; i++) + { + int pidx = idx + NUM_THREADS * i; + if (pidx >= 160) break; + + ushort start, end; + uint permutation = s_permutations[pidx]; + float error = evalPermutation3(colors, weights, permutation, &start, &end); + + if (error < bestError) + { + bestError = error; + bestPermutation = permutation; + bestStart = start; + bestEnd = end; + + if (bestStart > bestEnd) + { + swap(bestEnd, bestStart); + bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices. + } + } + } + + errors[idx] = bestError; +} +*/ + +__device__ void evalAllPermutations(const float2 * colors, float2 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors) +{ + const int idx = threadIdx.x; + + float bestError = FLT_MAX; + + __shared__ uint s_permutations[160]; + + for(int i = 0; i < 16; i++) + { + int pidx = idx + NUM_THREADS * i; + if (pidx >= 992) break; + + ushort start, end; + uint permutation = permutations[pidx]; + if (pidx < 160) s_permutations[pidx] = permutation; + + float error = evalPermutation4(colors, colorSum, permutation, &start, &end); + + if (error < bestError) + { + bestError = error; + bestPermutation = permutation; + bestStart = start; + bestEnd = end; + } + } + + if (bestStart < bestEnd) + { + swap(bestEnd, bestStart); + bestPermutation ^= 0x55555555; // Flip indices. + } + + for(int i = 0; i < 3; i++) + { + int pidx = idx + NUM_THREADS * i; + if (pidx >= 160) break; + + ushort start, end; + uint permutation = s_permutations[pidx]; + float error = evalPermutation3(colors, colorSum, permutation, &start, &end); + + if (error < bestError) + { + bestError = error; + bestPermutation = permutation; + bestStart = start; + bestEnd = end; + + if (bestStart > bestEnd) + { + swap(bestEnd, bestStart); + bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices. 
+ } + } + } + + errors[idx] = bestError; +} + +__device__ void evalLevel4Permutations(const float3 * colors, float3 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors) +{ + const int idx = threadIdx.x; + + float bestError = FLT_MAX; + + for(int i = 0; i < 16; i++) + { + int pidx = idx + NUM_THREADS * i; + if (pidx >= 992) break; + + ushort start, end; + uint permutation = permutations[pidx]; + + float error = evalPermutation4(colors, colorSum, permutation, &start, &end); + + if (error < bestError) + { + bestError = error; + bestPermutation = permutation; + bestStart = start; + bestEnd = end; + } + } + + if (bestStart < bestEnd) + { + swap(bestEnd, bestStart); + bestPermutation ^= 0x55555555; // Flip indices. + } + + errors[idx] = bestError; +} + +__device__ void evalLevel4Permutations(const float3 * colors, const float * weights, float3 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors) +{ + const int idx = threadIdx.x; + + float bestError = FLT_MAX; + + for(int i = 0; i < 16; i++) + { + int pidx = idx + NUM_THREADS * i; + if (pidx >= 992) break; + + ushort start, end; + uint permutation = permutations[pidx]; + + float error = evalPermutation4(colors, weights, colorSum, permutation, &start, &end); + + if (error < bestError) + { + bestError = error; + bestPermutation = permutation; + bestStart = start; + bestEnd = end; + } + } + + if (bestStart < bestEnd) + { + swap(bestEnd, bestStart); + bestPermutation ^= 0x55555555; // Flip indices. + } + + errors[idx] = bestError; +} + +__device__ void evalAllPermutationsCTX(const float2 * colors, float2 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors) +{ + const int idx = threadIdx.x; + + float bestError = FLT_MAX; + + for(int i = 0; i < 16; i++) + { + int pidx = idx + NUM_THREADS * i; + if (pidx >= 704) break; + + ushort start, end; + uint permutation = permutations[pidx]; + + float error = evalPermutationCTX(colors, colorSum, permutation, &start, &end); + + if (error < bestError) + { + bestError = error; + bestPermutation = permutation; + bestStart = start; + bestEnd = end; + } + } + + if (bestStart < bestEnd) + { + swap(bestEnd, bestStart); + bestPermutation ^= 0x55555555; // Flip indices. 
+ } + + errors[idx] = bestError; +} + + +//////////////////////////////////////////////////////////////////////////////// +// Find index with minimum error +//////////////////////////////////////////////////////////////////////////////// +__device__ int findMinError(float * errors) +{ + const int idx = threadIdx.x; + + __shared__ int indices[NUM_THREADS]; + indices[idx] = idx; + + for(int d = NUM_THREADS/2; d > 32; d >>= 1) + { + __syncthreads(); + + if (idx < d) + { + float err0 = errors[idx]; + float err1 = errors[idx + d]; + + if (err1 < err0) { + errors[idx] = err1; + indices[idx] = indices[idx + d]; + } + } + } + + __syncthreads(); + + // unroll last 6 iterations + if (idx < 32) + { + if (errors[idx + 32] < errors[idx]) { + errors[idx] = errors[idx + 32]; + indices[idx] = indices[idx + 32]; + } + if (errors[idx + 16] < errors[idx]) { + errors[idx] = errors[idx + 16]; + indices[idx] = indices[idx + 16]; + } + if (errors[idx + 8] < errors[idx]) { + errors[idx] = errors[idx + 8]; + indices[idx] = indices[idx + 8]; + } + if (errors[idx + 4] < errors[idx]) { + errors[idx] = errors[idx + 4]; + indices[idx] = indices[idx + 4]; + } + if (errors[idx + 2] < errors[idx]) { + errors[idx] = errors[idx + 2]; + indices[idx] = indices[idx + 2]; + } + if (errors[idx + 1] < errors[idx]) { + errors[idx] = errors[idx + 1]; + indices[idx] = indices[idx + 1]; + } + } + + __syncthreads(); + + return indices[0]; +} + + +//////////////////////////////////////////////////////////////////////////////// +// Save DXT block +//////////////////////////////////////////////////////////////////////////////// +__device__ void saveBlockDXT1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 * result) +{ + const int bid = blockIdx.x; + + if (start == end) + { + permutation = 0; + } + + // Reorder permutation. + uint indices = 0; + for(int i = 0; i < 16; i++) + { + int ref = xrefs[i]; + indices |= ((permutation >> (2 * ref)) & 3) << (2 * i); + } + + // Write endpoints. + result[bid].x = (end << 16) | start; + + // Write palette indices. + result[bid].y = indices; +} + +__device__ void saveBlockDXT1_Parallel(uint endpoints, float3 colors[16], int xrefs[16], uint * result) +{ + const int tid = threadIdx.x; + const int bid = blockIdx.x; + + if (tid < 16) + { + int3 color = float3ToInt3(colors[xrefs[tid]]); + + ushort endpoint0 = endpoints & 0xFFFF; + ushort endpoint1 = endpoints >> 16; + + int3 palette[4]; + palette[0] = color16ToInt3(endpoint0); + palette[1] = color16ToInt3(endpoint1); + + int d0 = colorDistance(palette[0], color); + int d1 = colorDistance(palette[1], color); + + uint index; + if (endpoint0 > endpoint1) + { + palette[2].x = (2 * palette[0].x + palette[1].x) / 3; + palette[2].y = (2 * palette[0].y + palette[1].y) / 3; + palette[2].z = (2 * palette[0].z + palette[1].z) / 3; + + palette[3].x = (2 * palette[1].x + palette[0].x) / 3; + palette[3].y = (2 * palette[1].y + palette[0].y) / 3; + palette[3].z = (2 * palette[1].z + palette[0].z) / 3; + + int d2 = colorDistance(palette[2], color); + int d3 = colorDistance(palette[3], color); + + // Compute the index that best fit color. 
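// (Branchless nearest-palette-entry selection: because the four palette entries
// are collinear, the comparisons below are enough to resolve the argmin of
// d0..d3 and map it straight to the 2-bit hardware index, 0 = c0, 1 = c1,
// 2 = 2/3*c0 + 1/3*c1, 3 = 1/3*c0 + 2/3*c1, without any divergent branches.)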
+ uint b0 = d0 > d3; + uint b1 = d1 > d2; + uint b2 = d0 > d2; + uint b3 = d1 > d3; + uint b4 = d2 > d3; + + uint x0 = b1 & b2; + uint x1 = b0 & b3; + uint x2 = b0 & b4; + + index = (x2 | ((x0 | x1) << 1)); + } + else { + palette[2].x = (palette[0].x + palette[1].x) / 2; + palette[2].y = (palette[0].y + palette[1].y) / 2; + palette[2].z = (palette[0].z + palette[1].z) / 2; + + int d2 = colorDistance(palette[2], color); + + index = 0; + if (d1 < d0 && d1 < d2) index = 1; + else if (d2 < d0) index = 2; + } + + __shared__ uint indices[16]; + + indices[tid] = index << (2 * tid); + if (tid < 8) indices[tid] |= indices[tid+8]; + if (tid < 4) indices[tid] |= indices[tid+4]; + if (tid < 2) indices[tid] |= indices[tid+2]; + if (tid < 1) indices[tid] |= indices[tid+1]; + + if (tid < 2) { + result[2 * bid + tid] = tid == 0 ? endpoints : indices[0]; + } + } +} + +__device__ void saveBlockDXT1_Parallel(uint endpoints, uint permutation, int xrefs[16], uint * result) +{ + const int tid = threadIdx.x; + const int bid = blockIdx.x; + + if (tid < 16) + { + // Reorder permutation. + uint index = ((permutation >> (2 * xrefs[tid])) & 3) << (2 * tid); + __shared__ uint indices[16]; + + indices[tid] = index; + if (tid < 8) indices[tid] |= indices[tid+8]; + if (tid < 4) indices[tid] |= indices[tid+4]; + if (tid < 2) indices[tid] |= indices[tid+2]; + if (tid < 1) indices[tid] |= indices[tid+1]; + + if (tid < 2) { + result[2 * bid + tid] = tid == 0 ? endpoints : indices[0]; + } + } +} + + +__device__ void saveBlockCTX1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 * result) +{ + saveBlockDXT1(start, end, permutation, xrefs, result); +} + +__device__ void saveSingleColorBlockDXT1(float3 color, uint2 * result) +{ + const int bid = blockIdx.x; + + int r = color.x * 255; + int g = color.y * 255; + int b = color.z * 255; + + ushort color0 = (OMatch5[r][0] << 11) | (OMatch6[g][0] << 5) | OMatch5[b][0]; + ushort color1 = (OMatch5[r][1] << 11) | (OMatch6[g][1] << 5) | OMatch5[b][1]; + + if (color0 < color1) + { + result[bid].x = (color0 << 16) | color1; + result[bid].y = 0xffffffff; + } + else + { + result[bid].x = (color1 << 16) | color0; + result[bid].y = 0xaaaaaaaa; + } +} + +__device__ void saveSingleColorBlockDXT1(float2 color, uint2 * result) +{ + const int bid = blockIdx.x; + + int r = color.x * 255; + int g = color.y * 255; + + ushort color0 = (OMatch5[r][0] << 11) | (OMatch6[g][0] << 5); + ushort color1 = (OMatch5[r][1] << 11) | (OMatch6[g][1] << 5); + + if (color0 < color1) + { + result[bid].x = (color0 << 16) | color1; + result[bid].y = 0xffffffff; + } + else + { + result[bid].x = (color1 << 16) | color0; + result[bid].y = 0xaaaaaaaa; + } +} + +__device__ void saveSingleColorBlockCTX1(float2 color, uint2 * result) +{ + const int bid = blockIdx.x; + + int r = color.x * 255; + int g = color.y * 255; + + ushort color0 = (r << 8) | (g); + + result[bid].x = (color0 << 16) | color0; + result[bid].y = 0x00000000; +} + + +//////////////////////////////////////////////////////////////////////////////// +// Compress color block +//////////////////////////////////////////////////////////////////////////////// + +__global__ void compressDXT1(uint firstBlock, uint blockWidth, const uint * permutations, uint2 * result) +{ + __shared__ float3 colors[16]; + __shared__ float3 sums[16]; + __shared__ int xrefs[16]; + __shared__ int sameColor; + + loadColorBlockTex(firstBlock, blockWidth, colors, sums, xrefs, &sameColor); + + __syncthreads(); + + if (sameColor) + { + if (threadIdx.x == 0) 
saveSingleColorBlockDXT1(colors[0], result); + return; + } + + ushort bestStart, bestEnd; + uint bestPermutation; + + __shared__ float errors[NUM_THREADS]; + evalAllPermutations(colors, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors); + + // Use a parallel reduction to find minimum error. + const int minIdx = findMinError(errors); + + __shared__ uint s_bestEndPoints; + __shared__ uint s_bestPermutation; + + // Only write the result of the winner thread. + if (threadIdx.x == minIdx) + { + s_bestEndPoints = (bestEnd << 16) | bestStart; + s_bestPermutation = (bestStart != bestEnd) ? bestPermutation : 0; + } + + __syncthreads(); + + saveBlockDXT1_Parallel(s_bestEndPoints, colors, xrefs, (uint *)result); + //saveBlockDXT1_Parallel(s_bestEndPoints, s_bestPermutation, xrefs, (uint *)result); +} + + +__global__ void compressLevel4DXT1(uint firstBlock, uint blockWidth, const uint * permutations, uint2 * result) +{ + __shared__ float3 colors[16]; + __shared__ float3 sums[16]; + __shared__ int xrefs[16]; + __shared__ int sameColor; + + loadColorBlockTex(firstBlock, blockWidth, colors, sums, xrefs, &sameColor); + + __syncthreads(); + + if (sameColor) + { + if (threadIdx.x == 0) saveSingleColorBlockDXT1(colors[0], result); + return; + } + + ushort bestStart, bestEnd; + uint bestPermutation; + + __shared__ float errors[NUM_THREADS]; + + evalLevel4Permutations(colors, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors); + + // Use a parallel reduction to find minimum error. + const int minIdx = findMinError(errors); + + // Only write the result of the winner thread. + if (threadIdx.x == minIdx) + { + saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result); + } +} + +__global__ void compressWeightedDXT1(uint firstBlock, uint blockWidth, const uint * permutations, uint2 * result) +{ + __shared__ float3 colors[16]; + __shared__ float3 sums[16]; + __shared__ float weights[16]; + __shared__ int xrefs[16]; + __shared__ int sameColor; + + loadColorBlockTex(firstBlock, blockWidth, colors, sums, weights, xrefs, &sameColor); + + __syncthreads(); + + if (sameColor) + { + if (threadIdx.x == 0) saveSingleColorBlockDXT1(colors[0], result); + return; + } + + ushort bestStart, bestEnd; + uint bestPermutation; + + __shared__ float errors[NUM_THREADS]; + + evalLevel4Permutations(colors, weights, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors); + + // Use a parallel reduction to find minimum error. + int minIdx = findMinError(errors); + + // Only write the result of the winner thread. + if (threadIdx.x == minIdx) + { + saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result); + } +} + + +__global__ void compressNormalDXT1(const uint * permutations, const uint * image, uint2 * result) +{ + __shared__ float2 colors[16]; + __shared__ float2 sums[16]; + __shared__ int xrefs[16]; + __shared__ int sameColor; + + loadColorBlock(image, colors, sums, xrefs, &sameColor); + + __syncthreads(); + + if (sameColor) + { + if (threadIdx.x == 0) saveSingleColorBlockDXT1(colors[0], result); + return; + } + + ushort bestStart, bestEnd; + uint bestPermutation; + + __shared__ float errors[NUM_THREADS]; + + evalAllPermutations(colors, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors); + + // Use a parallel reduction to find minimum error. + const int minIdx = findMinError(errors); + + // Only write the result of the winner thread. 
+ if (threadIdx.x == minIdx) + { + saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result); + } +} + +__global__ void compressCTX1(const uint * permutations, const uint * image, uint2 * result) +{ + __shared__ float2 colors[16]; + __shared__ float2 sums[16]; + __shared__ int xrefs[16]; + __shared__ int sameColor; + + loadColorBlock(image, colors, sums, xrefs, &sameColor); + + __syncthreads(); + + if (sameColor) + { + if (threadIdx.x == 0) saveSingleColorBlockCTX1(colors[0], result); + return; + } + + ushort bestStart, bestEnd; + uint bestPermutation; + + __shared__ float errors[NUM_THREADS]; + + evalAllPermutationsCTX(colors, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors); + + // Use a parallel reduction to find minimum error. + const int minIdx = findMinError(errors); + + // Only write the result of the winner thread. + if (threadIdx.x == minIdx) + { + saveBlockCTX1(bestStart, bestEnd, bestPermutation, xrefs, result); + } +} + + +/* +__device__ float computeError(const float weights[16], uchar a0, uchar a1) +{ + float palette[6]; + palette[0] = (6.0f/7.0f * a0 + 1.0f/7.0f * a1); + palette[1] = (5.0f/7.0f * a0 + 2.0f/7.0f * a1); + palette[2] = (4.0f/7.0f * a0 + 3.0f/7.0f * a1); + palette[3] = (3.0f/7.0f * a0 + 4.0f/7.0f * a1); + palette[4] = (2.0f/7.0f * a0 + 5.0f/7.0f * a1); + palette[5] = (1.0f/7.0f * a0 + 6.0f/7.0f * a1); + + float total = 0.0f; + + for (uint i = 0; i < 16; i++) + { + float alpha = weights[i]; + + float error = a0 - alpha; + error = min(error, palette[0] - alpha); + error = min(error, palette[1] - alpha); + error = min(error, palette[2] - alpha); + error = min(error, palette[3] - alpha); + error = min(error, palette[4] - alpha); + error = min(error, palette[5] - alpha); + error = min(error, a1 - alpha); + + total += error; + } + + return total; +} + +inline __device__ uchar roundAndExpand(float a) +{ + return rintf(__saturatef(a) * 255.0f); +} +*/ +/* +__device__ void optimizeAlpha8(const float alphas[16], uchar & a0, uchar & a1) +{ + float alpha2_sum = 0; + float beta2_sum = 0; + float alphabeta_sum = 0; + float alphax_sum = 0; + float betax_sum = 0; + + for (int i = 0; i < 16; i++) + { + uint idx = index[i]; + float alpha; + if (idx < 2) alpha = 1.0f - idx; + else alpha = (8.0f - idx) / 7.0f; + + float beta = 1 - alpha; + + alpha2_sum += alpha * alpha; + beta2_sum += beta * beta; + alphabeta_sum += alpha * beta; + alphax_sum += alpha * alphas[i]; + betax_sum += beta * alphas[i]; + } + + const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); + + float a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; + float b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; + + a0 = roundAndExpand8(a); + a1 = roundAndExpand8(b); +} +*/ +/* +__device__ void compressAlpha(const float alphas[16], uint4 * result) +{ + const int tid = threadIdx.x; + + // Compress alpha block! + // Brute force approach: + // Try all color pairs: 256*256/2 = 32768, 32768/64 = 512 iterations? 
+ + // Determine min & max alphas + + float A0, A1; + + if (tid < 16) + { + __shared__ uint s_alphas[16]; + + s_alphas[tid] = alphas[tid]; + s_alphas[tid] = min(s_alphas[tid], s_alphas[tid^8]); + s_alphas[tid] = min(s_alphas[tid], s_alphas[tid^4]); + s_alphas[tid] = min(s_alphas[tid], s_alphas[tid^2]); + s_alphas[tid] = min(s_alphas[tid], s_alphas[tid^1]); + A0 = s_alphas[tid]; + + s_alphas[tid] = alphas[tid]; + s_alphas[tid] = max(s_alphas[tid], s_alphas[tid^8]); + s_alphas[tid] = max(s_alphas[tid], s_alphas[tid^4]); + s_alphas[tid] = max(s_alphas[tid], s_alphas[tid^2]); + s_alphas[tid] = max(s_alphas[tid], s_alphas[tid^1]); + A1 = s_alphas[tid]; + } + + __syncthreads(); + + int minIdx = 0; + + if (A1 - A0 > 8) + { + float bestError = FLT_MAX; + + // 64 threads -> 8x8 + // divide [A1-A0] in partitions. + // test endpoints + + for (int i = 0; i < 128; i++) + { + uint idx = (i * NUM_THREADS + tid) * 4; + uchar a0 = idx & 255; + uchar a1 = idx >> 8; + + float error = computeError(alphas, a0, a1); + + if (error < bestError) + { + bestError = error; + A0 = a0; + A1 = a1; + } + } + + __shared__ float errors[NUM_THREADS]; + errors[tid] = bestError; + + // Minimize error. + minIdx = findMinError(errors); + + } + + if (minIdx == tid) + { + // @@ Compute indices. + + // @@ Write alpha block. + } +} + +__global__ void compressDXT5(const uint * permutations, const uint * image, uint4 * result) +{ + __shared__ float3 colors[16]; + __shared__ float3 sums[16]; + __shared__ float weights[16]; + __shared__ int xrefs[16]; + + loadColorBlock(image, colors, sums, weights, xrefs); + + __syncthreads(); + + compressAlpha(weights, result); + + ushort bestStart, bestEnd; + uint bestPermutation; + + __shared__ float errors[NUM_THREADS]; + + evalLevel4Permutations(colors, weights, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors); + + // Use a parallel reduction to find minimum error. + int minIdx = findMinError(errors); + + // Only write the result of the winner thread. + if (threadIdx.x == minIdx) + { + saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, (uint2 *)result); + } +} +*/ + +/*__device__ void evaluatePalette(uint alpha0, uint alpha1, uint alphas[8]) +{ + alpha[0] = alpha0; + alpha[1] = alpha1; + alpha[2] = (6 * alpha[0] + 1 * alpha[1]) / 7; // bit code 010 + alpha[3] = (5 * alpha[0] + 2 * alpha[1]) / 7; // bit code 011 + alpha[4] = (4 * alpha[0] + 3 * alpha[1]) / 7; // bit code 100 + alpha[5] = (3 * alpha[0] + 4 * alpha[1]) / 7; // bit code 101 + alpha[6] = (2 * alpha[0] + 5 * alpha[1]) / 7; // bit code 110 + alpha[7] = (1 * alpha[0] + 6 * alpha[1]) / 7; // bit code 111 +} + +__device__ uint computeAlphaError(const uint block[16], uint alpha0, uint alpha1, int bestError = INT_MAX) +{ + uint8 alphas[8]; + evaluatePalette(alpha0, alpha1, alphas); + + int totalError = 0; + + for (uint i = 0; i < 16; i++) + { + uint8 alpha = block[i]; + + // @@ It should be possible to do this much faster. + + int minDist = INT_MAX; + for (uint p = 0; p < 8; p++) + { + int dist = alphaDistance(alpha, alphas[p]); + minDist = min(dist, minDist); + } + + + + totalError += minDist; + + if (totalError > bestError) + { + // early out + return totalError; + } + } + + return totalError; +} + + +void compressDXT5A(uint alpha[16]) +{ + // Get min/max alpha. 
+ for (uint i = 0; i < 16; i++) + { + mina = min(mina, alpha[i]); + maxa = max(maxa, alpha[i]); + } + + dxtBlock->alpha0 = maxa; + dxtBlock->alpha1 = mina; + + if (maxa - mina > 8) + { + int besterror = computeAlphaError(rgba, dxtBlock); + int besta0 = maxa; + int besta1 = mina; + + // Expand search space a bit. + const int alphaExpand = 8; + mina = (mina <= alphaExpand) ? 0 : mina - alphaExpand; + maxa = (maxa <= 255-alphaExpand) ? 255 : maxa + alphaExpand; + + for (int a0 = mina+9; a0 < maxa; a0++) + { + for (int a1 = mina; a1 < a0-8; a1++) + { + nvDebugCheck(a0 - a1 > 8); + + dxtBlock->alpha0 = a0; + dxtBlock->alpha1 = a1; + int error = computeAlphaError(rgba, dxtBlock, besterror); + + if (error < besterror) + { + besterror = error; + besta0 = a0; + besta1 = a1; + } + } + } + + dxtBlock->alpha0 = besta0; + dxtBlock->alpha1 = besta1; + } +} + +__global__ void compressDXT5n(uint blockNum, uint2 * d_result) +{ + uint idx = blockIdx.x * 128 + threadIdx.x; + + if (idx >= blockNum) + { + return; + } + + // @@ Ideally we would load the data to shared mem to achieve coalesced global mem access. + // @@ Blocks would require too much shared memory (8k) and limit occupancy. + + // @@ Ideally we should use SIMD processing, multiple threads (4-8) processing the same block. + // That simplifies coalescing, and reduces divergence. + + // @@ Experiment with texture. That's probably the most simple approach. + + uint x[16]; + uint y[16]; + + +} +*/ + + +//////////////////////////////////////////////////////////////////////////////// +// Setup kernel +//////////////////////////////////////////////////////////////////////////////// + +extern "C" void setupOMatchTables(const void * OMatch5Src, size_t OMatch5Size, const void * OMatch6Src, size_t OMatch6Size) +{ + // Init single color lookup contant tables. + cudaMemcpyToSymbol(OMatch5, OMatch5Src, OMatch5Size, 0, cudaMemcpyHostToDevice); + cudaMemcpyToSymbol(OMatch6, OMatch6Src, OMatch6Size, 0, cudaMemcpyHostToDevice); +} + +extern "C" void setupCompressKernel(const float weights[3]) +{ + // Set constants. + cudaMemcpyToSymbol(kColorMetric, weights, sizeof(float) * 3, 0); + + float weightsSqr[3]; + weightsSqr[0] = weights[0] * weights[0]; + weightsSqr[1] = weights[1] * weights[1]; + weightsSqr[2] = weights[2] * weights[2]; + + cudaMemcpyToSymbol(kColorMetricSqr, weightsSqr, sizeof(float) * 3, 0); +} + +extern "C" void bindTextureToArray(cudaArray * d_data) +{ + // Setup texture + tex.normalized = false; + tex.filterMode = cudaFilterModePoint; + tex.addressMode[0] = cudaAddressModeClamp; + tex.addressMode[1] = cudaAddressModeClamp; + + cudaBindTextureToArray(tex, d_data); +} + + + +//////////////////////////////////////////////////////////////////////////////// +// Launch kernel +//////////////////////////////////////////////////////////////////////////////// + +// DXT1 compressors: +extern "C" void compressKernelDXT1(uint firstBlock, uint blockNum, uint blockWidth, uint * d_result, uint * d_bitmaps) +{ + compressDXT1<<>>(firstBlock, blockWidth, d_bitmaps, (uint2 *)d_result); +} + +extern "C" void compressKernelDXT1_Level4(uint firstBlock, uint blockNum, uint blockWidth, uint * d_result, uint * d_bitmaps) +{ + compressLevel4DXT1<<>>(firstBlock, blockWidth, d_bitmaps, (uint2 *)d_result); +} + +extern "C" void compressWeightedKernelDXT1(uint firstBlock, uint blockNum, uint blockWidth, uint * d_result, uint * d_bitmaps) +{ + compressWeightedDXT1<<>>(firstBlock, blockWidth, d_bitmaps, (uint2 *)d_result); +} + +// @@ DXT1a compressors. 
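+
+// Illustrative host-side call sequence for the DXT1 launchers above (a sketch only,
+// not part of the exported API): setup and texture binding are assumed to happen once,
+// d_bitmaps is assumed to hold the device copy of the permutation table, and d_result
+// holds 8 bytes (one uint2) per block. Variable names here are hypothetical.
+//
+//   setupCompressKernel(weights);                        // per-channel color metric
+//   setupOMatchTables(h_OMatch5, sizeof(h_OMatch5),
+//                     h_OMatch6, sizeof(h_OMatch6));     // single-color lookup tables
+//   bindTextureToArray(d_image);                         // cudaArray holding the input image
+//   compressKernelDXT1(firstBlock, blockNum, blockWidth, d_result, d_bitmaps);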
+ + +// @@ DXT3 compressors: +extern "C" void compressKernelDXT3(uint firstBlock, uint blockNum, uint blockWidth, uint * d_result, uint * d_bitmaps) +{ + //compressDXT3<<>>(firstBlock, blockWidth, d_bitmaps, (uint2 *)d_result); +} + +extern "C" void compressWeightedKernelDXT3(uint firstBlock, uint blockNum, uint blockWidth, uint * d_result, uint * d_bitmaps) +{ + //compressWeightedDXT3<<>>(firstBlock, blockWidth, d_bitmaps, (uint2 *)d_result); +} + + +// @@ DXT5 compressors. +extern "C" void compressKernelDXT5(uint firstBlock, uint blockNum, uint w, uint * d_result, uint * d_bitmaps) +{ + //compressDXT5<<>>(firstBlock, w, d_bitmaps, (uint2 *)d_result); +} + +extern "C" void compressWeightedKernelDXT5(uint firstBlock, uint blockNum, uint w, uint * d_result, uint * d_bitmaps) +{ + //compressWeightedDXT5<<>>(firstBlock, w, d_bitmaps, (uint2 *)d_result); +} + + + + + +/* +extern "C" void compressNormalKernelDXT1(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps) +{ + compressNormalDXT1<<>>(d_bitmaps, d_data, (uint2 *)d_result); +} + +extern "C" void compressKernelCTX1(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps) +{ + compressCTX1<<>>(d_bitmaps, d_data, (uint2 *)d_result); +} +*/ +/* +extern "C" void compressKernelDXT5n(uint blockNum, cudaArray * d_data, uint * d_result) +{ +// compressDXT5n<<>>(blockNum, (uint2 *)d_result); +} +*/ diff --git a/cuda_code/ComputeProductKernel.cu b/cuda_code/ComputeProductKernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..242448617496ac77585eb3ee009d31ef1ed66dbb --- /dev/null +++ b/cuda_code/ComputeProductKernel.cu @@ -0,0 +1,90 @@ +typedef unsigned char uint8_t; + +struct __device_builtin__ __align__(_NCS_) uint8n +{ + uint8_t _VARNAMES_; +}; + +extern "C" +__global__ void compute_product( + const uint8_t* __restrict__ A, + const float* __restrict__ B, + const char* __restrict__ isEmpty, + const int* __restrict__ divStart, + const int* __restrict__ divSize, + float* __restrict__ V, + int* __restrict__ I, + int N, int L, int O, int nProbe +) { + const int tid = threadIdx.x; // thread ID + const int qid = blockIdx.x; // query ID + // const uint8n* A2 = reinterpret_cast( const_cast(A) ) + const uint8n* A2 = reinterpret_cast(A); // ? + + // Load precomputed distances + extern __shared__ volatile float Bsh[]; +#pragma unroll + if (tid < 256){ + for (int i = 0; i < _M_; i++){ + int bz = i; + int by = qid; + int bx = tid; + Bsh[i * _K_ + tid] = B[(bz * L * _K_) + (by * _K_) + (bx)]; + } + } + __syncthreads(); + // Load A and compute distance + int iN = tid; + int counter = tid; + int start = 0; + int size = 0; + int cDiv = -1; + bool break_loop = false; + while (iN < N){ + while ( (iN - start) >= size){ + cDiv ++; + if (cDiv >= nProbe){ + break_loop = true; + break; + } + int residual = iN - start - size; + start = divStart[(qid) * nProbe + (cDiv)]; + iN = start + residual; + size = divSize[(qid) * nProbe + (cDiv)]; + if (iN >= N){ + break_loop = true; + break; + } + } + if (break_loop) + break; + + float sum = 0.f; +#pragma unroll + for (int i = 0; i < _M_ / _NCS_; i++){ + uint8n Avals = A2[(i * N) + (iN)]; +_CODEBLOCK_ + } + // write to V and I + int isCurrentEmpty; + isCurrentEmpty = isEmpty[iN]; + + /* + if (isCurrentEmpty == 0){ + V[(qid) * O + counter] = sum; + I[(qid) * O + counter] = iN; + } else { + V[(qid) * O + counter] = -999999.f; + I[(qid) * O + counter] = -1; + } + */ + + if (counter < O){ + V[(qid) * O + counter] = isCurrentEmpty == 0 ? 
sum : -999999.f; + I[(qid) * O + counter] = isCurrentEmpty == 0 ? iN : -1; + // atomicAdd(V + (qid) * O + counter, isCurrentEmpty == 0 ? sum : -99999.f); + } + iN += _TPB_; + counter += _TPB_; + } +} \ No newline at end of file diff --git a/cuda_code/ConstraintEllipsoidGPU_1.cu b/cuda_code/ConstraintEllipsoidGPU_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..7dfefefc60b8163f6f13e92a575eec2fbb7ad8c3 --- /dev/null +++ b/cuda_code/ConstraintEllipsoidGPU_1.cu @@ -0,0 +1,103 @@ +// Copyright (c) 2009-2016 The Regents of the University of Michigan +// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. + + +// Maintainer: joaander + +#include "ConstraintEllipsoidGPU.cuh" +#include "EvaluatorConstraint.h" +#include "EvaluatorConstraintEllipsoid.h" + +#include + +/*! \file ConstraintEllipsoidGPU.cu + \brief Defines GPU kernel code for calculating ellipsoid constraint forces. Used by ConstraintEllipsoidGPU. +*/ + +//! Kernel for caculating ellipsoid constraint forces on the GPU +/*! \param d_group_members List of members in the group + \param group_size number of members in the group + \param N number of particles in system + \param d_pos particle positions on device + \param P Position of the ellipsoid + \param rx radius of the ellipsoid in x direction + \param ry radius of the ellipsoid in y direction + \param rz radius of the ellipsoid in z direction + \param deltaT step size from the Integrator +*/ +extern "C" __global__ +void gpu_compute_constraint_ellipsoid_constraint_kernel(const unsigned int *d_group_members, + unsigned int group_size, + const unsigned int N, + Scalar4 *d_pos, + Scalar3 P, + Scalar rx, + Scalar ry, + Scalar rz) + { + // start by identifying which particle we are to handle + // determine which particle this thread works on + int group_idx = blockIdx.x * blockDim.x + threadIdx.x; + + if (group_idx >= group_size) + return; + + unsigned int idx = d_group_members[group_idx]; + + // read in position, velocity, net force, and mass + Scalar4 pos = d_pos[idx]; + + // convert to Scalar3's for passing to the evaluators + Scalar3 X = make_scalar3(pos.x, pos.y, pos.z); + + // evaluate the constraint position + EvaluatorConstraintEllipsoid Ellipsoid(P, rx, ry, rz); + Scalar3 C = Ellipsoid.evalClosest(X); + + // apply the constraint + d_pos[idx] = make_scalar4(C.x, C.y, C.z, Scalar(0.0)); + } + + +/*! 
\param d_group_members List of members in the group + \param group_size number of members in the group + \param N nunmber of particles + \param d_pos particle positions on the device + \param P Position of the ellipsoid + \param rx radius of the ellipsoid in x direction + \param ry radius of the ellipsoid in y direction + \param rz radius of the ellipsoid in z direction + \param deltaT step size from the Integrator + \param block_size Block size to execute on the GPU + + \returns Any error code resulting from the kernel launch + \note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize() +*/ +cudaError_t gpu_compute_constraint_ellipsoid_constraint(const unsigned int *d_group_members, + unsigned int group_size, + const unsigned int N, + Scalar4 *d_pos, + const Scalar3 P, + Scalar rx, + Scalar ry, + Scalar rz, + unsigned int block_size) + { + assert(d_group_members); + + // setup the grid to run the kernel + dim3 grid( group_size / block_size + 1, 1, 1); + dim3 threads(block_size, 1, 1); + + // run the kernel + gpu_compute_constraint_ellipsoid_constraint_kernel<<< grid, threads>>>(d_group_members, + group_size, + N, + d_pos, + P, + rx, + ry, + rz); + + return cudaSuccess; + } diff --git a/cuda_code/CopySurface.cu b/cuda_code/CopySurface.cu new file mode 100644 index 0000000000000000000000000000000000000000..4e7a2f625b76ad7e87847162ac53612d377e04b1 --- /dev/null +++ b/cuda_code/CopySurface.cu @@ -0,0 +1,52 @@ +/*************************************************************************** +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of NVIDIA CORPORATION nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +***************************************************************************/ +#include "CopySurface.h" +#include + +// The CUDA kernel. This sample simply copies the input surface. 
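+// Note: surf2Dread/surf2Dwrite take the x coordinate in bytes, which is why the
+// kernel below scales x by sizeof(T).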
+template +__global__ void copySurface(cudaSurfaceObject_t input, cudaSurfaceObject_t output, unsigned int width, unsigned int height) +{ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + if (x < width && y < height) + { + T data; + surf2Dread(&data, input, sizeof(T) * x, y); + surf2Dwrite(data, output, sizeof(T) * x, y); + } +} + +// A wrapper function that launches the kernel. +void launchCopySurface(cudaSurfaceObject_t input, cudaSurfaceObject_t output, unsigned int width, unsigned int height, unsigned int format) +{ + dim3 dimBlock(16, 16); + dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y); + if (format == cudaChannelFormatKindFloat) copySurface<<>>(input, output, width, height); + else copySurface<<>>(input, output, width, height); +} diff --git a/cuda_code/Copy_15.cu b/cuda_code/Copy_15.cu new file mode 100644 index 0000000000000000000000000000000000000000..ffc4e3edc981f76042ca5343fe3df6fd2a41026f --- /dev/null +++ b/cuda_code/Copy_15.cu @@ -0,0 +1,225 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __HIP_PLATFORM_HCC__ +#include +#endif + +namespace at { +namespace native { + +using namespace at::cuda; + +// device-to-device copy, does type conversion +void copy_device_to_device(TensorIterator& iter, bool non_blocking) { + int64_t numel = iter.numel(); + + // We can memcpy the memory if both tensors have the same type AND both + // tensors are contiguous after dimension coalescing and reordering. + bool same_type = iter.dtype(0) == iter.dtype(1); + bool same_conj = iter.tensor(0).is_conj() == iter.tensor(1).is_conj(); + bool memcpy_eligible = same_type && same_conj && iter.is_contiguous(); + + Device dst_device = iter.device(0); + Device src_device = iter.device(1); + + CUDAGuard device_guard(src_device); + + // We always perform the copy on the source device, using the current stream + // on the source device, and we fully synchronize on both src and dst's + // current streams for completion of the copy. We have to explicitly do this + // for non-contig copies. This mimics the behavior of cross-device + // cudaMemcpyAsync on the default stream. + CUDAStream copy_stream = getCurrentCUDAStream(src_device.index()); + if (src_device != dst_device) { + // This is a cross-device copy on the src current stream and dst current + // stream. We perform a two-way barrier between both devices' streams + // before the copy. This ensures that any write-after-write and + // write-after-read dependencies on the destination side are handled, so + // that no one is operating on the dst memory when we perform the copy. 
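+  // (CUDAEvent::record/block wrap cudaEventRecord/cudaStreamWaitEvent, so the
+  // barrier below is expressed purely through stream ordering, with no host-side
+  // synchronization.)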
+ // src waits on dst barrier (src already waits on src) + CUDAEvent dst_ready; + device_guard.set_device(dst_device); + dst_ready.record(getCurrentCUDAStream(dst_device.index())); + + device_guard.set_device(src_device); + dst_ready.block(copy_stream); + } + + if (memcpy_eligible) { + void *dst = iter.data_ptr(0); + void *src = iter.data_ptr(1); + size_t size = numel * iter.element_size(0); + if (src != dst || src_device != dst_device) { + // Perform the copy + AT_CUDA_CHECK(cudaMemcpyAsync( + dst, src, size, + cudaMemcpyDeviceToDevice, + copy_stream)); + } + } else { + auto dtype = iter.dtype(0); + if (isQIntType(dtype)) { + AT_DISPATCH_QINT_TYPES(dtype, "copy_", [&] { + gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; }); + }); + } else { + if (!same_conj && same_type) { + AT_DISPATCH_COMPLEX_TYPES( + dtype, "copy_conj_", [&] { + gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return std::conj(x); }); + }); + } else { + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( + kHalf, kBool, kBFloat16, dtype, "copy_", [&] { + gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; }); + }); + } + } + } + + if (src_device != dst_device) { + // dst waits on src barrier (dst already waits on dst). We cannot + // operate on dst's copy until the copy is complete. + + // Still on src_device, record stream event + CUDAEvent src_ready; + src_ready.record(copy_stream); + + device_guard.set_device(dst_device); + src_ready.block(getCurrentCUDAStream(dst_device.index())); + } + + AT_CUDA_CHECK(cudaGetLastError()); +} + +static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) { + Device dst_device = iter.device(0); + Device src_device = iter.device(1); + + if (dst_device == src_device) { + // We never require temporaries for copies on the same GPU. + TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda()); + return false; + } + + bool same_dtype = iter.dtype(0) == iter.dtype(1); + if (same_dtype && iter.is_contiguous()) { + // Contiguous same-dtype copies can always use cudaMemcpyAsync + return false; + } else if (dst_device.is_cuda() && src_device.is_cuda()) { + // Copies between GPUs can use the copy kernel if P2P is supported + return !p2p_enabled; + } else { + // The remaining cases require temporaries. For example, this includes + // non-contiguous copies between CPU and GPU. + return true; + } +} + +static bool maybe_enable_p2p_access(Device dst_device, Device src_device) { + if (dst_device.is_cpu() || src_device.is_cpu()) { + return false; + } + return THCState_getPeerToPeerAccess( + globalContext().getTHCState(), src_device.index(), dst_device.index()); +} + +static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) { + AT_ASSERT(iter.ntensors() == 2); + + Device dst_device = iter.device(0); + Device src_device = iter.device(1); + + // Enable p2p access between devices. (No-op if it involves the CPU) + bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device); + + if (copy_requires_temporaries(iter, p2p_enabled)) { + // NB: this involves recursive calls to copy. Be careful that those copies + // don't require temporaries or you will cause an infinite recursion! + auto& dst = iter.tensor(0); + Tensor dst_contig; + Tensor src_contig; + + // Type conversions are performed on the CPU for CPU-GPU copies and on + // the src device for GPU-GPU copies. + if (iter.device_type(0) == kCUDA) { + dst_contig = dst.is_contiguous() ? 
dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous(); + } else { + bool same_type = iter.dtype(0) == iter.dtype(1); + dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT); + src_contig = iter.tensor(1).expand_as(dst).contiguous(); + } + + // propagate the correct conjugate bit + dst_contig._set_conj(dst.is_conj()); + src_contig._set_conj(iter.tensor(1).is_conj()); + + // perform a same-dtype copy on contiguous tensors + TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes())); + TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type()); + dst_contig.copy_(src_contig, non_blocking); + + // if necessary, copy back into dst + if (!dst_contig.is_same(dst)) { + TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device()); + dst.copy_(dst_contig, non_blocking); + } + return; + } + + // Copy on GPU (or between GPUs) + if (dst_device.is_cuda() && src_device.is_cuda()) { + copy_device_to_device(iter, non_blocking); + return; + } + + // Copy between CPU and GPU + cuda::OptionalCUDAGuard device_guard; + cudaMemcpyKind kind; + if (dst_device.is_cuda() && src_device.is_cpu()) { + device_guard.set_device(dst_device); + kind = cudaMemcpyHostToDevice; + } else if (dst_device.is_cpu() && src_device.is_cuda()) { + device_guard.set_device(src_device); + kind = cudaMemcpyDeviceToHost; + } else { + TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()"); + } + + void* dst = iter.data_ptr(0); + void* src = iter.data_ptr(1); + int64_t nbytes = iter.numel() * iter.element_size(0); + CUDAStream stream = getCurrentCUDAStream(); + + if (non_blocking) { + AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream)); + void* ptr = (dst_device == kCPU ? dst : src); + AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream)); + } else { +#if HIP_VERSION >= 301 + AT_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream)); +#else + AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream)); + AT_CUDA_CHECK(cudaStreamSynchronize(stream)); +#endif + } + + if (iter.tensor(0).is_conj() != iter.tensor(1).is_conj()) { + iter.tensor(0).conj_physical_(); + } +} + +REGISTER_DISPATCH(copy_stub, ©_kernel_cuda); + +} // namespace native +} // namespace at diff --git a/cuda_code/CudaAllocator_2.cu b/cuda_code/CudaAllocator_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..244d045b751177593b3724409bd88450c236131b --- /dev/null +++ b/cuda_code/CudaAllocator_2.cu @@ -0,0 +1,343 @@ +//============================================================================ +// Copyright (c) Kitware, Inc. +// All rights reserved. +// See LICENSE.txt for details. +// +// This software is distributed WITHOUT ANY WARRANTY; without even +// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +// PURPOSE. See the above copyright notice for more information. +//============================================================================ + +#include +#include +#include +#include +#include +#include +#include +#include +#define NO_VTKM_MANAGED_MEMORY "NO_VTKM_MANAGED_MEMORY" + +#include +#include + +VTKM_THIRDPARTY_PRE_INCLUDE +#include +VTKM_THIRDPARTY_POST_INCLUDE + +// These static vars are in an anon namespace to work around MSVC linker issues. +namespace +{ +#if CUDART_VERSION >= 8000 +// Has CudaAllocator::Initialize been called by any thread? 
+static std::once_flag IsInitialized; +#endif + +// Holds how VTK-m currently allocates memory. +// When VTK-m is initialized we set this based on the hardware support ( HardwareSupportsManagedMemory ). +// The user can explicitly disable managed memory through an enviornment variable +// or by calling a function on the CudaAllocator. +// Likewise managed memory can be re-enabled by calling a function on CudaAllocator +// if and only if the underlying hardware supports pageable managed memory +static bool ManagedMemoryEnabled = false; + +// True if concurrent pagable managed memory is supported by the machines hardware. +static bool HardwareSupportsManagedMemory = false; + +// Avoid overhead of cudaMemAdvise and cudaMemPrefetchAsync for small buffers. +// This value should be > 0 or else these functions will error out. +static std::size_t Threshold = 1 << 20; +} + +namespace vtkm +{ +namespace cont +{ +namespace cuda +{ +namespace internal +{ + +bool CudaAllocator::UsingManagedMemory() +{ + CudaAllocator::Initialize(); + return ManagedMemoryEnabled; +} + +void CudaAllocator::ForceManagedMemoryOff() +{ + if (HardwareSupportsManagedMemory) + { + ManagedMemoryEnabled = false; + VTKM_LOG_F(vtkm::cont::LogLevel::Info, "CudaAllocator disabling managed memory"); + } + else + { + VTKM_LOG_F( + vtkm::cont::LogLevel::Warn, + "CudaAllocator trying to disable managed memory on hardware that doesn't support it"); + } +} + +void CudaAllocator::ForceManagedMemoryOn() +{ + if (HardwareSupportsManagedMemory) + { + ManagedMemoryEnabled = true; + VTKM_LOG_F(vtkm::cont::LogLevel::Info, "CudaAllocator enabling managed memory"); + } + else + { + VTKM_LOG_F(vtkm::cont::LogLevel::Warn, + "CudaAllocator trying to enable managed memory on hardware that doesn't support it"); + } +} + +bool CudaAllocator::IsDevicePointer(const void* ptr) +{ + CudaAllocator::Initialize(); + if (!ptr) + { + return false; + } + + cudaPointerAttributes attr; + cudaError_t err = cudaPointerGetAttributes(&attr, ptr); + // This function will return invalid value if the pointer is unknown to the + // cuda runtime. Manually catch this value since it's not really an error. + if (err == cudaErrorInvalidValue) + { + cudaGetLastError(); // Clear the error so we don't raise it later... + return false; + } + VTKM_CUDA_CALL(err /*= cudaPointerGetAttributes(&attr, ptr)*/); + return attr.devicePointer == ptr; +} + +bool CudaAllocator::IsManagedPointer(const void* ptr) +{ + if (!ptr || !ManagedMemoryEnabled) + { + return false; + } + + cudaPointerAttributes attr; + cudaError_t err = cudaPointerGetAttributes(&attr, ptr); + // This function will return invalid value if the pointer is unknown to the + // cuda runtime. Manually catch this value since it's not really an error. + if (err == cudaErrorInvalidValue) + { + cudaGetLastError(); // Clear the error so we don't raise it later... + return false; + } + VTKM_CUDA_CALL(err /*= cudaPointerGetAttributes(&attr, ptr)*/); +#if CUDART_VERSION < 10000 // isManaged deprecated in CUDA 10. + return attr.isManaged != 0; +#else // attr.type doesn't exist before CUDA 10 + return attr.type == cudaMemoryTypeManaged; +#endif +} + +void* CudaAllocator::Allocate(std::size_t numBytes) +{ + CudaAllocator::Initialize(); + // When numBytes is zero cudaMallocManaged returns an error and the behavior + // of cudaMalloc is not documented. Just return nullptr. 
+ if (numBytes == 0) + { + return nullptr; + } + + void* ptr = nullptr; + if (ManagedMemoryEnabled) + { + VTKM_CUDA_CALL(cudaMallocManaged(&ptr, numBytes)); + } + else + { + VTKM_CUDA_CALL(cudaMalloc(&ptr, numBytes)); + } + + { + VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, + "Allocated CUDA array of %s at %p.", + vtkm::cont::GetSizeString(numBytes).c_str(), + ptr); + } + + return ptr; +} + +void* CudaAllocator::AllocateUnManaged(std::size_t numBytes) +{ + void* ptr = nullptr; + VTKM_CUDA_CALL(cudaMalloc(&ptr, numBytes)); + { + VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, + "Allocated CUDA array of %s at %p.", + vtkm::cont::GetSizeString(numBytes).c_str(), + ptr); + } + return ptr; +} + +void CudaAllocator::Free(void* ptr) +{ + VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, "Freeing CUDA allocation at %p.", ptr); + VTKM_CUDA_CALL(cudaFree(ptr)); +} + +void CudaAllocator::FreeDeferred(void* ptr, std::size_t numBytes) +{ + static std::mutex deferredMutex; + static std::vector deferredPointers; + static std::size_t deferredSize = 0; + constexpr std::size_t bufferLimit = 2 << 24; //16MB buffer + + { + VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, + "Deferring free of CUDA allocation at %p of %s.", + ptr, + vtkm::cont::GetSizeString(numBytes).c_str()); + } + + std::vector toFree; + // critical section + { + std::lock_guard lock(deferredMutex); + deferredPointers.push_back(ptr); + deferredSize += numBytes; + if (deferredSize >= bufferLimit) + { + toFree.swap(deferredPointers); + deferredSize = 0; + } + } + + for (auto&& p : toFree) + { + VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, "Freeing deferred CUDA allocation at %p.", p); + VTKM_CUDA_CALL(cudaFree(p)); + } +} + +void CudaAllocator::PrepareForControl(const void* ptr, std::size_t numBytes) +{ + if (IsManagedPointer(ptr) && numBytes >= Threshold) + { +#if CUDART_VERSION >= 8000 + // TODO these hints need to be benchmarked and adjusted once we start + // sharing the pointers between cont/exec + VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, cudaCpuDeviceId)); + VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, cudaCpuDeviceId, cudaStreamPerThread)); +#endif // CUDA >= 8.0 + } +} + +void CudaAllocator::PrepareForInput(const void* ptr, std::size_t numBytes) +{ + if (IsManagedPointer(ptr) && numBytes >= Threshold) + { +#if CUDART_VERSION >= 8000 + vtkm::Id dev; + vtkm::cont::RuntimeDeviceInformation() + .GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagCuda()) + .GetDeviceInstance(dev); + // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetPreferredLocation, dev)); + // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetReadMostly, dev)); + VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, dev)); + VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread)); +#endif // CUDA >= 8.0 + } +} + +void CudaAllocator::PrepareForOutput(const void* ptr, std::size_t numBytes) +{ + if (IsManagedPointer(ptr) && numBytes >= Threshold) + { +#if CUDART_VERSION >= 8000 + vtkm::Id dev; + vtkm::cont::RuntimeDeviceInformation() + .GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagCuda()) + .GetDeviceInstance(dev); + // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetPreferredLocation, dev)); + // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseUnsetReadMostly, dev)); + VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, dev)); + VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread)); +#endif // CUDA >= 8.0 + } +} + +void 
CudaAllocator::PrepareForInPlace(const void* ptr, std::size_t numBytes) +{ + if (IsManagedPointer(ptr) && numBytes >= Threshold) + { +#if CUDART_VERSION >= 8000 + vtkm::Id dev; + vtkm::cont::RuntimeDeviceInformation() + .GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagCuda()) + .GetDeviceInstance(dev); + // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetPreferredLocation, dev)); + // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseUnsetReadMostly, dev)); + VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, dev)); + VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread)); +#endif // CUDA >= 8.0 + } +} + +void CudaAllocator::Initialize() +{ +#if CUDART_VERSION >= 8000 + std::call_once(IsInitialized, []() { + auto cudaDeviceConfig = dynamic_cast< + vtkm::cont::internal::RuntimeDeviceConfiguration&>( + vtkm::cont::RuntimeDeviceInformation{}.GetRuntimeConfiguration( + vtkm::cont::DeviceAdapterTagCuda())); + vtkm::Id numDevices; + cudaDeviceConfig.GetMaxDevices(numDevices); + + if (numDevices == 0) + { + return; + } + + // Check all devices, use the feature set supported by all + bool managedMemorySupported = true; + std::vector cudaProp; + cudaDeviceConfig.GetCudaDeviceProp(cudaProp); + for (int i = 0; i < numDevices && managedMemorySupported; ++i) + { + // We check for concurrentManagedAccess, as devices with only the + // managedAccess property have extra synchronization requirements. + managedMemorySupported = managedMemorySupported && cudaProp[i].concurrentManagedAccess; + } + + HardwareSupportsManagedMemory = managedMemorySupported; + ManagedMemoryEnabled = managedMemorySupported; + + VTKM_LOG_F(vtkm::cont::LogLevel::Info, + "CudaAllocator hardware %s managed memory", + HardwareSupportsManagedMemory ? "supports" : "doesn't support"); + +// Check if users want to disable managed memory +#pragma warning(push) +// getenv is not thread safe on windows but since it's inside a call_once block so +// it's fine to suppress the warning here. 
+#pragma warning(disable : 4996) + const char* buf = std::getenv(NO_VTKM_MANAGED_MEMORY); +#pragma warning(pop) + if (managedMemorySupported && buf != nullptr) + { //only makes sense to disable managed memory if the hardware supports it + //in the first place + ManagedMemoryEnabled = false; + VTKM_LOG_F( + vtkm::cont::LogLevel::Info, + "CudaAllocator disabling managed memory due to NO_VTKM_MANAGED_MEMORY env variable"); + } + }); +#endif +} +} +} +} +} // end namespace vtkm::cont::cuda::internal diff --git a/cuda_code/CudaKernel.cu b/cuda_code/CudaKernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..ba18e0e23e069a250898431fd01073e47fe28f17 --- /dev/null +++ b/cuda_code/CudaKernel.cu @@ -0,0 +1,12 @@ +typedef struct KernelData +{ + float a; + float b; + float result; +} KernelData; + +__global__ void vectorAddition(KernelData* data) +{ + int index = blockIdx.x * blockDim.x + threadIdx.x; + data[index].result = data[index].a + data[index].b; +} diff --git a/cuda_code/CudaKernel_11.cu b/cuda_code/CudaKernel_11.cu new file mode 100644 index 0000000000000000000000000000000000000000..36454e42a0682471071e311a92995a9c414c7ca7 --- /dev/null +++ b/cuda_code/CudaKernel_11.cu @@ -0,0 +1,105 @@ +#include +#include +#include "CudaKernel.h" + +using namespace std; + +#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) +#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ ) +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + + +texture tex1; + +static cudaArray *cuArray = NULL; + +//Kernel for x direction sobel +__global__ void implement_x_sobel(float* output,int width,int height,int widthStep) +{ + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + + //Make sure that thread is inside image bounds + if(x(); + + CudaSafeCall(cudaMallocArray(&cuArray,&channelDesc,width,height)); + + //Never use 1D memory copy if host and device pointers have different widthStep. + // You don't know the width step of CUDA array, so its better to use cudaMemcpy2D... 
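+    // cudaMemcpy2DToArray takes the source pitch (widthStep, in bytes) separately
+    // from the row width (width * sizeof(float)), so padded rows are copied correctly.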
+ cudaMemcpy2DToArray(cuArray,0,0,input,widthStep,width * sizeof(float),height,cudaMemcpyHostToDevice); + + cudaBindTextureToArray(tex1,cuArray,channelDesc); + + float * D_output_x; + CudaSafeCall(cudaMalloc(&D_output_x,widthStep*height)); + + dim3 blocksize(16,16); + dim3 gridsize; + gridsize.x=(width+blocksize.x-1)/blocksize.x; + gridsize.y=(height+blocksize.y-1)/blocksize.y; + + implement_x_sobel<<>>(D_output_x,width,height,widthStep/sizeof(float)); + + cudaThreadSynchronize(); + CudaCheckError(); + + //Don't forget to unbind the texture + cudaUnbindTexture(tex1); + + CudaSafeCall(cudaMemcpy(output,D_output_x,height*widthStep,cudaMemcpyDeviceToHost)); + + cudaFree(D_output_x); + cudaFreeArray(cuArray); +} \ No newline at end of file diff --git a/cuda_code/CudnnMaxPool_2.cu b/cuda_code/CudnnMaxPool_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..ef621c134504cb241abefa00858cba858fa05b1c --- /dev/null +++ b/cuda_code/CudnnMaxPool_2.cu @@ -0,0 +1,210 @@ +#include "gpu_runtime.h" + +int CuDNN_DLGpuMax_Pooling2d(const DLArrayHandle input,const size_t kernel_H, const size_t kernel_W, DLArrayHandle output, const size_t padding, const size_t stride, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){ + // create handle + // CUDNN_CALL(cudnnCreate(&cudnn)); + int dev_id = (input->ctx).device_id; + cudnn_init(dev_id, stream_handle); + + // input + size_t input_N = input->shape[0]; + size_t input_C = input->shape[1]; + size_t input_H = input->shape[2]; + size_t input_W = input->shape[3]; + const float * input_data = (const float*) input->data; + + //output + size_t output_H = output->shape[2]; + size_t output_W = output->shape[3]; + float *output_data = (float *) output->data; + if(p != NULL){ + int size_input = 1, size_output = 1; + for(int i = 0; i < input -> ndim; i++) + size_input *= input -> shape[i]; + for(int i = 0; i < output -> ndim; i++) + size_output *= output -> shape[i]; + p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024; + p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024; + p -> workspace_memory = 0; + cudaEvent_t start, stop; + cudaEventCreate(&start); + cudaEventRecord(start,0); + //pooling descriptor + cudnnPoolingDescriptor_t maxpool_desc; + CUDNN_CALL(cudnnCreatePoolingDescriptor(&maxpool_desc)); + // std::cout<<"padding = "< +#include "common.h" + +// dim3(GS) +// dim3(BS) +// /*number of layers*/ +template +__global__ void kernel_reduction( + T *odata, T *idata, + const unsigned int width, + const unsigned int height, + const unsigned int steps, + const unsigned int layer /*layer size = steps*height*/ +){ + __shared__ T smem[BS]; + const unsigned int tid = threadIdx.x; + const unsigned int ty = blockIdx.x; + unsigned int i = 0; + unsigned int y = 0; + unsigned int x = 0; + T sumv = 0; + + // for each layer + for (; i < N; i++){ + sumv = 0; + for (y = ty; y < height; y+=GS){ + for (x = tid; x < width; x+=BS){ + sumv += idata[y*steps + x]; + } + } + + smem[tid] = sumv; + __syncthreads(); + + if (BS >= 512){ if (tid < 256) smem[tid] = sumv = sumv + smem[tid + 256]; __syncthreads(); } + if (BS >= 256){ if (tid < 128) smem[tid] = sumv = sumv + smem[tid + 128]; __syncthreads(); } + if (BS >= 128){ if (tid < 64) smem[tid] = sumv = sumv + smem[tid + 64]; __syncthreads(); } + if (BS >= 64){ if (tid < 32) smem[tid] = sumv = sumv + smem[tid + 32]; __syncthreads(); } + if (BS >= 32){ if (tid < 16) smem[tid] = sumv = sumv + smem[tid + 16]; __syncthreads(); } + if (BS >= 16){ if (tid < 8) smem[tid] = sumv = sumv + 
smem[tid + 8]; __syncthreads(); } + if (BS >= 8){ if (tid < 4) smem[tid] = sumv = sumv + smem[tid + 4]; __syncthreads(); } + if (BS >= 4){ if (tid < 2) smem[tid] = sumv = sumv + smem[tid + 2]; __syncthreads(); } + if (BS >= 2){ if (tid < 1){ odata[i*GS + ty] = sumv + smem[1]; } } + idata += layer; + } +} +// dim3(N) /*number of layers*/ +// dim3(BS) +template +__global__ void kernel_reduction(T* odata, const T* idata){ + + __shared__ T smem[BS]; + T sumv = 0; + unsigned int tid = threadIdx.x; + unsigned int i = blockIdx.x; + smem[tid] = sumv = idata[i*BS + tid]; + __syncthreads(); + if (BS >= 512){ if (tid < 256) smem[tid] = sumv = sumv + smem[tid + 256]; __syncthreads(); } + if (BS >= 256){ if (tid < 128) smem[tid] = sumv = sumv + smem[tid + 128]; __syncthreads(); } + if (BS >= 128){ if (tid < 64) smem[tid] = sumv = sumv + smem[tid + 64]; __syncthreads(); } + if (BS >= 64){ if (tid < 32) smem[tid] = sumv = sumv + smem[tid + 32]; __syncthreads(); } + if (BS >= 32){ if (tid < 16) smem[tid] = sumv = sumv + smem[tid + 16]; __syncthreads(); } + if (BS >= 16){ if (tid < 8) smem[tid] = sumv = sumv + smem[tid + 8]; __syncthreads(); } + if (BS >= 8){ if (tid < 4) smem[tid] = sumv = sumv + smem[tid + 4]; __syncthreads(); } + if (BS >= 4){ if (tid < 2) smem[tid] = sumv = sumv + smem[tid + 2]; __syncthreads(); } + if (BS >= 2){ if (tid < 1) { odata[i] = sumv + smem[1]; } } +} + +// dim3(N) /*number of layers*/ +// dim3(BS) +template +__global__ void kernel_reduction_factor(T* odata, const T* idata, const float factor){ + + __shared__ T smem[BS]; + T sumv = 0; + unsigned int tid = threadIdx.x; + unsigned int i = blockIdx.x; + smem[tid] = sumv = idata[i*BS + tid]; + __syncthreads(); + if (BS >= 512){ if (tid < 256) smem[tid] = sumv = sumv + smem[tid + 256]; __syncthreads(); } + if (BS >= 256){ if (tid < 128) smem[tid] = sumv = sumv + smem[tid + 128]; __syncthreads(); } + if (BS >= 128){ if (tid < 64) smem[tid] = sumv = sumv + smem[tid + 64]; __syncthreads(); } + if (BS >= 64){ if (tid < 32) smem[tid] = sumv = sumv + smem[tid + 32]; __syncthreads(); } + if (BS >= 32){ if (tid < 16) smem[tid] = sumv = sumv + smem[tid + 16]; __syncthreads(); } + if (BS >= 16){ if (tid < 8) smem[tid] = sumv = sumv + smem[tid + 8]; __syncthreads(); } + if (BS >= 8){ if (tid < 4) smem[tid] = sumv = sumv + smem[tid + 4]; __syncthreads(); } + if (BS >= 4){ if (tid < 2) smem[tid] = sumv = sumv + smem[tid + 2]; __syncthreads(); } + if (BS >= 2){ if (tid < 1) { odata[i] = (sumv + smem[1])*factor; } } +} + +template void +run_sum_(T *odata, T *idata, const unsigned int width, const unsigned int height, const unsigned int steps) +{ + if (GS > N){ + kernel_reduction <<>>(odata + GS, idata, width, height, steps, steps*height); + kernel_reduction <<>>(odata, odata + GS); + } + else{ + cerr << "not support yet." << endl; + exit(EXIT_FAILURE); + } +} + +template void run_sum_ +(float *odata, float *idata, const unsigned int width, const unsigned int height, const unsigned int steps); +template void run_sum_ +(int *odata, int *idata, const unsigned int width, const unsigned int height, const unsigned int steps); + + +template void +run_mean_(T *odata, T *idata, const unsigned int width, const unsigned int height, const unsigned int steps) +{ + if (GS > N){ + kernel_reduction << > >(odata + GS, idata, width, height, steps, steps*height); + kernel_reduction_factor << > >(odata, odata + GS, 1.f/float(width*height)); + } + else{ + cerr << "not support yet." 
<< endl; + exit(EXIT_FAILURE); + } +} + +template void run_mean_ +(float *odata, float *idata, const unsigned int width, const unsigned int height, const unsigned int steps); +template void run_mean_ +(int *odata, int *idata, const unsigned int width, const unsigned int height, const unsigned int steps); \ No newline at end of file diff --git a/cuda_code/DepthmapDenoiseWeightedHuber.cu b/cuda_code/DepthmapDenoiseWeightedHuber.cu new file mode 100644 index 0000000000000000000000000000000000000000..db30fe18935d5f515d9d2fadc97f33d71bf7eb10 --- /dev/null +++ b/cuda_code/DepthmapDenoiseWeightedHuber.cu @@ -0,0 +1,942 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +//! OpenDTAM Variant of Chambolle & Pock denoising +//! +//! The complicated half of the DTAM algorithm's mapping core, +//! but can be used independently to refine depthmaps. +//! +//! Written by Paul Foster for GSoC 2014 OpenDTAM project. +//! High level algorithm described by Richard Newcombe, Steven J. Lovegrove, and Andrew J. Davison. +//! "DTAM: Dense tracking and mapping in real-time." +//! Which was in turn based on Chambolle & Pock's +//! "A first-order primal-dual algorithm for convex problems with applications to imaging." 
+ +#include //for cudaSafeCall,CV_Assert + +#include "DepthmapDenoiseWeightedHuber.cuh" + +namespace cv { namespace cuda { namespace device { + namespace dtam_denoise{ + + +static unsigned int arows;//TODO:make sure this is still reentrant + +void loadConstants(uint h_rows, uint, uint , uint , + float* , float* , float* , float* , float* , + float*) { + + arows=h_rows; +} + +cudaStream_t localStream=0; + +const int BLOCKX2D=32; +const int BLOCKY2D=32; +#define GENERATE_CUDA_FUNC2D(funcName,arglist,notypes) \ +static __global__ void funcName arglist; \ +void funcName##Caller arglist{ \ + dim3 dimBlock(BLOCKX2D,BLOCKY2D); \ + dim3 dimGrid((acols + dimBlock.x - 1) / dimBlock.x, \ + (arows + dimBlock.y - 1) / dimBlock.y); \ + funcName<<>>notypes; \ + cudaSafeCall( cudaGetLastError() );\ +};static __global__ void funcName arglist + + +#define GENERATE_CUDA_FUNC2DROWS(funcName,arglist,notypes) \ +static __global__ void funcName arglist; \ +void funcName##Caller arglist{ \ + dim3 dimBlock(BLOCKX2D,BLOCKY2D); \ + dim3 dimGrid(1, \ + (arows + dimBlock.y - 1) / dimBlock.y); \ + funcName<<>>notypes; \ + cudaSafeCall( cudaGetLastError() );\ +};static __global__ void funcName arglist + + +static __global__ void computeG1 (float* pp, float* g1p, float* gxp, float* gyp, int cols); +static __global__ void computeG2 (float* pp, float* g1p, float* gxp, float* gyp, int cols); +void computeGCaller (float* pp, float* g1p, float* gxp, float* gyp, int cols){ +// dim3 dimBlock(BLOCKX2D,BLOCKY2D); + dim3 dimBlock(BLOCKX2D,4); + dim3 dimGrid(1, + (arows + dimBlock.y - 1) / dimBlock.y); + + computeG1<<>>(pp, g1p, gxp, gyp, cols); + cudaDeviceSynchronize(); + computeG2<<>>(pp, g1p, gxp, gyp, cols); + cudaDeviceSynchronize(); + + cudaSafeCall( cudaGetLastError() ); +}; + +GENERATE_CUDA_FUNC2DROWS(computeG1, + (float* pp, float* g1p, float* gxp, float* gyp, int cols), + (pp, g1p, gxp, gyp, cols)) { + #if __CUDA_ARCH__>300 +//TODO: make compatible with cuda 2.0 and lower (remove shuffles). 
Probably through texture fetch + +//Original pseudocode for this function: + // //subscripts u,d,l,r mean up,down,left,right + // void computeG(){ + // // g0 is the strongest nearby gradient (excluding point defects) + // g0x=fabsf(pr-pl);//|dx| + // g0y=fabsf(pd-pu);//|dy| + // g0=max(g0x,g0y); + // // g1 is the scaled g0 through the g function exp(-alpha*x^beta) + // g1=sqrt(g0); //beta=0.5 + // alpha=3.5; + // g1=exp(-alpha*g1); + // //hard to explain this without a picture, but breaks are where both neighboring pixels are near a change + // gx=max(g1r,g1); + // gy=max(g1d,g1); + // gu=gyu; //upper spring is the lower spring of the pixel above + // gd=gy; //lower spring + // gr=gx; //right spring + // gl=gxl; //left spring is the right spring of the pixel to the left + // } + const float alpha=3.5f; + int x = threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int upoff=-(y!=0)*cols; + int dnoff=(y=30){ + pr=tmp; + } + pl=ph; + pu=pp[pt+upoff]; + pd=pp[pt+dnoff]; + + + // g0 is the strongest nearby gradient (excluding point defects) + gt=fabsf(pr-pl); + g0x=__shfl_up(gt,1);//?xxxxxx no prior val + gsav=__shfl_down(gt,31);//x000000 for next time + g0x=threadIdx.x>0?g0x:0.0f;//0xxxxxx + g0y=fabsf(pd-pu); + + g0=fmaxf(g0x,g0y); + // g1 is the scaled g0 through the g function + g1=sqrt(g0); + g1=exp(-alpha*g1); + //save + g1p[pt]=g1; + + x+=32; + //itr 1:n-2 + for(;x=30?tmp:pr; + + pl=ph; + pu=pp[pt+upoff]; + pd=pp[pt+dnoff]; + + // g0 is the strongest nearby gradient (excluding point defects) + gt=fabsf(pr-pl); + g0x=__shfl_up(gt,1);//?xxxxxx + g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx + gsav=__shfl_down(gt,31);//x000000 for next time + g0y=fabsf(pd-pu); + + g0=fmaxf(g0x,g0y); + + // g1 is the scaled g0 through the g function + g1=sqrt(g0); + g1=exp(-alpha*g1); + //save + g1p[pt]=g1; + } + + //itr n-1 + pt=x+y*cols; + ph=pn; + pr=__shfl_down(ph,2); + pl=ph; + pu=pp[pt+upoff]; + pd=pp[pt+dnoff]; + + // g0 is the strongest nearby gradient (excluding point defects) + gt=fabsf(pr-pl); + g0x=__shfl_up(gt,1);//?xxxxxx + g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx + g0y=fabsf(pd-pu); + + g0=fmaxf(g0x,g0y); + // g1 is the scaled g0 through the g function + g1=sqrt(g0); + g1=exp(-alpha*g1); + //save + g1p[pt]=g1; +#endif +} +GENERATE_CUDA_FUNC2DROWS(computeG2, + (float* pp, float* g1p, float* gxp, float* gyp, int cols), + (pp, g1p, gxp, gyp, cols)) { + #if __CUDA_ARCH__>300 + int x = threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int dnoff=(y=31){ + g1r=tmp; + } + g1l=g1h; + g1u=g1h; + g1d=g1p[pt+dnoff]; + + gx=fmaxf(g1l,g1r); + gy=fmaxf(g1u,g1d); + + //save + gxp[pt]=gx; + gyp[pt]=gy; + x+=32; + //itr 1:n-2 + for(;x=31?tmp:g1r; + + g1l=g1h; + g1u=g1h; + g1d=g1p[pt+dnoff]; + + gx=fmaxf(g1l,g1r); + gy=fmaxf(g1u,g1d); + //save + gxp[pt]=gx; + gyp[pt]=gy; + } + + //itr n-1 + pt=x+y*cols; + g1h=g1n; + g1r=__shfl_down(g1h,1); + g1l=g1h; + g1u=g1h; + g1d=g1p[pt+dnoff]; + + gx=fmaxf(g1l,g1r); + gy=fmaxf(g1u,g1d); + + + //save + gxp[pt]=gx; + gyp[pt]=gy; +#endif +} + + +//This version is faster, but makes synchronization errors at the lines between parts 1 and 2. +//Could be fixed by a second pass for part 2 over the stitch lines, but I don't have time to figure that out +//right now. +GENERATE_CUDA_FUNC2DROWS(computeGunsafe, + (float* pp, float* g1p, float* gxp, float* gyp, int cols), + (pp, g1p, gxp, gyp, cols)) { + #if __CUDA_ARCH__>300 +//TODO: make compatible with cuda 2.0 and lower (remove shuffles). 
Probably through texture fetch +//TODO: rerun kernel on lines with y%32==31 or y%32==0 to fix stitch lines + +//Original pseudocode for this function: + // //subscripts u,d,l,r mean up,down,left,right + // void computeG(){ + // // g0 is the strongest nearby gradient (excluding point defects) + // g0x=fabsf(pr-pl);//|dx| + // g0y=fabsf(pd-pu);//|dy| + // g0=max(g0x,g0y); + // // g1 is the scaled g0 through the g function exp(-alpha*x^beta) + // g1=sqrt(g0); //beta=0.5 + // alpha=3.5; + // g1=exp(-alpha*g1); + // //hard to explain this without a picture, but breaks are where both neighboring pixels are near a change + // gx=max(g1r,g1); + // gy=max(g1d,g1); + // gu=gyu; //upper spring is the lower spring of the pixel above + // gd=gy; //lower spring + // gr=gx; //right spring + // gl=gxl; //left spring is the right spring of the pixel to the left + // } + const float alpha=3.5f; + int x = threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + int upoff=-(y!=0)*cols; + int dnoff=(y=30){ + pr=tmp; + } + pl=ph; + pu=pp[pt+upoff]; + pd=pp[pt+dnoff]; + + + // g0 is the strongest nearby gradient (excluding point defects) + gt=fabsf(pr-pl); + g0x=__shfl_up(gt,1);//?xxxxxx no prior val + gsav=__shfl_down(gt,31);//x000000 for next time + g0x=threadIdx.x>0?g0x:0.0f;//0xxxxxx + g0y=fabsf(pd-pu); + + g0=fmaxf(g0x,g0y); + // g1 is the scaled g0 through the g function + g1=sqrt(g0); + g1=exp(-alpha*g1); + //save + g1p[pt]=g1; + + x+=32; + //itr 1:n-2 + for(;x=30?tmp:pr; + + pl=ph; + pu=pp[pt+upoff]; + pd=pp[pt+dnoff]; + + // g0 is the strongest nearby gradient (excluding point defects) + gt=fabsf(pr-pl); + g0x=__shfl_up(gt,1);//?xxxxxx + g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx + gsav=__shfl_down(gt,31);//x000000 for next time + g0y=fabsf(pd-pu); + + g0=fmaxf(g0x,g0y); + + // g1 is the scaled g0 through the g function + g1=sqrt(g0); + g1=exp(-alpha*g1); + //save + g1p[pt]=g1; + } + + //itr n-1 + pt=x+y*cols; + ph=pn; + pr=__shfl_down(ph,2); + pl=ph; + pu=pp[pt+upoff]; + pd=pp[pt+dnoff]; + + // g0 is the strongest nearby gradient (excluding point defects) + gt=fabsf(pr-pl); + g0x=__shfl_up(gt,1);//?xxxxxx + g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx + g0y=fabsf(pd-pu); + + g0=fmaxf(g0x,g0y); + // g1 is the scaled g0 through the g function + g1=sqrt(g0); + g1=exp(-alpha*g1); + //save + g1p[pt]=g1; + +//part2, find gx,gy + x = threadIdx.x; + y = blockIdx.y * blockDim.y + threadIdx.y; + //itr0 + pt=x+y*cols; + + g1h=g1p[pt]; + g1n=g1p[pt+blockDim.x]; + g1r=__shfl_down(g1h,1); + tmp=__shfl_up(g1n,31); + if(threadIdx.x>=31){ + g1r=tmp; + } + g1l=g1h; + g1u=g1h; + g1d=g1p[pt+dnoff]; + + gx=fmaxf(g1l,g1r); + gy=fmaxf(g1u,g1d); + + //save + gxp[pt]=gx; + gyp[pt]=gy; + x+=32; + //itr 1:n-2 + for(;x=31?tmp:g1r; + + g1l=g1h; + g1u=g1h; + g1d=g1p[pt+dnoff]; + + gx=fmaxf(g1l,g1r); + gy=fmaxf(g1u,g1d); + //save + gxp[pt]=gx; + gyp[pt]=gy; + } + + //itr n-1 + pt=x+y*cols; + g1h=g1n; + g1r=__shfl_down(g1h,1); + g1l=g1h; + g1u=g1h; + g1d=g1p[pt+dnoff]; + + gx=fmaxf(g1l,g1r); + gy=fmaxf(g1u,g1d); + + + //save + gxp[pt]=gx; + gyp[pt]=gy; +#endif + +} +__device__ inline float saturate(float x){ + //return x; + return x/fmaxf(1.0f,fabsf(x)); +} +// static __global__ void updateQD (float* gqxpt, float* gqypt, float *dpt, float * apt, +// float *gxpt, float *gypt, float sigma_q, float sigma_d, float epsilon, +// float theta);//DANGER, no interblock synchronization = weird instability +static __global__ void updateQ (float* gqxpt, float* gqypt, float *dpt, float * apt, + float *gxpt, float *gypt, int cols, float sigma_q, float 
sigma_d, float epsilon, + float theta); +static __global__ void updateD (float* gqxpt, float* gqypt, float *dpt, float * apt, + float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon, + float theta); + +void updateQDCaller(float* gqxpt, float* gqypt, float *dpt, float * apt, + float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon, + float theta) { + + dim3 dimBlock(BLOCKX2D, BLOCKY2D); + dim3 dimGrid(1, (arows + dimBlock.y - 1) / dimBlock.y); + CV_Assert(dimGrid.y>0); + cudaSafeCall( cudaGetLastError() ); + updateQ<<>>( gqxpt, gqypt, dpt, apt, + gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta); + cudaSafeCall( cudaGetLastError() ); + updateD<<>>( gqxpt, gqypt, dpt, apt, + gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta); + cudaSafeCall( cudaGetLastError() ); +}; + +// static __global__ void updateQD (float* gqxpt, float* gqypt, float *dpt, float * apt, +// float *gxpt, float *gypt, float sigma_q, float sigma_d, float epsilon, +// float theta) { +// //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch +// +// //Original pseudocode for this function: +// //void updateQD(){ +// // //shifts are shuffles! +// // for (all x in blocks of warpsize;;){ +// // //qx update +// // float dh,dn,qxh,gx,gqx,qyh,gy,gqy; +// // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) +// // dr=dh<<1; +// // tmp=dn>>31; +// // if (rt) +// // dr=tmp; +// // qxh=gqx/gxh; +// // qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) +// // gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. +// // gqxpt[pt]=gqx; +// // +// // //qy update +// // s[bpt]=dn; +// // if(!btm){ +// // dd=s[bpt+bdnoff]; +// // }else{ +// // dd=dpt[pt+dnoff]; +// // } +// // qyh=gqy/gy; +// // qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon); +// // gqy=saturate(gyh*qyh); +// // gqypt[pt]=gqy; +// // +// // //dx update +// // gqr=gqx; +// // gql=gqx>>1; +// // if (lf) +// // gql=gqsave; +// // gqsave=gqx<<31;//save for next iter +// // dacc = gqr - gql;//dx part +// // +// // //dy update and d store +// // gqd=gqy; +// // s[bpt]=gqy; +// // if(!top) +// // gqu=s[bpt+bupoff]; +// // else +// // gqu=gqxpt[pt + upoff]; +// // dacc += gqd-gqu; //dy part +// // d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta); +// // dpt[pt]=d; +// // } +// //} +// __shared__ float s[32*BLOCKY2D]; +// int x = threadIdx.x; +// int y = blockIdx.y * blockDim.y + threadIdx.y; +// bool rt=x==31; +// bool lf=x==0; +// bool top=y==0; +// bool btm=y==rows-1; +// bool btop=threadIdx.y==0; +// bool bbtm=threadIdx.y==blockDim.y-1; +// int pt, bpt,bdnoff ,dnoff, bupoff, upoff; +// +// +// float tmp,gqsave; +// gqsave=0; +// bpt = threadIdx.x+threadIdx.y*blockDim.x; +// bdnoff=blockDim.x; +// dnoff=(!btm)*cols; +// bupoff=-blockDim.x; +// upoff=-(!top)*cols; +// +// pt=x+y*cols; +// +// float dh,dn; +// dn=dpt[pt]; +// +// for(;x>31; +// if (rt) +// dr=tmp; +// qxh=gqx/gxh; +// qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) +// gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. 
+// gqxpt[pt]=gqx; +// +// //qy update +// s[bpt]=dn; +// if(!btm){ +// dd=s[bpt+bdnoff]; +// }else{ +// dd=dpt[pt+dnoff]; +// } +// qyh=gqy/gy; +// qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon); +// gqy=saturate(gyh*qyh); +// gqypt[pt]=gqy; +// +// //dx update +// gqr=gqx; +// gql=gqx>>1; +// if (lf) +// gql=gqsave; +// gqsave=gqx<<31;//save for next iter +// dacc = gqr - gql;//dx part +// +// //dy update and d store +// gqd=gqy; +// s[bpt]=gqy; +// if(!top) +// gqu=s[bpt+bupoff]; +// else +// gqu=gqxpt[pt + upoff]; +// dacc += gqd-gqu; //dy part +// d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta); +// dpt[pt]=d; +// } +//} +#if __CUDA_ARCH__>300 + __shared__ float s[32*BLOCKY2D]; + int x = threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + bool rt=x==31; + + bool bbtm=threadIdx.y==blockDim.y-1; + int pt, bpt,bdnoff ,dnoff; + + float tmp; + bpt = threadIdx.x+threadIdx.y*blockDim.x; + bdnoff=blockDim.x; + dnoff=(y300 + //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch + + //Original pseudocode for this function: +//void updateQD(){ +// //shifts are shuffles! +// for (all x in blocks of warpsize){ +// //qx update +// float dh,dn,qxh,gx,gqx,qyh,gy,gqy; +// //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply) +// dr=dh<<1; +// tmp=dn>>31; +// if (rt) +// dr=tmp; +// qxh=gqx/gxh; +// qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0) +// gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge. +// gqxpt[pt]=gqx; +// +// //qy update +// s[bpt]=dn; +// if(!btm){ +// dd=s[bpt+bdnoff]; +// }else{ +// dd=dpt[pt+dnoff]; +// } +// qyh=gqy/gy; +// qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon); +// gqy=saturate(gyh*qyh); +// gqypt[pt]=gqy; +// +// //dx update +// gqr=gqx; +// gql=gqx>>1; +// if (lf) +// gql=gqsave; +// gqsave=gqx<<31;//save for next iter +// dacc = gqr - gql;//dx part +// +// //dy update and d store +// gqd=gqy; +// s[bpt]=gqy; +// if(!top) +// gqu=s[bpt+bupoff]; +// else +// gqu=gqxpt[pt + upoff]; +// dacc += gqd-gqu; //dy part +// d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta); +// dpt[pt]=d; +// } +//} + __shared__ float s[32*BLOCKY2D]; + int x = threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + bool lf=x==0; + bool top=y==0; + bool btop=threadIdx.y==0; + int pt, bpt, bupoff, upoff; + + + float gqsave=0; + bpt = threadIdx.x+threadIdx.y*blockDim.x; + + bupoff=-blockDim.x; + upoff=-(!top)*cols; + + pt=x+y*cols; + + for(;xup_bound) + prod_tmp = up_bound; + if(prod_tmpup_bound) + Cvalue=up_bound; + if(Cvalue +#include +#include +#include +#include +#include +#include "cudacommon.h" +#include "OptionParser.h" +#include "ResultDatabase.h" +#include "Timer.h" +#include "support.h" + +// Forward declarations for texture memory test and benchmark kernels +void TestTextureMem(ResultDatabase &resultDB, OptionParser &op, double scalet); +__global__ void +readGlobalMemoryCoalesced(float *data, float *output, int size, int repeat); +__global__ void readGlobalMemoryUnit(float *data, float *output, int size, int repeat); +__global__ void readLocalMemory(const float *data, float *output, int size, int repeat); +__global__ void writeGlobalMemoryCoalesced(float *output, int size, int repeat); +__global__ void writeGlobalMemoryUnit(float *output, int size, int repeat); +__global__ void writeLocalMemory(float *output, int size, int repeat); 
+__device__ int getRand(int seed, int mod); +__global__ void readTexels(int n, float *d_out, int width); +__global__ void readTexelsInCache(int n, float *d_out); +__global__ void readTexelsRandom(int n, float *d_out, int width, int height); +// Texture to use for the benchmarks +texture texA; + +// **************************************************************************** +// Function: addBenchmarkSpecOptions +// +// Purpose: +// Add benchmark specific options parsing. Note that device memory has no +// benchmark specific options, so this is just a stub. +// +// Arguments: +// op: the options parser / parameter database +// +// Returns: nothing +// +// Programmer: Kyle Spafford +// Creation: December 11, 2009 +// +// Modifications: +// +// **************************************************************************** +void addBenchmarkSpecOptions(OptionParser &op) +{ + ; +} + +// **************************************************************************** +// Function: runBenchmark +// +// Purpose: +// This benchmark measures the device memory bandwidth for several areas +// of memory including global, shared, and texture memories for several +// types of access patterns. +// +// Arguments: +// resultDB: the benchmark stores its results in this ResultDatabase +// op: the options parser / parameter database +// +// Returns: nothing +// +// Programmer: Kyle Spafford +// Creation: September 08, 2009 +// +// Modifications: +// Gabriel Marin, 06/09/2010: Change memory access patterns to eliminate +// data reuse. Add auto-scaling factor. +// +// **************************************************************************** +void RunBenchmark(ResultDatabase &resultDB, + OptionParser &op) +{ + int npasses = op.getOptionInt("passes"); + size_t minGroupSize = 32; + size_t maxGroupSize = 512; + size_t globalWorkSize = 32768; // 64 * maxGroupSize = 64 * 512; + unsigned int memSize = 64*1024*1024; // 64MB buffer + const long availMem = findAvailBytes(); + while (memSize*2 > availMem) + memSize >>= 1; // keep it a power of 2 + + const unsigned int numWordsFloat = memSize / sizeof(float); + + // Initialize host memory + float *h_in = new float[numWordsFloat]; + float *h_out = new float[numWordsFloat]; + srand48(8650341L); + for (int i = 0; i < numWordsFloat; ++i) + { + h_in[i] = (float)(drand48()*numWordsFloat); + } + + // Allocate some device memory + float *d_mem1, *d_mem2; + char sizeStr[128]; + + cudaMalloc((void**)&d_mem1, sizeof(float)*(numWordsFloat)); + CHECK_CUDA_ERROR(); + cudaMalloc((void**)&d_mem2, sizeof(float)*(numWordsFloat)); + CHECK_CUDA_ERROR(); + + cudaEvent_t start, stop; + cudaEventCreate(&start); + cudaEventCreate(&stop); + CHECK_CUDA_ERROR(); + + cudaEventRecord(start, 0); + readGlobalMemoryCoalesced<<<512, 64>>> + (d_mem1, d_mem2, numWordsFloat, 256); + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + CHECK_CUDA_ERROR(); + float t = 0.0f; + cudaEventElapsedTime(&t, start, stop); + t /= 1.e3; + double scalet = 0.15 / t; + + const unsigned int maxRepeatsCoal = 256*scalet; + const unsigned int maxRepeatsUnit = 16*scalet; + const unsigned int maxRepeatsLocal = 300*scalet; + + for (int p = 0; p < npasses; p++) + { + // Run the kernel for each group size + cout << "Running benchmarks, pass: " << p << "\n"; + for (int threads=minGroupSize; threads<=maxGroupSize ; threads*=2) + { + const unsigned int blocks = globalWorkSize / threads; + double bdwth; + sprintf (sizeStr, "blockSize:%03d", threads); + + // Test 1 + cudaEventRecord(start, 0); + readGlobalMemoryCoalesced<<>> + (d_mem1, 
d_mem2, numWordsFloat, maxRepeatsCoal); + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + CHECK_CUDA_ERROR(); + t = 0.0f; + cudaEventElapsedTime(&t, start, stop); + t /= 1.e3; + bdwth = ((double) globalWorkSize * maxRepeatsCoal * 16 * sizeof(float)) + / (t * 1000. * 1000. * 1000.); + resultDB.AddResult("readGlobalMemoryCoalesced", sizeStr, "GB/s", + bdwth); + + // Test 2 + cudaEventRecord(start, 0); + readGlobalMemoryUnit<<>> + (d_mem1, d_mem2, numWordsFloat, maxRepeatsUnit); + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + CHECK_CUDA_ERROR(); + cudaEventElapsedTime(&t, start, stop); + t /= 1.e3; + bdwth = ((double) globalWorkSize * maxRepeatsUnit * 16 * sizeof(float)) + / (t * 1000. * 1000. * 1000.); + resultDB.AddResult("readGlobalMemoryUnit", sizeStr, "GB/s", bdwth); + + // Test 3 + cudaEventRecord(start, 0); + readLocalMemory<<>> + (d_mem1, d_mem2, numWordsFloat, maxRepeatsLocal); + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + CHECK_CUDA_ERROR(); + cudaEventElapsedTime(&t, start, stop); + t /= 1.e3; + bdwth = ((double) globalWorkSize * maxRepeatsLocal * 16 * sizeof(float)) + / (t * 1000. * 1000. * 1000.); + resultDB.AddResult("readLocalMemory", sizeStr, "GB/s", bdwth); + + // Test 4 + cudaEventRecord(start, 0); + writeGlobalMemoryCoalesced<<>> + (d_mem2, numWordsFloat, maxRepeatsCoal); + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + CHECK_CUDA_ERROR(); + cudaEventElapsedTime(&t, start, stop); + t /= 1.e3; + bdwth = ((double) globalWorkSize * maxRepeatsCoal * 16 * sizeof(float)) + / (t * 1000. * 1000. * 1000.); + resultDB.AddResult("writeGlobalMemoryCoalesced", sizeStr, "GB/s", + bdwth); + + // Test 5 + cudaEventRecord(start, 0); + writeGlobalMemoryUnit<<>> + (d_mem2, numWordsFloat, maxRepeatsUnit); + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + CHECK_CUDA_ERROR(); + cudaEventElapsedTime(&t, start, stop); + t /= 1.e3; + bdwth = ((double) globalWorkSize * maxRepeatsUnit * 16 * sizeof(float)) + / (t * 1000. * 1000. * 1000.); + resultDB.AddResult("writeGlobalMemoryUnit", sizeStr, "GB/s", + bdwth); + + // Test 6 + cudaEventRecord(start, 0); + writeLocalMemory<<>> + (d_mem2, numWordsFloat, maxRepeatsLocal); + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + CHECK_CUDA_ERROR(); + cudaEventElapsedTime(&t, start, stop); + t /= 1.e3; + bdwth = ((double) globalWorkSize * maxRepeatsLocal * 16 * sizeof(float)) + / (t * 1000. * 1000. * 1000.); + resultDB.AddResult("writeLocalMemory", sizeStr, "GB/s", bdwth); + } + } + cudaFree(d_mem1); + cudaFree(d_mem2); + delete[] h_in; + delete[] h_out; + cudaEventDestroy(start); + cudaEventDestroy(stop); + TestTextureMem(resultDB, op, scalet); +} + +// **************************************************************************** +// Function: TestTextureMem +// +// Purpose: +// Measures the bandwidth of texture memory for several access patterns +// using a 2D texture including sequential, "random", and repeated access to +// texture cache. Texture memory is often a viable alternative to global +// memory, especially when data access patterns prevent good coalescing. 
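+//    Each reported figure is computed as kernelRepFactor * iterations *
+//    texture size in bytes, divided by the elapsed time, and expressed in
+//    GB/sec (1 GB = 1e9 bytes).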
+// +// Arguments: +// resultDB: results from the benchmark are stored to this resultd database +// op: the options parser / parameter database +// scalet: auto-scaling factor for the number of repetitions +// +// Returns: nothing +// +// Programmer: Kyle Spafford +// Creation: December 11, 2009 +// +// Modifications: +// Gabriel Marin 06/09/2010: add auto-scaling factor +// +// Jeremy Meredith, Tue Nov 23 13:45:54 EST 2010 +// Change data sizes to be larger, and textures to be 2D to match OpenCL +// variant. Dropped #iterations to compensate. Had to remove validation +// for now, which also matches the current OpenCL variant's behavior. +// +// **************************************************************************** +void TestTextureMem(ResultDatabase &resultDB, OptionParser &op, double scalet) +{ + // Number of times to repeat each test + const unsigned int passes = op.getOptionInt("passes"); + // Sizes of textures tested (in kb) + const unsigned int nsizes = 5; + const unsigned int sizes[] = { 16, 64, 256, 1024, 4096 }; + // Number of texel accesses by each kernel + const unsigned int kernelRepFactor = 1024; + // Number of times to repeat each kernel per test + const unsigned int iterations = 1*scalet; + + cudaEvent_t start, stop; + cudaEventCreate(&start); + cudaEventCreate(&stop); + CHECK_CUDA_ERROR(); + + // make sure our texture behaves like we want.... + texA.normalized = false; + texA.addressMode[0] = cudaAddressModeClamp; + texA.addressMode[1] = cudaAddressModeClamp; + texA.filterMode = cudaFilterModePoint; + + for (int j = 0; j < nsizes; j++) + { + cout << "Benchmarking Texture Memory, Test: " << j+1 << " / 5\n"; + const unsigned int size = 1024 * sizes[j]; + const unsigned int numFloat = size / sizeof(float); + const unsigned int numFloat4 = size / sizeof(float4); + size_t width, height; + + // Image memory sizes should be power of 2. 
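+        // Worked example for the largest test size: sizes[j] = 4096 kB gives
+        // numFloat4 = 4096 * 1024 / 16 = 262144 = 2^18, so sizeLog = 18,
+        // height = 1 << 9 = 512 and width = 262144 / 512 = 512.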
+ size_t sizeLog = lround(log2(double(numFloat4))); + height = 1 << (sizeLog >> 1); // height is the smaller size + width = numFloat4 / height; + + const dim3 blockSize(16, 8); + const dim3 gridSize(width/blockSize.x, height/blockSize.y); + + float *h_in = new float[numFloat]; + float *h_out = new float[numFloat4]; + float *d_out; + cudaMalloc((void**) &d_out, numFloat4 * sizeof(float)); + CHECK_CUDA_ERROR(); + + // Fill input data with some pattern + for (unsigned int i = 0; i < numFloat; i++) + { + h_in[i] = (float) i; + if (i < numFloat4) + { + h_out[i] = 0.0f; + } + } + + // Allocate a cuda array + cudaArray* cuArray; + cudaMallocArray(&cuArray, &texA.channelDesc, width, height); + CHECK_CUDA_ERROR(); + + // Copy in source data + cudaMemcpyToArray(cuArray, 0, 0, h_in, size, cudaMemcpyHostToDevice); + CHECK_CUDA_ERROR(); + + // Bind texture to the array + cudaBindTextureToArray(texA, cuArray); + CHECK_CUDA_ERROR(); + + for (int p = 0; p < passes; p++) + { + // Test 1: Repeated Linear Access + float t = 0.0f; + + cudaEventRecord(start, 0); + // read texels from texture + for (int iter = 0; iter < iterations; iter++) + { + readTexels<<>>(kernelRepFactor, d_out, + width); + } + cudaEventRecord(stop, 0); + CHECK_CUDA_ERROR(); + cudaEventSynchronize(stop); + CHECK_CUDA_ERROR(); + cudaEventElapsedTime(&t, start, stop); + t /= 1.e3; + + // Calculate speed in GB/s + double speed = (double)kernelRepFactor * (double)iterations * + (double)(size/(1000.*1000.*1000.)) / (t); + + char sizeStr[256]; + sprintf(sizeStr, "% 6dkB", size / 1024); + resultDB.AddResult("TextureRepeatedLinearAccess", sizeStr, "GB/sec", + speed); + + // Verify results + cudaMemcpy(h_out, d_out, numFloat4*sizeof(float), + cudaMemcpyDeviceToHost); + + // Test 2 Repeated Cache Access + cudaEventRecord(start, 0); + for (int iter = 0; iter < iterations; iter++) + { + readTexelsInCache<<>> + (kernelRepFactor, d_out); + } + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + CHECK_CUDA_ERROR(); + cudaEventElapsedTime(&t, start, stop); + t /= 1.e3; + + // Calculate speed in GB/s + speed = (double)kernelRepFactor * (double)iterations * + ((double)size/(1000.*1000.*1000.)) / (t); + + sprintf(sizeStr, "% 6dkB", size / 1024); + resultDB.AddResult("TextureRepeatedCacheHit", sizeStr, "GB/sec", + speed); + + // Verify results + cudaMemcpy(h_out, d_out, numFloat4*sizeof(float), + cudaMemcpyDeviceToHost); + + // Test 3 Repeated "Random" Access + cudaEventRecord(start, 0); + + // read texels from texture + for (int iter = 0; iter < iterations; iter++) + { + readTexelsRandom<<>> + (kernelRepFactor, d_out, width, height); + } + + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + CHECK_CUDA_ERROR(); + cudaEventElapsedTime(&t, start, stop); + t /= 1.e3; + + // Calculate speed in GB/s + speed = (double)kernelRepFactor * (double)iterations * + ((double)size/(1000.*1000.*1000.)) / (t); + + sprintf(sizeStr, "% 6dkB", size / 1024); + resultDB.AddResult("TextureRepeatedRandomAccess", sizeStr, + "GB/sec", speed); + } + delete[] h_in; + delete[] h_out; + cudaFree(d_out); + cudaFreeArray(cuArray); + cudaUnbindTexture(texA); + } + cudaEventDestroy(start); + cudaEventDestroy(stop); +} + +// Begin benchmark kernels +__global__ void +readGlobalMemoryCoalesced(float *data, float *output, int size, int repeat) +{ + int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0; + float sum = 0; + int s = gid; + for (j=0 ; j +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include 
+#include +#include +#include +#include +#include + +namespace faiss { +namespace gpu { + +template +void runDistance( + bool computeL2, + GpuResources* res, + Tensor& centroids, + bool centroidsRowMajor, + Tensor* centroidNorms, + Tensor& queries, + bool queriesRowMajor, + int k, + Tensor& outDistances, + Tensor& outIndices, + bool ignoreOutDistances) { + // The # of centroids in `centroids` based on memory layout + auto numCentroids = centroids.getSize(centroidsRowMajor ? 0 : 1); + + // The # of queries in `queries` based on memory layout + auto numQueries = queries.getSize(queriesRowMajor ? 0 : 1); + + // The dimensions of the vectors to consider + auto dim = queries.getSize(queriesRowMajor ? 1 : 0); + FAISS_ASSERT( + (numQueries == 0 || numCentroids == 0) || + dim == centroids.getSize(centroidsRowMajor ? 1 : 0)); + + FAISS_ASSERT(outDistances.getSize(0) == numQueries); + FAISS_ASSERT(outIndices.getSize(0) == numQueries); + FAISS_ASSERT(outDistances.getSize(1) == k); + FAISS_ASSERT(outIndices.getSize(1) == k); + + auto defaultStream = res->getDefaultStreamCurrentDevice(); + + // If we're querying against a 0 sized set, just return empty results + if (centroids.numElements() == 0) { + thrust::fill( + thrust::cuda::par.on(defaultStream), + outDistances.data(), + outDistances.end(), + Limits::getMax()); + + thrust::fill( + thrust::cuda::par.on(defaultStream), + outIndices.data(), + outIndices.end(), + -1); + + return; + } + + // L2: If ||c||^2 is not pre-computed, calculate it + DeviceTensor cNorms; + if (computeL2 && !centroidNorms) { + cNorms = DeviceTensor( + res, + makeTempAlloc(AllocType::Other, defaultStream), + {numCentroids}); + runL2Norm(centroids, centroidsRowMajor, cNorms, true, defaultStream); + centroidNorms = &cNorms; + } + + // + // Prepare norm vector ||q||^2; ||c||^2 is already pre-computed + // + DeviceTensor queryNorms( + res, + makeTempAlloc(AllocType::Other, defaultStream), + {(int)numQueries}); + + // ||q||^2 + if (computeL2) { + runL2Norm(queries, queriesRowMajor, queryNorms, true, defaultStream); + } + + // By default, aim to use up to 512 MB of memory for the processing, with + // both number of queries and number of centroids being at least 512. 
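+    // The (numQueries x numCentroids) distance matrix is processed in
+    // (tileRows x tileCols) tiles: each tile is materialized in one of the two
+    // distanceBuf buffers (alternated across the two alternate streams),
+    // k-selected into outDistanceBuf/outIndexBuf, and when the centroids span
+    // more than one column tile the numColTiles partial top-k lists per query
+    // are merged by the final runBlockSelectPair pass below.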
+ int tileRows = 0; + int tileCols = 0; + chooseTileSize( + numQueries, + numCentroids, + dim, + sizeof(T), + res->getTempMemoryAvailableCurrentDevice(), + tileRows, + tileCols); + + int numColTiles = utils::divUp(numCentroids, tileCols); + + // We can have any number of vectors to query against, even less than k, in + // which case we'll return -1 for the index + FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); // select limitation + + // Temporary output memory space we'll use + DeviceTensor distanceBuf1( + res, + makeTempAlloc(AllocType::Other, defaultStream), + {tileRows, tileCols}); + DeviceTensor distanceBuf2( + res, + makeTempAlloc(AllocType::Other, defaultStream), + {tileRows, tileCols}); + DeviceTensor* distanceBufs[2] = { + &distanceBuf1, &distanceBuf2}; + + DeviceTensor outDistanceBuf1( + res, + makeTempAlloc(AllocType::Other, defaultStream), + {tileRows, numColTiles * k}); + DeviceTensor outDistanceBuf2( + res, + makeTempAlloc(AllocType::Other, defaultStream), + {tileRows, numColTiles * k}); + DeviceTensor* outDistanceBufs[2] = { + &outDistanceBuf1, &outDistanceBuf2}; + + DeviceTensor outIndexBuf1( + res, + makeTempAlloc(AllocType::Other, defaultStream), + {tileRows, numColTiles * k}); + DeviceTensor outIndexBuf2( + res, + makeTempAlloc(AllocType::Other, defaultStream), + {tileRows, numColTiles * k}); + DeviceTensor* outIndexBufs[2] = { + &outIndexBuf1, &outIndexBuf2}; + + auto streams = res->getAlternateStreamsCurrentDevice(); + streamWait(streams, {defaultStream}); + + int curStream = 0; + bool interrupt = false; + + // Tile over the input queries + for (int i = 0; i < numQueries; i += tileRows) { + if (interrupt || InterruptCallback::is_interrupted()) { + interrupt = true; + break; + } + + int curQuerySize = std::min(tileRows, numQueries - i); + + auto outDistanceView = outDistances.narrow(0, i, curQuerySize); + auto outIndexView = outIndices.narrow(0, i, curQuerySize); + + auto queryView = + queries.narrow(queriesRowMajor ? 0 : 1, i, curQuerySize); + auto queryNormNiew = queryNorms.narrow(0, i, curQuerySize); + + auto outDistanceBufRowView = + outDistanceBufs[curStream]->narrow(0, 0, curQuerySize); + auto outIndexBufRowView = + outIndexBufs[curStream]->narrow(0, 0, curQuerySize); + + // Tile over the centroids + for (int j = 0; j < numCentroids; j += tileCols) { + if (InterruptCallback::is_interrupted()) { + interrupt = true; + break; + } + + int curCentroidSize = std::min(tileCols, numCentroids - j); + int curColTile = j / tileCols; + + auto centroidsView = sliceCentroids( + centroids, centroidsRowMajor, j, curCentroidSize); + + auto distanceBufView = distanceBufs[curStream] + ->narrow(0, 0, curQuerySize) + .narrow(1, 0, curCentroidSize); + + auto outDistanceBufColView = + outDistanceBufRowView.narrow(1, k * curColTile, k); + auto outIndexBufColView = + outIndexBufRowView.narrow(1, k * curColTile, k); + + // L2: distance is ||c||^2 - 2qc + ||q||^2, we compute -2qc + // IP: just compute qc + // (query id x dim) x (centroid id, dim)' = (query id, centroid id) + runMatrixMult( + distanceBufView, + false, // not transposed + queryView, + !queriesRowMajor, // transposed MM if col major + centroidsView, + centroidsRowMajor, // transposed MM if row major + computeL2 ? 
-2.0f : 1.0f, + 0.0f, + res->getBlasHandleCurrentDevice(), + streams[curStream]); + + if (computeL2) { + // For L2 distance, we use this fused kernel that performs both + // adding ||c||^2 to -2qc and k-selection, so we only need two + // passes (one write by the gemm, one read here) over the huge + // region of output memory + // + // If we aren't tiling along the number of centroids, we can + // perform the output work directly + if (tileCols == numCentroids) { + // Write into the final output + runL2SelectMin( + distanceBufView, + *centroidNorms, + outDistanceView, + outIndexView, + k, + streams[curStream]); + + if (!ignoreOutDistances) { + // expand (query id) to (query id, k) by duplicating + // along rows top-k ||c||^2 - 2qc + ||q||^2 in the form + // (query id, k) + runSumAlongRows( + queryNormNiew, + outDistanceView, + true, // L2 distances should not go below zero + // due to roundoff error + streams[curStream]); + } + } else { + auto centroidNormsView = + centroidNorms->narrow(0, j, curCentroidSize); + + // Write into our intermediate output + runL2SelectMin( + distanceBufView, + centroidNormsView, + outDistanceBufColView, + outIndexBufColView, + k, + streams[curStream]); + + if (!ignoreOutDistances) { + // expand (query id) to (query id, k) by duplicating + // along rows top-k ||c||^2 - 2qc + ||q||^2 in the form + // (query id, k) + runSumAlongRows( + queryNormNiew, + outDistanceBufColView, + true, // L2 distances should not go below zero + // due to roundoff error + streams[curStream]); + } + } + } else { + // For IP, just k-select the output for this tile + if (tileCols == numCentroids) { + // Write into the final output + runBlockSelect( + distanceBufView, + outDistanceView, + outIndexView, + true, + k, + streams[curStream]); + } else { + // Write into the intermediate output + runBlockSelect( + distanceBufView, + outDistanceBufColView, + outIndexBufColView, + true, + k, + streams[curStream]); + } + } + } + + // As we're finished with processing a full set of centroids, perform + // the final k-selection + if (tileCols != numCentroids) { + // The indices are tile-relative; for each tile of k, we need to add + // tileCols to the index + runIncrementIndex( + outIndexBufRowView, k, tileCols, streams[curStream]); + + runBlockSelectPair( + outDistanceBufRowView, + outIndexBufRowView, + outDistanceView, + outIndexView, + computeL2 ? 
false : true, + k, + streams[curStream]); + } + + curStream = (curStream + 1) % 2; + } + + // Have the desired ordering stream wait on the multi-stream + streamWait({defaultStream}, streams); + + if (interrupt) { + FAISS_THROW_MSG("interrupted"); + } +} + +template +void runL2Distance( + GpuResources* res, + Tensor& centroids, + bool centroidsRowMajor, + Tensor* centroidNorms, + Tensor& queries, + bool queriesRowMajor, + int k, + Tensor& outDistances, + Tensor& outIndices, + bool ignoreOutDistances = false) { + runDistance( + true, // L2 + res, + centroids, + centroidsRowMajor, + centroidNorms, + queries, + queriesRowMajor, + k, + outDistances, + outIndices, + ignoreOutDistances); +} + +template +void runIPDistance( + GpuResources* res, + Tensor& centroids, + bool centroidsRowMajor, + Tensor& queries, + bool queriesRowMajor, + int k, + Tensor& outDistances, + Tensor& outIndices) { + runDistance( + false, // IP + res, + centroids, + centroidsRowMajor, + nullptr, // no centroid norms provided + queries, + queriesRowMajor, + k, + outDistances, + outIndices, + false); +} + +// +// Instantiations of the distance templates +// + +void runIPDistance( + GpuResources* res, + Tensor& vectors, + bool vectorsRowMajor, + Tensor& queries, + bool queriesRowMajor, + int k, + Tensor& outDistances, + Tensor& outIndices) { + runIPDistance( + res, + vectors, + vectorsRowMajor, + queries, + queriesRowMajor, + k, + outDistances, + outIndices); +} + +void runIPDistance( + GpuResources* res, + Tensor& vectors, + bool vectorsRowMajor, + Tensor& queries, + bool queriesRowMajor, + int k, + Tensor& outDistances, + Tensor& outIndices) { + runIPDistance( + res, + vectors, + vectorsRowMajor, + queries, + queriesRowMajor, + k, + outDistances, + outIndices); +} + +void runL2Distance( + GpuResources* res, + Tensor& vectors, + bool vectorsRowMajor, + Tensor* vectorNorms, + Tensor& queries, + bool queriesRowMajor, + int k, + Tensor& outDistances, + Tensor& outIndices, + bool ignoreOutDistances) { + runL2Distance( + res, + vectors, + vectorsRowMajor, + vectorNorms, + queries, + queriesRowMajor, + k, + outDistances, + outIndices, + ignoreOutDistances); +} + +void runL2Distance( + GpuResources* res, + Tensor& vectors, + bool vectorsRowMajor, + Tensor* vectorNorms, + Tensor& queries, + bool queriesRowMajor, + int k, + Tensor& outDistances, + Tensor& outIndices, + bool ignoreOutDistances) { + runL2Distance( + res, + vectors, + vectorsRowMajor, + vectorNorms, + queries, + queriesRowMajor, + k, + outDistances, + outIndices, + ignoreOutDistances); +} + +} // namespace gpu +} // namespace faiss diff --git a/cuda_code/DistributionExponentialKernel.cu b/cuda_code/DistributionExponentialKernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..f28a910e9980b3119201be3a323cf5e4e7a748a1 --- /dev/null +++ b/cuda_code/DistributionExponentialKernel.cu @@ -0,0 +1,37 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +namespace at { namespace native { + +void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional gen) { + auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator()); + at::native::templates::cuda::exponential_kernel(iter, lambda, generator); +} + +REGISTER_DISPATCH(exponential_stub, &exponential_kernel); + +}} // namespace at::native diff --git 
a/cuda_code/DistributionExponentialKernel_1.cu b/cuda_code/DistributionExponentialKernel_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..4dac756a2aaff8a355e654e36c8736cdd8708959 --- /dev/null +++ b/cuda_code/DistributionExponentialKernel_1.cu @@ -0,0 +1,15 @@ +#define TORCH_ASSERT_NO_OPERATORS +#include +#include +#include + +namespace at { namespace native { + +void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional gen) { + auto generator = get_generator_or_default(gen, cuda::detail::getDefaultCUDAGenerator()); + at::native::templates::cuda::exponential_kernel(iter, lambda, generator); +} + +REGISTER_DISPATCH(exponential_stub, &exponential_kernel); + +}} // namespace at::native diff --git a/cuda_code/DnDHgels.cu b/cuda_code/DnDHgels.cu new file mode 100644 index 0000000000000000000000000000000000000000..c038c8ecb901f958cb311b5a1061c0ab9dfbeb21 --- /dev/null +++ b/cuda_code/DnDHgels.cu @@ -0,0 +1,67 @@ +#include // cudaMalloc, cudaMemcpy, etc. +#include // cusolverDn +#include "../../cusolver_utils.h" +#include // printf +#include // EXIT_FAILURE + +int main(void) { + + int m = 3; + int n = 3; + int nrhs = 3; + int lda = n; + int ldb = n; + int ldx = n; + double hA[] = {1, 2, 3, 2, 5, 5, 3, 5, 12}; + double hB[] = {1, 2, 3, 2, 5, 5, 3, 5, 12}; + double hX[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; + + double hX_result[] = {1, 0, 0, 0, 1, 0, 0, 0, 1}; + + double *dA, *dB, *dX; + CUDA_CHECK( cudaMalloc((void**) &dA, m * n * sizeof(double))); + CUDA_CHECK( cudaMalloc((void**) &dB, m * nrhs * sizeof(double))); + CUDA_CHECK( cudaMalloc((void**) &dX, m * nrhs * sizeof(double))); + CUDA_CHECK( cudaMemcpy(dA, hA, m * n * sizeof(double), cudaMemcpyHostToDevice) ); + CUDA_CHECK( cudaMemcpy(dB, hB, m * nrhs * sizeof(double), cudaMemcpyHostToDevice) ); + CUDA_CHECK( cudaMemcpy(dX, hX, m * nrhs * sizeof(double), cudaMemcpyHostToDevice) ); + + cusolverDnHandle_t handle = NULL; + CUSOLVER_CHECK(cusolverDnCreate(&handle)); + + size_t lwork_bytes; + CUSOLVER_CHECK(cusolverDnDHgels_bufferSize(handle, m, n, nrhs, NULL, lda, NULL, ldb, NULL, ldx, NULL, &lwork_bytes)); + //printf("%d\n", lwork_bytes); + + void *dWorkspace; + cudaMalloc((void**)&dWorkspace, lwork_bytes); + + int *devInfo; + int niter; + CUDA_CHECK( cudaMalloc((void**) &devInfo, sizeof(int))); + CUSOLVER_CHECK(cusolverDnDHgels(handle, m, n, nrhs, dA, lda, dB, ldb, dX, ldx, dWorkspace, lwork_bytes, &niter, devInfo)); + int hdevInfo; + CUDA_CHECK( cudaMemcpy(&hdevInfo, devInfo, sizeof(int), cudaMemcpyDeviceToHost) ); + double values[n*nrhs]; + CUDA_CHECK( cudaMemcpy(values, dX, n * nrhs * sizeof(double), cudaMemcpyDeviceToHost) ); + + int correct = (hdevInfo == 0); + for (int i = 0; i < n * nrhs; i++) { + printf("%f == %f\n", values[i], hX_result[i]); + if (fabsf(values[i] - hX_result[i]) > 0.001) { + correct = 0; + //break; + } + } + + if (correct == 1) { + printf("DnDHgels test PASSED\n"); + } else { + printf("DnDHgels test FAILED\n"); + } + + CUSOLVER_CHECK(cusolverDnDestroy(handle)); + + return EXIT_SUCCESS; + +} \ No newline at end of file diff --git a/cuda_code/DnSXgels.cu b/cuda_code/DnSXgels.cu new file mode 100644 index 0000000000000000000000000000000000000000..deffe9324afc209c1c1f31055af30b38f36f2ba1 --- /dev/null +++ b/cuda_code/DnSXgels.cu @@ -0,0 +1,67 @@ +#include // cudaMalloc, cudaMemcpy, etc. 
+#include // cusolverDn +#include "../../cusolver_utils.h" +#include // printf +#include // EXIT_FAILURE + +int main(void) { + + int m = 3; + int n = 3; + int nrhs = 3; + int lda = n; + int ldb = n; + int ldx = n; + float hA[] = {1, 2, 3, 2, 5, 5, 3, 5, 12}; + float hB[] = {1, 2, 3, 2, 5, 5, 3, 5, 12}; + float hX[] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; + + float hX_result[] = {1, 0, 0, 0, 1, 0, 0, 0, 1}; + + float *dA, *dB, *dX; + CUDA_CHECK( cudaMalloc((void**) &dA, m * n * sizeof(float))); + CUDA_CHECK( cudaMalloc((void**) &dB, m * nrhs * sizeof(float))); + CUDA_CHECK( cudaMalloc((void**) &dX, m * nrhs * sizeof(float))); + CUDA_CHECK( cudaMemcpy(dA, hA, m * n * sizeof(float), cudaMemcpyHostToDevice) ); + CUDA_CHECK( cudaMemcpy(dB, hB, m * nrhs * sizeof(float), cudaMemcpyHostToDevice) ); + CUDA_CHECK( cudaMemcpy(dX, hX, m * nrhs * sizeof(float), cudaMemcpyHostToDevice) ); + + cusolverDnHandle_t handle = NULL; + CUSOLVER_CHECK(cusolverDnCreate(&handle)); + + size_t lwork_bytes; + CUSOLVER_CHECK(cusolverDnSXgels_bufferSize(handle, m, n, nrhs, NULL, lda, NULL, ldb, NULL, ldx, NULL, &lwork_bytes)); + //printf("%d\n", lwork_bytes); + + void *dWorkspace; + cudaMalloc((void**)&dWorkspace, lwork_bytes); + + int *devInfo; + int niter; + CUDA_CHECK( cudaMalloc((void**) &devInfo, sizeof(int))); + CUSOLVER_CHECK(cusolverDnSXgels(handle, m, n, nrhs, dA, lda, dB, ldb, dX, ldx, dWorkspace, lwork_bytes, &niter, devInfo)); + int hdevInfo; + CUDA_CHECK( cudaMemcpy(&hdevInfo, devInfo, sizeof(int), cudaMemcpyDeviceToHost) ); + float values[n*nrhs]; + CUDA_CHECK( cudaMemcpy(values, dX, n * nrhs * sizeof(float), cudaMemcpyDeviceToHost) ); + + int correct = (hdevInfo == 0); + for (int i = 0; i < n * nrhs; i++) { + printf("%f == %f\n", values[i], hX_result[i]); + if (fabsf(values[i] - hX_result[i]) > 0.001) { + correct = 0; + //break; + } + } + + if (correct == 1) { + printf("DnSXgels test PASSED\n"); + } else { + printf("DnSXgels test FAILED\n"); + } + + CUSOLVER_CHECK(cusolverDnDestroy(handle)); + + return EXIT_SUCCESS; + +} \ No newline at end of file diff --git a/cuda_code/ECG_7.cu b/cuda_code/ECG_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..77c5ef7c0cee204a44692dd1b7ea503e6d678c17 --- /dev/null +++ b/cuda_code/ECG_7.cu @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2019-2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include +#include +#include +#include +#include "utilities/graph_utils.cuh" + +namespace { +template +__device__ IndexType +binsearch_maxle(const IndexType *vec, const IndexType val, IndexType low, IndexType high) +{ + while (true) { + if (low == high) return low; // we know it exists + if ((low + 1) == high) return (vec[high] <= val) ? 
high : low; + + IndexType mid = low + (high - low) / 2; + + if (vec[mid] > val) + high = mid - 1; + else + low = mid; + } +} + +template +__global__ void match_check_kernel(IdxT size, + IdxT num_verts, + IdxT *offsets, + IdxT *indices, + IdxT *permutation, + IdxT *parts, + ValT *weights) +{ + IdxT tid = blockIdx.x * blockDim.x + threadIdx.x; + while (tid < size) { + IdxT source = binsearch_maxle(offsets, tid, (IdxT)0, num_verts); + IdxT dest = indices[tid]; + if (parts[permutation[source]] == parts[permutation[dest]]) weights[tid] += 1; + tid += gridDim.x * blockDim.x; + } +} + +struct prg { + __host__ __device__ float operator()(int n) + { + thrust::default_random_engine rng; + thrust::uniform_real_distribution dist(0.0, 1.0); + rng.discard(n); + return dist(rng); + } +}; + +template +struct update_functor { + ValT min_value; + ValT ensemble_size; + update_functor(ValT minv, ValT es) : min_value(minv), ensemble_size(es) {} + __host__ __device__ ValT operator()(ValT input) + { + return min_value + (1 - min_value) * (input / ensemble_size); + } +}; + +/** + * Computes a random permutation vector of length size. A permutation vector of length n + * contains all values [0..n-1] exactly once. + * @param size The length of the permutation vector to generate + * @param seed A seed value for the random number generator, the generator will discard this many + * values before using values. Calling this method with the same seed will result in the same + * permutation vector. + * @return A pointer to memory containing the requested permutation vector. The caller is + * responsible for freeing the allocated memory using ALLOC_FREE_TRY(). + */ +template +void get_permutation_vector(T size, T seed, T *permutation, cudaStream_t stream) +{ + rmm::device_vector randoms_v(size); + + thrust::counting_iterator index(seed); + thrust::transform( + rmm::exec_policy(stream)->on(stream), index, index + size, randoms_v.begin(), prg()); + thrust::sequence(rmm::exec_policy(stream)->on(stream), permutation, permutation + size, 0); + thrust::sort_by_key( + rmm::exec_policy(stream)->on(stream), randoms_v.begin(), randoms_v.end(), permutation); +} + +} // anonymous namespace + +namespace cugraph { + +template +void ecg(GraphCSRView const &graph, WT min_weight, VT ensemble_size, VT *ecg_parts) +{ + CUGRAPH_EXPECTS(graph.edge_data != nullptr, "API error, louvain expects a weighted graph"); + CUGRAPH_EXPECTS(ecg_parts != nullptr, "Invalid API parameter: ecg_parts is NULL"); + + cudaStream_t stream{0}; + + rmm::device_vector ecg_weights_v(graph.edge_data, graph.edge_data + graph.number_of_edges); + + VT size{graph.number_of_vertices}; + VT seed{0}; + // VT seed{1}; // Note... this seed won't work for the unit tests... retest after fixing Louvain. 
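+  // Ensemble loop: for each of the ensemble_size members, build a random
+  // permutation of the vertices, run Louvain clustering on the permuted graph,
+  // and use match_check_kernel to count, per edge, how often its two endpoints
+  // fall in the same partition. update_functor then rescales each count to
+  // min_weight + (1 - min_weight) * count / ensemble_size, and a final Louvain
+  // pass over the reweighted graph produces ecg_parts.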
+ + auto permuted_graph = + std::make_unique>(size, graph.number_of_edges, graph.has_data()); + + // Iterate over each member of the ensemble + for (VT i = 0; i < ensemble_size; i++) { + // Take random permutation of the graph + rmm::device_vector permutation_v(size); + VT *d_permutation = permutation_v.data().get(); + + get_permutation_vector(size, seed, d_permutation, stream); + seed += size; + + detail::permute_graph(graph, d_permutation, permuted_graph->view()); + + // Run Louvain clustering on the random permutation + rmm::device_vector parts_v(size); + VT *d_parts = parts_v.data().get(); + + WT final_modularity; + VT num_level; + + cugraph::louvain(permuted_graph->view(), &final_modularity, &num_level, d_parts, 1); + + // For each edge in the graph determine whether the endpoints are in the same partition + // Keep a sum for each edge of the total number of times its endpoints are in the same partition + dim3 grid, block; + block.x = 512; + grid.x = min(VT{CUDA_MAX_BLOCKS}, (graph.number_of_edges / 512 + 1)); + match_check_kernel<<>>(graph.number_of_edges, + graph.number_of_vertices, + graph.offsets, + graph.indices, + permutation_v.data().get(), + d_parts, + ecg_weights_v.data().get()); + } + + // Set weights = min_weight + (1 - min-weight)*sum/ensemble_size + update_functor uf(min_weight, ensemble_size); + thrust::transform(rmm::exec_policy(stream)->on(stream), + ecg_weights_v.data().get(), + ecg_weights_v.data().get() + graph.number_of_edges, + ecg_weights_v.data().get(), + uf); + + // Run Louvain on the original graph using the computed weights + GraphCSRView louvain_graph; + louvain_graph.indices = graph.indices; + louvain_graph.offsets = graph.offsets; + louvain_graph.edge_data = ecg_weights_v.data().get(); + louvain_graph.number_of_vertices = graph.number_of_vertices; + louvain_graph.number_of_edges = graph.number_of_edges; + + WT final_modularity; + VT num_level; + cugraph::louvain(louvain_graph, &final_modularity, &num_level, ecg_parts, 100); +} + +// Explicit template instantiations. +template void ecg(GraphCSRView const &graph, + float min_weight, + int32_t ensemble_size, + int32_t *ecg_parts); +template void ecg(GraphCSRView const &graph, + double min_weight, + int32_t ensemble_size, + int32_t *ecg_parts); +} // namespace cugraph diff --git a/cuda_code/EmbeddingBag_3.cu b/cuda_code/EmbeddingBag_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..ae13be2bf5fe24a281b1473d4de52976bf83ee56 --- /dev/null +++ b/cuda_code/EmbeddingBag_3.cu @@ -0,0 +1,490 @@ +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include + +namespace at { +namespace native { + +namespace { + +constexpr int MODE_SUM = 0; +constexpr int MODE_MEAN = 1; +constexpr int MODE_MAX = 2; + +// This kernel assumes that all input tensors except `weight` and +// per_sample_weights are contiguous. 
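+// Illustrative example (values chosen here, not taken from any caller): with
+// indices = [3, 1, 4, 1, 5] and offsets = [0, 2] there are two bags, {3, 1}
+// and {4, 1, 5}; the kernel writes offset2bag = [0, 0, 1, 1, 1], reduces each
+// bag's rows of `weight` into `output`, records bag_size = [2, 3] in MEAN
+// mode, and records the argmax row per feature in max_indices in MAX mode.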
+template +__global__ void EmbeddingBag_updateOutputKernel( + index_t *input, index_t *offsets, scalar_t *weight, scalar_t *output, + index_t *offset2bag, int64_t numIndices, int64_t numBags, + int64_t featureSize, int64_t weight_stride0, int64_t weight_stride1, + int mode, index_t *bag_size, index_t *max_indices, + scalar_t* per_sample_weights, int64_t per_sample_weights_stride) { + + // the strategy here is that each bag x feature is handled by a single thread + + using accscalar_t = acc_type; + int64_t chunksPerBag = THCCeilDiv(featureSize, (int64_t)blockDim.x); + int64_t numChunks = numBags * chunksPerBag; + int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; + int64_t chunkStride = gridDim.x * blockDim.y; + + for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { + int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; + if (featureDim < featureSize) { + int64_t bag = chunk / chunksPerBag; + scalar_t *weightFeat = weight + featureDim * weight_stride1; + int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it + int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices; + CUDA_KERNEL_ASSERT(end >= begin); + + accscalar_t weightFeatSum = 0; + scalar_t weightFeatMax; + + int64_t bag_size_ = 0; + int64_t maxWord = -1; + for (int64_t emb = begin; emb < end; emb++) { + const int64_t weightRow = input[emb] * weight_stride0; + scalar_t weightValue = weightFeat[weightRow]; + + if (mode == MODE_MAX) { + if (emb == begin || weightValue > weightFeatMax) { + weightFeatMax = weightValue; + maxWord = input[emb]; + } + } else { + if (per_sample_weights) { + accscalar_t scaleWeightBy = static_cast( + per_sample_weights[emb * per_sample_weights_stride]); + weightFeatSum += scaleWeightBy * static_cast(weightValue); + } else { + weightFeatSum += static_cast(weightValue); + } + } + + bag_size_++; + if (featureDim == 0) { + offset2bag[emb] = bag; + } + } + if (mode == MODE_MEAN) { + if (end == begin) { + bag_size[bag] = 0; + } else { + weightFeatSum = weightFeatSum / static_cast(bag_size_); + bag_size[bag] = bag_size_; + } + } + + if (mode == MODE_MEAN || mode == MODE_SUM) { + output[bag * featureSize + featureDim] = static_cast(weightFeatSum); + } + else if (mode == MODE_MAX) { + if (end == begin) { + // If bag is empty, set output to 0. 
+ weightFeatMax = 0; + } + max_indices[bag * featureSize + featureDim] = maxWord; + output[bag * featureSize + featureDim] = weightFeatMax; + } + } + } +} + + + +Tensor embedding_bag_backward_cuda_sum_avg( + const Tensor &grad, + const Tensor &indices, + const Tensor &offset2bag, + const Tensor &bag_size, + int64_t num_weights, + bool scale_grad_by_freq, int64_t mode, + const Tensor& per_sample_weights) { + + auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + ptrdiff_t numel = indices.numel(); + + if (numel == 0) { + // all empty bags + return at::zeros({num_weights, grad.size(1)}, grad.options()); + } + + int64_t stride = grad_weight.stride(0); + + auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + Tensor count; + + AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_backward_cuda_sum_avg", [&] () { + using device_ptr = thrust::device_ptr; + + // Sort the inputs into sorted with the corresponding indices; we + // don't need a stable or multidimensional sort, so just use Thrust + // directly + { + sorted_indices.copy_(indices); + + auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); + auto policy = thrust::cuda::par(allocator).on(stream); + + // Fill sortedOrigIndices with sequential indices + auto count_iter = thrust::counting_iterator(0); + auto orig_data = device_ptr(orig_indices.data_ptr()); + thrust::copy(policy, count_iter, count_iter + numel, orig_data); + + // Sort; a stable sort is not required + auto sorted_data = device_ptr(sorted_indices.data_ptr()); + thrust::sort_by_key(policy, sorted_data, sorted_data + numel, orig_data, + ThrustLTOp()); + } + + if (scale_grad_by_freq) { + count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + + auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); + auto policy = thrust::cuda::par(allocator).on(stream); + + // Compute an increasing sequence per unique item in sortedIndices: + // sorted: 2 5 5 5 7 7 8 9 9 + // count: 1 1 2 3 1 2 1 1 2 + auto sorted_data = device_ptr(sorted_indices.data_ptr()); + auto count_data = device_ptr(count.data_ptr()); + thrust::inclusive_scan_by_key(policy, sorted_data, sorted_data + numel, + thrust::make_constant_iterator(1), + count_data); + + // Take the maximum of each count per unique key in reverse: + // sorted: 2 5 5 5 7 7 8 9 9 + // count: 1 3 3 3 2 2 1 2 2 + thrust::inclusive_scan_by_key( + policy, thrust::make_reverse_iterator(sorted_data + numel), + thrust::make_reverse_iterator(sorted_data), + thrust::make_reverse_iterator(count_data + numel), + thrust::make_reverse_iterator(count_data + numel), + thrust::equal_to(), thrust::maximum()); + } + }); + return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, + count, num_weights, /* padding_idx= */ -1, scale_grad_by_freq, + mode == MODE_MEAN, offset2bag, bag_size, per_sample_weights); +} + +template +__global__ void EmbeddingBag_accGradParametersKernel_max( + index_t *max_indices, scalar_t *gradOutput, + scalar_t *gradWeight, int64_t stride, int64_t numBags) { + + using accscalar_t = acc_type; + + int64_t chunksPerBag = THCCeilDiv(stride, (int64_t)blockDim.x); + int64_t numChunks = numBags * chunksPerBag; + int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; + int64_t chunkStride = gridDim.x * blockDim.y; + + for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { 
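+    // Each chunk maps back to one (bag, feature column) pair: the bag is
+    // chunk / chunksPerBag and the column is the chunk remainder times
+    // blockDim.x plus threadIdx.x, mirroring the forward kernel's layout.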
+ int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; + if (featureDim < stride) { + int64_t bag = chunk / chunksPerBag; + + index_t word_idx = max_indices[bag * stride + featureDim]; + if (word_idx >= 0) { + // If bag is empty, we have max_indices[idx] set to -1 in forward. + gpuAtomicAdd(&(gradWeight[word_idx * stride + featureDim]), + gradOutput[bag * stride + featureDim]); + } + } + } +} + +Tensor embedding_bag_backward_cuda_max(const Tensor &grad, + const Tensor &max_indices, + int64_t num_weights) { + + auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options()); + + int64_t stride = grad_weight.stride(0); + + int64_t numBags = grad.size(0); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + +#ifdef __HIP_PLATFORM_HCC__ + dim3 block = dim3(64, 4); +#else + dim3 block = dim3(32, 8); +#endif + int grid = 1024; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "embedding_bag_backward_cuda_max", [&] { + AT_DISPATCH_INDEX_TYPES(max_indices.scalar_type(), "embedding_bag_backward_cuda_max", [&] () { + EmbeddingBag_accGradParametersKernel_max< + scalar_t, index_t><<>>( + max_indices.data_ptr(), grad.data_ptr(), + grad_weight.data_ptr(), stride, numBags); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + + return grad_weight; +} +} + +// Assumes all input tensors are contiguous. +// See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details +std::tuple +_embedding_bag_forward_only_cuda(const Tensor &weight, const Tensor &indices, + const Tensor &offsets, const bool scale_grad_by_freq, + const int64_t mode, bool sparse, const c10::optional& per_sample_weights_opt, + bool include_last_offset) { + // See [Note: hacky wrapper removal for optional tensor] + const Tensor& per_sample_weights = c10::value_or_else(per_sample_weights_opt, [] {return Tensor();}); + + return _embedding_bag_cuda( + weight, + indices, + offsets, + scale_grad_by_freq, + mode, + sparse, + per_sample_weights, + include_last_offset); +} + +// Assumes all input tensors are contiguous. +// See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details +std::tuple +_embedding_bag_cuda(const Tensor &weight, const Tensor &indices, + const Tensor &offsets, const bool scale_grad_by_freq, + const int64_t mode, bool sparse, const c10::optional& per_sample_weights_opt, + bool include_last_offset) { + // See [Note: hacky wrapper removal for optional tensor] + const Tensor& per_sample_weights = c10::value_or_else(per_sample_weights_opt, [] {return Tensor();}); + + auto indices_arg = TensorArg(indices, "indices", 1); + checkScalarTypes("embedding_bag_cuda", indices_arg, {kLong, kInt}); + auto offsets_arg = TensorArg(offsets, "offsets", 1); + checkScalarTypes("embedding_bag_cuda", offsets_arg, {kLong, kInt}); + checkSameType("embedding_bag_cuda", indices_arg, offsets_arg); + auto weight_arg = TensorArg(weight, "weight", 1); + checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg); + checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg); + + int64_t numIndices = indices.size(0); + int64_t numBags = offsets.size(0); + if (include_last_offset) { + // Check https://github.com/pytorch/pytorch/issues/29019 + // We plan to add one more element in offsets, which is equal to the size of + // indices. Currently for cuda devices, we still use the legacy + // implementation even this flag is enabled. 
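+    // Net effect: offsets then carries one trailing entry equal to
+    // indices.size(0), so the real bag count is offsets.size(0) - 1,
+    // adjusted just below.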
+ TORCH_CHECK( + numBags >= 1, "include_last_offset: numBags should be at least 1"); + numBags -= 1; + } + int64_t featureSize = weight.size(1); + + auto bag_size = at::empty(offsets.sizes(), indices.options()); + auto offset2bag = + at::empty({indices.size(0)}, indices.options()); // offset2bag = [0 0 0 0 0] + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + auto output = at::empty({numBags, featureSize}, weight.options()); + + Tensor max_indices; + + if (mode == MODE_MAX) { + max_indices = at::empty({numBags, featureSize}, indices.options()); + } else { + // No need to allocate if we aren't doing a backwards pass + max_indices = at::empty({0}, indices.options()); + } + +#ifdef __HIP_PLATFORM_HCC__ + dim3 block = dim3(64, 4); +#else + dim3 block = dim3(32, 8); +#endif + int grid = 1024; + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, weight.scalar_type(), "embedding_bag_cuda", [&] { + AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_bag_cuda", [&] () { + EmbeddingBag_updateOutputKernel<<>>( + indices.data_ptr(), offsets.data_ptr(), + weight.data_ptr(), output.data_ptr(), + offset2bag.data_ptr(), numIndices, numBags, featureSize, + weight.stride(0), weight.stride(1), mode, bag_size.data_ptr(), + mode == MODE_MAX ? max_indices.data_ptr() : NULL, + per_sample_weights.defined() ? per_sample_weights.data_ptr() : NULL, + per_sample_weights.defined() ? per_sample_weights.stride(0) : 0); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + + return std::tuple(output, offset2bag, bag_size, max_indices); +} + +Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &indices, + const Tensor &offsets, + const Tensor &offset2bag, + const Tensor &bag_size_, + const Tensor &max_indices, + int64_t num_weights, + bool scale_grad_by_freq, int64_t mode, const c10::optional& per_sample_weights_opt) { + // See [Note: hacky wrapper removal for optional tensor] + const Tensor& per_sample_weights = c10::value_or_else(per_sample_weights_opt, [] {return Tensor();}); + + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("_embedding_bag_dense_backward_cuda"); + + // indices, offsets and offset2bag are assumed having correct dtypes and + // contiguous here due to the checks in _embedding_bag_backward in + // EmbeddingBag.cpp. + // Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml + // for more details. 
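+  // The switch below dispatches on the reduction mode: SUM and MEAN share the
+  // sorted-indices path in embedding_bag_backward_cuda_sum_avg, while MAX
+  // routes gradients through the saved argmax positions in
+  // embedding_bag_backward_cuda_max.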
+ + Tensor grad = grad_.contiguous(); + auto indices_arg = TensorArg(indices, "indices", 1); + auto offsets_arg = TensorArg(offsets, "offsets", 1); + auto grad_arg = TensorArg(grad, "grad", 1); + checkSameGPU("embedding_bag_cuda", grad_arg, offsets_arg); + checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg); + + + switch (mode) { + case MODE_SUM: + case MODE_MEAN: + if (mode == MODE_MEAN) + AT_ASSERT(!per_sample_weights.defined()); + return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag, + bag_size_, num_weights, scale_grad_by_freq, mode, per_sample_weights); + + case MODE_MAX: + AT_ASSERT(!per_sample_weights.defined()); + return embedding_bag_backward_cuda_max(grad, max_indices, num_weights); + + default: + AT_ERROR( + "Unknown mode for embedding_bag_backward_cuda ", mode); + } +} + +template +__inline__ __device__ +static scalar_t warpReduceSum(scalar_t val) { + for (int offset = C10_WARP_SIZE/2; offset > 0; offset /= 2) + val += WARP_SHFL_DOWN(val, offset); + return val; +} + +template +__global__ static void _embedding_bag_per_sample_weights_backward_kernel( + const scalar_t* grad, int64_t grad_stride0, int64_t grad_stride1, + const scalar_t* weight, int64_t weight_stride0, int64_t weight_stride1, + const index_t* indices, // contiguous + const index_t* offset2bag, // contiguous + int64_t num_samples, + int64_t embedding_features, + scalar_t* output) { + using accscalar_t = acc_type; + const int idx = threadIdx.x + blockIdx.x * blockDim.x; + const int warp = idx / C10_WARP_SIZE; + const int thread_in_warp = idx % C10_WARP_SIZE; + const int num_warps = blockDim.x * gridDim.x / C10_WARP_SIZE; + + // Each warp is responsible for the accumulation of one sample. + // This involves doing one dot product between grad[bag_idx] and weight[embedding_idx]. + for (int sample_idx = warp; sample_idx < num_samples; sample_idx += num_warps) { + accscalar_t result = 0.; + const int bag_idx = (int)offset2bag[sample_idx]; + const int embedding_idx = (int)indices[sample_idx]; + for (int feature_idx = thread_in_warp; feature_idx < embedding_features; + feature_idx += C10_WARP_SIZE) { + result += + grad[grad_stride0 * bag_idx + grad_stride1 * feature_idx] * + weight[weight_stride0 * embedding_idx + weight_stride1 * feature_idx]; + } + result = warpReduceSum(result); + if (thread_in_warp == 0) { + output[sample_idx] = result; + } + } +} + +Tensor _embedding_bag_per_sample_weights_backward_cuda( + const Tensor& grad, + const Tensor& weight, // NB: embedding table, not per_sample_weights + const Tensor& indices, + const Tensor& offsets, + const Tensor& offset2bag, + int64_t mode) { + TORCH_CHECK( + mode == MODE_SUM, + "embedding_bag_backward: per_sample_weights only supported for mode='sum'"); + + AT_ASSERT(grad.dim() == 2); + auto embedding_features = grad.size(1); + + AT_ASSERT(indices.dim() == 1); + auto num_samples = indices.size(0); + + AT_ASSERT(weight.dim() == 2); + AT_ASSERT(weight.size(1) == embedding_features); + + const int threads_per_block = 512; + const int warps_per_block = threads_per_block / C10_WARP_SIZE; + + dim3 block(threads_per_block); + dim3 grid((num_samples + warps_per_block - 1) / warps_per_block); + + auto output = at::empty({num_samples}, grad.options()); + + // Early return when there is no samples in the batch. 
This saves unnecesary kernel + // launch, but also prevents cudaGetLastError() to complain about invalid launch args + if (num_samples == 0) { + return output; + } + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() { + AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() { + _embedding_bag_per_sample_weights_backward_kernel + <<>>( + grad.data_ptr(), grad.stride(0), grad.stride(1), + weight.data_ptr(), weight.stride(0), weight.stride(1), + indices.data_ptr(), + offset2bag.data_ptr(), + num_samples, + embedding_features, + output.data_ptr()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + } + ); + return output; +} + +} +} diff --git a/cuda_code/Embedding_6.cu b/cuda_code/Embedding_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..365db61d06c0e45bece74631c2058636ae400f0b --- /dev/null +++ b/cuda_code/Embedding_6.cu @@ -0,0 +1,379 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include + + +namespace at { namespace native { + +namespace { + +#ifdef __HIP_PLATFORM_HCC__ +static const int BLOCKDIMY = 16; +#else +static const int BLOCKDIMY = 32; +#endif + +template + +__global__ void embedding_backward_feature_kernel + (int64_t* indices, + const scalar_t* __restrict__ grad, + scalar_t* __restrict__ grad_weight, + int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot + int64_t stride, + int padding_idx) +{ + extern __shared__ char buf[]; + accscalar_t* smem = (accscalar_t*)buf; + accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y; + int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y); + + const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size + + const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim + + for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) + { + // Entire block cooperates to load a batch of 1024 indices to process + int tid = threadIdx.x + threadIdx.y*blockDim.x; + if(batch_start + tid < n) + indices_batch[tid] = (int)indices[batch_start + tid]; + + int batch_end = batch_start + blockDim.x*blockDim.y < n ? + batch_start + blockDim.x*blockDim.y : n; + + // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 + for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) + { + // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group + // leaders are done with their accumulates before other warps start loading again. + __syncthreads(); + + int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? + (batch_end - chunk_start) : blockDim.y; + + int src_row = chunk_start + threadIdx.y; + int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight + + // All warps load their smem segments with incoming grad data + if(src_row < n && f < s && dst_row != padding_idx) + my_s[threadIdx.x] = static_cast(grad[src_row*stride + f]); + + __syncthreads(); + + // To ensure determinism, we can't just have each warp add its grad data to its dst_row. + // We need to check if any other warps pulled grad data targeting dst_row. + // If so, we elect the first warp in each matching group as the leader. 
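+ // (Illustration: if warps 3, 7 and 12 of the current chunk all target the
+ // same dst_row, each lane of those warps sees a matchmask with bits 3, 7 and
+ // 12 set, __ffs(matchmask) - 1 == 3, and only the warp with threadIdx.y == 3
+ // carries on as the leader.)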
+ // Each leader warp serializes the accumulates targeting dst_row in shared memory, + // then finishes by adding the accumulated buffer to dst_row in grad_weight. + if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync + { + int match_found_this_thread = + (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); + if(threadIdx.x >= n_this_chunk) + match_found_this_thread = 0; +#ifdef __HIP_PLATFORM_HCC__ + unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); + int first_remaining_peer = __ffsll(matchmask) - 1; +#else + unsigned int matchmask = WARP_BALLOT(match_found_this_thread); + int first_remaining_peer = __ffs(matchmask) - 1; +#endif + + if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader + { + matchmask ^= (1 << first_remaining_peer); + while(matchmask) + { +#ifdef __HIP_PLATFORM_HCC__ + first_remaining_peer = __ffsll(matchmask) - 1; +#else + first_remaining_peer = __ffs(matchmask) - 1; +#endif + my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer]; + matchmask ^= (1 << first_remaining_peer); + } + if(f < s) + grad_weight[dst_row*stride + f] += static_cast(my_s[threadIdx.x]); + } + } + } + } +} + + +template +__global__ void embedding_backward_kernel( + int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, + int64_t* count, int64_t numel, int64_t stride, int padding_idx) { + + using accscalar_t = acc_type; + int idx = blockIdx.x * 4 + threadIdx.y; + + // Each warp is responsible for an input into the LookupTable. + // If the preceding input has the same as this input, then the warp + // exits immediately. The warp also processes subsequent inputs with the + // same value. + // + // Input Warp + // 1 + // 1 ( exits without doing any work) + // 5 + // 8 + + // Number of values proceessed by each thread (grain size) + const int SZ = 4; + + if (idx < numel + && (idx == 0 || input[idx] != input[idx - 1]) + && input[idx] != padding_idx) { + do { + const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; + const int weight_row = ((int) input[idx]) * stride; + const int grad_row = ((int) indices[idx]) * stride; + const accscalar_t scale = count ? 
(accscalar_t)1.0 / count[idx] : 1.0; + + accscalar_t gradient[SZ]; + accscalar_t weight[SZ]; + + #pragma unroll + for (int ii = 0; ii < SZ; ii++) { + int feature_dim = start_feature + ii * C10_WARP_SIZE; + if (feature_dim < stride) { + gradient[ii] = static_cast(grad_output[grad_row + feature_dim]); + weight[ii] = static_cast(grad_weight[weight_row + feature_dim]); + } + } + + #pragma unroll + for (int ii = 0; ii < SZ; ii++) { + weight[ii] += gradient[ii] * scale; + } + + #pragma unroll + for (int ii = 0; ii < SZ; ii++) { + int feature_dim = start_feature + ii * C10_WARP_SIZE; + if (feature_dim < stride) { + grad_weight[weight_row + feature_dim] = static_cast(weight[ii]); + } + } + + idx++; + } while (idx < numel && input[idx] == input[idx - 1]); + } +} + +/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ +template +__global__ void renorm_kernel( + scalar_t* weights, int64_t* indices, accscalar_t max_norm, + accscalar_t norm_type, int64_t dim, + int64_t weights_stride0, int64_t weights_stride1) { + + // Some casting hacks since dynamic shared memory and templates don't work together: + extern __shared__ unsigned char smem[]; + auto sdata = reinterpret_cast(smem); + + int tid = threadIdx.x; + int base_index = indices[blockIdx.x] * weights_stride0; + + accscalar_t v = 0; + for (int i = tid; i < dim; i += blockDim.x) { + auto x = static_cast(weights[base_index + i * weights_stride1]); + if (norm_type == 1) { + v += std::abs(x); + } else if (norm_type == 2) { + v += x * x; + } else { + v += std::pow(x, norm_type); + } + } + + using Op = ReduceAdd; + v = reduceBlock(sdata, blockDim.x, v, Op(), 0); + + if (tid == 0) { + sdata[0] = std::pow(v, static_cast(1.0 / norm_type)); + } + __syncthreads(); + + // now we renormalize the blocks that need it + if (sdata[0] > max_norm) { + auto factor = static_cast(max_norm / (sdata[0] + 1e-7)); + for (int i = tid; i < dim; i += blockDim.x) { + weights[base_index + i * weights_stride1] *= factor; + } + } +} + +} // anonymous namespace + +Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices, + int64_t num_weights, int64_t padding_idx, + bool scale_grad_by_freq) { + auto grad_arg = TensorArg(grad_, "grad", 1); + auto indices_arg = TensorArg(indices, "indices", 1); + checkScalarType("embedding_backward", indices_arg, kLong); + checkSameGPU("embedding_backward", grad_arg, indices_arg); + + auto num_indices = indices.numel(); + auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + if (num_indices <= 768 && !scale_grad_by_freq) { + auto indices_contig = indices.contiguous(); + auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); + int64_t stride = grad_weight.stride(0); + dim3 grid(THCCeilDiv(stride, (int64_t)C10_WARP_SIZE)); + dim3 block(C10_WARP_SIZE, BLOCKDIMY); + + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, at::ScalarType::BFloat16, + grad.scalar_type(), + "embedding_backward", + [&] + { + AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "embedding_backward", [&] { + using accscalar_t = acc_type; + embedding_backward_feature_kernel + <<>> + (indices_contig.data_ptr(), + grad.data_ptr(), + grad_weight.data_ptr(), + static_cast(num_indices), + static_cast(stride), + static_cast(padding_idx)); + }); + }); + + AT_CUDA_CHECK(cudaGetLastError()); + return grad_weight; + } + + auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + auto orig_indices = 
at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + using device_ptr = thrust::device_ptr; + + // Sort the inputs into sorted with the corresponding indices; we + // don't need a stable or multidimensional sort, so just use Thrust + // directly + { + sorted_indices.copy_(indices); + + auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); + auto policy = thrust::cuda::par(allocator).on(stream); + + // Fill sortedOrigIndices with sequential indices + auto count_iter = thrust::counting_iterator(0); + auto orig_data = device_ptr(orig_indices.data_ptr()); + thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); + + // Sort; a stable sort is not required + auto sorted_data = device_ptr(sorted_indices.data_ptr()); + thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, + ThrustLTOp()); + } + + Tensor count; + if (scale_grad_by_freq) { + count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + + auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); + auto policy = thrust::cuda::par(allocator).on(stream); + + // Compute an increasing sequence per unique item in sortedIndices: + // sorted: 2 5 5 5 7 7 8 9 9 + // count: 1 1 2 3 1 2 1 1 2 + auto sorted_data = device_ptr(sorted_indices.data_ptr()); + auto count_data = device_ptr(count.data_ptr()); + thrust::inclusive_scan_by_key( + policy, + sorted_data, + sorted_data + num_indices, + thrust::make_constant_iterator(1), + count_data + ); + + // Take the maximum of each count per unique key in reverse: + // sorted: 2 5 5 5 7 7 8 9 9 + // count: 1 3 3 3 2 2 1 2 2 + thrust::inclusive_scan_by_key( + policy, + thrust::make_reverse_iterator(sorted_data + num_indices), + thrust::make_reverse_iterator(sorted_data), + thrust::make_reverse_iterator(count_data + num_indices), + thrust::make_reverse_iterator(count_data + num_indices), + thrust::equal_to(), + thrust::maximum() + ); + } + + return embedding_backward_cuda_kernel(grad, orig_indices, + sorted_indices, count, num_weights, padding_idx); +} + +Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, + double max_norm, double norm_type) { + auto self_arg = TensorArg(self, "self", 1); + auto indices_arg = TensorArg(indices, "indices", 1); + checkDim("embedding_renorm_", self_arg, 2); + checkSameGPU("embedding_renorm", self_arg, indices_arg); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); + auto policy = thrust::cuda::par(allocator).on(stream); + + using device_ptr = thrust::device_ptr; + + auto num_indices = indices.numel(); + auto indices_contig = indices.contiguous(); + auto indices_data = device_ptr(indices_contig.data_ptr()); + + // FIXME: thrust::unique only removes consecutive elements that are equal. 
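+ // (For example, unique_copy turns {5, 5, 2, 5} into {5, 2, 5}: row 5 still
+ // appears twice, so two blocks may renormalize it concurrently.)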
+ // We have race conditions when indices contain duplicates which are not + // adjacent + auto unique_indices = at::empty(indices.numel(), indices.options()); + auto unique_data = device_ptr(unique_indices.data_ptr()); + auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data); + auto num_unique_indices = static_cast(end - unique_data); + + dim3 grid(num_unique_indices); + dim3 block(128); + int dim = self.stride(0); + + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_backward", [&] { + AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "embedding_backward", [&] { + using accscalar_t = acc_type; + renorm_kernel<<>>( + self.data_ptr(), + unique_indices.data_ptr(), + static_cast(max_norm), + static_cast(norm_type), + dim, self.stride(0), self.stride(1)); + }); + }); + AT_CUDA_CHECK(cudaGetLastError()); + + return self; +} + + +}} // namespace at::native diff --git a/cuda_code/Embedding_9.cu b/cuda_code/Embedding_9.cu new file mode 100644 index 0000000000000000000000000000000000000000..04072ab4abc4d83a9537c0d9c2099006b2d94223 --- /dev/null +++ b/cuda_code/Embedding_9.cu @@ -0,0 +1,389 @@ +#include "ATen/ATen.h" +#include "ATen/AccumulateType.h" +#include "ATen/TensorUtils.h" +#include "ATen/cuda/CUDAContext.h" +#include "c10/util/Exception.h" + +#include +#include +#include +#include + +#include +#include + + +namespace at { namespace native { + +namespace { + +#ifdef __HIP_PLATFORM_HCC__ +static const int WARP_SIZE = 64; +static const int BLOCKDIMY = 16; +#else +static const int WARP_SIZE = 32; +static const int BLOCKDIMY = 32; +#endif + +template + +__global__ void embedding_backward_feature_kernel + (int64_t* indices, + const scalar_t* __restrict__ grad, + scalar_t* __restrict__ grad_weight, + int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot + int64_t stride, + int padding_idx) +{ + extern __shared__ char buf[]; + accscalar_t* smem = (accscalar_t*)buf; + accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y; + int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y); + + const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size + + const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim + + for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) + { + // Entire block cooperates to load a batch of 1024 indices to process + int tid = threadIdx.x + threadIdx.y*blockDim.x; + if(batch_start + tid < n) + indices_batch[tid] = (int)indices[batch_start + tid]; + + int batch_end = batch_start + blockDim.x*blockDim.y < n ? + batch_start + blockDim.x*blockDim.y : n; + + // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 + for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) + { + // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group + // leaders are done with their accumulates before other warps start loading again. + __syncthreads(); + + int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? 
+ (batch_end - chunk_start) : blockDim.y; + + int src_row = chunk_start + threadIdx.y; + int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight + + // All warps load their smem segments with incoming grad data + if(src_row < n && f < s && dst_row != padding_idx) + my_s[threadIdx.x] = static_cast(grad[src_row*stride + f]); + + __syncthreads(); + + // To ensure determinism, we can't just have each warp add its grad data to its dst_row. + // We need to check if any other warps pulled grad data targeting dst_row. + // If so, we elect the first warp in each matching group as the leader. + // Each leader warp serializes the accumulates targeting dst_row in shared memory, + // then finishes by adding the accumulated buffer to dst_row in grad_weight. + if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync + { + int match_found_this_thread = + (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); + if(threadIdx.x >= n_this_chunk) + match_found_this_thread = 0; +#ifdef __HIP_PLATFORM_HCC__ + unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); + int first_remaining_peer = __ffsll(matchmask) - 1; +#else + unsigned int matchmask = WARP_BALLOT(match_found_this_thread); + int first_remaining_peer = __ffs(matchmask) - 1; +#endif + + if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader + { + matchmask ^= (1 << first_remaining_peer); + while(matchmask) + { +#ifdef __HIP_PLATFORM_HCC__ + first_remaining_peer = __ffsll(matchmask) - 1; +#else + first_remaining_peer = __ffs(matchmask) - 1; +#endif + my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer]; + matchmask ^= (1 << first_remaining_peer); + } + if(f < s) + grad_weight[dst_row*stride + f] += static_cast(my_s[threadIdx.x]); + } + } + } + } +} + + +template +__global__ void embedding_backward_kernel( + int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, + int64_t* count, int64_t numel, int64_t stride, int padding_idx) { + + using accscalar_t = acc_type; + int idx = blockIdx.x * 4 + threadIdx.y; + + // Each warp is responsible for an input into the LookupTable. + // If the preceding input has the same as this input, then the warp + // exits immediately. The warp also processes subsequent inputs with the + // same value. + // + // Input Warp + // 1 + // 1 ( exits without doing any work) + // 5 + // 8 + + // Number of values proceessed by each thread (grain size) + const int SZ = 4; + + if (idx < numel + && (idx == 0 || input[idx] != input[idx - 1]) + && input[idx] != padding_idx) { + do { + const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; + const int weight_row = ((int) input[idx]) * stride; + const int grad_row = ((int) indices[idx]) * stride; + const accscalar_t scale = count ? 
(accscalar_t)1.0 / count[idx] : 1.0; + + accscalar_t gradient[SZ]; + accscalar_t weight[SZ]; + + #pragma unroll + for (int ii = 0; ii < SZ; ii++) { + int feature_dim = start_feature + ii * WARP_SIZE; + if (feature_dim < stride) { + gradient[ii] = static_cast(grad_output[grad_row + feature_dim]); + weight[ii] = static_cast(grad_weight[weight_row + feature_dim]); + } + } + + #pragma unroll + for (int ii = 0; ii < SZ; ii++) { + weight[ii] += gradient[ii] * scale; + } + + #pragma unroll + for (int ii = 0; ii < SZ; ii++) { + int feature_dim = start_feature + ii * WARP_SIZE; + if (feature_dim < stride) { + grad_weight[weight_row + feature_dim] = static_cast(weight[ii]); + } + } + + idx++; + } while (idx < numel && input[idx] == input[idx - 1]); + } +} + +/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ +template +__global__ void renorm_kernel( + scalar_t* weights, int64_t* indices, accscalar_t max_norm, + accscalar_t norm_type, int64_t dim, + int64_t weights_stride0, int64_t weights_stride1) { + + // Some casting hacks since dynamic shared memory and templates don't work together: + extern __shared__ unsigned char smem[]; + auto sdata = reinterpret_cast(smem); + + int tid = threadIdx.x; + int base_index = indices[blockIdx.x] * weights_stride0; + + accscalar_t v = 0; + for (int i = tid; i < dim; i += blockDim.x) { + auto x = static_cast(weights[base_index + i * weights_stride1]); + if (norm_type == 1) { + v += std::abs(x); + } else if (norm_type == 2) { + v += x * x; + } else { + v += std::pow(x, norm_type); + } + } + + using Op = ReduceAdd; + v = reduceBlock(sdata, blockDim.x, v, Op(), 0); + + if (tid == 0) { + sdata[0] = std::pow(v, static_cast(1.0 / norm_type)); + } + __syncthreads(); + + // now we renormalize the blocks that need it + if (sdata[0] > max_norm) { + auto factor = static_cast(max_norm / (sdata[0] + 1e-7)); + for (int i = tid; i < dim; i += blockDim.x) { + weights[base_index + i * weights_stride1] *= factor; + } + } +} + +} // anonymous namespace + +Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices, + int64_t num_weights, int64_t padding_idx, + bool scale_grad_by_freq) { + auto grad_arg = TensorArg(grad_, "grad", 1); + auto indices_arg = TensorArg(indices, "indices", 1); + checkScalarType("embedding_backward", indices_arg, kLong); + checkSameGPU("embedding_backward", grad_arg, indices_arg); + + auto num_indices = indices.numel(); + auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); + auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); + + int64_t stride = grad_weight.stride(0); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + if (num_indices <= 768 && !scale_grad_by_freq) { + auto indices_contig = indices.contiguous(); + + dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE)); + dim3 block(WARP_SIZE, BLOCKDIMY); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF + (grad.type(), + "embedding_backward", + [&] + { + using accscalar_t = acc_type; + embedding_backward_feature_kernel + <<>> + (indices_contig.data(), + grad.data(), + grad_weight.data(), + static_cast(num_indices), + static_cast(stride), + static_cast(padding_idx)); + }); + + THCudaCheck(cudaGetLastError()); + return grad_weight; + } + + auto sorted_indices = at::empty_like(indices); + auto orig_indices = at::empty_like(indices); + using device_ptr = thrust::device_ptr; + + // Sort the inputs into sorted with the corresponding indices; we + // don't need a stable or multidimensional sort, so just use Thrust + 
// directly + { + sorted_indices.copy_(indices); + + auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); + auto policy = thrust::cuda::par(allocator).on(stream); + + // Fill sortedOrigIndices with sequential indices + auto count_iter = thrust::counting_iterator(0); + auto orig_data = device_ptr(orig_indices.data()); + thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); + + // Sort; a stable sort is not required + auto sorted_data = device_ptr(sorted_indices.data()); + thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, + ThrustLTOp()); + } + + Tensor count; + if (scale_grad_by_freq) { + count = at::empty_like(indices); + + auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); + auto policy = thrust::cuda::par(allocator).on(stream); + + // Compute an increasing sequence per unique item in sortedIndices: + // sorted: 2 5 5 5 7 7 8 9 9 + // count: 1 1 2 3 1 2 1 1 2 + auto sorted_data = device_ptr(sorted_indices.data()); + auto count_data = device_ptr(count.data()); + thrust::inclusive_scan_by_key( + policy, + sorted_data, + sorted_data + num_indices, + thrust::make_constant_iterator(1), + count_data + ); + + // Take the maximum of each count per unique key in reverse: + // sorted: 2 5 5 5 7 7 8 9 9 + // count: 1 3 3 3 2 2 1 2 2 + thrust::inclusive_scan_by_key( + policy, + thrust::make_reverse_iterator(sorted_data + num_indices), + thrust::make_reverse_iterator(sorted_data), + thrust::make_reverse_iterator(count_data + num_indices), + thrust::make_reverse_iterator(count_data + num_indices), + thrust::equal_to(), + thrust::maximum() + ); + } + + dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128)); + dim3 block(32, 4); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] { + embedding_backward_kernel<<>>( + sorted_indices.data(), + orig_indices.data(), + grad.data(), + grad_weight.data(), + count.defined() ? count.data() : nullptr, + num_indices, + stride, + padding_idx); + }); + THCudaCheck(cudaGetLastError()); + + return grad_weight; +} + +Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, + double max_norm, double norm_type) { + auto self_arg = TensorArg(self, "self", 1); + auto indices_arg = TensorArg(indices, "indices", 1); + checkDim("embedding_renorm_", self_arg, 2); + checkSameGPU("embedding_renorm", self_arg, indices_arg); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); + auto policy = thrust::cuda::par(allocator).on(stream); + + using device_ptr = thrust::device_ptr; + + auto num_indices = indices.numel(); + auto indices_contig = indices.contiguous(); + auto indices_data = device_ptr(indices_contig.data()); + + // FIXME: thrust::unique only removes consecutive elements that are equal. 
+ // We have race conditions when indices contain duplicates which are not + // adjacent + auto unique_indices = at::empty(indices.numel(), indices.options()); + auto unique_data = device_ptr(unique_indices.data()); + auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data); + auto num_unique_indices = static_cast(end - unique_data); + + dim3 grid(num_unique_indices); + dim3 block(128); + int dim = self.stride(0); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] { + using accscalar_t = acc_type; + renorm_kernel<<>>( + self.data(), + unique_indices.data(), + static_cast(max_norm), + static_cast(norm_type), + dim, self.stride(0), self.stride(1)); + }); + THCudaCheck(cudaGetLastError()); + + return self; +} + + +}} // namespace at::native diff --git a/cuda_code/ExponentiationKernel.cu b/cuda_code/ExponentiationKernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..61797fe740c3a25c06df85ccd934ec3c7e79e849 --- /dev/null +++ b/cuda_code/ExponentiationKernel.cu @@ -0,0 +1,35 @@ +#include "../../cuda.h" +#include "../../symbols/NaN.cuh" + +__global__ void exponentiationKernel ( + int batchSize, + int numberRows, + int numberEntriesPerInstance, + int numberIterations, + float* source, + float* destination) { + int indexInstance = blockIdx.x; + int indexColumn = blockIdx.y; + + int startInstanceWithinBatch = indexInstance * numberEntriesPerInstance; + int startColumnWithinInstance = indexColumn * numberRows; + int startRowWithinColumn = threadIdx.x * numberIterations; + + int startColumnWithinBatch = startInstanceWithinBatch + startColumnWithinInstance; + + int firstEntryWithinBatch = startColumnWithinBatch + startRowWithinColumn; + int startNextColumn = startColumnWithinBatch + numberRows; + + if(firstEntryWithinBatch < startNextColumn) { + int lastEntryWithinBatch = min(firstEntryWithinBatch + numberIterations, startNextColumn); + + if(indexInstance < batchSize) { + for(int indexEntry = firstEntryWithinBatch; indexEntry < lastEntryWithinBatch; indexEntry++) { + destination[indexEntry] = expf(source[indexEntry]); + } + } + else { + setToNaN(destination, firstEntryWithinBatch, lastEntryWithinBatch); + } + } +} \ No newline at end of file diff --git a/cuda_code/FBFFT.cu b/cuda_code/FBFFT.cu new file mode 100644 index 0000000000000000000000000000000000000000..6a93fe97c38ca9885161101adeca77a5ca7f9488 --- /dev/null +++ b/cuda_code/FBFFT.cu @@ -0,0 +1,1013 @@ +// Copyright 2004-present Facebook. All Rights Reserved. + +#include "cuda/Complex.cuh" +#include "cuda/ComputeCapabilities.cuh" +#include "cuda/CudaUtils.cuh" +#include "cuda/DeviceTensor.cuh" +#include "fft/CuFFTWrapper.cuh" +#include "THCTensor.h" +#include "DeviceTensorUtils.h" + +#include +#include + +using namespace facebook::cuda; + +namespace facebook { namespace deeplearning { namespace torch { + +namespace detail { + +#define PI 3.14159265358979323846264338327f + +__device__ __forceinline__ +unsigned int reverse(unsigned int x, unsigned int nbits) { + return __brev(x) >> (32 - nbits); +} + +// This adjustment modulo FFTSize is used as a stepping stone to cram multiple +// FFTs of size < 32 into a single warp. +// The invariant is: +// assert(FFTPerWarp * FFTSize == blockDim.x || FFTPerWarp == 1); +// This has no effect if FFTSize >= 32 or FFTPerWarp == 1. +// This is for the cases 2, 4, 8 and 16 and buys us additional perf. 
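+// For example (illustration only): with FFTSize == 8 and FFTPerWarp == 4 a
+// warp holds four independent FFT-8s, and thread 21 becomes lane
+// (21 & 7) == 5 of FFT number (21 >> 3) == 2 within that warp.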
+template +__device__ __forceinline__ int adjustedThreadIdxX() { + if (FFTSize < 32) { + return (threadIdx.x & (FFTSize - 1)); + } else { + return threadIdx.x; + } +} + +template +__device__ __forceinline__ int adjustedThreadIdxY() { + if (FFTSize < 32) { + return (threadIdx.y & (FFTSize - 1)); + } else { + return threadIdx.y; + } +} + +// Computes the batch number based on the fact that batches are divided by: +// - blockIdx.x, each block computes a chunk of bacthes, +// - threadIdx.z, each z dimensions computes a subchcunk of batches to +// increase occupancy, +// - exactly FFTPerWarp FFTs are processed by one warp +// These 3 subdivisions interact to compute the actual batch size. +template +__device__ __forceinline__ int adjustedBatch() { + if (FFTSize < 32) { + int LogFFTSize = getMSB(); + int LogFFTPerWarp = getMSB(); + return (threadIdx.x >> LogFFTSize) + + (blockIdx.x << LogFFTPerWarp) + + ((threadIdx.z * gridDim.x) << LogFFTPerWarp); + } else { + return blockIdx.x + threadIdx.z * gridDim.x; + } +} + +template +struct FFT1DCoeffs { + enum { + RegisterPerWarp = (FFTSize + WARP_SIZE - 1) / WARP_SIZE + }; + __device__ __forceinline__ Complex& operator[](int i) { + return coeff[i]; + } + __device__ __forceinline__ Complex operator[](int i) const { + return coeff[i]; + } + + Complex coeff[RegisterPerWarp]; +}; + +static const int kNumTwiddles = 256; +__constant__ Complex twiddles[kNumTwiddles]; + +template +struct FFT1DRoots : public FFT1DCoeffs { + // Computes the twiddles for the least amount possible of registers and uses + // trigonometric symmetries to populate the other registers. + // We always compute at least 1 warpful of value using sincos. + // For FFTs <= 32 we are done + // For FFTs >= 32, given the number of registers per warp we know which + // register indices fall at PI/4, PI/2, PI and 2*PI. + // Since we always compute at least 1 warpful of values, we only consider + // exact subdivisions of WARP_SIZE for symmetries. + // For instance: + // - for FFTSize == 64, we have 2 registers corresponding to each half of + // the unit circle. We compute the first register (and not less by + // construction) and then we can use symmetry wrt -PI to fill the other + // register. + // - for FFTSize == 128, we have 4 registers corresponding to each + // quadrant of the unit circle. We compute the first register (and not + // less by construction) and then we can use symmetry wrt -PI/2 and -PI + // to fill the other registers. + // + // This is critical performance-wise and works well atm with unrolling. + // + // Twiddles are more efficiently computed for 1D FFTs and more efficiently + // loaded from constant memory for 2D FFTs. + __device__ __forceinline__ void twiddles1D() { + // Note that we ever only need half the twiddles; see ASCII diagram: + // for FFT-256 we only use w^0 .. w^127 and then recursively only halves. 
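+ // Recall w_N^k = exp(-2*pi*i*k/N), so w_N^(k + N/4) = -i * w_N^k and
+ // w_N^(k + N/2) = -w_N^k; the branches below use exactly these two
+ // symmetries, which is why only a fraction of the roots needs an explicit
+ // sincos.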
+ if (this->RegisterPerWarp >= 4) { +#pragma unroll + for (int index = 0; index < this->RegisterPerWarp / 2; ++index) { + // Can always use adjustedThreadIdxX since blockDim.x == WARP_SIZE + // is enforced + int x = adjustedThreadIdxX() + index * WARP_SIZE; + if (index < ceil((int)this->RegisterPerWarp, 4)) { + // Compute in any case + (*this)[index].sincos(-2.0f * PI * (1.0f / (float)FFTSize) * x); + } else if (index < ceil((int)this->RegisterPerWarp, 2)) { + // Symmetry wrt -PI/2 + (*this)[index] = + (*this)[index - ceil((int)this->RegisterPerWarp, 4)] + .transpose() + .conjugate(); + } else { + // Symmetry wrt -PI + (*this)[index] = -(*this)[this->RegisterPerWarp - index]; + } + } + } else if (this->RegisterPerWarp == 2) { + // Compute in any case, can always use adjustedThreadIdxX since + // blockDim.x == WARP_SIZE is enforced + int x = adjustedThreadIdxX(); + (*this)[0].sincos(-2.0f * PI * (1.0f / (float)FFTSize) * x); + // Symmetry wrt -PI, skip since only need half + } else { + // Compute in any case + // adjustedThreadIdxX() lets us cram multiple < 32 FFTs in + // a warp + int x = adjustedThreadIdxX(); + (*this)[0].sincos(-2.0f * PI * (1.0f / (float)FFTSize) * x); + } + } + + __device__ __forceinline__ void twiddlesFromConstant1D() { +#pragma unroll + for (int index = 0; index < this->RegisterPerWarp / 2; ++index) { + int x = getLaneId() + index * WARP_SIZE; + (*this)[index] = twiddles[x * (kNumTwiddles / FFTSize)]; + } + } + +}; + +template +struct FFT1DBitReversal { + enum { + RegisterPerWarp = (FFTSize + WARP_SIZE - 1) / WARP_SIZE + }; + __device__ __forceinline__ int& operator[](int i) { + return bitReversed[i]; + } + __device__ __forceinline__ int operator[](int i) const { + return bitReversed[i]; + } + + __device__ __forceinline__ void computeBitReversal(const int index) { + int LogFFTSize = cuda::getMSB(); + int x = adjustedThreadIdxX() + index * blockDim.x; + bitReversed[index] = reverse(x, LogFFTSize); + } + + int bitReversed[RegisterPerWarp]; +}; + +// Pure within a warp reversal for FFT sizes <= 32. +// For sizes >= 64 this is trickier since we need a cross-register, +// cross-warp bit reversal. +// Can be done inefficiently with a loop or local memory. +// Q: How can we make sure it will always unroll statically ? +// A: Just use shared memory for the bit reversal portion, it will only +// consume 2 * FFTSize floats per block. +template + __device__ __forceinline__ +void bitReverse1DWarp(FFT1DCoeffs& coeffs, + const FFT1DBitReversal& bits, + const int batch, + const int index) { + assert(coeffs.RegisterPerWarp == 1); + assert(index == 0); + assert(FFTSize <= WARP_SIZE); + + // Only reverse and permute within blockDim.x boundary which allows to cram + // multiple FFTs smaller than 32 into a single warp + int LogFFTPerWarp = cuda::getMSB(); + coeffs[index] = shfl(coeffs[index], + bits[index], + blockDim.x >> LogFFTPerWarp); +} + +// Helper function useful for maintaining the twiddle factor distribution +// invariant. Assuming registers r1 and r2, distributed across warps, +// we write r1[0, ... 31] and r2[0, ... 31]. +// This concatenates r1 | r2 and keeps only the entries from the even warps. +// r1 and r2 both contain these values on exit. +// This is useful for simplifying the distribution of twiddle factors. +// +// Consider the case FFT-128, by construction: +// r1[0, .. 31] == r3[0, .. 31] = [w^0 , .. w^31] +// r2[0, .. 31] == r4[0, .. 31] = [w^32, .. w^63] +// +// After selectEvenWarpDistributed, all registers are equal and we have: +// r1[0, .. 31] == ... 
== r4[0, .. 31] == [w^0, w^2, .. w^62] +// +// This occurs one more time to obtain: +// r1[0, .. 31] == ... == r4[0, .. 31] == [w^0, w^4, .. w^60, 16 x garbage] +// +// The garbage is never read in decimateInFrequency1D32. +// +// Formally: +// r1[k] <- concat(r1, r2) [2k] for k \in [0 .. WARP_SIZE - 1] +// r2 <- r1 +// +__device__ __forceinline__ +void selectEvenWarpDistributed(Complex& r1, Complex& r2) { + // E.g. stating from: + // r1[w^0, w^1, ... w^31] and r2[w^32, w^33, ...w^63] + // + // Set + // r1[w^0 , w^2 , ... w^30 | 16 x garbage] + // r2[16 x garbage | w^32, w^34, ... w^62] + // + // And merge into: + // r1[w^0 , w^2 , ... w^30 | w^32, w^34, ... w^62] + // + // Dark compiler magic: trying to reduce this down to Complex loses 10% + // perf. This seems related to instruction mix, divergence and the compiler + // not able to reorder instructions past divergent points (which is + // reasonable). + r1.re() = shfl(r1.re(), 2 * getLaneId()); + r2.re() = shfl(r2.re(), 2 * getLaneId() - WARP_SIZE); + if (threadIdx.x >= HALF_WARP_SIZE) { + r1.re() = r2.re(); + } + r1.im() = shfl(r1.im(), 2 * getLaneId()); + r2.im() = shfl(r2.im(), 2 * getLaneId() - WARP_SIZE); + if (threadIdx.x >= HALF_WARP_SIZE) { + r1.im() = r2.im(); + } + r2 = r1; +} + +template +__device__ __forceinline__ void load1D(const DeviceTensor& real, + const DeviceTensor& complex, + FFT1DCoeffs& coeffs, + FFT1DBitReversal& bits, + const int batch, + const int index) { + int LogFFTSize = getMSB(); + // adjustedThreadIdxX() lets us cram multiple < 32 FFTs in a warp + int x = adjustedThreadIdxX() + index * blockDim.x; + bits[index] = reverse(x, LogFFTSize); + + // Support zero padding without a need to copy the input data to a larger + // array. + // TODO: center the kernel wrt to zeros. + // TODO: support reflection padding: pass the kernel size to fill with + // reflection and then zero after that to pad till the FFT size. + // TODO: support complex input (just read the imaginary part) + // TODO: try to do something with float4 and shuffles + coeffs[index] = + Complex((x < real.getSize(1)) ? real[batch][x].ldg() : 0.0f, + 0.0f); +} + +template +__device__ __forceinline__ void store1D(DeviceTensor& real, + DeviceTensor& complex, + const FFT1DCoeffs& coeffs, + const int batch, + const int index) { + // adjustedThreadIdxX() lets us cram multiple < 32 FFTs in a warp + int x = adjustedThreadIdxX() + index * blockDim.x; + if (x < complex.getSize(1)) { + // TODO: try to do something with float4 and shuffles + *(complex[batch][x].dataAs()) = coeffs[index]; + } +} + +template +__device__ __forceinline__ +void decimateInFrequency1D32(FFT1DCoeffs& coeffs, + const FFT1DRoots& roots, + const int index) { + + // Cannot be static due to upstream mix of function calls + assert(FFTSize <= WARP_SIZE); + assert(index < coeffs.RegisterPerWarp); + + int LogFFTSize = getMSB(); + +#pragma unroll + for (int logStep = 1; logStep <= LogFFTSize; ++logStep) { + // Illustration for 1-D FFT of size 8, radix-2, decimation in frequency + // Step 1 amongst 2, + // Step 2 amongst 4, + // Step 4 amongst 8, + // ... + + Complex otherCoeff = shfl_xor(coeffs[index], + (FFTSize >> logStep), + (FFTSize >> (logStep - 1))); + + // Illustration for 1-D FFT of size 8, radix-2, decimation in frequency + // Vals {1} U {3} U {5} U {7} amongst 2, + // Vals [2, 3] U [6, 7] amongst 4, + // Vals [4, 7] amongst 8, + // ... + otherCoeff = (threadIdx.x & (FFTSize >> logStep)) ? 
+ otherCoeff - coeffs[index] : coeffs[index] + otherCoeff; + + if (logStep < LogFFTSize) { + // Illustration for 1-D FFT of size 8, radix-2, decimation in frequency + // Twiddles [w^0, [w^0], w^0, [w^0], w^0, [w^0], w^0, [w^0]] amongst 2, + // Twiddles [w^0, w^0, [w^0, w^2], w^0, w^0, [w^0, w^2]] amongst 4, + // Twiddles [w^0, w^0, w^0, w^0, [w^0, w^1, w^2, w^3]] amongst 8, + // ... + int twiddleDee = (!(threadIdx.x & (FFTSize >> logStep))) ? + 0 : ((threadIdx.x & ((FFTSize >> logStep) - 1)) << (logStep - 1)); + Complex otherRoot = shfl(roots[index], twiddleDee); + coeffs[index] = otherCoeff * otherRoot; + } else { + // Last step just does radix-2 + / - which is what otherCoeff contains + coeffs[index] = otherCoeff; + } + } +} + +template +struct TwiddleRebalancer { + static __device__ __forceinline__ + void rebalance(FFT1DRoots&, int); +}; + +template <> struct TwiddleRebalancer<64> { + static __device__ __forceinline__ + void rebalance(FFT1DRoots<64>& roots, int) { + selectEvenWarpDistributed(roots[0], roots[1]); + } +}; + +template <> struct TwiddleRebalancer<128> { + static __device__ __forceinline__ + void rebalance(FFT1DRoots<128>& roots, int logStep) { + if (logStep == 1) { + selectEvenWarpDistributed(roots[0], roots[1]); + selectEvenWarpDistributed(roots[2], roots[3]); + roots[1] = roots[2]; + roots[2] = roots[0]; + } else { + assert(logStep == 2); + selectEvenWarpDistributed(roots[0], roots[1]); + roots[2] = roots[0]; + roots[3] = roots[0]; + } + } +}; + +template <> struct TwiddleRebalancer<256> { + static __device__ __forceinline__ + void rebalance(FFT1DRoots<256>& roots, int logStep) { + if (logStep == 1) { + selectEvenWarpDistributed(roots[0], roots[1]); + selectEvenWarpDistributed(roots[2], roots[3]); + selectEvenWarpDistributed(roots[4], roots[5]); + selectEvenWarpDistributed(roots[6], roots[7]); + roots[1] = roots[2]; + roots[2] = roots[4]; + roots[3] = roots[6]; + + roots[4] = roots[0]; + roots[5] = roots[1]; + roots[6] = roots[2]; + roots[7] = roots[3]; + } else if (logStep == 2) { + assert(logStep == 2); + selectEvenWarpDistributed(roots[0], roots[1]); + selectEvenWarpDistributed(roots[2], roots[3]); + + roots[1] = roots[2]; + + roots[2] = roots[0]; + roots[3] = roots[1]; + + roots[4] = roots[0]; + roots[5] = roots[1]; + roots[6] = roots[0]; + roots[7] = roots[1]; + } else { + assert(logStep == 3); + selectEvenWarpDistributed(roots[0], roots[1]); + + roots[1] = roots[0]; + + roots[2] = roots[0]; + roots[3] = roots[0]; + + roots[4] = roots[0]; + roots[5] = roots[0]; + roots[6] = roots[0]; + roots[7] = roots[0]; + } + } +}; + +// The following ASCII shows the breakdown of a 1-D FFT-256 into +// the size 128 and 64-steps. +// Each 64 step is followed by 2 32-steps. +// A 32 step is the granularity of distributed storage (each warp holding 1 +// value per 32-step). +// At this granularity, communication is exclusively across registers. +// Twiddle factors are continuously readjusted at each step. +// |-------| |-------| +// | Reg0 | | Reg0 | +// | | |-------| +// |-------| | Reg1 | +// | Reg1 | |-------| +// |-------| |-------| w^0 +// | Reg2 | | Reg2 | . +// |-------| |-------| . +// | Reg3 | | Reg3 | . +// |-------| |-------| w^126 (increment 2) +// +// |-------| w^0 |-------| +// | Reg4 | | Reg4 | +// | | |-------| +// |-------| | Reg5 | +// | Reg5 | . |-------| +// |-------| . |-------| w^0 +// | Reg6 | . | Reg6 | . +// |-------| |-------| . +// | Reg7 | | Reg7 | . +// |-------| w^127 (+= 1) |-------| w^126 (increment 2) +// +// E.g. 
for FFTSize = 256, we have 3 logSteps: +// the first with 8 registers: +// registers {{0, 4}, {1, 5}, {2, 6}, {3, 7}} communicate +// the second with 4 registers: +// registers {{0, 2}, {1, 3}, {4, 6}, {5, 7}} communicate +// the third with 2 register +// registers {{0, 1}, {2, 3}, {4, 5}, {6, 7}} communicate +// +// Note that everything is properly aligned modulo 32 and we don't need warp +// shuffles at all. The only exception may be the bit reversal phase which +// is currently implemented fully in shared memory since it would require +// fully unrolled, cross-register twiddles. +// +template +__device__ __forceinline__ +void decimateInFrequency1D(DeviceTensor& real, + DeviceTensor& complex, + FFT1DCoeffs& coeffs, + const int batch) { + // Cannot be static due to upstream mix of function calls + assert(FFTSize >= WARP_SIZE); + assert(blockDim.x == WARP_SIZE); + + int LogFFTSize = getMSB(); + + FFT1DBitReversal bits; +#pragma unroll + for (int i = 0; i < coeffs.RegisterPerWarp; ++i) { + load1D(real, complex, coeffs, bits, batch, i); + } + FFT1DRoots roots; + roots.twiddles1D(); + + assert(coeffs.RegisterPerWarp == 1 << (LogFFTSize - LOG_WARP_SIZE)); + const int kDeltaLog = LogFFTSize - LOG_WARP_SIZE; + { + // Computation is all within the same warp across registers. + // Unlike shuffles, things do not update in parallel so we do have + // WAR (a.k.a false) dependences -> need a swap temporary storage ! + // Make swap registers local to this scope + FFT1DCoeffs swap; +#pragma unroll + for (int logStep = 1; logStep <= kDeltaLog; ++logStep) { + // Always need to process all the registers, this is not a function of + // the logStep but only of the coeffs.RegisterPerWarp. + // The spacing between registers that communicate is however a function + // of logStep. +#pragma unroll + for (int reg = 0; reg < coeffs.RegisterPerWarp; ++reg) { + // By how many registers are we stepping ? + // e.g. LogFFTSize == 8, LOG_WARP_SIZE == 5, logStep == 1 -> + // kDeltaLog == 3, kDeltaStep = 4 + const int kDeltaStep = (1 << (kDeltaLog - logStep)); + assert(kDeltaStep >= 0); + assert(kDeltaStep < coeffs.RegisterPerWarp); + + // If bit kDeltaStep is step then sub else add + int reg2 = (reg & kDeltaStep) ? reg - kDeltaStep : reg + kDeltaStep; + // Sanity check + assert(reg != reg2); + + Complex otherCoeff = coeffs[reg2]; + otherCoeff = (reg > reg2) ? + otherCoeff - coeffs[reg] : coeffs[reg] + otherCoeff; + + // Only second half requires twiddling + if (reg > reg2) { + // Enforce this invariant: + // the register is exactly reg2 and no shuffle necessary until <= 32 + Complex otherRoot = roots[reg2]; + // Here we could write directly to vals and not swap but performance + // is higher writing swap, likely due to same register writing + // across branches and predicated code generated by the compiler. + swap.coeff[reg] = otherCoeff * otherRoot; + } else { + swap.coeff[reg] = otherCoeff; + } + } + + // Recover values from swap +#pragma unroll + for (int reg = 0; reg < coeffs.RegisterPerWarp; ++reg) { + coeffs[reg] = swap.coeff[reg]; + } + + // This piece of code serves the purpose of rebalancing the twiddle + // factors across registers within a warp by merging 2 consecutive + // registers and selecting the odd entries (effectively keeping: + // w^0, w^2 ... w^2*(N/2) out of w^0, w^1, ... w^N). + // Once this is done, we have something like: + // w^0 .. w^62 | garbage | w^64 .. w^128 | garbage + // That needs to be copied into: + // w^0 .. w^62 | w^64 .. w^128 | w^0 .. w^62 | w^64 .. 
w^128 + // + // In the general case, this has a recursive behavior with log-style RAW + // / WAR dependencies. + // It requires full unrolling or perf will die. + // This is what limits the FFT size to 256 atm. + // Cannot be static due to upstream mix of function calls + assert(1 <= coeffs.RegisterPerWarp && coeffs.RegisterPerWarp <= 8); + assert(32 <= FFTSize && FFTSize <= 256); + // TODO: Figure out how to replace the monstruosity within + TwiddleRebalancer::rebalance(roots, logStep); + } + } + + // At this point we reached the FFT32, do them all in sequence +#pragma unroll + for (int i = 0; i < (1 << kDeltaLog); ++i) { + decimateInFrequency1D32(coeffs, roots, i); + } + + { + // Bit reversal through shared memory because double indirection is not + // easily unrolled. + // TODO: see if we can use float4 + // TODO: purely in registers, starting at 256 smem already gnaws at + // occupancy. + // No need to sync, dependences within a single warp + __shared__ Complex buffer[BatchUnroll][FFTSize]; + assert(blockDim.z == BatchUnroll); +#pragma unroll + for (int reg = 0; reg < coeffs.RegisterPerWarp; ++reg) { + int x = getLaneId() + reg * WARP_SIZE; + buffer[threadIdx.z][x] = coeffs[reg]; + } + // No need to sync, dependences within a single warp +#pragma unroll + for (int reg = 0; reg < coeffs.RegisterPerWarp; ++reg) { + coeffs[reg] = buffer[threadIdx.z][bits[reg]]; + } + // No need to sync, dependences within a single warp + +#pragma unroll + for (int reg = 0; reg < coeffs.RegisterPerWarp; ++reg) { + store1D(real, complex, coeffs, batch, reg); + } + } + +} + + +template +__global__ void decimateInFrequency1DKernel(DeviceTensor real, + DeviceTensor complex) { + // Ensure proper usage of the BatchUnroll template parameter which controls + // static shared memory allocation for bit reversals of FFTs >= 64 + // TODO: default template parameter cuda-7 + cuda_static_assert((FFTSize > WARP_SIZE && BatchUnroll >= 1) || + (FFTSize <= WARP_SIZE && BatchUnroll == 1)); + cuda_static_assert(!(FFTPerWarp & (FFTPerWarp - 1))); + cuda_static_assert(FFTPerWarp * FFTSize <= WARP_SIZE || + FFTPerWarp == 1); + assert(FFTPerWarp * FFTSize == blockDim.x || FFTPerWarp == 1); + + int LogFFTSize = getMSB(); + int LogFFTPerWarp = getMSB(); + + // Enforce that the number of FFTs we perform is divisible by the number of + // FFTs per warp, otherwise weird divergence will occur and possibly bugs. + assert(real.getSize(0) % FFTPerWarp == 0); + const int batch = adjustedBatch(); + if (batch >= real.getSize(0)) { + return; + } + + if (FFTSize <= 32) { + FFT1DCoeffs coeffs; + FFT1DBitReversal bits; + load1D(real, complex, coeffs, bits, batch, 0); + FFT1DRoots roots; + roots.twiddles1D(); + decimateInFrequency1D32(coeffs, roots, 0); + bitReverse1DWarp(coeffs, bits, batch, 0); + store1D(real, complex, coeffs, batch, 0); + } else { + FFT1DCoeffs coeffs; + decimateInFrequency1D(real, complex, coeffs, batch); + } +} + +template +FFTParameters::ErrorCode fbfft1D( + DeviceTensor& real, + DeviceTensor& complex) { + // TODO: The limiter for size 256 is the twiddle cross-register shuffle + // implementation that is currently unrolled by hand. + // TODO: Starting 512, the occupancy goes down due to shared memory bit + // reversal. 
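+ // As a concrete illustration of the launch configurations picked below
+ // (batch size chosen arbitrarily): for 1024 FFT-16s,
+ // SELECT_FBFFT_1D_DIF_LE32(16, 4, 2) launches ceil(ceil(1024, 2), 4) == 128
+ // blocks of dim3(16 * 2, 1, 4) threads, i.e. 2 FFTs per warp and 4 warps per
+ // block, so 8 FFTs per block and 128 * 8 == 1024 FFTs in total.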
+ assert(real.getSize(1) <= 256); + assert(BatchDims == 1); + +#define SELECT_FBFFT_1D_DIF_LE32(FFT_SIZE, BATCH_UNROLL, FFTS_PER_WARP) \ + if (real.getSize(1) == FFT_SIZE) { \ + cuda_static_assert(FFT_SIZE <= 32); \ + if (real.getSize(0) % FFTS_PER_WARP == 0) { \ + dim3 blocks(ceil(ceil(real.getSize(0), FFTS_PER_WARP), \ + BATCH_UNROLL)); \ + dim3 threads(real.getSize(1) * FFTS_PER_WARP, 1, BATCH_UNROLL); \ + decimateInFrequency1DKernel \ + <<>>(real, complex); \ + } else { \ + dim3 blocks(ceil(real.getSize(0), BATCH_UNROLL)); \ + dim3 threads(real.getSize(1), 1, BATCH_UNROLL); \ + decimateInFrequency1DKernel<<>>( \ + real, complex); \ + } \ + return FFTParameters::Success; \ + } + +#define SELECT_FBFFT_1D_DIF_GT32(FFT_SIZE, BATCH_UNROLL) \ + if (real.getSize(1) == FFT_SIZE) { \ + cuda_static_assert(FFT_SIZE > 32); \ + dim3 blocks(ceil(real.getSize(0), BATCH_UNROLL)); \ + dim3 threads(32, 1, BATCH_UNROLL); \ + decimateInFrequency1DKernel \ + <<>>(real, complex); \ + return FFTParameters::Success; \ + } + + SELECT_FBFFT_1D_DIF_LE32( 2, 32, 16); + SELECT_FBFFT_1D_DIF_LE32( 4, 16, 8); + SELECT_FBFFT_1D_DIF_LE32( 8, 8, 4); + SELECT_FBFFT_1D_DIF_LE32(16, 4, 2); + SELECT_FBFFT_1D_DIF_LE32(32, 4, 1); + SELECT_FBFFT_1D_DIF_GT32(64, 4); + SELECT_FBFFT_1D_DIF_GT32(128, 4); + SELECT_FBFFT_1D_DIF_GT32(256, 2); + + return FFTParameters::UnsupportedSize; +} + + +template +__device__ __forceinline__ void load2D(const DeviceTensor& real, + const DeviceTensor& complex, + FFT1DCoeffs& coeffs, + const int batch, + const int indexX, + const int indexY) { + int LogFFTSize = getMSB(); + // adjustedThreadIdxX() lets us cram multiple < 32 FFTs in a warp + int x = adjustedThreadIdxX() + indexX * blockDim.x; + // adjustedThreadIdxX() lets us cram multiple < 32 FFTs in a warp + int y = adjustedThreadIdxY() + indexY * blockDim.y; + + // Support zero padding without a need to copy the input data to a larger + // array. + // TODO: center the kernel wrt to zeros. + // TODO: support reflection padding: pass the kernel size to fill with + // reflection and then zero after that to pad till the FFT size. + // TODO: support complex input (just read the imaginary part) + // TODO: try to do something with float4 and shuffles + coeffs[indexX] = + Complex((y < real.getSize(1) && x < real.getSize(2)) ? + real[batch][y][x].ldg() : 0.0f, + 0.0f); +} + +template +__device__ __forceinline__ void store2D(DeviceTensor& real, + DeviceTensor& complex, + const FFT1DCoeffs& coeffs, + const int batch, + const int indexX, + const int indexY) { + // adjustedThreadIdxX() lets us cram multiple < 32 FFTs in a warp + int x = adjustedThreadIdxX() + indexX * blockDim.x; + // adjustedThreadIdxX() lets us cram multiple < 32 FFTs in a warp + int y = adjustedThreadIdxY() + indexY * blockDim.y; + if (y < complex.getSize(1) && x < complex.getSize(2)) { + // TODO: try to do something with float4 and shuffles + *(complex[batch][y][x].dataAs()) = coeffs[indexX]; + } +} + +// Performs cross warp transpose of the data in registers, synchronously for +// each register at a time and takes advantage of Hermitian symmetry. 
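+// (For a real input row of length N the spectrum satisfies X[k] = conj(X[N - k]),
+// indices taken modulo N, so only the first N/2 + 1 outputs per row carry
+// information; that is the symmetry exploited here and in the kernels below.)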
+// +// Invariants are: +// - not synchronized on entry of the loop +// - synchronized at each step of the loop +// - synchronized on exittemplate +template +__device__ __forceinline__ void transpose2DHermitian( + FFT1DCoeffs& coeffsLo, + FFT1DCoeffs& coeffsHi, + Complex(*buffer)[SMemRows][SMemRows / 2 + 1]) { +#pragma unroll + for (int reg = 0; reg < coeffsLo.RegisterPerWarp; ++reg) { + if (threadIdx.x < blockDim.x / 2 + 1) { + buffer[threadIdx.z][threadIdx.y][threadIdx.x] = coeffsLo.coeff[reg]; + buffer[threadIdx.z][threadIdx.y + blockDim.y][threadIdx.x] = + coeffsHi.coeff[reg]; + } + __syncthreads(); + coeffsLo.coeff[reg] = buffer[threadIdx.z][threadIdx.x][threadIdx.y]; + if (threadIdx.y == 0) { + coeffsHi.coeff[reg] = + buffer[threadIdx.z][threadIdx.x][threadIdx.y + blockDim.y]; + } + __syncthreads(); + } +} + +// Performs cross warp transpose of the data in registers, synchronously for +// each register at a time and takes advantage of Hermitian symmetry. +// +// Invariants are: +// - not synchronized on entry of the loop +// - synchronized at each step of the loop +// - synchronized on exittemplate +template +__device__ __forceinline__ void untranspose2DHermitianOutput( + FFT1DCoeffs& coeffsLo, + FFT1DCoeffs& coeffsHi, + Complex(*buffer)[SMemRows / 2 + 1][SMemRows + 1]) { +#pragma unroll + for (int reg = 0; reg < coeffsLo.RegisterPerWarp; ++reg) { + buffer[threadIdx.z][threadIdx.y][threadIdx.x] = coeffsLo.coeff[reg]; + if (threadIdx.y == 0) { + buffer[threadIdx.z][threadIdx.y + blockDim.y][threadIdx.x] = + coeffsHi.coeff[reg]; + } + __syncthreads(); + if (threadIdx.x < blockDim.x / 2 + 1) { + coeffsLo.coeff[reg] = buffer[threadIdx.z][threadIdx.x][threadIdx.y]; + coeffsHi.coeff[reg] = + buffer[threadIdx.z][threadIdx.x][threadIdx.y + blockDim.y]; + } + __syncthreads(); + } +} + +// In the 2-D real to complex case, we can exploit Hermitian symmetry. +// We exploit the symmetry to cut in half the amount of work for sizes >= 32. +// Given a square FFT of size NxN (power of 2), with Hermitian symmetry we +// only need to compute N x (N / 2 + 1) after transposition. +// The N / 2 + 1 factor is problematic because it typically results in sizes +// such as 32 x 17. This is a bad scenario for GPU occupancy. +// Instead, we implement this as 32 x 16 with a Lo and Hi register. +// Every threadIdx.y performs work on the Lo register but only threadIdx.y == +// 0 performs work on the Hi register. +// This results in a much better occupancy and a 30% performance improvement. +template +__global__ void decimateInFrequencyHermitian2D32Kernel( + DeviceTensor real, DeviceTensor complex) { + // Ensure proper usage of the BatchUnroll template parameter which controls + // static shared memory allocation for bit reversals of FFTs >= 64 + // TODO: default template parameter cuda-7 + // cuda_static_assert((FFTSize > WARP_SIZE && BatchUnroll >= 1) || + // (FFTSize <= WARP_SIZE && BatchUnroll == 1)); + + int LogFFTSize = getMSB(); + // Enforce that the number of FFTs we perform is divisible by the number of + // FFTs per warp, otherwise weird divergence will occur and possibly bugs. 
+ const int batch = adjustedBatch(); + if (batch >= real.getSize(0)) { + return; + } + + FFT1DCoeffs coeffs; + __shared__ Complex buffer[BatchUnroll][WARP_SIZE / 2 + 1][WARP_SIZE + 1]; + + cuda_static_assert(FFTSize <= 32); + + FFT1DBitReversal bits; + bits.computeBitReversal(0); + // Twiddles is the same as for 1D but fully data parallel across threadIdx.y + FFT1DRoots roots; + roots.twiddles1D(); + + FFT1DCoeffs coeffsHi; + FFT1DCoeffs coeffsLo; + load2D(real, complex, coeffsLo, batch, 0, 0); + load2D(real, complex, coeffsHi, batch, 0, 1); + decimateInFrequency1D32(coeffsLo, roots, 0); + decimateInFrequency1D32(coeffsHi, roots, 0); + bitReverse1DWarp(coeffsLo, bits, batch, 0); + bitReverse1DWarp(coeffsHi, bits, batch, 0); + + transpose2DHermitian( + coeffsLo, + coeffsHi, + (Complex(*)[WARP_SIZE][WARP_SIZE / 2 + 1])buffer); + + decimateInFrequency1D32(coeffsLo, roots, 0); + // Bit reversal is the same as for 1D but fully data parallel across + // threadIdx.y + bitReverse1DWarp(coeffsLo, bits, batch, 0); + if (threadIdx.y == 0) { + decimateInFrequency1D32(coeffsHi, roots, 0); + // Bit reversal is the same as for 1D but fully data parallel across + // threadIdx.y + bitReverse1DWarp(coeffsHi, bits, batch, 0); + } + + untranspose2DHermitianOutput( + coeffsLo, + coeffsHi, + (Complex(*)[WARP_SIZE / 2 + 1][WARP_SIZE + 1])buffer); + + store2D(real, complex, coeffsLo, batch, 0, 0); + store2D(real, complex, coeffsHi, batch, 0, 1); +} + +// Performs cross warp transpose of the data in registers, synchronously for +// each register at a time. +// +// Invariants are: +// - not synchronized on entry of the loop +// - synchronized at each step of the loop +// - synchronized on exit +template +__device__ __forceinline__ void transpose2D( + FFT1DCoeffs& coeffs, + Complex(*buffer)[SMemRows][SMemRows + 1]) { +#pragma unroll + for (int reg = 0; reg < coeffs.RegisterPerWarp; ++reg) { + buffer[threadIdx.z][threadIdx.y][threadIdx.x] = coeffs[reg]; + __syncthreads(); + coeffs[reg] = buffer[threadIdx.z][threadIdx.x][threadIdx.y]; + __syncthreads(); + } +} + +template +__global__ void decimateInFrequency2D32Kernel(DeviceTensor real, + DeviceTensor complex) { + // Ensure proper usage of the BatchUnroll template parameter which controls + // static shared memory allocation for bit reversals of FFTs >= 64 + // TODO: default template parameter cuda-7 + // cuda_static_assert((FFTSize > WARP_SIZE && BatchUnroll >= 1) || + // (FFTSize <= WARP_SIZE && BatchUnroll == 1)); + cuda_static_assert(FFTSize < 32); + cuda_static_assert(!(FFTPerWarp & (FFTPerWarp - 1))); + cuda_static_assert(FFTPerWarp * FFTSize <= WARP_SIZE || + FFTPerWarp == 1); + assert(FFTPerWarp * FFTSize == blockDim.x || FFTPerWarp == 1); + assert(blockDim.x == blockDim.y); + + int LogFFTSize = getMSB(); + int LogFFTPerWarp = getMSB(); + // Enforce that the number of FFTs we perform is divisible by the number of + // FFTs per warp, otherwise weird divergence will occur and possibly bugs. 
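/*
 * Editorial aside: both 2-D kernels in this file follow the classic
 * row-column decomposition - a 1-D FFT along every row, a transpose, and a
 * second 1-D FFT along the former columns. A hedged sequential sketch of the
 * same idea, where dft1D and transposeInPlace are placeholders for any
 * in-place length-N transform and N x N transpose:
 *
 *   void fft2D(std::complex<float>* a, int N) {
 *     for (int r = 0; r < N; ++r) dft1D(&a[r * N], N);  // FFT along rows
 *     transposeInPlace(a, N);                           // columns become rows
 *     for (int r = 0; r < N; ++r) dft1D(&a[r * N], N);  // FFT along former columns
 *     transposeInPlace(a, N);                           // restore row-major layout
 *   }
 */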
+ assert(real.getSize(0) % FFTPerWarp == 0); + const int batch = adjustedBatch(); + if (batch >= real.getSize(0)) { + return; + } + + __shared__ Complex + buffer[BatchUnroll][FFTSize * FFTPerWarp][FFTSize * FFTPerWarp + 1]; + + cuda_static_assert(FFTSize <= 32); + FFT1DCoeffs coeffs; + FFT1DBitReversal bits; + bits.computeBitReversal(0); + load2D(real, complex, coeffs, batch, 0, 0); + // Twiddles is the same as for 1D but fully data parallel across threadIdx.y + FFT1DRoots roots; + roots.twiddles1D(); + decimateInFrequency1D32(coeffs, roots, 0); + bitReverse1DWarp(coeffs, bits, batch, 0); + transpose2D(coeffs, buffer); + decimateInFrequency1D32(coeffs, roots, 0); + // Bit reversal is the same as for 1D but fully data parallel across + // threadIdx.y + bits.computeBitReversal(0); + bitReverse1DWarp(coeffs, bits, batch, 0); + transpose2D(coeffs, buffer); + store2D(real, complex, coeffs, batch, 0, 0); +} + +template +FFTParameters::ErrorCode fbfft2D(DeviceTensor& real, + DeviceTensor& complex) { + // TODO: The limiter for size 256 is the twiddle cross-register shuffle + // implementation that is currently unrolled by hand. + // TODO: Starting 512, the occupancy goes down due to shared memory bit + // reversal. + if (real.getSize(1) != real.getSize(2) || real.getSize(1) > 256) { + return FFTParameters::UnsupportedSize; + } + if (BatchDims != 1) { + return FFTParameters::UnsupportedDimension; + } + +#define SELECT_FBFFT_2D_DIF_MULTIPLE(FFT_SIZE, FFTS_PER_WARP, BATCH_UNROLL) \ + if (real.getSize(1) == FFT_SIZE) { \ + if (FFT_SIZE != real.getSize(2)) { \ + return FFTParameters::UnsupportedSize; \ + } \ + if (real.getSize(0) % FFTS_PER_WARP == 0) { \ + dim3 blocks(ceil(real.getSize(0), BATCH_UNROLL * FFTS_PER_WARP)); \ + dim3 threads(real.getSize(1) * FFTS_PER_WARP, \ + real.getSize(2) * FFTS_PER_WARP, \ + BATCH_UNROLL); \ + decimateInFrequency2D32Kernel \ + <<>>(real, complex); \ + } else { \ + dim3 blocks(ceil(real.getSize(0), BATCH_UNROLL)); \ + dim3 threads(real.getSize(1), \ + real.getSize(2) / 2, \ + BATCH_UNROLL); \ + decimateInFrequencyHermitian2D32Kernel \ + <<>>(real, complex); \ + } \ + return FFTParameters::Success; \ + } + +#define SELECT_FBFFT_2D_DIF_SINGLE(FFT_SIZE, FFTS_PER_WARP, BATCH_UNROLL) \ + if (real.getSize(1) == FFT_SIZE) { \ + if (FFT_SIZE != real.getSize(2)) { \ + return FFTParameters::UnsupportedSize; \ + } \ + dim3 blocks(ceil(real.getSize(0), BATCH_UNROLL)); \ + dim3 threads(real.getSize(1), \ + real.getSize(2) / 2, \ + BATCH_UNROLL); \ + decimateInFrequencyHermitian2D32Kernel \ + <<>>(real, complex); \ + return FFTParameters::Success; \ + } + + // TODO: limit this size with cuda-7. 
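/*
 * Editorial aside: the ceil(a, b) used when sizing the grids in these macros
 * is integer ceiling division - the smallest block count that covers every
 * batch. Assuming an implementation along the lines of
 *
 *   __host__ __device__ inline int ceil(int a, int b) { return (a + b - 1) / b; }
 *
 * a batch of real.getSize(0) == 10 FFTs with BATCH_UNROLL == 4 launches
 * ceil(10, 4) == 3 blocks, and the trailing partially-filled block is masked
 * by the `if (batch >= real.getSize(0)) return;` guard inside the kernels.
 */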
+ // This really calls for a tight loop with constexpr + SELECT_FBFFT_2D_DIF_MULTIPLE(2, 8, 1); + SELECT_FBFFT_2D_DIF_MULTIPLE(4, 4, 4); + SELECT_FBFFT_2D_DIF_MULTIPLE(8, 1, 4); + SELECT_FBFFT_2D_DIF_SINGLE(16, 1, 1); + SELECT_FBFFT_2D_DIF_SINGLE(32, 1, 1); + + return FFTParameters::UnsupportedSize; +} + +} // detail + +template +FFTParameters::ErrorCode fbfft(THCudaTensor* r, + THCudaTensor* c, + FFTParameters params) { + assert(params.fbFFT()); + if (Batch == 1 && Dim == 2) { + DeviceTensor real = torchToDeviceTensorCast(r); + DeviceTensor complex = torchToDeviceTensorCast(c); + return detail::fbfft1D(real, complex); + } else if (Batch == 1 && Dim == 3) { + DeviceTensor real = torchToDeviceTensorCast(r); + DeviceTensor complex = torchToDeviceTensorCast(c); + return detail::fbfft2D(real, complex); + } else { + return FFTParameters::UnsupportedDimension; + } +} + +template FFTParameters::ErrorCode +fbfft<1, 2>(THCudaTensor* real, THCudaTensor* complex, FFTParameters params); + +template FFTParameters::ErrorCode +fbfft<1, 3>(THCudaTensor* real, THCudaTensor* complex, FFTParameters params); + +} } } // namespace diff --git a/cuda_code/FeatureTypes.cu b/cuda_code/FeatureTypes.cu new file mode 100644 index 0000000000000000000000000000000000000000..f992fcea74f79a83063da44b12b053613eb18405 --- /dev/null +++ b/cuda_code/FeatureTypes.cu @@ -0,0 +1,269 @@ +/* + * + * Created on: May 17, 2017 + * Author: Mario Lüder + * + */ + + +#include "FeatureTypes.cuh" + +#include +#include +#include "utilities.cuh" + +#define FEATURE_DATA_MAX_SIZE 31 * 1024 +__constant__ uint8_t g_FeatureData[FEATURE_DATA_MAX_SIZE]; + +FeatureTypes::~FeatureTypes() +{ + if (data != NULL) + { + delete [] data; + data = NULL; + } + + if (gpuData != NULL) + { + CUDA_CHECK_RETURN(cudaFree(gpuData)); + gpuData = NULL; + } +} + +void FeatureTypes::generateClassifier(const double scale, const uint32_t windowWidth, + const uint32_t windowHeight, bool calcOnlySize, uint32_t & memsize) +{ + memsize = 0; + assert(data || calcOnlySize); + + std::vector featureTypeOffsets; + + // count of feature types + memsize += sizeof(uint32_t); + + // calculate the size first + uint32_t countFeatureTypes = this->size(); + + if (!calcOnlySize) + *(uint32_t*) (data) = countFeatureTypes; + + const uint32_t sizeFeatureTypeOffsets = countFeatureTypes + * sizeof(uint32_t); + memsize += sizeFeatureTypeOffsets; + + for (uint32_t featureTypeIdx = 0; featureTypeIdx < countFeatureTypes; + ++featureTypeIdx) + { +#ifdef DEBUG + // std::cout << "Debug: featureTypeIdx:" << featureTypeIdx << std::endl; +#endif + // store the offset of each feature type + featureTypeOffsets.push_back(memsize); + + // header size for [ feature width, feature height, feature count ] + const uint32_t headerSize = 3 * sizeof(uint32_t); + + const FeatureType & featureType = at(featureTypeIdx); + const uint32_t featureHeightPx = featureType.mFeatureHeight + * featureType.mRect.height; + const uint32_t featureWidthPx = featureType.mFeatureWidth + * featureType.mRect.width; + const int32_t windowHeightMax = windowHeight - featureHeightPx; + const int32_t windowWidthMax = windowWidth - featureWidthPx; + + // assure the window size is big enough + assert(windowHeightMax > 0 && windowWidthMax > 0); + + // calculate how many feature can be generated in x direction + // + // the feature is scaled by + // scaledWidth = featureWidthPx * scale^n + // this is under the condition + // scaledWidth <= windowWidthMax + // n := times scale + // + // n is determined by + // n = 
log(windowWidthMax/featureWidthPx) / log(scale) + // + // the same is done with height + // + const uint32_t nWidthScales = (scale > 1.0) ? ((uint32_t) (log( + windowWidthMax / featureWidthPx) / log(scale))) : 1.0; + const uint32_t nHeightScales = (scale > 1.0) ? ((uint32_t) (log( + windowHeightMax / featureHeightPx) / log(scale))) : 1.0; + + const uint32_t countRectangles = featureType.mFeatureHeight + * featureType.mFeatureWidth; + const uint32_t countClassifier = nWidthScales * nHeightScales; + + // make sure that this value is the same as the number of feature types + assert(countRectangles == featureType.mTypes.size()); + + // the size of width, height and type - see FeatureRectangle + const uint32_t rectangleValuesSize = 3 * sizeof(int32_t); + + uint32_t offset = memsize; + + if (!calcOnlySize) + { + // write header + *(uint32_t*) (data + offset) = featureType.mFeatureWidth; + offset += sizeof(uint32_t); + *(uint32_t*) (data + offset) = featureType.mFeatureHeight; + offset += sizeof(uint32_t); + + // this data will change as we do not store all classifiers (because of rounding) + //*(uint32_t*)(data + offset) = countClassifier; offset += sizeof(uint32_t); + // instead, we remember the offset of the count variable + uint32_t offsetCountClassifier = offset; + offset += sizeof(uint32_t); + uint32_t countStoredClassifier = 0; + + Scale previousRowScale(0, 0); + + for (uint32_t heightScaleIdx = 0; heightScaleIdx < nHeightScales; + ++heightScaleIdx) + { + Scale previousColumnScale = previousRowScale; + + // scale the rectangle + const uint32_t scaledRectangleHeight = + (uint32_t) (featureType.mRect.height + * pow(scale, heightScaleIdx)); + + if (scaledRectangleHeight == previousRowScale.y) + { + continue; + } + + previousRowScale = Scale(0, scaledRectangleHeight); + + for (uint32_t widthScaleIdx = 0; widthScaleIdx < nWidthScales; + ++widthScaleIdx) + { + const uint32_t scaledRectangleWidth = + (uint32_t) (featureType.mRect.width + * pow(scale, widthScaleIdx)); + + if (previousColumnScale + != Scale(widthScaleIdx, scaledRectangleHeight)) + { + // store the scales for each each rectangle + for (uint32_t rectangleIdx = 0; + rectangleIdx < countRectangles; ++rectangleIdx) + { + *(uint32_t*) (data + offset) = scaledRectangleWidth; + offset += sizeof(uint32_t); + *(uint32_t*) (data + offset) = scaledRectangleHeight; + offset += sizeof(uint32_t); + *(int32_t*) (data + offset) = + featureType.mTypes[rectangleIdx]; + offset += sizeof(int32_t); + } + + previousColumnScale = Scale(widthScaleIdx, + scaledRectangleHeight); + countStoredClassifier++; + } + } + } +#ifdef DEBUG +// std::cout << "Debug: Count Stored Classifier:" +// << countStoredClassifier << std::endl; +#endif + + // store the classifier count + *(uint32_t*) (data + offsetCountClassifier) = countStoredClassifier; + memsize += headerSize + + countRectangles * countStoredClassifier + * rectangleValuesSize; + } + else + { + memsize += headerSize + + countRectangles * countClassifier * rectangleValuesSize; + } + + if (!calcOnlySize) + { + assert(offset == memsize); + } + } + + if (!calcOnlySize) + { + // store the offsets of the feature types + for (uint32_t featureTypeOffsetIdx = 0; + featureTypeOffsetIdx < featureTypeOffsets.size(); + ++featureTypeOffsetIdx) + { + *(uint32_t*) (data + sizeof(uint32_t) + + featureTypeOffsetIdx * sizeof(uint32_t)) = + featureTypeOffsets[featureTypeOffsetIdx]; + } + } +} + +void FeatureTypes::generateClassifier(const double scale, const uint32_t windowWidth, + const uint32_t windowHeight, bool copyToConst) +{ 
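/*
 * Editorial aside: this overload uses the common two-pass serialization
 * pattern - the first call with calcOnlySize == true only measures how many
 * bytes the flattened classifier layout needs, the host buffer is then
 * allocated, and a second call with calcOnlySize == false writes the actual
 * data. A hedged sketch of the call sequence (names as in this file):
 *
 *   uint32_t bytes = 0;
 *   generateClassifier(scale, windowWidth, windowHeight, true,  bytes); // measure
 *   data = new uint8_t[bytes];
 *   generateClassifier(scale, windowWidth, windowHeight, false, bytes); // fill
 */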
+ if (data != NULL) + { + delete [] data; + data = NULL; + } + + if (gpuData != NULL) + { + CUDA_CHECK_RETURN(cudaFree(gpuData)); + gpuData = NULL; + } + + dataSize = 0; + + // calc first size + uint32_t maxSize = 0; + generateClassifier(scale, windowWidth, windowHeight, true, maxSize); +#ifdef DEBUG + std::cout << "Debug: generateClassifier estimated size:" << maxSize + << std::endl; +#endif + data = new uint8_t[maxSize]; + assert(data); + uint32_t usedSize = 0; + generateClassifier(scale, windowWidth, windowHeight, false, usedSize); +#ifdef DEBUG + std::cout << "Debug: generateClassifier used size:" << usedSize + << std::endl; +#endif + CUDA_CHECK_RETURN( + cudaMalloc((void ** )&gpuData, usedSize)); + + CUDA_CHECK_RETURN( + cudaMemcpy(gpuData, data, usedSize, cudaMemcpyHostToDevice)); + + dataSize = usedSize; + + if (copyToConst) + { + copyToConstantMemory(); + } +} + +void FeatureTypes::copyToConstantMemory() +{ + if (gpuData) + { + assert(dataSize <= FEATURE_DATA_MAX_SIZE); + CUDA_CHECK_RETURN(cudaMemcpyToSymbol(g_FeatureData, gpuData, dataSize)); + CUDA_CHECK_RETURN(cudaFree(gpuData)); + gpuData = NULL; + } +} + +uint8_t * FeatureTypes::getConstantFeatureData() +{ + uint8_t * constFeatureData = NULL; + CUDA_CHECK_RETURN(cudaGetSymbolAddress((void **)(&constFeatureData), g_FeatureData)); + return constFeatureData; +} diff --git a/cuda_code/FlatIndex_13.cu b/cuda_code/FlatIndex_13.cu new file mode 100644 index 0000000000000000000000000000000000000000..eb58505ebc94345165257811d541bf4c2257e5c4 --- /dev/null +++ b/cuda_code/FlatIndex_13.cu @@ -0,0 +1,282 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace faiss { namespace gpu { + +FlatIndex::FlatIndex(GpuResources* res, + int dim, + bool useFloat16, + bool storeTransposed, + MemorySpace space) : + resources_(res), + dim_(dim), + useFloat16_(useFloat16), + storeTransposed_(storeTransposed), + space_(space), + num_(0), + rawData_(space) { +} + +bool +FlatIndex::getUseFloat16() const { + return useFloat16_; +} + +/// Returns the number of vectors we contain +int FlatIndex::getSize() const { + if (useFloat16_) { + return vectorsHalf_.getSize(0); + } else { + return vectors_.getSize(0); + } +} + +int FlatIndex::getDim() const { + if (useFloat16_) { + return vectorsHalf_.getSize(1); + } else { + return vectors_.getSize(1); + } +} + +void +FlatIndex::reserve(size_t numVecs, cudaStream_t stream) { + if (useFloat16_) { + rawData_.reserve(numVecs * dim_ * sizeof(half), stream); + } else { + rawData_.reserve(numVecs * dim_ * sizeof(float), stream); + } +} + +Tensor& +FlatIndex::getVectorsFloat32Ref() { + // Should not call this unless we are in float32 mode + FAISS_ASSERT(!useFloat16_); + + return vectors_; +} + +Tensor& +FlatIndex::getVectorsFloat16Ref() { + // Should not call this unless we are in float16 mode + FAISS_ASSERT(useFloat16_); + + return vectorsHalf_; +} + +DeviceTensor +FlatIndex::getVectorsFloat32Copy(cudaStream_t stream) { + return getVectorsFloat32Copy(0, num_, stream); +} + +DeviceTensor +FlatIndex::getVectorsFloat32Copy(int from, int num, cudaStream_t stream) { + DeviceTensor vecFloat32({num, dim_}, space_); + + if (useFloat16_) { + auto halfNarrow = vectorsHalf_.narrowOutermost(from, num); + convertTensor(stream, halfNarrow, vecFloat32); + } else { + vectors_.copyTo(vecFloat32, stream); + } 
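/*
 * Editorial aside: when useFloat16_ is set the index keeps its vectors in
 * half precision (reserve() above sizes rawData_ with sizeof(half) rather
 * than sizeof(float)), so a float32 view cannot be handed out directly and
 * this method returns a freshly converted copy instead. The temporary costs
 * num * dim_ * sizeof(float) bytes of device memory - e.g. 1,000,000 vectors
 * of dimension 128 are roughly 512 MB - so callers presumably slice with the
 * from/num overload when only a range of vectors is needed.
 */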
+ + return vecFloat32; +} + +void +FlatIndex::query(Tensor& input, + int k, + faiss::MetricType metric, + float metricArg, + Tensor& outDistances, + Tensor& outIndices, + bool exactDistance) { + auto stream = resources_->getDefaultStreamCurrentDevice(); + auto& mem = resources_->getMemoryManagerCurrentDevice(); + + if (useFloat16_) { + // We need to convert the input to float16 for comparison to ourselves + auto inputHalf = + convertTensor(resources_, stream, input); + + query(inputHalf, k, metric, metricArg, + outDistances, outIndices, exactDistance); + } else { + bfKnnOnDevice(resources_, + getCurrentDevice(), + stream, + storeTransposed_ ? vectorsTransposed_ : vectors_, + !storeTransposed_, // is vectors row major? + &norms_, + input, + true, // input is row major + k, + metric, + metricArg, + outDistances, + outIndices, + !exactDistance); + } +} + +void +FlatIndex::query(Tensor& input, + int k, + faiss::MetricType metric, + float metricArg, + Tensor& outDistances, + Tensor& outIndices, + bool exactDistance) { + FAISS_ASSERT(useFloat16_); + + bfKnnOnDevice(resources_, + getCurrentDevice(), + resources_->getDefaultStreamCurrentDevice(), + storeTransposed_ ? vectorsHalfTransposed_ : vectorsHalf_, + !storeTransposed_, // is vectors row major? + &norms_, + input, + true, // input is row major + k, + metric, + metricArg, + outDistances, + outIndices, + !exactDistance); +} + +void +FlatIndex::computeResidual(Tensor& vecs, + Tensor& listIds, + Tensor& residuals) { + if (useFloat16_) { + runCalcResidual(vecs, + getVectorsFloat16Ref(), + listIds, + residuals, + resources_->getDefaultStreamCurrentDevice()); + } else { + runCalcResidual(vecs, + getVectorsFloat32Ref(), + listIds, + residuals, + resources_->getDefaultStreamCurrentDevice()); + } +} + +void +FlatIndex::reconstruct(Tensor& listIds, + Tensor& vecs) { + if (useFloat16_) { + runReconstruct(listIds, + getVectorsFloat16Ref(), + vecs, + resources_->getDefaultStreamCurrentDevice()); + } else { + runReconstruct(listIds, + getVectorsFloat32Ref(), + vecs, + resources_->getDefaultStreamCurrentDevice()); + } +} + +void +FlatIndex::reconstruct(Tensor& listIds, + Tensor& vecs) { + auto listIds1 = listIds.downcastOuter<1>(); + auto vecs2 = vecs.downcastOuter<2>(); + + reconstruct(listIds1, vecs2); +} + +void +FlatIndex::add(const float* data, int numVecs, cudaStream_t stream) { + if (numVecs == 0) { + return; + } + + if (useFloat16_) { + // Make sure that `data` is on our device; we'll run the + // conversion on our device + auto devData = toDevice(resources_, + getCurrentDevice(), + (float*) data, + stream, + {numVecs, dim_}); + + auto devDataHalf = + convertTensor(resources_, stream, devData); + + rawData_.append((char*) devDataHalf.data(), + devDataHalf.getSizeInBytes(), + stream, + true /* reserve exactly */); + } else { + rawData_.append((char*) data, + (size_t) dim_ * numVecs * sizeof(float), + stream, + true /* reserve exactly */); + } + + num_ += numVecs; + + if (useFloat16_) { + DeviceTensor vectorsHalf( + (half*) rawData_.data(), {(int) num_, dim_}, space_); + vectorsHalf_ = std::move(vectorsHalf); + } else { + DeviceTensor vectors( + (float*) rawData_.data(), {(int) num_, dim_}, space_); + vectors_ = std::move(vectors); + } + + if (storeTransposed_) { + if (useFloat16_) { + vectorsHalfTransposed_ = + std::move(DeviceTensor({dim_, (int) num_}, space_)); + runTransposeAny(vectorsHalf_, 0, 1, vectorsHalfTransposed_, stream); + } else { + vectorsTransposed_ = + std::move(DeviceTensor({dim_, (int) num_}, space_)); + runTransposeAny(vectors_, 0, 
1, vectorsTransposed_, stream); + } + } + + // Precompute L2 norms of our database + if (useFloat16_) { + DeviceTensor norms({(int) num_}, space_); + runL2Norm(vectorsHalf_, true, norms, true, stream); + norms_ = std::move(norms); + } else { + DeviceTensor norms({(int) num_}, space_); + runL2Norm(vectors_, true, norms, true, stream); + norms_ = std::move(norms); + } +} + +void +FlatIndex::reset() { + rawData_.clear(); + vectors_ = std::move(DeviceTensor()); + vectorsTransposed_ = std::move(DeviceTensor()); + vectorsHalf_ = std::move(DeviceTensor()); + vectorsHalfTransposed_ = std::move(DeviceTensor()); + norms_ = std::move(DeviceTensor()); + num_ = 0; +} + +} } diff --git a/cuda_code/ForceAlignmentCriterion_1.cu b/cuda_code/ForceAlignmentCriterion_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..c866a5cdb2f9be9fbf442bcafeb1f82ff912ce7b --- /dev/null +++ b/cuda_code/ForceAlignmentCriterion_1.cu @@ -0,0 +1,383 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "libraries/criterion/cuda/ForceAlignmentCriterion.cuh" + +#include +#include + +#include "libraries/common/CudaUtils.cuh" +#include "libraries/common/Workspace.h" +#include "libraries/criterion/cuda/CriterionUtils.cuh" + +namespace { + +template +struct WorkspacePtrs { + explicit WorkspacePtrs(void* workspace, int B, int T, int N, int L) { + w2l::Workspace<> ws(workspace); + ws.request(&scale, B); + ws.request(&alpha, B, T, L); + ws.request(&alphaGrad, B, T, L); + ws.request(&transBatchGrad, B, N, N); + ws.request(&transBuf1, B, L); + ws.request(&transBuf2, B, L); + ws.request(&transBufGrad1, B, L); + ws.request(&transBufGrad2, B, L); + requiredSize = ws.requiredSize(); + } + + Float* scale; + double* alpha; + double* alphaGrad; + Float* transBatchGrad; + Float* transBuf1; + Float* transBuf2; + Float* transBufGrad1; + Float* transBufGrad2; + size_t requiredSize; +}; + +/* + * B thread blocks + * L threads/block (ideally) + */ +template +__global__ void forwardKernel( + int T, + int N, + int _L, + const Float* _input, + const int* _target, + const int* targetSize, + const Float* trans, + Float* _loss, + WorkspacePtrs ws) { + int b = blockIdx.x; + auto* alpha = &ws.alpha[b * T * _L]; + auto* input = &_input[b * T * N]; + auto* target = &_target[b * _L]; + auto* transBuf1 = &ws.transBuf1[b * _L]; + auto* transBuf2 = &ws.transBuf2[b * _L]; + int L = targetSize[b]; + + for (int i = threadIdx.x; i < L; i += blockDim.x) { + alpha[i] = i == 0 ? input[target[0]] : 0; + transBuf1[i] = trans[target[i] * N + target[i]]; + transBuf2[i] = i > 0 ? trans[target[i] * N + target[i - 1]] : 0; + } + + for (int t = 1; t < T; ++t) { + auto* inputCur = &input[t * N]; + auto* alphaPrev = &alpha[(t - 1) * L]; + auto* alphaCur = &alpha[t * L]; + + int high = t < L ? t : L; + int low = T - t < L ? L - (T - t) : 1; + + __syncthreads(); + + if (threadIdx.x == 0) { + if (T - t >= L) { + alphaCur[0] = alphaPrev[0] + transBuf1[0] + inputCur[target[0]]; + } + } else if (threadIdx.x == 1) { + if (t < L) { + alphaCur[high] = + alphaPrev[high - 1] + transBuf2[high] + inputCur[target[high]]; + } + } + + for (int i = low + threadIdx.x; i < high; i += blockDim.x) { + double s1 = alphaPrev[i] + transBuf1[i]; + double s2 = alphaPrev[i - 1] + transBuf2[i]; + // lse = logSumExp(s1, s2) + double lse = + s1 < s2 ? 
s2 + log(1 + exp(s1 - s2)) : s1 + log(1 + exp(s2 - s1)); + alphaCur[i] = lse + inputCur[target[i]]; + } + } + + __syncthreads(); + + if (threadIdx.x == 0) { + _loss[b] = alpha[T * L - 1] * ws.scale[b]; + } +} + +/* + * B thread blocks + * L threads/block (ideally) + */ +template +__global__ void backwardKernel( + int T, + int N, + int _L, + const int* _target, + const int* targetSize, + const Float* grad, + Float* _inputGrad, + Float* transGrad, + WorkspacePtrs ws) { + int b = blockIdx.x; + auto* alpha = &ws.alpha[b * T * _L]; + auto* alphaGrad = &ws.alphaGrad[b * T * _L]; + auto* inputGrad = &_inputGrad[b * T * N]; + auto* target = &_target[b * _L]; + auto* transBatchGrad = &ws.transBatchGrad[b * N * N]; + auto* transBuf1 = &ws.transBuf1[b * _L]; + auto* transBuf2 = &ws.transBuf2[b * _L]; + auto* transBufGrad1 = &ws.transBufGrad1[b * _L]; + auto* transBufGrad2 = &ws.transBufGrad2[b * _L]; + int L = targetSize[b]; + + if (threadIdx.x == 0) { + alphaGrad[T * L - 1] = 1; + } + + for (int t = T - 1; t > 0; --t) { + auto* inputCurGrad = &inputGrad[t * N]; + auto* alphaPrev = &alpha[(t - 1) * L]; + auto* alphaCurGrad = &alphaGrad[t * L]; + auto* alphaPrevGrad = &alphaGrad[(t - 1) * L]; + + int high = t < L ? t : L; + int low = T - t < L ? L - (T - t) : 1; + + int high1 = t < L ? t + 1 : L; + int low1 = T - t < L ? L - (T - t) : 0; + + __syncthreads(); + + for (int i = low1 + threadIdx.x; i < high1; i += blockDim.x) { + atomicAdd(&inputCurGrad[target[i]], alphaCurGrad[i]); + } + + if (threadIdx.x == 0) { + if (T - t >= L) { + atomicAdd(&alphaPrevGrad[0], alphaCurGrad[0]); + transBufGrad1[0] += alphaCurGrad[0]; + } + } else if (threadIdx.x == 1) { + if (t < L) { + atomicAdd(&alphaPrevGrad[high - 1], alphaCurGrad[high]); + transBufGrad2[high] += alphaCurGrad[high]; + } + } + + for (int i = low + threadIdx.x; i < high; i += blockDim.x) { + double s1 = alphaPrev[i] + transBuf1[i]; + double s2 = alphaPrev[i - 1] + transBuf2[i]; + // d1, d2 = dLogSumExp(s1, s2) + double d1, d2; + if (s1 < s2) { + d2 = 1 / (1 + exp(s1 - s2)); + d1 = 1 - d2; + } else { + d1 = 1 / (1 + exp(s2 - s1)); + d2 = 1 - d1; + } + atomicAdd(&alphaPrevGrad[i], d1 * alphaCurGrad[i]); + atomicAdd(&alphaPrevGrad[i - 1], d2 * alphaCurGrad[i]); + transBufGrad1[i] += d1 * alphaCurGrad[i]; + transBufGrad2[i] += d2 * alphaCurGrad[i]; + } + } + + __syncthreads(); + + __shared__ Float gradScale; + + if (threadIdx.x == 0) { + inputGrad[target[0]] += alphaGrad[0]; + gradScale = grad[b] * ws.scale[b]; + } + + for (int i = threadIdx.x; i < L; i += blockDim.x) { + atomicAdd(&transBatchGrad[target[i] * N + target[i]], transBufGrad1[i]); + if (i > 0) { + atomicAdd( + &transBatchGrad[target[i] * N + target[i - 1]], transBufGrad2[i]); + } + } + + __syncthreads(); + + for (int i = threadIdx.x; i < T * N; i += blockDim.x) { + inputGrad[i] *= gradScale; + } + + for (int i = threadIdx.x; i < N * N; i += blockDim.x) { + atomicAdd(&transGrad[i], gradScale * transBatchGrad[i]); + } +} + +template +__global__ void viterbiPathKernel( + int T, + int N, + int _L, + const Float* _input, + const int* _target, + const int* targetSize, + const Float* trans, + int* bestPaths, + WorkspacePtrs ws) { + int b = blockIdx.x; + auto* alpha = &ws.alpha[b * T * _L]; + auto* input = &_input[b * T * N]; + auto* target = &_target[b * _L]; + auto* transBuf1 = &ws.transBuf1[b * _L]; + auto* transBuf2 = &ws.transBuf2[b * _L]; + int L = targetSize[b]; + + for (int i = threadIdx.x; i < L * T; i += blockDim.x) { + alpha[i] = + i == 0 ? 
input[target[0]] : -std::numeric_limits::infinity(); + } + + for (int i = threadIdx.x; i < L; i += blockDim.x) { + transBuf1[i] = trans[target[i] * N + target[i]]; + transBuf2[i] = i > 0 ? trans[target[i] * N + target[i - 1]] : 0; + } + if (L > T || L == 0) { + return; + } + + for (int t = 1; t < T; ++t) { + auto* inputCur = &input[t * N]; + auto* alphaPrev = &alpha[(t - 1) * L]; + auto* alphaCur = &alpha[t * L]; + + int high = t < L ? t : L; + int low = T - t < L ? L - (T - t) : 1; + + // Ensure that all previous alphas have been computed + __syncthreads(); + + if (threadIdx.x == 0) { + if (T - t >= L) { + alphaCur[0] = alphaPrev[0] + transBuf1[0] + inputCur[target[0]]; + } + } else if (threadIdx.x == 1) { + if (t < L) { + alphaCur[high] = + alphaPrev[high - 1] + transBuf2[high] + inputCur[target[high]]; + } + } + + for (int i = low + threadIdx.x; i < high; i += blockDim.x) { + double s1 = alphaPrev[i] + transBuf1[i]; + double s2 = alphaPrev[i - 1] + transBuf2[i]; + alphaCur[i] = inputCur[target[i]] + max(s1, s2); + } + } + // Ensure all threads are finished and alphas have been computed before + // computing backward path + __syncthreads(); + if (threadIdx.x == 0) { + int ltrIdx = L - 1; + for (int t = T - 1; t > 0; t--) { + bestPaths[t + (b * T)] = target[ltrIdx]; + auto* alphaPrev = &alpha[(t - 1) * L]; + if (ltrIdx > 0) { + double s1 = alphaPrev[ltrIdx] + transBuf1[ltrIdx]; + double s2 = alphaPrev[ltrIdx - 1] + transBuf2[ltrIdx]; + if (s2 > s1) { + ltrIdx--; + } + } + } + bestPaths[b * T] = target[ltrIdx]; + } +} + +} // namespace + +namespace w2l { +namespace cuda { + +template +size_t +ForceAlignmentCriterion::getWorkspaceSize(int B, int T, int N, int L) { + return WorkspacePtrs(nullptr, B, T, N, L).requiredSize; +} + +template +void ForceAlignmentCriterion::forward( + int B, + int T, + int N, + int L, + CriterionScaleMode scaleMode, + const Float* input, + const int* target, + const int* targetSize, + const Float* trans, + Float* loss, + void* workspace, + cudaStream_t stream) { + int blockSize = std::min(256, (L + 31) / 32 * 32); + WorkspacePtrs ws(workspace, B, T, N, L); + CriterionUtils::computeScale( + B, T, N, scaleMode, targetSize, ws.scale, stream); + forwardKernel<<>>( + T, N, L, input, target, targetSize, trans, loss, ws); +} + +template +void ForceAlignmentCriterion::backward( + int B, + int T, + int N, + int L, + const int* target, + const int* targetSize, + const Float* grad, + Float* inputGrad, + Float* transGrad, + void* workspace, + cudaStream_t stream) { + int blockSize = std::min(256, (L + 31) / 32 * 32); + WorkspacePtrs ws(workspace, B, T, N, L); + setZero(inputGrad, B * T * N, stream); + setZero(transGrad, N * N, stream); + setZero(ws.alphaGrad, B * T * L, stream); + setZero(ws.transBatchGrad, B * N * N, stream); + setZero(ws.transBufGrad1, B * L, stream); + setZero(ws.transBufGrad2, B * L, stream); + backwardKernel<<>>( + T, N, L, target, targetSize, grad, inputGrad, transGrad, ws); +} + +template +void ForceAlignmentCriterion::viterbiPath( + int B, + int T, + int N, + int L, + const Float* input, + const int* target, + const int* targetSize, + const Float* trans, + int* bestPaths, + void* workspace, + cudaStream_t stream) { + int blockSize = std::min(256, (L + 31) / 32 * 32); + WorkspacePtrs ws(workspace, B, T, N, L); + setZero(ws.alpha, B * T * L, stream); + viterbiPathKernel<<>>( + T, N, L, input, target, targetSize, trans, bestPaths, ws); +} + +template struct ForceAlignmentCriterion; +template struct ForceAlignmentCriterion; + +} // namespace cuda +} // 
namespace w2l diff --git a/cuda_code/FractionalMaxPool2d_4.cu b/cuda_code/FractionalMaxPool2d_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..70a20c0ca267fba312dc215742dd80128ef24998 --- /dev/null +++ b/cuda_code/FractionalMaxPool2d_4.cu @@ -0,0 +1,355 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace at { +namespace native { + +using namespace at::cuda::detail; + +namespace { + +template +__device__ inline int get_interval(accscalar_t sample, + int index, int inputSize, int outputSize, int poolSize) { + accscalar_t alpha = static_cast(inputSize - poolSize) / + static_cast(outputSize - 1); + if (index == outputSize - 1) { + return inputSize - poolSize; + } else { + return static_cast((index + sample) * alpha) - + static_cast(sample * alpha); + } +} + +template +__global__ void fractional_max_pool2d_out_cuda_frame( + PackedTensorAccessor output, + PackedTensorAccessor indices, + PackedTensorAccessor input, + PackedTensorAccessor samples, + int poolSizeH, int poolSizeW) { + + using accscalar_t = at::acc_type; + + int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; + int plane = blockIdx.y; + int batch = blockIdx.z; + + // Each thread generates a specific output point + if (ourOutputPoint < output.size(2) * output.size(3)) { + int outputW = ourOutputPoint % output.size(3); + int outputH = ourOutputPoint / output.size(3); + + int poolW = get_interval( + static_cast(samples[batch][plane][0]), + outputW, input.size(3), output.size(3), poolSizeW); + int poolH = get_interval( + static_cast(samples[batch][plane][1]), + outputH, input.size(2), output.size(2), poolSizeH); + + scalar_t maxVal = at::numeric_limits::lower_bound(); + int maxIndex = poolH * input.size(3) + poolW; + + for (int h = poolH; h < poolH + poolSizeH; ++h) { + if (poolSizeW < 2 || poolSizeW > 7) { + for (int w = poolW; w < poolW + poolSizeW; ++w) { + scalar_t val = input[batch][plane][h][w]; + // for consistency with THNN, favor the first max + if (val > maxVal || THCNumerics::isnan(val)) { + maxIndex = h * input.size(3) + w; + maxVal = val; + } + } + } else { + for (int i = 0; i < poolSizeW; ++i) { + int w = i + poolW; + scalar_t val = input[batch][plane][h][w]; + // for consistency with THNN, favor the first max + if (val > maxVal || THCNumerics::isnan(val)) { + maxIndex = h * input.size(3) + w; + maxVal = val; + } + } + } + } + + indices[batch][plane][outputH][outputW] = maxIndex; + output[batch][plane][outputH][outputW] = maxVal; + } +} + +template +__global__ void fractional_max_pool2d_backward_out_cuda_frame( + PackedTensorAccessor gradInput, + PackedTensorAccessor gradOutput, + PackedTensorAccessor indices) { + // Output (h, w) point that this thread is responsible for + int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; + int plane = blockIdx.y; + int batch = blockIdx.z; + + // Each thread generates a specific output point + if (ourOutputPoint < gradOutput.size(2) * + gradOutput.size(3)) { + int outputW = ourOutputPoint % gradOutput.size(3); + int outputH = ourOutputPoint / gradOutput.size(3); + + int index = indices[batch][plane][outputH][outputW]; + assert(index >= 0); + int inputW = index % gradInput.size(3); + int inputH = index / gradInput.size(3); + assert(inputH < gradInput.size(2)); + + gpuAtomicAdd( + &gradInput[batch][plane][inputH][inputW], + gradOutput[batch][plane][outputH][outputW] + ); + } +} + +void fractional_max_pool2d_out_cuda_template( + Tensor 
& output, + Tensor& indices, + const Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const Tensor& randomSamples) { + int planeDim = 0; + int dimh = 1; + int dimw = 2; + int numBatch = 1; + + int ndims = input.ndimension(); + TORCH_CHECK(input.numel() > 0, + "fractional_max_pool2d(): expected input to have non-empty ", + "spatial dimensions."); + + TORCH_CHECK((ndims == 3 || ndims == 4), + "non-empty 3D or 4D (batch mode) tensor expected for input"); + + if (ndims == 4) { + numBatch = input.size(0); + planeDim++; + dimh++; + dimw++; + } + + /* sizes */ + int numPlanes = input.size(planeDim); + int inputH = input.size(dimh); + int inputW = input.size(dimw); + + int outputH = output_size[0]; + int outputW = output_size[1]; + int poolSizeH = pool_size[0]; + int poolSizeW = pool_size[1]; + + TORCH_CHECK(outputH + poolSizeH - 1 <= inputH, + "fractional_max_pool2d(): pool_size height ", poolSizeH, + " too large relative to input height ", inputH); + TORCH_CHECK(outputW + poolSizeW - 1 <= inputW, + "pool_size width ", poolSizeW, + " too large relative to input width ", inputW); + + if (ndims == 3) { + /* resize output */ + output.resize_({numPlanes, outputH, outputW}); + /* indices will contain the locations for each output point */ + indices.resize_({numPlanes, outputH, outputW}); + } else { + output.resize_({numBatch, numPlanes, outputH, outputW}); + indices.resize_({numBatch, numPlanes, outputH, outputW}); + } + + auto output_ = output; + auto input_ = input; + auto indices_ = indices; + + if(ndims == 3) { + output_ = output_.reshape({1, numPlanes, outputH, outputW}); + indices_ = indices_.reshape({1, numPlanes, outputH, outputW}); + input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)}); + } + + // block is limited to 4 warps + // grid handles overflow per each plane + int outputPlaneSize = output_.size(2) * + output_.size(3); + dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) + input_.size(1), + input_.size(0)); + dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), + "fractional_max_pool2d_out_cuda_frame", + [&] { + auto devInput = input_.packed_accessor(); + auto devOutput = output_.packed_accessor(); + auto devIndices = indices_.packed_accessor(); + auto devSamples = randomSamples.packed_accessor(); + fractional_max_pool2d_out_cuda_frame + <<>>( + devOutput, devIndices, devInput, devSamples, + poolSizeH, poolSizeW); + } + ); + AT_CUDA_CHECK(cudaGetLastError()); +} + +void fractional_max_pool2d_backward_out_cuda_template( + Tensor& gradInput, + const Tensor& gradOutput, + const Tensor& input, + IntArrayRef pool_size /* unused */, + IntArrayRef output_size, + const Tensor& indices) +{ + int dimh = 1; + int dimw = 2; + + int ndims = input.ndimension(); + if (ndims == 4) { + dimh++; + dimw++; + } + + /* sizes */ + int inputH = input.size(dimh); + int inputW = input.size(dimw); + + int outputH = output_size[0]; + int outputW = output_size[1]; + + TORCH_CHECK(outputH == gradOutput.size(dimh), + "fractional_max_pool2d(): gradOutput height unexpected"); + TORCH_CHECK(outputW == gradOutput.size(dimw), + "fractional_max_pool2d(): gradOutput width unexpected"); + + /* resize */ + gradInput.resize_as_(input); + gradInput.zero_(); + + auto gradInput_ = gradInput; + auto gradOutput_ = gradOutput; + auto indices_ = indices; + + if(ndims == 3) { + gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW}); + gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW}); + indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW}); + } + + /* backprop */ + // block is limited to 4 warps + // grid handles overflow per each plane + int outputPlaneSize = gradOutput_.size(2) * + gradOutput_.size(3); + dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) + gradInput_.size(1), + gradInput_.size(0)); + dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); + + auto devIndices = indices.packed_accessor(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), + "fractional_max_pool2d_backward_out_cuda_frame", + [&] { + auto devGradInput = gradInput_.packed_accessor(); + auto devGradOutput = gradOutput_.packed_accessor(); + fractional_max_pool2d_backward_out_cuda_frame + <<>>( + devGradInput, devGradOutput, devIndices); + } + ); + AT_CUDA_CHECK(cudaGetLastError()); +} + +}// namespace + +std::tuple fractional_max_pool2d_out_cuda( + at::Tensor& output, + at::Tensor& indices, + const at::Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const at::Tensor& randomSamples) +{ + fractional_max_pool2d_out_cuda_template( + output, + indices, + input, + pool_size, + output_size, + randomSamples); + return std::tuple(output, indices); +} + +std::tuple fractional_max_pool2d_cuda( + const at::Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const at::Tensor& randomSamples) +{ + Tensor output = at::empty({0}, input.options()); + Tensor indices = at::empty({0}, input.options().dtype(kLong)); + fractional_max_pool2d_out_cuda_template( + output, + indices, + input, + pool_size, + output_size, + randomSamples); + return std::tuple(output, indices); +} + +Tensor& fractional_max_pool2d_backward_out_cuda( + at::Tensor& gradInput, + const at::Tensor& gradOutput_, + const at::Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const at::Tensor& indices) +{ + fractional_max_pool2d_backward_out_cuda_template( + gradInput, + gradOutput_, + input, + pool_size, + output_size, + indices); + return gradInput; +} + +Tensor fractional_max_pool2d_backward_cuda( + const at::Tensor& gradOutput_, + const at::Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const at::Tensor& indices) +{ + Tensor gradInput = at::empty({0}, input.options()); + fractional_max_pool2d_backward_out_cuda_template( + gradInput, + gradOutput_, + input, + pool_size, + output_size, + indices); + return gradInput; +} + +}// at::native +}// at diff --git a/cuda_code/FractionalMaxPool2d_6.cu b/cuda_code/FractionalMaxPool2d_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..46ea4eadf1febe9d6a7348ce5b36d0343709ab52 --- /dev/null +++ b/cuda_code/FractionalMaxPool2d_6.cu @@ -0,0 +1,271 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#include +#endif + +#include +#include +#include + +namespace at { +namespace native { + +using namespace at::cuda::detail; + +namespace { + +template +__device__ inline int get_interval(accscalar_t sample, + int index, int inputSize, int outputSize, int poolSize) { + accscalar_t alpha = static_cast(inputSize - poolSize) / + static_cast(outputSize - 1); + if (index == outputSize - 1) { + return inputSize - poolSize; + } else { + return static_cast((index + sample) * alpha) - + static_cast(sample * alpha); + } +} + +template +__global__ void fractional_max_pool2d_out_cuda_frame( + PackedTensorAccessor output, + PackedTensorAccessor indices, + PackedTensorAccessor input, + PackedTensorAccessor samples, + int poolSizeH, int poolSizeW) { + + using accscalar_t = at::acc_type; + + int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; + int plane = blockIdx.y; + int batch = blockIdx.z; + + // Each thread generates a specific output point + if (ourOutputPoint < output.size(2) * 
output.size(3)) { + int outputW = ourOutputPoint % output.size(3); + int outputH = ourOutputPoint / output.size(3); + + int poolW = get_interval( + static_cast(samples[batch][plane][0]), + outputW, input.size(3), output.size(3), poolSizeW); + int poolH = get_interval( + static_cast(samples[batch][plane][1]), + outputH, input.size(2), output.size(2), poolSizeH); + + scalar_t maxVal = at::numeric_limits::lower_bound(); + int maxIndex = poolH * input.size(3) + poolW; + + for (int h = poolH; h < poolH + poolSizeH; ++h) { + if (poolSizeW < 2 || poolSizeW > 7) { + for (int w = poolW; w < poolW + poolSizeW; ++w) { + scalar_t val = input[batch][plane][h][w]; + // for consistency with THNN, favor the first max + if (val > maxVal || at::_isnan(val)) { + maxIndex = h * input.size(3) + w; + maxVal = val; + } + } + } else { + for (int i = 0; i < poolSizeW; ++i) { + int w = i + poolW; + scalar_t val = input[batch][plane][h][w]; + // for consistency with THNN, favor the first max + if (val > maxVal || at::_isnan(val)) { + maxIndex = h * input.size(3) + w; + maxVal = val; + } + } + } + } + + indices[batch][plane][outputH][outputW] = maxIndex; + output[batch][plane][outputH][outputW] = maxVal; + } +} + +template +__global__ void fractional_max_pool2d_backward_out_cuda_frame( + PackedTensorAccessor gradInput, + PackedTensorAccessor gradOutput, + PackedTensorAccessor indices) { + // Output (h, w) point that this thread is responsible for + int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; + int plane = blockIdx.y; + int batch = blockIdx.z; + + // Each thread generates a specific output point + if (ourOutputPoint < gradOutput.size(2) * + gradOutput.size(3)) { + int outputW = ourOutputPoint % gradOutput.size(3); + int outputH = ourOutputPoint / gradOutput.size(3); + + int index = indices[batch][plane][outputH][outputW]; + assert(index >= 0); + int inputW = index % gradInput.size(3); + int inputH = index / gradInput.size(3); + assert(inputH < gradInput.size(2)); + + gpuAtomicAddNoReturn( + &gradInput[batch][plane][inputH][inputW], + gradOutput[batch][plane][outputH][outputW] + ); + } +} + +} // anonymous namespace + +TORCH_IMPL_FUNC(fractional_max_pool2d_out_cuda) ( + const Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const Tensor& randomSamples, + const Tensor& output, + const Tensor& indices +) { + int planeDim = 0; + int dimh = 1; + int dimw = 2; + + int ndims = input.ndimension(); + + if (ndims == 4) { + planeDim++; + dimh++; + dimw++; + } + + /* sizes */ + int numPlanes = input.size(planeDim); + + int outputH = output_size[0]; + int outputW = output_size[1]; + int poolSizeH = pool_size[0]; + int poolSizeW = pool_size[1]; + + auto output_ = output; + auto input_ = input; + auto indices_ = indices; + + if(ndims == 3) { + output_ = output_.reshape({1, numPlanes, outputH, outputW}); + indices_ = indices_.reshape({1, numPlanes, outputH, outputW}); + input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)}); + } + + if (output_.numel() == 0) { + return; + } + + // block is limited to 4 warps + // grid handles overflow per each plane + int outputPlaneSize = output_.size(2) * + output_.size(3); + dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) + input_.size(1), + input_.size(0)); + dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), + "fractional_max_pool2d_out_cuda_frame", + [&] { + auto devInput = input_.packed_accessor(); + auto devOutput = output_.packed_accessor(); + auto devIndices = indices_.packed_accessor(); + auto devSamples = randomSamples.packed_accessor(); + fractional_max_pool2d_out_cuda_frame + <<>>( + devOutput, devIndices, devInput, devSamples, + poolSizeH, poolSizeW); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + ); +} + +TORCH_IMPL_FUNC(fractional_max_pool2d_backward_cuda)( + const Tensor& gradOutput, + const Tensor& input, + IntArrayRef pool_size /* unused */, + IntArrayRef output_size, + const Tensor& indices, + const Tensor& gradInput) +{ + + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda"); + + int dimh = 1; + int dimw = 2; + + int ndims = input.ndimension(); + if (ndims == 4) { + dimh++; + dimw++; + } + + /* sizes */ + int inputH = input.size(dimh); + int inputW = input.size(dimw); + + int outputH = output_size[0]; + int outputW = output_size[1]; + + if (gradInput.numel() == 0) { + return; + } + + gradInput.zero_(); + + auto gradInput_ = gradInput; + auto gradOutput_ = gradOutput; + auto indices_ = indices; + + if(ndims == 3) { + gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW}); + gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW}); + indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW}); + } + + /* backprop */ + // block is limited to 4 warps + // grid handles overflow per each plane + int outputPlaneSize = gradOutput_.size(2) * + gradOutput_.size(3); + dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) + gradInput_.size(1), + gradInput_.size(0)); + dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); + + auto devIndices = indices_.packed_accessor(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(), + "fractional_max_pool2d_backward_out_cuda_frame", + [&] { + auto devGradInput = gradInput_.packed_accessor(); + auto devGradOutput = gradOutput_.packed_accessor(); + fractional_max_pool2d_backward_out_cuda_frame + <<>>( + devGradInput, devGradOutput, devIndices); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + ); +} + +}// at::native +}// at diff --git a/cuda_code/FractionalMaxPool3d_8.cu b/cuda_code/FractionalMaxPool3d_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..c44b49c004d4eeb654ff4bc9290512f8d97761ac --- /dev/null +++ b/cuda_code/FractionalMaxPool3d_8.cu @@ -0,0 +1,415 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace at { +namespace native { + +using namespace at::cuda::detail; + +namespace { + +template +__device__ inline int64_t get_intervals( + accscalar_t sample, + int64_t index, + int64_t inputSize, + int64_t outputSize, + int64_t poolSize) { + accscalar_t alpha = static_cast(inputSize - poolSize) / + static_cast(outputSize - 1); + if (index == outputSize - 1) { + return inputSize - poolSize; + } else { + return static_cast((index + sample) * alpha) - \ + static_cast(sample * alpha); + } + } + +template +__global__ void fractional_max_pool3d_out_frame( + PackedTensorAccessor input, + PackedTensorAccessor output, + PackedTensorAccessor indices, + PackedTensorAccessor samples, + int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) { + using accscalar_t = at::acc_type; + // Output (t, h, w) point that this thread is responsible for + int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; + int64_t plane = blockIdx.y; + int64_t batch = blockIdx.z; + // Each thread generates a specific output point + if (ourOutputPoint < output.size(2) * output.size(3) * + output.size(4)){ + int64_t outputT = ourOutputPoint / (output.size(3) * + output.size(4)); + int64_t outputH = (ourOutputPoint / output.size(4)) % + output.size(3); + int64_t outputW = ourOutputPoint % output.size(4); + + int64_t poolT = get_intervals( + static_cast(samples[batch][plane][0]), + outputT, input.size(2), output.size(2), poolSizeT); + int64_t poolH = get_intervals( + static_cast(samples[batch][plane][1]), + outputH, input.size(3), output.size(3), poolSizeH); + int64_t poolW = get_intervals( + static_cast(samples[batch][plane][2]), + outputW, input.size(4), output.size(4), poolSizeW); + + scalar_t maxVal = at::numeric_limits::lowest(); + int64_t maxIndex = -1; + + for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) { + for (int64_t h = poolH; h < poolH + poolSizeH; ++h) { + if(poolSizeW < 2 || poolSizeW > 7) { + for (int64_t w = poolW; w < poolW + poolSizeW; ++w) { + scalar_t val = input[batch][plane][t][h][w]; + // for consistency with THNN, favor the first max + if (val > maxVal) { + maxIndex = t * input.size(3) * + input.size(4) + h * input.size(4) + w; + maxVal = val; + } + } + } else { + for (int64_t i = 0; i < poolSizeW; ++i) { + int64_t w = i + poolW; + scalar_t val = input[batch][plane][t][h][w]; + // for consistency with THNN, favor the first max + if (val > maxVal) { + maxIndex = t * input.size(3) * input.size(4) + + h * input.size(4) + w; + maxVal = val; + } + } + } + } + } + + assert(maxVal != at::numeric_limits::lowest()); + assert(maxIndex != -1); + + indices[batch][plane][outputT][outputH][outputW] = maxIndex; + 
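/*
 * Editorial aside: maxIndex flattens the argmax position over the input
 * (t, h, w) volume as t * H * W + h * W + w with H = input.size(3) and
 * W = input.size(4). The backward kernel below undoes this with the matching
 * modulo/division steps, e.g. for H = 4, W = 5 an index of 27 decodes to
 * w = 27 % 5 = 2, h = (27 / 5) % 4 = 1, t = 27 / 20 = 1.
 */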
output[batch][plane][outputT][outputH][outputW] = maxVal; + } + } + +template +__global__ void fractional_max_pool3d_backward_out_frame( + PackedTensorAccessor gradInput, + PackedTensorAccessor gradOutput, + PackedTensorAccessor indices) { + // Output (h, w) point that this thread is responsible for + int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; + int64_t plane = blockIdx.y; + int64_t batch = blockIdx.z; + + // Each thread generates a specific output point + if (ourOutputPoint < gradOutput.size(2) * + gradOutput.size(3) * gradOutput.size(4)) { + int64_t outputW = ourOutputPoint % gradOutput.size(4); + int64_t outputH = (ourOutputPoint / gradOutput.size(4)) % + gradOutput.size(3); + int64_t outputT = ourOutputPoint / (gradOutput.size(3) * + gradOutput.size(4)); + + int64_t index = indices[batch][plane][outputT][outputH][outputW]; + assert(index >= 0); + int64_t inputW = index % gradInput.size(4); + int64_t inputH = (index / gradInput.size(4)) % + gradInput.size(3); + int64_t inputT = index / (gradInput.size(3) * + gradInput.size(4)); + assert(inputT < gradInput.size(2)); + + atomicAdd( + &gradInput[batch][plane][inputT][inputH][inputW], + gradOutput[batch][plane][outputT][outputH][outputW] + ); + } + } + +void fractional_max_pool3d_out_cuda_template( + Tensor& output, + Tensor& indices, + const Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const Tensor& randomSamples) { + int64_t planeDim = 0; + int64_t dimt = 1; + int64_t dimh = 2; + int64_t dimw = 3; + int64_t numBatch = 1; + + int64_t outputT = output_size[0]; + int64_t outputH = output_size[1]; + int64_t outputW = output_size[2]; + int64_t poolSizeT = pool_size[0]; + int64_t poolSizeH = pool_size[1]; + int64_t poolSizeW = pool_size[2]; + + int64_t ndims = input.ndimension(); + TORCH_CHECK( + input.numel() != 0 && (ndims == 4 || ndims == 5), + "fractional_max_pool3d_out_cuda_template(): ", + "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", + ndims); + + if (ndims == 5) { + numBatch = input.size(0); + planeDim++; + dimt++; + dimh++; + dimw++; + } + + /* sizes */ + int64_t numPlanes = input.size(planeDim); + int64_t inputT = input.size(dimt); + int64_t inputH = input.size(dimh); + int64_t inputW = input.size(dimw); + + TORCH_CHECK( + outputT + poolSizeT - 1 < inputT, + "fractional_max_pool3d_out_cuda_template(): ", + "pool time (", poolSizeT, ") too large relative to input time (", + inputT, ")"); + TORCH_CHECK( + outputH + poolSizeH - 1 < inputH, + "fractional_max_pool3d_out_cuda_template(): ", + "pool height (", poolSizeH, ") too large relative to input height (", + inputH, ")"); + TORCH_CHECK( + outputW + poolSizeW - 1 < inputW, + "fractional_max_pool3d_out_cuda_template(): ", + "pool width (", poolSizeW, ") too large relative to input width (", + inputW, ")"); + + if (ndims == 4) { + /* resize output */ + output.resize_({numPlanes, outputT, outputH, outputW}); + /* indices will contain the locations for each output point */ + indices.resize_({numPlanes, outputT, outputH, outputW}); + } else { + /* resize output */ + output.resize_({numBatch, numPlanes, outputT, outputH, outputW}); + /* indices will contain the locations for each output point */ + indices.resize_({numBatch, numPlanes, outputT, outputH, outputW}); + } + + auto output_ = output; + auto indices_ = indices; + auto input_ = input; + if(ndims == 4) { + output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW}); + indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW}); + input_ = 
input_.reshape({1, numPlanes, inputT, inputH, inputW}); + } + + // block is limited to 4 warps + // grid handles overflow per each plane + int64_t outputPlaneSize = output_.size(2) * + output_.size(3) * output_.size(4); + dim3 grid( + (outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) + input_.size(1), + input_.size(0)); + dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), + "fractional_max_pool3d_out_frame", + [&]{ + fractional_max_pool3d_out_frame + <<>>( + input_.packed_accessor(), + output_.packed_accessor(), + indices_.packed_accessor(), + randomSamples.packed_accessor(), + poolSizeT, poolSizeH, poolSizeW + ); + } + ); + TORCH_CHECK(cudaGetLastError() == cudaSuccess, + "fractional_max_pool2d_out_cuda_template failed with error code ", + cudaGetLastError()); + } + +void fractional_max_pool3d_backward_out_cuda_template( + Tensor& gradInput, + const Tensor& gradOutput, + const Tensor& input, + IntArrayRef pool_size /* unused */, + IntArrayRef output_size, + const Tensor& indices) { + int64_t dimt = 1; + int64_t dimh = 2; + int64_t dimw = 3; + + int64_t outputT = output_size[0]; + int64_t outputH = output_size[1]; + int64_t outputW = output_size[2]; + + int64_t ndims = input.ndimension(); + if (ndims == 5) { + dimt++; + dimh++; + dimw++; + } + + /* sizes */ + int64_t inputT = input.size(dimt); + int64_t inputH = input.size(dimh); + int64_t inputW = input.size(dimw); + + TORCH_CHECK( + outputT == gradOutput.size(dimt), + "fractional_max_pool3d_backward_out_cuda_template(): ", + "gradOutput time unexpected" + ); + TORCH_CHECK( + outputH == gradOutput.size(dimh), + "fractional_max_pool3d_backward_out_cuda_template(): ", + "gradOutput height unexpected" + ); + TORCH_CHECK( + outputW == gradOutput.size(dimw), + "fractional_max_pool3d_backward_out_cuda_template(): ", + "gradOutput width unexpected" + ); + + /* resize */ + gradInput.resize_as_(input); + gradInput.zero_(); + + auto gradInput_ = gradInput; + auto gradOutput_ = gradOutput; + auto indices_ = indices; + + if(ndims == 4) { + gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT, + inputH, inputW}); + gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT, + outputH, outputW}); + indices_ = indices_.reshape({1, indices.size(0), outputT, outputH, + outputW}); + } + + /* backprop */ + // block is limited to 4 warps + // grid handles overflow per each plane + int64_t outputPlaneSize = gradOutput_.size(2) * + gradOutput_.size(3) * gradOutput_.size(4); + dim3 grid( + (outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) + gradInput_.size(1), + gradInput_.size(0)); + dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + gradOutput.scalar_type(), + "fractional_max_pool3d_backward_out_frame", + [&] { + fractional_max_pool3d_backward_out_frame + <<>>( + gradInput_.packed_accessor(), + gradOutput_.packed_accessor(), + indices_.packed_accessor() + ); + } + ); + TORCH_CHECK(cudaGetLastError() == cudaSuccess, + "fractional_max_pool2d_out_cuda_template failed with error code ", + cudaGetLastError()); + } + +}// namespace + +std::tuple fractional_max_pool3d_out_cuda( + at::Tensor& output, + at::Tensor& indices, + const at::Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const at::Tensor& randomSamples) { + fractional_max_pool3d_out_cuda_template( + output, + indices, + input, + pool_size, + output_size, + randomSamples + ); + return std::tuple(output, indices); + } + +std::tuple fractional_max_pool3d_cuda( + const at::Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const at::Tensor& randomSamples) { + Tensor output = at::empty({0}, input.options()); + Tensor indices = at::empty({0}, input.options().dtype(kLong)); + fractional_max_pool3d_out_cuda_template( + output, + indices, + input, + pool_size, + output_size, + randomSamples + ); + return std::tuple(output, indices); + } + +Tensor& fractional_max_pool3d_backward_out_cuda( + at::Tensor& gradInput, + const at::Tensor& gradOutput_, + const at::Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const at::Tensor& indices) { + fractional_max_pool3d_backward_out_cuda_template( + gradInput, + gradOutput_, + input, + pool_size, + output_size, + indices + ); + return gradInput; + } + +Tensor fractional_max_pool3d_backward_cuda( + const at::Tensor& gradOutput, + const at::Tensor& input, + IntArrayRef pool_size, + IntArrayRef output_size, + const at::Tensor& indices) { + Tensor gradInput = at::empty({0}, input.options()); + fractional_max_pool3d_backward_out_cuda_template( + gradInput, + gradOutput, + input, + pool_size, + output_size, + indices + ); + return gradInput; + } + +}// native +}// at diff --git a/cuda_code/FullyConnectedLeakyIntegrateAndFireFixedBroadcastKernel.cu b/cuda_code/FullyConnectedLeakyIntegrateAndFireFixedBroadcastKernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..b8ce775cf09181378dbe66264d339cffd471a70c --- /dev/null +++ b/cuda_code/FullyConnectedLeakyIntegrateAndFireFixedBroadcastKernel.cu @@ -0,0 +1,279 @@ +#ifndef __FULLY_CONNECTED_LEAKY_INTEGRATE_AND_FIRE_FIXED_BROADCAST_KERNEL__ +#define __FULLY_CONNECTED_LEAKY_INTEGRATE_AND_FIRE_FIXED_BROADCAST_KERNEL__ +#include "FullyConnectedLeakyIntegrateAndFireKernel.cu" +#include "FullyConnectedInputOutputKernel.cu" +#include "FullyConnectedSpikePropagationKernel.cu" +#include "FullyConnectedLeakyIntegrateAndFireEligibilityGradientKernel.cu" +#include "FullyConnectedLeakyReadoutGradientKernel.cu" +/** + * Parallel FULLY_CONNECTED Kernel for a fully connected Network of neurons + * SpikePropagation version + */ +namespace SNN { + + namespace Kernels { + + namespace GPU { + + __device__ void fullyConnectedLeakyIntegrateAndFireFixedBroadcastKernel( + + /* the number of input neurons */ + unsigned numInputs, + + /* the number of hidden neurons */ + unsigned numHidden, + + /* the number of output neurons */ + unsigned numOutputs, + + /* the number of simmulation time steps */ + unsigned numSimulationTimesteps, + + /* the simulation timestep length */ + FloatType timeStepLength, + + /* neuron spike threshold */ + FloatType spikeThreshold, + + /* neuron 
refactory period */ + FloatType refactoryPeriod, + + /* the hidden voltage decay factor */ + FloatType hiddenDecayFactor, + + /* the readout voltage decay factor */ + FloatType readoutDecayFactor, + + /* the target firing rate */ + FloatType targetFiringRate, + + /* the firing rate gradient scalling factor */ + FloatType firingRateScallingFactor, + + /* the derivative dumping factor */ + FloatType derivativeDumpingFactor, + + /* the input neuron spikes over one simulation run */ + FloatType *inputSpikesOverTime, + + /* the hidden neurons firing rates */ + FloatType *firingRates, + + /* the hidden neurons number of spikes */ + FloatType *numSpikes, + + /* the synaptic input weights */ + FloatType *inputWeights, + + /* the synaptic input weights */ + FloatType *hiddenWeights, + + /* the synaptic input weights */ + FloatType *outputWeights, + + /* the feedback weights */ + FloatType *feedbackWeights, + + /* the network targets fore one simulation run */ + FloatType *targetsOverTime, + + /* the fixed braodcast gradients for input synapses */ + FloatType *inputFixedBroadcastGradients, + + /* the firing rate gradients for input synapses */ + FloatType *inputFiringRateGradients, + + /* the fixed braodcast gradients for hidden synapses */ + FloatType *hiddenFixedBroadcastGradients, + + /* the firing rate gradients for hidden synapses */ + FloatType *hiddenFiringRateGradients, + + /* the leaky readout gradients */ + FloatType *leakyReadoutGradients, + + /* the networks squared summed error */ + FloatType *networkError, + + /***** content managed by kernel ******/ + + /* the filtered eligibility traces */ + FloatType *filteredEligibilityTraces, + + /* the filtered hidden spikes */ + FloatType *filteredSpikes, + + /* hidden derivatives */ + FloatType *derivatives, + + /* input current for hidden and output neurons */ + FloatType *I, + + /* hidden and readout voltage */ + FloatType *v, + + /* hidden spikes */ + FloatType *hiddenSpikes, + + /* time since last spike for hidden neurons */ + FloatType *timeStepsSinceLastSpike + + ) { + cudaAssert(numHidden == blockDim.x); + + inputSpikesOverTime += blockIdx.x * numInputs * numSimulationTimesteps; + firingRates += blockIdx.x * numHidden; + numSpikes += blockIdx.x * numHidden; + targetsOverTime += blockIdx.x * numOutputs * numSimulationTimesteps; + inputFixedBroadcastGradients += blockIdx.x * numInputs * numHidden; + inputFiringRateGradients += blockIdx.x * numInputs * numHidden; + hiddenFixedBroadcastGradients += blockIdx.x * numHidden * numHidden; + hiddenFiringRateGradients += blockIdx.x * numHidden * numHidden; + leakyReadoutGradients += blockIdx.x * numHidden * numOutputs; + networkError += blockIdx.x; + filteredEligibilityTraces += blockIdx.x * (numInputs * numHidden + numHidden * numHidden); + filteredSpikes += blockIdx.x * (numInputs + numHidden); + derivatives += blockIdx.x * numHidden; + I += blockIdx.x * (numHidden + numOutputs); + v += blockIdx.x * (numHidden + numOutputs); + hiddenSpikes += blockIdx.x * numHidden; + timeStepsSinceLastSpike += blockIdx.x * numHidden; + + /* clear values */ + const int h = threadIdx.x; + filteredSpikes[h] = 0; + numSpikes[h] = 0; + hiddenSpikes[h] = 0; + I[h] = 0; + v[h] = 0; + timeStepsSinceLastSpike[h] = 2 * refactoryPeriod; + + if (h == 0) + networkError[0] = 0; + + if (h < numInputs) { + filteredSpikes[h + numHidden] = 0; + } + if (h < numOutputs) { + I[h + numHidden] = 0; + v[h + numHidden] = 0; + } + + for (unsigned i = 0; i < numInputs; i++) { + inputFiringRateGradients[i * numHidden + h] = 0; + 
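+ // Layout note: the input-synapse buffers cleared in this loop are
+ // [numInputs x numHidden] matrices stored row-major and indexed as
+ // i * numHidden + h, so each thread h zeroes exactly one column per matrix
+ // (e.g. with numInputs = 3 and numHidden = 4, thread h = 2 clears
+ // elements 2, 6 and 10 of each buffer).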
inputFixedBroadcastGradients[i * numHidden + h] = 0; + filteredEligibilityTraces[i * numHidden + h] = 0; + } + for (unsigned i = 0; i < numHidden; i++) { + hiddenFiringRateGradients[i * numHidden + h] = 0; + hiddenFixedBroadcastGradients[i * numHidden + h] = 0; + filteredEligibilityTraces[numInputs * numHidden + i * numHidden + h] = 0; + } + for (unsigned i = 0; i < numOutputs; i++) { + leakyReadoutGradients[h * numOutputs + i] = 0; + } + __syncthreads(); + + for (unsigned t = 0; t < numSimulationTimesteps; t++) { + + fullyConnectedLeakyIntegrateAndFireKernel( + 0, + numHidden, + spikeThreshold, + hiddenDecayFactor, + refactoryPeriod, + derivativeDumpingFactor, + hiddenSpikes, + filteredSpikes + numInputs, + numSpikes, + I, + v, + derivatives, + timeStepsSinceLastSpike + ); + fullyConnectedInputOutputKernel( + numInputs, + numOutputs, + hiddenDecayFactor, + readoutDecayFactor, + I + numHidden, + v + numHidden, + (t > 0) ? inputSpikesOverTime + (t-1) * numInputs : NULL, + filteredSpikes + ); + + __syncthreads(); + + if (h == 0) { + for (unsigned o = 0; o < numOutputs; o++) { + networkError[0] += pow( + v[numHidden + o] - + targetsOverTime[t * numOutputs + o], + 2 + ); + } + } + + fullyConnectedSpikePropagationKernel( + numInputs, + numHidden, + numOutputs, + inputSpikesOverTime + t * numInputs, + hiddenSpikes, + I, + inputWeights, + hiddenWeights, + outputWeights + ); + fullyConnectedLeakyIntegrateAndFireEligibilityGradientKernel( + numInputs, + numHidden, + numOutputs, + targetFiringRate, + firingRateScallingFactor, + readoutDecayFactor, + filteredSpikes, + firingRates, + derivatives, + feedbackWeights, + v + numHidden, + targetsOverTime + t * numOutputs, + filteredEligibilityTraces, + inputFiringRateGradients, + inputFixedBroadcastGradients + ); + fullyConnectedLeakyIntegrateAndFireEligibilityGradientKernel( + numHidden, + numHidden, + numOutputs, + targetFiringRate, + firingRateScallingFactor, + readoutDecayFactor, + filteredSpikes + numInputs, + firingRates, + derivatives, + feedbackWeights, + v + numHidden, + targetsOverTime + t * numOutputs, + filteredEligibilityTraces + numInputs * numHidden, + hiddenFiringRateGradients, + hiddenFixedBroadcastGradients + ); + fullyConnectedLeakyReadoutGradientKernel( + numHidden, + numOutputs, + filteredSpikes + numInputs, + v + numHidden, + targetsOverTime + t * numOutputs, + leakyReadoutGradients + ); + + __syncthreads(); + } + + firingRates[h] = numSpikes[h] / (numSimulationTimesteps * timeStepLength); + } + } + } +} +#endif /* __FULLY_CONNECTED_LEAKY_INTEGRATE_AND_FIRE_FIXED_BROADCAST_KERNEL__ */ diff --git a/cuda_code/GIVoxelPages.cu b/cuda_code/GIVoxelPages.cu new file mode 100644 index 0000000000000000000000000000000000000000..98ad1bf1a77c061f66cdac16d39ae62b84ded37d --- /dev/null +++ b/cuda_code/GIVoxelPages.cu @@ -0,0 +1,1286 @@ +#include "GIVoxelPages.h" +#include "PageKernels.cuh" +#include "DrawBuffer.h" +#include "CudaInit.h" +#include "CudaTimer.h" +#include "GIVoxelCache.h" +#include "GISparseVoxelOctree.h" +#include "MeshBatchSkeletal.h" +#include "OGLTimer.h" +#include "IEUtility/IEMath.h" +#include "GLSLBindPoints.h" +#include "Camera.h" +#include +#include "IEUtility/IEAxisAalignedBB.h" + +inline static std::ostream& operator<<(std::ostream& ostr, const CSegmentInfo& segObj) +{ + uint16_t cascadeNo = (segObj.packed >> 14) & 0x0003; + uint16_t objType = (segObj.packed >> 12) & 0x0003; + uint16_t occupation = (segObj.packed >> 10) & 0x0003; + + ostr << cascadeNo << ", "; + ostr << segObj.batchId << ", "; + ostr << segObj.objId << " | 
"; + + ostr << segObj.objectSegmentId << " | "; + ostr << objType << " | "; + ostr << occupation << " | "; + return ostr; +} + +GIVoxelPages::PageRenderer::PageRenderer() + : debugBufferResource(nullptr) + , debugBufferCUDA(nullptr) + , drawParameterOffset(0) + , atomicIndexOffset(0) + , gridInfoOffset(0) + , voxelPositionOffset(0) + , voxelRenderOffset(0) +{} + +GIVoxelPages::PageRenderer::PageRenderer(const GIVoxelPages& pages) + : vRenderWorldVoxel(ShaderType::VERTEX, "Shaders/VoxRenderWorld.vert") + , fRenderWorldVoxel(ShaderType::FRAGMENT, "Shaders/VoxRender.frag") + , debugBufferResource(nullptr) + , debugBufferCUDA(nullptr) + , drawParameterOffset(0) + , atomicIndexOffset(0) + , gridInfoOffset(0) + , voxelPositionOffset(0) + , voxelRenderOffset(0) +{ + VoxelVAO::CubeOGL cube = VoxelVAO::LoadCubeDataFromGFG(); + size_t maxVoxelCount = pages.dPages.Size() * PageSize; + + // Since Grid info will be bound as SSBO it inneds to be properly aligned + size_t cubeOffset = cube.data.size(); + size_t cubeVertexOffset = cube.drawCount * sizeof(uint32_t); + + // Grid Info + size_t offset = cubeOffset; + offset = DeviceOGLParameters::SSBOAlignOffset(offset); + gridInfoOffset = offset; + offset += pages.svoParams->CascadeCount * sizeof(CVoxelGrid); + + // Atomic Index + drawParameterOffset = offset; + atomicIndexOffset = offset + offsetof(DrawPointIndexed, instanceCount); + offset += sizeof(DrawPointIndexed); + // Voxel Positions + voxelPositionOffset = offset; + offset += maxVoxelCount * sizeof(VoxelPosition); + // Voxel Albedo or Normal + voxelRenderOffset = offset; + offset += maxVoxelCount * sizeof(VoxelNormal); + static_assert(sizeof(VoxelNormal) == sizeof(VoxelAlbedo), "Implementation assumes all debug render types has the same size"); + + // Allocate + debugDrawBuffer.Resize(offset, false); + + // Now Register + size_t bufferSize = 0; + CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&debugBufferResource, debugDrawBuffer.getGLBuffer(), + cudaGraphicsMapFlagsWriteDiscard)); + CUDA_CHECK(cudaGraphicsMapResources(1, &debugBufferResource)); + CUDA_CHECK(cudaGraphicsResourceGetMappedPointer(reinterpret_cast(&debugBufferCUDA), + &bufferSize, + debugBufferResource)); + assert(bufferSize == debugDrawBuffer.Capacity()); + + // Copy Cube Vertex and Indices + CUDA_CHECK(cudaMemcpy(debugBufferCUDA, + cube.data.data(), + cube.data.size(), + cudaMemcpyHostToDevice)); + + // Copy Grid Info + CUDA_CHECK(cudaMemcpy(debugBufferCUDA + gridInfoOffset, + pages.dVoxelGrids, + pages.svoParams->CascadeCount * sizeof(CVoxelGrid), + cudaMemcpyDeviceToDevice)); + + // Copy Draw Point + DrawPointIndexed dp = + { + cube.drawCount, + 0, // Instance count will be filled each frame + 0, + 0, + 0 + }; + CUDA_CHECK(cudaMemcpy(debugBufferCUDA + drawParameterOffset, + &dp, sizeof(DrawPointIndexed), + cudaMemcpyHostToDevice)); + + // All Done! 
(Unmap and continue) + CUDA_CHECK(cudaGraphicsUnmapResources(1, &debugBufferResource)); + debugBufferCUDA = nullptr; + + // Finally Generate VAO + debugDrawVao = VoxelVAO(debugDrawBuffer, + cubeVertexOffset, + voxelPositionOffset, + voxelRenderOffset); +} + +GIVoxelPages::PageRenderer::PageRenderer(PageRenderer&& other) + : vRenderWorldVoxel(std::move(other.vRenderWorldVoxel)) + , fRenderWorldVoxel(std::move(other.fRenderWorldVoxel)) + , debugBufferResource(other.debugBufferResource) + , debugDrawBuffer(std::move(other.debugDrawBuffer)) + , debugBufferCUDA(other.debugBufferCUDA) + , debugDrawVao(std::move(other.debugDrawVao)) + , drawParameterOffset(other.drawParameterOffset) + , atomicIndexOffset(other.atomicIndexOffset) + , gridInfoOffset(other.gridInfoOffset) + , voxelPositionOffset(other.voxelPositionOffset) + , voxelRenderOffset(other.voxelRenderOffset) +{ + other.debugBufferResource = nullptr; + other.debugBufferCUDA = nullptr; +} + +GIVoxelPages::PageRenderer& GIVoxelPages::PageRenderer::operator=(PageRenderer&& other) +{ + if(debugBufferResource) + CUDA_CHECK(cudaGraphicsUnregisterResource(debugBufferResource)); + + vRenderWorldVoxel = std::move(other.vRenderWorldVoxel); + fRenderWorldVoxel = std::move(other.fRenderWorldVoxel); + debugBufferResource = other.debugBufferResource; + debugDrawBuffer = std::move(other.debugDrawBuffer); + debugBufferCUDA = other.debugBufferCUDA; + debugDrawVao = std::move(other.debugDrawVao); + drawParameterOffset = other.drawParameterOffset; + atomicIndexOffset = other.atomicIndexOffset; + gridInfoOffset = other.gridInfoOffset; + voxelPositionOffset = other.voxelPositionOffset; + voxelRenderOffset = other.voxelRenderOffset; + + other.debugBufferResource = nullptr; + other.debugBufferCUDA = nullptr; + return *this; +} + +GIVoxelPages::PageRenderer::~PageRenderer() +{ + if(debugBufferResource) + CUDA_CHECK(cudaGraphicsUnregisterResource(debugBufferResource)); +} + +double GIVoxelPages::PageRenderer::Draw(bool doTiming, + uint32_t cascade, + VoxelRenderType renderType, + const Camera& camera, + const GIVoxelCache& cache, + const GIVoxelPages& pages, + bool useCache) +{ + // Skip if not allocated + if(!Allocated()) return 0.0; + + CudaTimer cT; + if(doTiming) cT.Start(); + + // Map Buffer + size_t bufferSize = 0; + CUDA_CHECK(cudaGraphicsMapResources(1, &debugBufferResource)); + CUDA_CHECK(cudaGraphicsResourceGetMappedPointer(reinterpret_cast(&debugBufferCUDA), + &bufferSize, + debugBufferResource)); + assert(bufferSize == debugDrawBuffer.Capacity()); + + // Copy Requested Data + // Gen pointers + VoxelPosition* voxelPosition = reinterpret_cast(debugBufferCUDA + voxelPositionOffset); + unsigned int* voxelRender = reinterpret_cast(debugBufferCUDA + voxelRenderOffset); + unsigned int* atomicIndex = reinterpret_cast(debugBufferCUDA + atomicIndexOffset); + + // Clear atomic counter + CUDA_CHECK(cudaMemset(atomicIndex, 0x00, sizeof(unsigned int))); + + // Load new Grid Positions + // Copy Grid Info + CUDA_CHECK(cudaMemcpy2D(debugBufferCUDA + gridInfoOffset, sizeof(CVoxelGrid), + pages.dVoxelGrids, sizeof(CVoxelGrid), + sizeof(float3), pages.svoParams->CascadeCount, + cudaMemcpyDeviceToDevice)); + + // KC + int gridSize = CudaInit::GenBlockSize(static_cast(pages.dPages.Size() * PageSize)); + int blockSize = CudaInit::TBP; + CopyPage<<>>(// OGL Buffer + voxelPosition, + voxelRender, + *atomicIndex, + // Voxel Cache + cache.getDeviceCascadePointersDevice().Data(), + // Voxel Pages + reinterpret_cast(pages.dPages.Data()), + // + static_cast(pages.batches->size()), + 
cascade, + renderType, + useCache); + CUDA_KERNEL_CHECK(); + + //// DEBUG + //uint32_t nodesInCirculation = 0; + //CUDA_CHECK(cudaMemcpy(&nodesInCirculation, atomicIndex, sizeof(uint32_t), cudaMemcpyDeviceToHost)); + //GI_LOG("Total Valid node count in pages : %d", nodesInCirculation); + + // Unmap buffer and continue + CUDA_CHECK(cudaGraphicsUnmapResources(1, &debugBufferResource)); + debugBufferCUDA = nullptr; + + // Timing + OGLTimer t; + if(doTiming) + { + cT.Stop(); + t.Start(); + } + + // Now render + // Framebuffer + glBindFramebuffer(GL_FRAMEBUFFER, 0); + glViewport(0, 0, + static_cast(camera.width), + static_cast(camera.height)); + + // State + glDisable(GL_MULTISAMPLE); + glEnable(GL_DEPTH_TEST); + glEnable(GL_CULL_FACE); + glDepthFunc(GL_LEQUAL); + glDepthMask(true); + glColorMask(true, true, true, true); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + + // Shaders + Shader::Unbind(ShaderType::GEOMETRY); + vRenderWorldVoxel.Bind(); + glUniform1ui(U_RENDER_TYPE, static_cast(renderType)); + fRenderWorldVoxel.Bind(); + + // Uniforms + debugDrawBuffer.BindAsShaderStorageBuffer(LU_VOXEL_GRID_INFO, + static_cast(gridInfoOffset), + static_cast(pages.svoParams->CascadeCount * sizeof(CVoxelGrid))); + + // Draw Indirect Buffer + debugDrawBuffer.BindAsDrawIndirectBuffer(); + + // VAO + debugDrawVao.Bind(); + debugDrawVao.Draw(static_cast(drawParameterOffset)); + + // Timer + if(doTiming) + { + t.Stop(); + return t.ElapsedMS() + cT.ElapsedMilliS(); + } + return 0.0; +} + +bool GIVoxelPages::PageRenderer::Allocated() const +{ + return vRenderWorldVoxel.IsValid(); +} + +GIVoxelPages::MultiPage::MultiPage(size_t pageCount) +{ + assert(pageCount != 0); + size_t sizePerPage = GIVoxelPages::PageSize * + (sizeof(CVoxelPos) + + sizeof(CVoxelNorm) + + sizeof(CVoxelOccupancy)) + + + GIVoxelPages::SegmentSize * + (sizeof(unsigned char) + + sizeof(CSegmentInfo)); + + size_t totalSize = sizePerPage * pageCount; + pageData.Resize(totalSize); + pageData.Memset(0x00, 0, totalSize); + + uint8_t* dPtr = pageData.Data(); + ptrdiff_t offset = 0; + for(size_t i = 0; i < pageCount; i++) + { + CVoxelPage page = {}; + + page.dGridVoxPos = reinterpret_cast(dPtr + offset); + offset += GIVoxelPages::PageSize * sizeof(CVoxelPos); + + page.dGridVoxNorm = reinterpret_cast(dPtr + offset); + offset += GIVoxelPages::PageSize * sizeof(CVoxelNorm); + + page.dGridVoxOccupancy = reinterpret_cast(dPtr + offset); + offset += GIVoxelPages::PageSize * sizeof(CVoxelOccupancy); + + page.dEmptySegmentPos = reinterpret_cast(dPtr + offset); + offset += GIVoxelPages::SegmentSize * sizeof(unsigned char); + + page.dSegmentInfo = reinterpret_cast(dPtr + offset); + offset += GIVoxelPages::SegmentSize * sizeof(CSegmentInfo); + + page.dEmptySegmentStackSize = GIVoxelPages::SegmentPerPage; + pages.push_back(page); + } + assert(offset == pageData.Size()); + + // KC to Initialize Empty Segment Stack + int gridSize = CudaInit::GenBlockSizeSmall(static_cast(pageCount * GIVoxelPages::SegmentPerPage)); + int blockSize = CudaInit::TBP; + InitializePage<<>>(pages.front().dEmptySegmentPos, pageCount); + CUDA_KERNEL_CHECK(); +} + +GIVoxelPages::MultiPage::MultiPage(MultiPage&& other) + : pageData(std::move(other.pageData)) + , pages(std::move(other.pages)) +{} + +size_t GIVoxelPages::MultiPage::PageCount() const +{ + return pages.size(); +} + +const std::vector& GIVoxelPages::MultiPage::Pages() const +{ + return pages; +} + +uint16_t GIVoxelPages::PackSegmentInfo(const uint8_t cascadeId, + const CObjectType type, + const CSegmentOccupation 
occupation,
+                                            const bool firstOccurance)
+{
+    // MSB to LSB
+    // 2 bit cascadeId
+    // 2 bit object type
+    // 2 bit segment occupation
+    // 1 bit first occurance flag
+    uint16_t packed = 0;
+    packed |= (static_cast<uint16_t>(cascadeId) & 0x0003) << 14;
+    packed |= (static_cast<uint16_t>(type) & 0x0003) << 12;
+    packed |= (static_cast<uint16_t>(occupation) & 0x0003) << 10;
+    packed |= (static_cast<uint16_t>(firstOccurance) & 0x0001) << 9;
+    return packed;
+}
+
+void GIVoxelPages::GenerateGPUData(const GIVoxelCache& cache)
+{
+    // Generate SegInfos
+    std::vector<CVoxelGrid> grids;
+    std::vector<CSegmentInfo> segInfos;
+    std::vector<std::vector<bool>> checkBase(batches->size());
+
+    for(uint32_t cascadeId = 0; cascadeId < svoParams->CascadeCount; cascadeId++)
+    {
+        CVoxelGrid grid = {};
+        grid.depth = svoParams->CascadeBaseLevel + svoParams->CascadeCount - cascadeId - 1;
+        grid.dimension =
+        {
+            svoParams->CascadeBaseLevelSize,
+            svoParams->CascadeBaseLevelSize,
+            svoParams->CascadeBaseLevelSize
+        };
+        grid.position = {0.0f, 0.0f, 0.0f};
+        grid.span = svoParams->BaseSpan * static_cast<float>(1 << cascadeId);
+        grids.push_back(grid);
+
+        for(uint32_t batchId = 0; batchId < batches->size(); batchId++)
+        {
+            if((*batches)[batchId]->DrawCount() == 0) continue;
+            if(cascadeId == 0) checkBase[batchId].resize((*batches)[batchId]->DrawCount(), true);
+
+            bool nonRigid = (*batches)[batchId]->MeshType() == MeshBatchType::SKELETAL;
+            const std::vector<CMeshVoxelInfo> voxInfo = cache.CopyMeshObjectInfo(cascadeId, batchId);
+
+            for(uint32_t objId = 0; objId < voxInfo.size(); objId++)
+            {
+                const CMeshVoxelInfo& info = voxInfo[objId];
+                bool firstOccurance = false;
+                if(info.voxCount != 0 && checkBase[batchId][objId] == true)
+                {
+                    checkBase[batchId][objId] = false;
+                    firstOccurance = true;
+                }
+
+                uint32_t segmentCount = (info.voxCount + SegmentSize - 1) / SegmentSize;
+                for(uint32_t segId = 0; segId < segmentCount; segId++)
+                {
+                    CObjectType objType = (nonRigid) ?
CObjectType::SKEL_DYNAMIC : CObjectType::DYNAMIC; + + CSegmentInfo segInfo; + segInfo.batchId = static_cast(batchId); + segInfo.objectSegmentId = static_cast(segId); + segInfo.objId = static_cast(objId); + segInfo.packed = PackSegmentInfo(static_cast(cascadeId), objType, + CSegmentOccupation::OCCUPIED, + firstOccurance); + + segInfos.push_back(segInfo); + } + } + } + } + + // Determine Buffer Size + size_t bufferSize = segInfos.size() * (sizeof(CSegmentInfo) + + sizeof(ushort2)); + bufferSize += batches->size() * sizeof(BatchOGLData); + bufferSize += svoParams->CascadeCount * sizeof(CVoxelGrid); + + // Send Data to Buffer + gpuData.Resize(bufferSize); + size_t bufferOffset = 0; + // Grids + CUDA_CHECK(cudaMemcpy(gpuData.Data() + bufferOffset, + reinterpret_cast(grids.data()), + grids.size() * sizeof(CVoxelGrid), + cudaMemcpyHostToDevice)); + dVoxelGrids = reinterpret_cast(gpuData.Data() + bufferOffset); + bufferOffset += grids.size() * sizeof(CVoxelGrid); + // OGL Data + CUDA_CHECK(cudaMemset(gpuData.Data() + bufferOffset, 0, + batches->size() * sizeof(BatchOGLData))); + dBatchOGLData = reinterpret_cast(gpuData.Data() + bufferOffset); + bufferOffset += batches->size() * sizeof(BatchOGLData); + // Segments Alloc + CUDA_CHECK(cudaMemset(gpuData.Data() + bufferOffset, 0xFFFFFFFF, + segInfos.size() * sizeof(ushort2))); + dSegmentAllocInfo = reinterpret_cast(gpuData.Data() + bufferOffset); + bufferOffset += segInfos.size() * sizeof(ushort2); + // Segments + CUDA_CHECK(cudaMemcpy(gpuData.Data() + bufferOffset, + reinterpret_cast(segInfos.data()), + segInfos.size() * sizeof(CSegmentInfo), + cudaMemcpyHostToDevice)); + dSegmentInfo = reinterpret_cast(gpuData.Data() + bufferOffset); + bufferOffset += segInfos.size() * sizeof(CSegmentInfo); + assert(bufferOffset == gpuData.Size()); + segmentAmount = static_cast(segInfos.size()); +} + +void GIVoxelPages::AllocatePages(size_t voxelCapacity) +{ + size_t pageCount = (voxelCapacity + PageSize - 1) / PageSize; + size_t oldSize = dPages.Size(); + + hPages.emplace_back(pageCount); + dPages.Resize(oldSize + hPages.back().PageCount()); + dPages.Assign(oldSize, hPages.back().PageCount(), hPages.back().Pages().data()); +} + +void GIVoxelPages::MapOGLResources() +{ + CUDA_CHECK(cudaGraphicsMapResources(static_cast(batchOGLResources.size()), batchOGLResources.data())); + + std::vector newOGLData; + size_t batchIndex = 0; + for(size_t i = 0; i < batches->size(); i++) + { + MeshBatchI& currentBatch = *(*batches)[i]; + if(currentBatch.DrawCount() == 0) + { + newOGLData.push_back({}); + continue; + } + + size_t size; + uint8_t* glPointer = nullptr; + CUDA_CHECK(cudaGraphicsResourceGetMappedPointer(reinterpret_cast(&glPointer), + &size, batchOGLResources[batchIndex])); + + size_t aabbByteOffset = (*batches)[i]->getDrawBuffer().getAABBOffset(); + size_t modelTransformByteOffset = (*batches)[i]->getDrawBuffer().getModelTransformOffset(); + size_t modelTransformIndexByteOffset = (*batches)[i]->getDrawBuffer().getModelTransformIndexOffset(); + + BatchOGLData batchGL = {}; + batchGL.dAABBs = reinterpret_cast(glPointer + aabbByteOffset); + batchGL.dModelTransforms = reinterpret_cast(glPointer + modelTransformByteOffset); + batchGL.dModelTransformIndices = reinterpret_cast(glPointer + modelTransformIndexByteOffset); + + batchIndex++; + if((*batches)[i]->MeshType() == MeshBatchType::SKELETAL) + { + CUDA_CHECK(cudaGraphicsResourceGetMappedPointer(reinterpret_cast(&glPointer), + &size, batchOGLResources[batchIndex])); + batchGL.dJointTransforms = reinterpret_cast(glPointer); + 
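+ // A skeletal batch registered two OGL resources (its draw buffer and its
+ // joint-transform buffer), so the mapped-resource index is advanced a second
+ // time here to stay in step with batchOGLResources.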
batchIndex++; + } + newOGLData.push_back(batchGL); + } + + // Copy generated pointers to GPU + CUDA_CHECK(cudaMemcpy(dBatchOGLData, + newOGLData.data(), + batches->size() * sizeof(BatchOGLData), + cudaMemcpyHostToDevice)); +} + +void GIVoxelPages::UnmapOGLResources() +{ + CUDA_CHECK(cudaGraphicsUnmapResources(static_cast(batchOGLResources.size()), batchOGLResources.data())); +} + +void GIVoxelPages::Update(double& ioTime, + double& transTime, + const GIVoxelCache& caches, + const IEVector3& camPos, + bool doTiming) +{ + UpdateGridPositions(camPos); + MapOGLResources(); + ioTime = VoxelIO(doTiming); + transTime = Transform(caches, doTiming); + UnmapOGLResources(); +} + +GIVoxelPages::GIVoxelPages() + : batches(nullptr) + , svoParams(nullptr) + , segmentAmount(0) + , dVoxelGrids(nullptr) + , dBatchOGLData(nullptr) + , dSegmentInfo(nullptr) + , dSegmentAllocInfo(nullptr) + +{} + +GIVoxelPages::GIVoxelPages(const GIVoxelCache& cache, + const std::vector* batches, + const OctreeParameters& octreeParams) + : batches(batches) + , svoParams(&octreeParams) + , segmentAmount(0) + , dVoxelGrids(nullptr) + , dBatchOGLData(nullptr) + , dSegmentInfo(nullptr) + , dSegmentAllocInfo(nullptr) +{ + for(uint32_t i = 0; i < batches->size(); i++) + { + MeshBatchI& batch = *(*batches)[i]; + if(batch.DrawCount() == 0) continue; + + GLuint bufferId = batch.getDrawBuffer().getGLBuffer(); + cudaGraphicsResource_t glResource; + CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&glResource, + bufferId, + cudaGraphicsMapFlagsReadOnly)); + batchOGLResources.push_back(glResource); + + if(batch.MeshType() == MeshBatchType::SKELETAL) + { + GLuint jointBuffer = static_cast(batch).getJointTransforms().getGLBuffer(); + CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&glResource, + jointBuffer, + cudaGraphicsMapFlagsReadOnly)); + batchOGLResources.push_back(glResource); + } + } + GenerateGPUData(cache); + AllocatePages(segmentAmount * SegmentSize); +} + +GIVoxelPages::GIVoxelPages(GIVoxelPages&& other) + : batches(other.batches) + , svoParams(other.svoParams) + , segmentAmount(other.segmentAmount) + , outermostGridPosition(other.outermostGridPosition) + , gpuData(std::move(other.gpuData)) + , dVoxelGrids(other.dVoxelGrids) + , dBatchOGLData(other.dBatchOGLData) + , dSegmentInfo(other.dSegmentInfo) + , dSegmentAllocInfo(other.dSegmentAllocInfo) + , hPages(std::move(other.hPages)) + , dPages(std::move(other.dPages)) + , batchOGLResources(std::move(other.batchOGLResources)) + , pageRenderer(std::move(other.pageRenderer)) +{ + assert(other.batchOGLResources.empty()); +} + +GIVoxelPages& GIVoxelPages::operator=(GIVoxelPages&& other) +{ + assert(&other != this); + for(cudaGraphicsResource_t resc : batchOGLResources) + { + CUDA_CHECK(cudaGraphicsUnregisterResource(resc)); + } + + batches = other.batches; + svoParams = other.svoParams; + segmentAmount = other.segmentAmount; + outermostGridPosition = other.outermostGridPosition; + gpuData = std::move(other.gpuData); + dVoxelGrids = other.dVoxelGrids; + dBatchOGLData = other.dBatchOGLData; + dSegmentInfo = other.dSegmentInfo; + dSegmentAllocInfo = other.dSegmentAllocInfo; + hPages = std::move(other.hPages); + dPages = std::move(other.dPages); + batchOGLResources = std::move(other.batchOGLResources); + pageRenderer = std::move(other.pageRenderer); + return *this; +} + +GIVoxelPages::~GIVoxelPages() +{ + for(cudaGraphicsResource_t resc : batchOGLResources) + { + CUDA_CHECK(cudaGraphicsUnregisterResource(resc)); + } +} + +void GIVoxelPages::UpdateGridPositions(const IEVector3& cameraPos) +{ + std::vector 
positions; + GenerateGridPositions(positions, cameraPos); + + // Copy new positions + CUDA_CHECK(cudaMemcpy2D(dVoxelGrids, sizeof(CVoxelGrid), + positions.data(), sizeof(IEVector3), + sizeof(IEVector3), svoParams->CascadeCount, + cudaMemcpyHostToDevice)); +} + +void GIVoxelPages::GenerateGridPositions(std::vector& gridPositions, + const IEVector3& cameraPos) +{ + // Calculate outermost span position + float outerSpan = svoParams->BaseSpan * static_cast(1 << (svoParams->CascadeCount - 1)); + IEVector3 voxelCornerPos = cameraPos - outerSpan * (svoParams->CascadeBaseLevelSize - 1) * 0.5f; + + // Align outermost cascade + // TODO: Better solution for higher level voxel jittering + float rootSnapLevelMultiplier = static_cast(0x1 << 4); + + // Removes Jitterin on base cascade level + float snapSpan = outerSpan * rootSnapLevelMultiplier; + voxelCornerPos[0] -= std::fmod(voxelCornerPos[0] + snapSpan * 0.5f, snapSpan); + voxelCornerPos[1] -= std::fmod(voxelCornerPos[1] + snapSpan * 0.5f, snapSpan); + voxelCornerPos[2] -= std::fmod(voxelCornerPos[2] + snapSpan * 0.5f, snapSpan); + + //// Grid Aligned Center + //IEVector3 voxelCenter = voxelCornerPos + outerSpan * (svoParams->CascadeBaseLevelSize - 1) * 0.5f; + //std::vector positions(svoParams->CascadeCount); + //for(uint32_t i = 0; i < svoParams->CascadeCount; i++) + //{ + // float multiplier = (0x1 << i) * (svoParams->CascadeBaseLevelSize - 1) * 0.5f; + // positions[i] = voxelCenter - multiplier; + //} + + // Now align inner cascades according to outermost + // In all system cacades and its data lied from inner to outer + gridPositions.resize(svoParams->CascadeCount); + float baseHalf = svoParams->BaseSpan * 0.5f * svoParams->CascadeBaseLevelSize; + float seriesTotal = IEMathFunctions::GeomSeries(svoParams->CascadeCount - 2, 2.0f); + for(uint32_t i = 0; i < svoParams->CascadeCount; i++) + { + int32_t termLast = i - 1; + float lastTermSum = (termLast >= 0) ? 
IEMathFunctions::GeomSeries(termLast, 2.0f) : 0; + float subSeries = seriesTotal - lastTermSum; + float displacement = subSeries * baseHalf; + gridPositions[i] = voxelCornerPos + displacement; + } + outermostGridPosition = gridPositions.back(); +} + +double GIVoxelPages::VoxelIO(bool doTiming) +{ + CudaTimer t; + if(doTiming) t.Start(); + + // KC + int gridSize = CudaInit::GenBlockSizeSmall(static_cast(segmentAmount)); + int blockSize = CudaInit::TBPSmall; + // Voxel I-O (Deallocate first then allocate) + VoxelDeallocate<<>>(// Voxel System + dPages.Data(), + dVoxelGrids, + // Helper Structures + dSegmentAllocInfo, + dSegmentInfo, + // Per Object Related + dBatchOGLData, + // Limits + segmentAmount); + + VoxelAllocate<<>>(// Voxel System + dPages.Data(), + dVoxelGrids, + // Helper Structures + dSegmentAllocInfo, + dSegmentInfo, + // Per Object Related + dBatchOGLData, + // Limits + segmentAmount, + static_cast(dPages.Size())); + CUDA_KERNEL_CHECK(); + if(doTiming) + { + t.Stop(); + return t.ElapsedMilliS(); + } + return 0.0; +} + +double GIVoxelPages::Transform(const GIVoxelCache& cache, + bool doTiming) +{ + CudaTimer t; + if(doTiming) t.Start(); + + // KC + int gridSize = CudaInit::GenBlockSizeSmall(static_cast(dPages.Size() * PageSize)); + int blockSize = CudaInit::TBPSmall; + VoxelTransform<<>>(// Voxel Pages + dPages.Data(), + dVoxelGrids, + // OGL Related + dBatchOGLData, + // Voxel Cache Related + cache.getDeviceCascadePointersDevice().Data(), + // Limits + static_cast(batches->size())); + cudaDeviceSynchronize(); + CUDA_KERNEL_CHECK(); + if(doTiming) + { + t.Stop(); + return t.ElapsedMilliS(); + } + return 0.0; +} + +uint64_t GIVoxelPages::MemoryUsage() const +{ + size_t totalSize = gpuData.Size(); + totalSize += dPages.Size() * sizeof(CVoxelPage); + totalSize += dPages.Size() * PageSize * (sizeof(CVoxelPos) + + sizeof(CVoxelNorm) + + sizeof(CVoxelOccupancy)); + totalSize += dPages.Size() * SegmentPerPage * (sizeof(unsigned char) + + sizeof(CSegmentInfo)); + return totalSize; +} + +uint32_t GIVoxelPages::PageCount() const +{ + return static_cast(dPages.Size()); +} + +void GIVoxelPages::DumpPageSegments(const char* fileName, size_t offset, size_t pageCount) const +{ + if(pageCount == 0) pageCount = dPages.Size() - offset; + assert(offset + pageCount <= dPages.Size()); + + std::vector pages(pageCount); + CUDA_CHECK(cudaMemcpy(pages.data(), dPages.Data() + offset, + pageCount * sizeof(CVoxelPage), + cudaMemcpyDeviceToHost)); + + std::vector infos(pageCount * SegmentPerPage); + for(size_t i = 0; i < pageCount; i++) + { + const CVoxelPage& p = pages[i]; + CUDA_CHECK(cudaMemcpy(infos.data() + i * SegmentPerPage, + p.dSegmentInfo, + SegmentPerPage * sizeof(CSegmentInfo), + cudaMemcpyDeviceToHost)); + } + + + std::ofstream fOut; + fOut.open(fileName); + for(const CSegmentInfo& data : infos) + { + fOut << std::uppercase << std::hex << data; + fOut << "\t\t\t" << std::nouppercase << std::dec << data; + fOut << std::endl; + } +} + +void GIVoxelPages::DumpPageEmptyPositions(const char* fileName, size_t offset, size_t pageCount) const +{ + if(pageCount == 0) pageCount = dPages.Size() - offset; + assert(offset + pageCount <= dPages.Size()); + + std::vector pages(pageCount); + CUDA_CHECK(cudaMemcpy(pages.data(), dPages.Data() + offset, + pageCount * sizeof(CVoxelPage), + cudaMemcpyDeviceToHost)); + + std::vector emptySpots(pageCount * SegmentPerPage); + for(size_t i = 0; i < pageCount; i++) + { + const CVoxelPage& p = pages[i]; + CUDA_CHECK(cudaMemcpy(emptySpots.data() + i * SegmentPerPage, + 
p.dEmptySegmentPos, + SegmentPerPage * sizeof(unsigned char), + cudaMemcpyDeviceToHost)); + } + + std::ofstream fOut; + fOut.open(fileName); + for(const unsigned char& data : emptySpots) + { + fOut << std::uppercase << std::hex << static_cast(data); + fOut << "\t\t\t" << std::nouppercase << std::dec << static_cast(data); + fOut << std::endl; + } +} + +void GIVoxelPages::DumpSegmentAllocation(const char* fileName, size_t offset, size_t segmentCount) const +{ + if(segmentCount == 0) segmentCount = segmentAmount - offset; + assert(offset + segmentCount <= segmentAmount); + + std::vector segments(segmentCount); + CUDA_CHECK(cudaMemcpy(segments.data(), dSegmentInfo + offset, + segmentCount * sizeof(ushort2), + cudaMemcpyDeviceToHost)); + + std::ofstream fOut; + fOut.open(fileName); + for(const ushort2& data : segments) + { + fOut << std::uppercase << std::hex << data; + fOut << "\t\t\t" << std::nouppercase << std::dec << data; + fOut << std::endl; + } +} + +void GIVoxelPages::DumpSegmentInfo(const char* fileName, size_t offset, size_t segmentCount) const +{ + if(segmentCount == 0) segmentCount = segmentAmount - offset; + assert(offset + segmentCount <= segmentAmount); + + std::vector segments(segmentCount); + CUDA_CHECK(cudaMemcpy(segments.data(), dSegmentInfo + offset, + segmentCount * sizeof(CSegmentInfo), + cudaMemcpyDeviceToHost)); + + std::ofstream fOut; + fOut.open(fileName); + for(const CSegmentInfo& data : segments) + { + fOut << std::uppercase << std::hex << data; + fOut << "\t\t\t" << std::nouppercase << std::dec << data; + fOut << std::endl; + } +} + +void GIVoxelPages::AllocateDraw() +{ + if(!pageRenderer.Allocated()) + { + pageRenderer = PageRenderer(*this); + } +} + +double GIVoxelPages::Draw(bool doTiming, + uint32_t cascadeCount, + VoxelRenderType renderType, + const Camera& camera, + const GIVoxelCache& cache) +{ + return pageRenderer.Draw(doTiming, cascadeCount, renderType, camera, cache, *this, false); +} + +void GIVoxelPages::DeallocateDraw() +{ + if(pageRenderer.Allocated()) + { + pageRenderer = PageRenderer(); + } +} + +const CVoxelPageConst* GIVoxelPages::getVoxelPagesDevice() const +{ + return reinterpret_cast(dPages.Data()); +} + +const CVoxelGrid* GIVoxelPages::getVoxelGridsDevice() const +{ + return dVoxelGrids; +} + +const IEVector3& GIVoxelPages::getOutermostGridPosition() const +{ + return outermostGridPosition; +} + +size_t GIVoxelPages::VoxelCountInCirculation(const GIVoxelCache& cache) const +{ + CudaVector dCounter(1); + dCounter.Memset(0, 0, 1); + std::vector hCounter(1, 0); + + // KC + int gridSize = CudaInit::GenBlockSizeSmall(static_cast(dPages.Size() * PageSize)); + int blockSize = CudaInit::TBPSmall; + CountVoxelsInPageSystem<<>>(dCounter.Data(), + // Voxel Cache + cache.getDeviceCascadePointersDevice().Data(), + // Voxel Pages + reinterpret_cast(dPages.Data()), + // Limits + static_cast(batches->size())); + + CUDA_CHECK(cudaMemcpy(hCounter.data(), dCounter.Data(), sizeof(uint32_t), + cudaMemcpyDeviceToHost)); + return hCounter[0]; +} + +GIVoxelPagesFrame::FastVoxelizer::FastVoxelizer() + : denseResource(nullptr) + , octreeParams(nullptr) +{} + +GIVoxelPagesFrame::FastVoxelizer::FastVoxelizer(const OctreeParameters* octreeParams) + : denseResource(nullptr) + , octreeParams(octreeParams) + , lockTexture(0) + , vertVoxelizeFast(ShaderType::VERTEX, "Shaders/VoxelizeFast.vert") + , vertVoxelizeFastSkeletal(ShaderType::VERTEX, "Shaders/VoxelizeFastSkel.vert") + , geomVoxelize(ShaderType::GEOMETRY, "Shaders/VoxelizeGeom.geom") + , 
fragVoxelizeFast(ShaderType::FRAGMENT, "Shaders/VoxelizeFast.frag") +{ + size_t offset = 0; + + // Dense + incrementOffset = offset; + offset += sizeof(uint32_t); + // Increment + offset = DeviceOGLParameters::SSBOAlignOffset(offset); + denseOffset = offset; + offset += octreeParams->CascadeBaseLevelSize * + octreeParams->CascadeBaseLevelSize * + octreeParams->CascadeBaseLevelSize * sizeof(uint32_t) * 2; + + // Gen OGL Buffers + oglData.Resize(offset, false); + oglData.Memset(0x0u); + + // Lock texture + uint32_t zero = 0; + glGenTextures(1, &lockTexture); + glBindTexture(GL_TEXTURE_3D, lockTexture); + glTexStorage3D(GL_TEXTURE_3D, 1, GL_R32UI, + octreeParams->CascadeBaseLevelSize, + octreeParams->CascadeBaseLevelSize, + octreeParams->CascadeBaseLevelSize); + glClearTexImage(lockTexture, 0, GL_RED_INTEGER, GL_UNSIGNED_INT, &zero); + + // Register only buffer texture is not used on cuda portion + CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&denseResource, + oglData.getGLBuffer(), + cudaGraphicsRegisterFlagsNone)); +} + +GIVoxelPagesFrame::FastVoxelizer::FastVoxelizer(FastVoxelizer&& other) + : oglData(std::move(other.oglData)) + , lockTexture(other.lockTexture) + , denseResource(other.denseResource) + , octreeParams(other.octreeParams) + , incrementOffset(other.incrementOffset) + , denseOffset(other.denseOffset) + , vertVoxelizeFast(std::move(other.vertVoxelizeFast)) + , vertVoxelizeFastSkeletal(std::move(other.vertVoxelizeFastSkeletal)) + , geomVoxelize(std::move(other.geomVoxelize)) + , fragVoxelizeFast(std::move(other.fragVoxelizeFast)) +{ + other.lockTexture = 0; + other.denseResource = nullptr; +} + +GIVoxelPagesFrame::FastVoxelizer& GIVoxelPagesFrame::FastVoxelizer::operator=(FastVoxelizer&& other) +{ + assert(this != &other); + if(denseResource) CUDA_CHECK(cudaGraphicsUnregisterResource(denseResource)); + oglData = std::move(other.oglData); + lockTexture = other.lockTexture; + denseResource = other.denseResource; + octreeParams = other.octreeParams; + incrementOffset = other.incrementOffset; + denseOffset = other.denseOffset; + vertVoxelizeFast = std::move(other.vertVoxelizeFast); + vertVoxelizeFastSkeletal = std::move(other.vertVoxelizeFastSkeletal); + geomVoxelize = std::move(other.geomVoxelize); + fragVoxelizeFast = std::move(other.fragVoxelizeFast); + other.denseResource = nullptr; + other.lockTexture = 0; + return *this; +} + +GIVoxelPagesFrame::FastVoxelizer::~FastVoxelizer() +{ + if(lockTexture) glDeleteTextures(1, &lockTexture); + if(denseResource) CUDA_CHECK(cudaGraphicsUnregisterResource(denseResource)); +} + +double GIVoxelPagesFrame::FastVoxelizer::Voxelize(const std::vector& batches, + const IEVector3& gridCorner, float span, + bool doTiming) +{ + OGLTimer t; + if(doTiming) t.Start(); + + // States + glDisable(GL_DEPTH_TEST); + glDisable(GL_CULL_FACE); + glEnable(GL_MULTISAMPLE); + //glEnable(GL_CONSERVATIVE_RASTERIZATION_NV); + glDepthMask(false); + glStencilMask(0x0000); + glColorMask(false, false, false, false); + + //DEBUG + glBindFramebuffer(GL_FRAMEBUFFER, 0); + glColorMask(true, true, true, true); + + // Viewport (Voxel Dim) + GLsizei totalSize = static_cast(octreeParams->CascadeBaseLevelSize); + glViewport(0, 0, totalSize, totalSize); + + // Volume Size + float volumeSize = static_cast(octreeParams->CascadeBaseLevelSize) * span; + + // Images + glBindImageTexture(I_LOCK, lockTexture, 0, false, 0, GL_READ_WRITE, GL_R32UI); + + // Shaders and shader uniforms + vertVoxelizeFast.Bind(); + glUniform3f(U_VOLUME_SIZE, volumeSize, volumeSize, volumeSize); + 
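+ // Both voxelization vertex shaders (rigid and skeletal) are fed the same
+ // cascade volume description: U_VOLUME_SIZE is the world-space extent
+ // (CascadeBaseLevelSize * span) and U_VOLUME_CORNER is the grid corner
+ // supplied by the caller.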
glUniform3f(U_VOLUME_CORNER, gridCorner[0], gridCorner[1], gridCorner[2]); + vertVoxelizeFastSkeletal.Bind(); + glUniform3f(U_VOLUME_SIZE, volumeSize, volumeSize, volumeSize); + glUniform3f(U_VOLUME_CORNER, gridCorner[0], gridCorner[1], gridCorner[2]); + geomVoxelize.Bind(); + //Shader::Unbind(ShaderType::GEOMETRY); + fragVoxelizeFast.Bind(); + glUniform1f(U_SPAN, span); + glUniform3ui(U_GRID_SIZE, octreeParams->CascadeBaseLevelSize, + octreeParams->CascadeBaseLevelSize, + octreeParams->CascadeBaseLevelSize); + glUniform3f(U_VOLUME_CORNER, gridCorner[0], gridCorner[1], gridCorner[2]); + + // Dense Buffer & GridTransform buffer + oglData.BindAsShaderStorageBuffer(LU_ALLOCATOR, static_cast(incrementOffset), + sizeof(uint32_t)); + oglData.BindAsShaderStorageBuffer(LU_VOXEL_RENDER, static_cast(denseOffset), + octreeParams->CascadeBaseLevelSize * + octreeParams->CascadeBaseLevelSize * + octreeParams->CascadeBaseLevelSize * sizeof(uint64_t)); + + for(MeshBatchI* batch : batches) + { + if(batch->DrawCount() == 0) continue; + + DrawBuffer& drawBuffer = batch->getDrawBuffer(); + VertexBuffer& vertexBuffer = batch->getVertexBuffer(); + + // Batch Binds + vertexBuffer.Bind(); + drawBuffer.BindModelTransform(LU_MTRANSFORM); + drawBuffer.BindAsDrawIndirectBuffer(); + if(batch->MeshType() == MeshBatchType::SKELETAL) + { + MeshBatchSkeletal* batchPtr = static_cast(batch); + batchPtr->getJointTransforms().BindAsShaderStorageBuffer(LU_JOINT_TRANS); + vertVoxelizeFastSkeletal.Bind(); + } + else vertVoxelizeFast.Bind(); + + // For each object + for(uint32_t drawId = 0; drawId < batch->DrawCount(); drawId++) + { + // TODO: do aabb check here + //// Do a AABB check with grid and skip if out of bounds + //const auto& aabbData = drawBuffer.getAABB(drawId); + //IEAxisAlignedBB3 objectAABB(aabbData.min, aabbData.max); + //if(!objectAABB.Intersects(gridAABB)) continue; + + // Bind material and draw + drawBuffer.BindMaterialForDraw(drawId); + drawBuffer.DrawCallSingle(drawId); + } + } + glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT); + + if(doTiming) + { + t.Stop(); + return t.ElapsedMS(); + } + return 0.0; +} + +double GIVoxelPagesFrame::FastVoxelizer::Filter(uint32_t& voxelCount, + uint32_t segmentOffset, + CudaVector& dVoxelPages, + uint32_t cascadeId, + bool doTiming) +{ + CudaTimer t; + if(doTiming) t.Start(); + + // Map to CUDA + uint8_t* oglDataCUDA; size_t size = 0; + CUDA_CHECK(cudaGraphicsMapResources(1, &denseResource)); + CUDA_CHECK(cudaGraphicsResourceGetMappedPointer(reinterpret_cast(&oglDataCUDA), + &size, denseResource)); + uint2* dDenseData = reinterpret_cast(oglDataCUDA + denseOffset); + uint32_t& dAllocator = *reinterpret_cast(oglDataCUDA + incrementOffset); + uint32_t hAllocatedVoxels; + CUDA_CHECK(cudaMemcpy(&hAllocatedVoxels, &dAllocator, sizeof(uint32_t), cudaMemcpyDeviceToHost)); + + + GI_LOG("Voxel Count %d", hAllocatedVoxels); + + // Clear + // Clearing using cuda is faster + CUDA_CHECK(cudaMemset(&dAllocator, 0x0, sizeof(uint32_t))); + + // Filter valid voxels to page system + int totalSize = octreeParams->CascadeBaseLevelSize * + octreeParams->CascadeBaseLevelSize * + octreeParams->CascadeBaseLevelSize; + int gridSize = CudaInit::GenBlockSize(totalSize); + int blockSize = CudaInit::TBP; + + // KC + FilterVoxels<<>>(// Voxel System + dVoxelPages.Data(), + // Dense Data from OGL + dAllocator, + dDenseData, + segmentOffset, + // Limits + cascadeId, + octreeParams->CascadeBaseLevelSize); + CUDA_KERNEL_CHECK(); + + // Assertion for + CUDA_CHECK(cudaMemcpy(&voxelCount, &dAllocator, 
sizeof(uint32_t), cudaMemcpyDeviceToHost)); + //assert(voxelCount == hAllocatedVoxels); + GI_LOG("Voxel Count2 %d", voxelCount); + + if(voxelCount != hAllocatedVoxels) + GI_ERROR_LOG("Mismatch of voxel counts in page system!"); + + // Clear Again (for next ogl usage) + CUDA_CHECK(cudaMemset(&dAllocator, 0x0, sizeof(uint32_t))); + + // Unmap + CUDA_CHECK(cudaGraphicsUnmapResources(1, &denseResource)); + + if(doTiming) + { + t.Stop(); + return t.ElapsedMilliS(); + } + return 0.0; +} + +double GIVoxelPagesFrame::FastVoxelizer::FastVoxelize(uint32_t& usedSegmentCount, + CudaVector& dVoxelPages, + const std::vector& batches, + const std::vector& gridPositions, + bool doTiming) +{ + assert(denseResource); + double voxelTime = 0.0; + double filterTime = 0.0; + + // Initial State + uint32_t usedSegmentOffset = 0; + for(uint32_t i = 0; i < octreeParams->CascadeCount; i++) + { + // Do Voxelization + float span = octreeParams->BaseSpan * static_cast(1 << i); + + // Voxelization Fills Dense Array + voxelTime += Voxelize(batches, gridPositions[i], span, doTiming); + + // Filter uses dense array and count to fake create + uint32_t voxelCount; + filterTime += Filter(voxelCount, + usedSegmentOffset, + dVoxelPages, + i, + doTiming); + + GI_LOG("----------"); + + // Find used segments by this voxelization + uint32_t cascadeUsedSegments = (voxelCount + GIVoxelPages::SegmentSize - 1) / GIVoxelPages::SegmentSize; + usedSegmentOffset += cascadeUsedSegments; + } + + GI_LOG("Voxel Time %f", voxelTime); + GI_LOG("Filer Time %f", filterTime); + GI_LOG("----------"); + + usedSegmentCount = usedSegmentOffset; + return voxelTime + filterTime; +} + +GIVoxelPagesFrame::GIVoxelPagesFrame(const GIVoxelCache& cache, + const std::vector* batches, + const OctreeParameters& octreeParams) + : GIVoxelPages(cache, batches, octreeParams) + , fastVoxelizer(&octreeParams) + , usedSegmentCount(0) +{} + +void GIVoxelPagesFrame::ClearPages() +{ + if(usedSegmentCount == 0) return; + + // Filter valid voxels to page system + int totalSize = usedSegmentCount * GIVoxelPages::SegmentSize; + int gridSize = CudaInit::GenBlockSize(totalSize); + int blockSize = CudaInit::TBP; + + // KC + ::ClearPages<<>>(dPages.Data()); + CUDA_KERNEL_CHECK(); +} + + +void GIVoxelPagesFrame::Update(double& ioTime, + double& transTime, + const GIVoxelCache& caches, + const IEVector3& camPos, + bool doTiming) +{ + std::vector gridPositions; + GenerateGridPositions(gridPositions, camPos); + + // Clear pages from previous frame + ClearPages(); + + transTime = fastVoxelizer.FastVoxelize(usedSegmentCount, dPages, *batches, + gridPositions, + doTiming); + + ioTime = 0.0f; +} + +double GIVoxelPagesFrame::Draw(bool doTiming, + uint32_t cascadeCount, + VoxelRenderType renderType, + const Camera& camera, + const GIVoxelCache& cache) +{ + return pageRenderer.Draw(doTiming, cascadeCount, renderType, camera, cache, *this, false); +} \ No newline at end of file diff --git a/cuda_code/GPUKruskalPlugin.cu b/cuda_code/GPUKruskalPlugin.cu new file mode 100644 index 0000000000000000000000000000000000000000..67db290b782dad77c2fd7676c86fa54880842f76 --- /dev/null +++ b/cuda_code/GPUKruskalPlugin.cu @@ -0,0 +1,228 @@ +#include "GPUKruskalPlugin.h" + +void GPUKruskalPlugin::convert_array_to_three_way(unsigned short *original_array, unsigned short* vert_out, + unsigned short* weights, unsigned short* vert_in, int numVert){ + + unsigned short i,j; + int pos = 0; + for(i = 0;i < numVert; i++){ + for(j = i; j < numVert; j++){ + *(vert_out + pos) = i; + *(vert_in + pos) = j; + *(weights + 
pos) = *(original_array + i * numVert + j); + + pos++; + } + } + + printf("pos: %d\n",pos); +} + +void GPUKruskalPlugin::input(std::string file) { + inputfile = file; + std::ifstream ifile(inputfile.c_str(), std::ios::in); + while (!ifile.eof()) { + std::string key, value; + ifile >> key; + ifile >> value; + parameters[key] = value; + } + std::string matrixfile = std::string(PluginManager::prefix())+"/"+parameters["matrix"]; + numVert = atoi(parameters["N"].c_str()); + /***************************************************************************************/ + v_out = (unsigned short*)calloc((numVert * (numVert - 1))/2 + numVert, sizeof(unsigned short)); + v_in = (unsigned short*)calloc((numVert * (numVert - 1))/2 + numVert, sizeof(unsigned short)); + weights = (unsigned short*)calloc((numVert * (numVert - 1))/2 + numVert, sizeof(unsigned short)); + + gpuResult = (unsigned short*)calloc(numVert * numVert, sizeof(unsigned short)); //creating a graph size NxN with zero connection between vertices + + theGraph = (unsigned short*) malloc(numVert*numVert*sizeof(unsigned short)); + std::ifstream myinput(matrixfile.c_str(), std::ios::in); + int i; + int M = numVert*numVert; + for (i = 0; i < M; ++i) { + short k; + myinput >> k; + theGraph[i] = k; +} +convert_array_to_three_way(theGraph,v_out, weights, v_in,numVert); +} + +void GPUKruskalPlugin::run() { + int size = numVert * numVert; + char* unionMatrix; + + char* checkArray; //it is where each thread reports after checking the union-find-matrix + + size_t pitch; + + unsigned short* d_weights_original; //this is where the original graph is gonna be copied in the device + unsigned short* d_result; //where the resulting spanning tree is gonna be placed in the device + + int* d_weights_copy; + + int* d_order; + + int* vertList; //it is gonna be only size 2. 
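+ // Rough flow of the GPU Kruskal pass below (summarized from the kernel calls,
+ // not an authoritative spec):
+ //   1. copy the weight matrix to the device and sort the flattened weights
+ //      with thrust::sort_by_key, carrying an index array (d_order) so the
+ //      original edge positions can be recovered;
+ //   2. walk the edges in ascending weight order; getValue() decodes the
+ //      current edge's two endpoints into vertList (hence "only size 2" above);
+ //   3. checkSet()/arrayCheck() consult the union-find matrix to test whether
+ //      the endpoints are already connected; if not (devFound == 0), the edge
+ //      is accepted, the running cost is accumulated and the union matrix is
+ //      OR-ed/updated;
+ //   4. stop once numVert - 1 edges have been accepted.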
+ + /**************************************************************************************** + * Alocating memory in the device + *****************************************************************************************/ + + cudaMalloc(&d_weights_original, size * sizeof(unsigned short)); //graph in the device + cudaMalloc(&d_weights_copy, size * sizeof(int)); //Array that is gonna be used in the sort + + cudaMalloc(&d_order, size * sizeof(int));//would store a sorted array of number to keep track of the indexes to move + + cudaMalloc(&vertList,2 * sizeof(int)); + + cudaMallocPitch(&unionMatrix, &pitch, + (numVert) * sizeof(char), numVert); //allocating memory for the union-find-matrix + + cudaMalloc(&checkArray, (numVert)*sizeof(char)); //allocating memory for the checkArray + + + cudaMemcpy(d_weights_original, theGraph, size * sizeof(unsigned short), cudaMemcpyHostToDevice); //Transfering the 1D array from the CPU's DRAM into the Device's DRAM + + cudaCheckErrors("cudaMalloc fail"); + /**************************************************************************************** + * End allocating Memory in the device + *****************************************************************************************/ + int numThreads = 1024; + int numBlocks = numVert / numThreads + 1; + int numBlocks_d = (numVert*numVert) / numThreads + 1; + + dim3 threadsPerBlock(32,32); + dim3 numBlocks2D(numVert/threadsPerBlock.x + 1,numVert/threadsPerBlock.y + 1); + + fillOrder<<>>(d_order,size,numVert); + cudaCheckErrors("filling arrays fail"); + + copyingGraphs<<>>(d_weights_original, d_weights_copy, size); + cudaCheckErrors("Copying arrays fail"); + + /**************************************************************************************** + * Optimizing space + *****************************************************************************************/ + cudaFree(d_weights_original); + + cudaMalloc(&d_result, size * sizeof(unsigned short)); //Resulting graph + /**************************************************************************************** + * Sorting Section + *****************************************************************************************/ + + thrust::sort_by_key(thrust::device_ptr(d_weights_copy) , thrust::device_ptr(d_weights_copy + size), thrust::device_ptr (d_order)); + cudaCheckErrors("Sort fail"); + + /**************************************************************************************** + * End Sorting + *****************************************************************************************/ + typeof(devFound) found; + int totalCost; + + resetResult<<>>(d_result,numVert*numVert); //reset resulting graph + + resetArray<<>>(checkArray,numVert); //resetting the checking array to all 0s + + resetGlobalFound<<<1,1>>>(); //resseting the global found variable to 0 + cudaCheckErrors("Reset Found fail"); + + initializeUnionMatrix<<>>(unionMatrix,pitch,numVert); //initializing union-find-matrix + cudaCheckErrors("Union find initialization fail"); + + int j; + int counter = 0; + + for(j = 0;(j < size) && (counter < numVert - 1);j++){ //if we got the min spaming tree + getValue<<<1,1>>>(d_order, vertList,j,numVert); + + //checking if those vertices are not in any set + checkSet<<>>(unionMatrix,checkArray,pitch,numVert,vertList); + + /*************************************************************************************** + * Inserting the node after it was checked that it didnt exist + ****************************************************************************************/ + + arrayCheck<<>>( 
checkArray,numVert); + + cudaMemcpyFromSymbol(&found, devFound, sizeof(found), 0, cudaMemcpyDeviceToHost); + + if(found == 0){ + + //insertResultingEdge<<>>(d_edges,d_resultEdges,counter,j); + addToMinWeight<<<1,1>>>(d_weights_copy,j); + + counter++; + + //updating unionMatrix + setValue<<<1,1>>>(unionMatrix,vertList, pitch); + + //Or both inserted vertices's columns + orCol<<>>(unionMatrix,pitch,numVert,vertList); + + //Freaki fast union find + update<<>>(unionMatrix,pitch,numVert,vertList); + + //inserting edge into the resulting graph + insertToResult<<<1,2>>>(d_weights_copy,d_result,vertList,numVert,j); + + } + + resetArray<<>>(checkArray,numVert); //resetting the checking array to all 0s + resetGlobalFound<<<1,1>>>(); //resseting the global found variable to 0 + + + } + + + cudaMemcpyFromSymbol(&totalCost, dev_totalCost, sizeof(totalCost), 0, cudaMemcpyDeviceToHost); + cudaMemcpy(gpuResult,d_result, size * sizeof(unsigned short), cudaMemcpyDeviceToHost); + + printf("\n\tMinimum cost = %d\n",totalCost); + + + cudaFree(vertList); + cudaFree(d_weights_copy); + cudaFree(d_result); + cudaFree(d_order); + cudaFree(checkArray); + cudaFree(unionMatrix); + +} + +void GPUKruskalPlugin::output(std::string file) { + printf("\n"); + FILE* fileToWrite = fopen(file.c_str(),"w+"); + + if(fileToWrite){ + printf("File % s created!\n",file.c_str()); + }else{ + fprintf(stderr,"Error creating %s file\n",file.c_str()); + } + + printf("Writing into file...\n"); + + int i,j, counter = 0; + unsigned short value; + + for(i = 0; i < numVert;i++){ + for(j = i + 1; j < numVert;j++){ + value = *(gpuResult + i * numVert + j); + if(value != 0){ + fprintf(fileToWrite,"edge (%d,%d) =%d\n",i,j,value); + counter++; + } + + } + } + + + + if(fileToWrite){ + fprintf(fileToWrite,"Total amount of edges inserted: %d\n",counter); + printf("File %s written successfully!\n",file.c_str()); + fclose(fileToWrite); + } +} +PluginProxy GPUKruskalPluginProxy = PluginProxy("GPUKruskal", PluginManager::getInstance()); + diff --git a/cuda_code/GPUMatrix_22.cu b/cuda_code/GPUMatrix_22.cu new file mode 100644 index 0000000000000000000000000000000000000000..8c9b0cf40a912363f2d230ff03c1adcbaa17ce97 --- /dev/null +++ b/cuda_code/GPUMatrix_22.cu @@ -0,0 +1,4968 @@ +// +// Copyright (c) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE.md file in the project root for full license information. 
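+// (File overview: the portion below provides CUDA stream handling, the
+// SyncGuard RAII synchronization helper, the TracingGPUMemoryAllocator and the
+// element-wise helper macros used by the GPUMatrix implementation.)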
+// + +#include "stdafx.h" +#include "Basics.h" +#include "BestGpu.h" + +#ifndef CPUONLY + +#include "GPUMatrix.h" +#include "GPUMatrixCUDAKernels.cuh" +//#include "GPUSparseMatrix.h" +#include "GPUTensor.h" +#include "CommonMatrix.h" +#define TENSOR_OPS_DECL __device__ __host__ +#include "TensorOps.h" +#include "device_launch_parameters.h" +#include +#include +#include +#include +#include "cublas_v2.h" +#include +#include +#include "CntkBatchNormalization.cuh" +#include "Convolution.cuh" +#include "CuDnnRNN.h" + +#pragma comment(lib, "cudart.lib") // instruct linker to reference these libs +#pragma comment(lib, "cublas.lib") +#pragma comment(lib, "cusparse.lib") +#pragma comment(lib, "curand.lib") + +#pragma warning(disable : 4267) // conversion from 'size_t' to 'unsigned int'; happens in CUDA <<>> syntax if a and b are size_t +#pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this +#pragma warning(disable : 4702) // unreachable code; triggered for unknown reasons + +#define DEFAULT_THREAD_PER_DIM 16 + +#define UNCONST(t, c, uc) GPUMatrix& uc = const_cast&>(c); + +#ifdef _WIN32 +// thread local storage to access the current stream, initialize to default stream +__declspec(thread) +#endif + cudaStream_t t_stream = cudaStreamDefault; + +#define DEFAULT_THREAD_PER_DIM 16 + +extern int _ConvertSMVer2Cores(int major, int minor); // forward declaration + +// SetStream - set the stream that will be used by the GPU routines +void MATH_API SetStream(cudaStream_t stream) +{ + t_stream = stream; +} + +// GetStream - get the stream that will be used by the GPU routines +cudaStream_t MATH_API GetStream() +{ + return t_stream; +} + +// Helper macro patterns for elementwise methods +#define DEF_ELEMWISE_INPLACE_FUNC(f) \ + template \ + GPUMatrix& GPUMatrix::Inplace##f() \ + { \ + performElementWiseFunction(ElementWiseOperator::op##f, Data()); \ + return *this; \ + } +#define DEF_ELEMWISE_ASSIGN_FUNC(f) \ + template \ + GPUMatrix& GPUMatrix::Assign##f##Of(const GPUMatrix& a) \ + { \ + if (a.IsEmpty()) \ + LogicError("Assign##f##Of: Matrix a is empty."); \ + if (this != &a) \ + RequireSize(a.GetNumRows(), a.GetNumCols()); \ + performElementWiseFunction(ElementWiseOperator::op##f, a.Data()); \ + return *this; \ + } + +template <> +const char* CudaErrString(cudaError_t x) +{ + cudaDeviceSynchronize(); + return cudaGetErrorString(x); +} +template <> +const char* CudaErrString(cublasStatus_t e) +{ + cudaDeviceSynchronize(); + switch (e) + { + case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; + case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; + case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; + case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; + case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; + case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; + case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; + case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; + case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; + case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; + default: return "(look for CUBLAS_STATUS_xxx in cublas_api.h)"; + } +} +template <> +const char* CudaErrString(curandStatus) +{ + cudaDeviceSynchronize(); + return "(see curand.h & look for curandStatus or CURAND_STATUS_xxx)"; +} + +namespace Microsoft { namespace MSR { namespace 
CNTK { + +/*static*/ std::vector GridDim::s_cachedDeviceProps; +/*static*/ std::once_flag GridDim::s_cachedDevicePropsInitFlag; + +/*static*/ bool SyncGuard::s_isSyncEnabled = false; + +/*static*/ void SyncGuard::EnableSync() +{ + s_isSyncEnabled = true; +} + +/*static*/ bool SyncGuard::IsSyncEnabled() +{ + return s_isSyncEnabled; +} + +SyncGuard::SyncGuard(bool forceSync /*= false*/) + : m_forceSync(forceSync) +{ + m_done = nullptr; + if (m_forceSync || s_isSyncEnabled) + { + CUDA_CALL(cudaGetLastError()); + CUDA_CALL(cudaEventCreate(&m_done)); + } +} + +SyncGuard::~SyncGuard() +{ + if (m_forceSync || s_isSyncEnabled) + { + // The regular use of this destructor is to synchronize the GPU, but also + // to check for errors. So this destructor is where CUDA errors would be thrown. + // If this destructor runs during stack unwinding, then a different error has + // already happened that should be reported; so we only clean up the resource. + if (std::uncaught_exception()) + cudaEventDestroy(m_done); + else + { + // failures in a prior launch might be reported here + CUDA_CALL(cudaEventRecord(m_done)); + CUDA_CALL(cudaEventSynchronize(m_done)); + CUDA_CALL(cudaEventDestroy(m_done)); + } + } +} + +template +AllocatedElemType* TracingGPUMemoryAllocator::Allocate(int deviceId, size_t numRows, size_t numCols) +{ + if (IsTraceEnabled()) + { + auto freeAndTotalMemory = GetFreeAndTotalMemoryInMBs(deviceId); + fprintf(stderr, "Allocating Matrix<%s> (Rows = %d, Cols = %d) buffer on DeviceId = %d; GPU Memory Free = %d MB of %d MB\n", typeid(AllocatedElemType).name(), (int)numRows, (int)numCols, (int)deviceId, (int)freeAndTotalMemory.first, (int)freeAndTotalMemory.second); + Microsoft::MSR::CNTK::DebugUtil::PrintCallStack(); + } + + AllocatedElemType* deviceBufferPtr = AllocateNoTrace(deviceId, numRows * numCols); + + if (IsTraceEnabled()) + { + fprintf(stderr, "Allocated DeviceData = %p\n", (void*) deviceBufferPtr); + } + + return deviceBufferPtr; +} + +template +AllocatedElemType* TracingGPUMemoryAllocator::Allocate(int deviceId, size_t numElements) +{ + if (IsTraceEnabled()) + { + auto freeAndTotalMemory = GetFreeAndTotalMemoryInMBs(deviceId); + fprintf(stderr, "Allocating array<%s> (NumElements = %d) on DeviceId = %d; GPU Memory Free = %d MB of %d MB\n", typeid(AllocatedElemType).name(), (int)numElements, (int)deviceId, (int)freeAndTotalMemory.first, (int)freeAndTotalMemory.second); + Microsoft::MSR::CNTK::DebugUtil::PrintCallStack(); + } + + AllocatedElemType* deviceBufferPtr = AllocateNoTrace(deviceId, numElements); + + if (IsTraceEnabled()) + { + fprintf(stderr, "Allocated DeviceData = %p\n", (void*)deviceBufferPtr); + } + + return deviceBufferPtr; +} + +template +void TracingGPUMemoryAllocator::Free(int deviceId, AllocatedElemType* bufferPtr, bool ignoreCUDARetCode /*= false*/) +{ + PrepareDevice(deviceId); + if (ignoreCUDARetCode) + cudaFree((void*) bufferPtr); + else + CUDA_CALL(cudaFree((void*) bufferPtr)); + + if (IsTraceEnabled()) + { + auto freeAndTotalMemory = GetFreeAndTotalMemoryInMBs(deviceId); + fprintf(stderr, "Freed buffer<%s> DeviceData = %p on DeviceId = %d; GPU Memory Free = %d MB of %d MB\n", typeid(AllocatedElemType).name(), (void*) bufferPtr, (int) deviceId, (int) freeAndTotalMemory.first, (int) freeAndTotalMemory.second); + Microsoft::MSR::CNTK::DebugUtil::PrintCallStack(); + } +} + +// Computes the smallest multiple of k greater or equal to n +static inline size_t asMultipleOf(size_t n, size_t k) { return n + n % k; } + +template +AllocatedElemType* 
TracingGPUMemoryAllocator::AllocateNoTrace(int deviceId, size_t numElements) +{ + AllocatedElemType* deviceBufferPtr; + + PrepareDevice(deviceId); + // In case numElements is odd we allocate a buffer with one more element. The reason is + // we might call curandGenerateNormal (e.g. for Gaussian noise injection) which would fail + // if the number of elements it needs to generate is odd. + CUDA_CALL(cudaMalloc((void**) &deviceBufferPtr, sizeof(AllocatedElemType) * asMultipleOf(numElements, 2))); + + return deviceBufferPtr; +} + +std::pair TracingGPUMemoryAllocator::GetFreeAndTotalMemoryInMBs(int deviceId) +{ + PrepareDevice(deviceId); + + size_t free, total; + CUDA_CALL(cudaMemGetInfo(&free, &total)); + + size_t numBytesPerMB = 1 << 20; + return {free / numBytesPerMB, total / numBytesPerMB}; +} + +// PrepareDevice - Setup the correct cuda context for an operation +// deviceId - the device on which the operation will take place +void PrepareDevice(DEVICEID_TYPE deviceId) +{ + THREAD_LOCAL static DEVICEID_TYPE currentDevice = DEVICEID_NOTYETDETERMINED; + // and if we last set the device to be this device we are good + if (deviceId == currentDevice) + return; + CUDA_CALL(cudaSetDevice(deviceId)); + currentDevice = deviceId; +} + +#pragma region DeviceBoundNumber class + +template +DeviceBoundNumber::DeviceBoundNumber(const DeviceBoundNumber& /*deepCopy*/) +{ + NOT_IMPLEMENTED; +} + +template +DeviceBoundNumber::DeviceBoundNumber(DeviceBoundNumber&& shallowCopy) +{ + ShallowCopyFrom(shallowCopy.m_data, shallowCopy.m_computeDevice); + shallowCopy.m_data = NULL; +} + +template +void DeviceBoundNumber::ShallowCopyFrom(ElemType* newVal, int newValsDevceId) +{ + m_computeDevice = newValsDevceId; + m_data = newVal; +} + +template +DeviceBoundNumber::~DeviceBoundNumber() +{ + if (m_data != NULL) + { + if (m_computeDevice < 0) + { + delete m_data; + m_data = NULL; + } + else + { + TracingGPUMemoryAllocator::Free(m_computeDevice, m_data); + } + } +} + +#pragma endregion DeviceBoundNumber class + +#pragma region Helper functions +template +cublasHandle_t _initCUBLAS(int devId) +{ + PrepareDevice((DEVICEID_TYPE) devId); + cublasHandle_t cuHandle; + CUBLAS_CALL(cublasCreate(&cuHandle)); + return cuHandle; +} + +template +void GPUMatrix::SetDevice(DEVICEID_TYPE deviceId) +{ + assert(deviceId >= 0); + CUDA_CALL(cudaSetDevice(deviceId)); +} + +// PrepareDevice - Setup the correct cuda context for an operation +// deviceId - the device on which the operation will take place +// defaults to -1, which means use matrices current device +template +DEVICEID_TYPE GPUMatrix::PrepareDevice(DEVICEID_TYPE deviceId /*=-1*/) const +{ + // if default value use current compute device + DEVICEID_TYPE newId = deviceId >= 0 ? 
deviceId : GetComputeDeviceId(); + + Microsoft::MSR::CNTK::PrepareDevice(newId); + return newId; +} + +template +ElemType* GPUMatrix::CopyToArray() const +{ + size_t numElements = GetNumElements(); + if (numElements != 0) + { + PrepareDevice(); + ElemType* pArray = new ElemType[numElements]; + CUDA_CALL(cudaMemcpy(pArray, Data(), sizeof(ElemType) * m_numRows * m_numCols, cudaMemcpyDeviceToHost)); + return pArray; + } + else + { + return NULL; + } +} + +//memory will be allocated by the callee if not enough but need to be deleted by the caller after it's done +//return number of elements copied +template +size_t GPUMatrix::CopyToArray(ElemType*& arrayCopyTo, size_t& currentArraySize) const +{ + size_t numElements = GetNumElements(); + + if (numElements > currentArraySize) + { + delete arrayCopyTo; + arrayCopyTo = new ElemType[numElements]; + currentArraySize = numElements; + } + + if (numElements != 0) + { + PrepareDevice(); + CUDA_CALL(cudaMemcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements, cudaMemcpyDeviceToHost)); + } + + return numElements; +} + +template +void GPUMatrix::CopySection(size_t numRows, size_t numCols, ElemType* dst, size_t colStride) const +{ + CUBLAS_CALL(cublasGetMatrix((int) numRows, (int) numCols, sizeof(ElemType), + Data(), (int) GetNumRows(), dst, (int) colStride)); +} +template +void GPUMatrix::ChangeDeviceTo(DEVICEID_TYPE to_id) +{ + if (to_id == CPUDEVICE) + LogicError("to_id must be valid GPU"); + if (GetComputeDeviceId() == to_id) + return; + + ElemType* d_dst = TracingGPUMemoryAllocator::Allocate(to_id, m_numRows, m_numCols); + + SetSizeAllocated(m_numRows * m_numCols); + + // check to make sure we have something to copy (on init we often have zero sized allocations) + if (GetSizeAllocated() > 0) + { +#if 0 // see the backlog item # 1220 + // IOMMU DMAR needs to be disabled for CUDA P2P, otherwise it will silently hang. + // Unfortunately, cudaDeviceCanAccessPeer returns true irrespective of the IOMMU settings. + // More details: https://bugzilla.kernel.org/show_bug.cgi?id=188271 + // http://docs.nvidia.com/cuda/gpudirect-rdma/#supported-systems + // TODO: enable UVA p2p access once this is fixed. 
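+    // Summary of the two copy strategies in this function (the rationale here is inferred,
+    // not stated in the original comments): the disabled branch that follows would move the
+    // buffer directly between devices with cudaMemcpyPeer once cudaDeviceCanAccessPeer /
+    // cudaDeviceEnablePeerAccess succeed, while the active fallback after the #endif stages
+    // the data through page-locked host memory (cudaMallocHost), keeping both halves of the
+    // device->host->device round trip on the pinned-memory DMA path rather than going through
+    // pageable memory.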
+ + // first try peer access + int canAccessPeer = false; + CUDA_CALL(cudaDeviceCanAccessPeer(&canAccessPeer, to_id, GetComputeDeviceId())); + if (canAccessPeer) + { + cudaError_t cudaStatus = cudaDeviceEnablePeerAccess(GetComputeDeviceId(), 0); + if (cudaStatus != cudaErrorPeerAccessAlreadyEnabled) + { + CUDA_CALL(cudaStatus); + } + CUDA_CALL(cudaMemcpyPeer(d_dst, to_id, Data(), GetComputeDeviceId(), sizeof(ElemType) * m_numRows * m_numCols)); + } + else +#endif + { + // peer access didn't work, just copy normal + // make this more efficient by keeping some buffers available for each copy + ElemType* h_dst = NULL; + PrepareDevice(); + CUDA_CALL(cudaMallocHost((void**) &h_dst, sizeof(ElemType) * m_numRows * m_numCols)); + CUDA_CALL(cudaMemcpy(h_dst, Data(), sizeof(ElemType) * m_numRows * m_numCols, cudaMemcpyDeviceToHost)); + PrepareDevice((DEVICEID_TYPE) to_id); + CUDA_CALL(cudaMemcpy(d_dst, h_dst, sizeof(ElemType) * m_numRows * m_numCols, cudaMemcpyHostToDevice)); + CUDA_CALL(cudaFreeHost(h_dst)); + } + } + + TracingGPUMemoryAllocator::Free(GetComputeDeviceId(), Buffer()); + SetBuffer(d_dst, m_numRows * m_numCols * sizeof(ElemType)); + + PrepareDevice((DEVICEID_TYPE) to_id); + SetComputeDeviceId(to_id); +} + +template +void GPUMatrix::performElementWiseFunction(ElementWiseOperator kind, const ElemType* src) +{ + PrepareDevice(); + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + switch (kind) + { + case ElementWiseOperator::opSigmoid: + return _elementWiseSigmoidOnCuda<<>>(src, Data(), N); + case ElementWiseOperator::opTanh: + return _elementWiseTanhOnCuda<<>>(src, Data(), N); + case ElementWiseOperator::opSqrt: + return _elementWiseSqrtOnCuda<<>>(src, Data(), N); + case ElementWiseOperator::opExp: + return _elementWiseExpOnCuda<<>>(src, Data(), N); + case ElementWiseOperator::opLog: + return _elementWiseLogOnCuda<<>>(src, Data(), N); + case ElementWiseOperator::opAbs: + return _elementWiseAbsOnCuda<<>>(src, Data(), N); + case ElementWiseOperator::opLinearRectifierDerivative: + return _elementWiseLinRectDerivativeOnCuda<<>>(src, Data(), N); + case ElementWiseOperator::opCosine: + return _elementWiseCosineOnCuda<<>>(src, Data(), N); + case ElementWiseOperator::opNegativeSine: + return _elementWiseNegativeSineOnCuda<<>>(src, Data(), N); + case ElementWiseOperator::opSigmoidDerivative: + return _elementWiseSigmoidDerivativeOnCuda<<>>(src, Data(), N); + default: LogicError("performElementWiseFunction: unexpected op code %d", (int)kind); + } +} + +#pragma endregion Helper functions + +#pragma region Constructors and Destructor + +// should only be used by constructors +template +void GPUMatrix::ZeroInit(int deviceId) +{ + BaseMatrix::ZeroInit(); + SetComputeDeviceId(deviceId); +} + +template +GPUMatrix::GPUMatrix(int deviceId) +{ + ZeroInit(deviceId); +}; + +template +GPUMatrix::GPUMatrix(const size_t numRows, const size_t numCols, int deviceId) +{ + ZeroInit(deviceId); + m_numRows = numRows; + m_numCols = numCols; + SetSizeAllocated(GetNumElements()); + + if (GetNumElements() != 0) + { + SetBuffer(TracingGPUMemoryAllocator::Allocate(GetComputeDeviceId(), m_numRows, m_numCols), GetNumElements() * sizeof(ElemType)); + CUDA_CALL(cudaMemset(Buffer(), 0, sizeof(ElemType) * GetSizeAllocated())); + } +}; + +template +GPUMatrix::GPUMatrix(const size_t numRows, const size_t numCols, int deviceId, ElemType* pArray, const size_t matrixFlags) +{ + ZeroInit(deviceId); + SetValue(numRows, numCols, deviceId, 
pArray, matrixFlags); +}; + +template +GPUMatrix::GPUMatrix(const GPUMatrix& deepCopyFrom) +{ + ZeroInit(); + SetValue(deepCopyFrom); +} + +template +GPUMatrix::GPUMatrix(GPUMatrix&& moveFrom) +{ + ShallowCopyFrom(moveFrom); + moveFrom.ZeroValues(); +} + +//assignment operator, deep copy +template +GPUMatrix& GPUMatrix::operator=(const GPUMatrix& deepCopyFrom) +{ + if (this != &deepCopyFrom) + { + SetValue(deepCopyFrom); + } + return *this; +} + +//move assignment operator, shallow copy +template +GPUMatrix& GPUMatrix::operator=(GPUMatrix&& moveFrom) +{ + if (this != &moveFrom) + { + ShallowCopyFrom(moveFrom); + moveFrom.ZeroValues(); + } + return *this; +} + +template +GPUMatrix::~GPUMatrix(void) +{ +} + +// TODO: This should be in the storage object. +// Clear will clear your storage, zeroinit just drops it on the ground. +template +void GPUMatrix::Clear() +{ + VerifyWritable(__FUNCTION__); + //if (OwnBuffer() && m_pArray != NULL) + if (m_sob != nullptr) + { + if (GetComputeDeviceId()>= 0) + { + // BUG: We do not check the CUDA return code for cudaFree here since this may get called + // during processExit when cudaFree will fail. The destruction of CUDA objects during + // process exit must be avoided + ReleaseStorageMemory(); + } + } + + ZeroInit(GetComputeDeviceId()); +} +#pragma endregion Constructors and Destructor + +template +std::unique_ptr> GPUMatrix::GetOrCreateWorkspace() const +{ + // REVIEW alexeyk: not thread-safe, fine for now. + if (m_workspace == nullptr) + m_workspace = std::make_unique>>>(); + assert(m_workspace != nullptr); + auto deviceId = GetComputeDeviceId(); + return m_workspace->pop_or_create([deviceId]() + { + return std::make_unique>(deviceId); + }); +} + +template +void GPUMatrix::ReleaseWorkspace(std::unique_ptr> src) const +{ + assert(m_workspace != nullptr); + m_workspace->push(std::move(src)); +} + +#pragma region Basic Operators +template +GPUMatrix GPUMatrix::ColumnSlice(size_t startColumn, size_t numCols) const +{ + if (startColumn + numCols > GetNumCols()) + InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) GetNumCols()); + + GPUMatrix slice(GetComputeDeviceId()); + + slice.ShallowCopyFrom(*this); + slice.m_numCols = numCols; + slice.m_sliceViewOffset = m_sliceViewOffset + startColumn * GetNumRows(); + + return slice; +} + +template +GPUMatrix& GPUMatrix::AssignColumnSlice(const GPUMatrix& fromMatrix, size_t startColumn, size_t numCols) +{ + if (numCols == 0) + LogicError("The slice cannot have 0 columns."); + + if (startColumn + numCols > fromMatrix.GetNumCols()) + InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) fromMatrix.GetNumCols()); + + Clear(); + + ShallowCopyFrom(fromMatrix); + m_numCols = numCols; + m_sliceViewOffset = fromMatrix.m_sliceViewOffset + startColumn * GetNumRows(); + + return *this; +} + +template +GPUMatrix& GPUMatrix::SetColumnSlice(const GPUMatrix& fromMatrix, size_t startColumn, size_t numCols) +{ + if (startColumn + numCols > GetNumCols()) + LogicError("The slice is out of range of the destination matrix."); + if (numCols > fromMatrix.GetNumCols()) + InvalidArgument("The slice (%d) is out of range of the source matrix (%d).", (int) numCols, (int) fromMatrix.GetNumCols()); + if (m_numRows != fromMatrix.m_numRows) + LogicError("The number of rows in source and destination matrices do not match"); + + if (m_numRows * numCols > 0) // TODO: remove if unnecessary + CUDA_CALL(cudaMemcpy(Data() 
+ LocateColumn(startColumn), fromMatrix.Data(), sizeof(ElemType) * m_numRows * numCols, cudaMemcpyDeviceToDevice)); + + return *this; +} + +template +void GPUMatrix::CopyColumnsStrided(const GPUMatrix& fromMatrix, size_t numCols, size_t srcNumColsStride, size_t destNumColsStride) +{ + if ((((numCols - 1) * srcNumColsStride) + 1) > fromMatrix.m_numCols) + LogicError("The numCols to copy and srcNumColsStride specified is out of range of the source matrix."); + if ((((numCols - 1) * destNumColsStride) + 1) > m_numCols) + LogicError("The numCols to copy and srcNumColsStride specified is out of range of the destination matrix."); + if (m_numRows != fromMatrix.m_numRows) + LogicError("The number of rows in source and destination matrices do not match"); + + if ((m_numRows * numCols) > 0) + { + // Launch a kernel to do the strided copy + CUDA_LONG N = (CUDA_LONG)(m_numRows * numCols); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _copyColumnsStrided<<>>(Data(), fromMatrix.Data(), N, (CUDA_LONG) m_numRows, (CUDA_LONG) destNumColsStride, (CUDA_LONG) srcNumColsStride); + } +} + +//for each column of a, we assign all rows of a to this starting from startIndex +template +GPUMatrix& GPUMatrix::AssignToRowSliceValuesOf(const GPUMatrix& a, const size_t startIndex, const size_t numRows) +{ + if (a.IsEmpty()) + LogicError("AddToRowSliceValuesOf: input matrix a is empty."); + + if (a.GetNumRows() != numRows) + LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows."); + + if (startIndex + numRows > GetNumRows()) + LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows()."); + + if (a.GetNumCols() != GetNumCols()) + LogicError("AddToRowSliceValuesOf: columns does not match."); + + CUDA_LONG N = (CUDA_LONG) a.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _assignToRowSliceValuesOf<<>>(Data(), a.Data(), N, (CUDA_LONG) startIndex, (CUDA_LONG) GetNumRows(), (CUDA_LONG) a.GetNumRows()); + return *this; +} + +//for each column of a, we assign numRows starting from startIndex to this +template +GPUMatrix& GPUMatrix::AssignRowSliceValuesOf(const GPUMatrix& a, const size_t startIndex, const size_t numRows) +{ + if (a.IsEmpty()) + LogicError("AssignRowSliceValuesOf: input matrix a is empty."); + + if (startIndex + numRows > a.GetNumRows()) + LogicError("AssignRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows()."); + + RequireSize(numRows, a.GetNumCols()); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _assignRowSliceValuesOf<<>>(Data(), a.Data(), N, (CUDA_LONG) startIndex, (CUDA_LONG) numRows, (CUDA_LONG) a.GetNumRows()); + return *this; +} + +//for the row slice of this starting from startIndex we add a to it. 
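+// A minimal usage sketch of the row-slice helpers defined above and below (disabled; the
+// float instantiation, the literal sizes, and deviceId are illustrative assumptions only):
+#if 0
+static void RowSliceUsageSketch(int deviceId) // deviceId assumed to name a valid GPU
+{
+    GPUMatrix<float> big(6, 4, deviceId);     // 6 rows x 4 columns
+    GPUMatrix<float> band(2, 4, deviceId);    // receives a 2-row band
+    band.AssignRowSliceValuesOf(big, 2, 2);   // band <- rows 2..3 of every column of big
+    big.AssignToRowSliceValuesOf(band, 2, 2); // write band back into rows 2..3 of big
+    big.AddToRowSliceValuesOf(band, 2, 2);    // the variant below adds instead of assigning
+}
+#endif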
+template +GPUMatrix& GPUMatrix::AddToRowSliceValuesOf(const GPUMatrix& a, const size_t startIndex, const size_t numRows) +{ + if (a.IsEmpty()) + LogicError("AddToRowSliceValuesOf: input matrix a is empty."); + + if (a.GetNumRows() != numRows) + LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows."); + + if (startIndex + numRows > GetNumRows()) + LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows()."); + + if (a.GetNumCols() != GetNumCols()) + LogicError("AddToRowSliceValuesOf: columns does not match."); + + CUDA_LONG N = (CUDA_LONG) a.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _addToRowSliceValuesOf<<>>(Data(), a.Data(), N, (CUDA_LONG) startIndex, (CUDA_LONG) GetNumRows(), (CUDA_LONG) a.GetNumRows()); + return *this; +} + +//for each column of this, we add row slice of a starting from startIndex +template +GPUMatrix& GPUMatrix::AddWithRowSliceValuesOf(const GPUMatrix& a, const size_t startIndex, const size_t numRows) +{ + if (a.IsEmpty()) + LogicError("AddWithRowSliceValuesOf: input matrix a is empty."); + + if (GetNumRows() != numRows) + LogicError("AddWithRowSliceValuesOf: GetNumRows() != numRows."); + + if (startIndex + numRows > a.GetNumRows()) + LogicError("AddWithRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows()."); + + if (a.GetNumCols() != GetNumCols()) + LogicError("AddWithRowSliceValuesOf: columns does not match."); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _addWithRowSliceValuesOf<<>>(Data(), a.Data(), N, (CUDA_LONG) startIndex, (CUDA_LONG) GetNumRows(), (CUDA_LONG) a.GetNumRows()); + return *this; +} + +template +GPUMatrix GPUMatrix::Diagonal() const +{ + size_t m = GetNumRows(); + size_t n = GetNumCols(); + if (m != n) + LogicError("Diagonal can be called only for square matrix. (rows=%d, cols=%d)", (int) m, (int) n); + + GPUMatrix diag(1, n, GetComputeDeviceId()); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _assignToDiagonalValuesOf<<>>(diag.Data(), Data(), N, (CUDA_LONG) n); + return diag; +} + +// c = c - 1.0 for a specific position +template +void GPUMatrix::MinusOneAt(GPUMatrix& c, const size_t position) +{ + assert(position < c.GetNumElements()); + + CUDA_LONG n = (CUDA_LONG) c.GetNumElements(); + CUDA_LONG p = (CUDA_LONG) position; + + int blocksPerGrid = (int) ceil(1.0 * n / GridDim::maxThreadsPerBlock); + // BUGBUG: PrepareDevice() missing? + SyncGuard syncGuard; + _minusOneAt<<>>(c.Data(), p, n); +} + +template +GPUMatrix& GPUMatrix::AssignRepeatOf(const GPUMatrix& a, const size_t numRowRepeats, const size_t numColRepeats) +{ + if (this == &a) + LogicError("AssignRepeatOf: a is the same as [this]. 
Does not support inplace repeat."); + + if (a.IsEmpty()) + LogicError("AssignRepeatOf: Matrix a is empty."); + + RequireSize(a.GetNumRows() * numRowRepeats, a.GetNumCols() * numColRepeats); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + CUDA_LONG n = (CUDA_LONG) a.GetNumCols(), m = (CUDA_LONG) a.GetNumRows(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _assignRepeatOf<<>>(Data(), a.Data(), N, m, n, (CUDA_LONG) GetNumRows()); + return *this; +} + +template +GPUMatrix& GPUMatrix::AddToRowRepeatValuesOf(const GPUMatrix& a, const size_t numRepeats) +{ + if (a.IsEmpty()) + LogicError("AddToRowRepeatValuesOf: input matrix a is empty."); + + if (a.GetNumRows() != GetNumRows() * numRepeats) + LogicError("AddToRowSliceValuesOf: a.GetNumRows() != GetNumRows() * numRepeats."); + + RequireSize(a.GetNumRows() / numRepeats, a.GetNumCols()); + + CUDA_LONG N = (CUDA_LONG) a.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _addToRowRepeatValuesOf<<>>(Data(), a.Data(), N, (CUDA_LONG) a.GetNumRows(), (CUDA_LONG) a.GetNumCols(), (CUDA_LONG) GetNumRows()); + return *this; +} + +template +GPUMatrix& GPUMatrix::AssignPositiveAndShiftedNegSample(const GPUMatrix& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber) +{ + if (this == &a) + LogicError("AssignPositiveAndShiftedNegSample: a is the same as [this]. Does not support inplace assignment."); + + if (a.IsEmpty()) + LogicError("AssignPositiveAndShiftedNegSample: Matrix a is empty."); + + RequireSize(a.GetNumRows() * (posNumber + negNumber), a.GetNumCols()); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + CUDA_LONG n = (CUDA_LONG) a.GetNumCols(), m = (CUDA_LONG) a.GetNumRows(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _assignPositiveAndShiftedNegSample<<>>(Data(), a.Data(), N, m, n, (CUDA_LONG) GetNumRows(), posNumber, shiftNumber); + return *this; +} + +template +GPUMatrix& GPUMatrix::AddFoldedPositiveAndShiftedNegSample(const GPUMatrix& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber) +{ + if (this == &a) + LogicError("AddFoldedPositiveAndShiftedNegSample: a is the same as [this]. 
Does not support inplace assignment."); + + if (a.IsEmpty()) + LogicError("AddFoldedPositiveAndShiftedNegSample: Matrix a is empty."); + + if (a.GetNumRows() != GetNumRows() * (posNumber + negNumber) || a.GetNumCols() != GetNumCols()) + LogicError("AddFoldedPositiveAndShiftedNegSample: dimensions mismatch."); + + CUDA_LONG N = (CUDA_LONG) a.GetNumElements(); + CUDA_LONG n = (CUDA_LONG) a.GetNumCols(), m = (CUDA_LONG) a.GetNumRows(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _addFoldedPositiveAndShiftedNegSample<<>>(Data(), a.Data(), N, m, n, (CUDA_LONG) GetNumRows(), posNumber, shiftNumber); + return *this; +} + +template +GPUMatrix GPUMatrix::Transpose() const +{ + if (IsEmpty()) + LogicError("Transpose: Matrix is empty."); + + GPUMatrix c(GetComputeDeviceId()); + c.AssignTransposeOf(*this); + return c; +} + +// GetCublasHandle - get a cublas handle for the given GPU, should only need one per GPU +// computeDevice - The compute device for which the cublas handle is desired +// returns: cublas handle +// NOTE: we currently don't bother to ever free the CUBLAS handle, it will be freed automatically by CUDA when the process ends +template +cublasHandle_t GPUMatrix::GetCublasHandle(int computeDevice /*=-1*/) +{ + // if the compute device is not passed, get the current device from CUDA + if (computeDevice < 0) + cudaGetDevice(&computeDevice); + + if (computeDevice < 0 || computeDevice >= MaxGpus) + LogicError("GetCublasHandle: Maximum GPU exceeded"); + cublasHandle_t cuHandle = s_cuHandle[computeDevice]; + if (cuHandle == NULL) + { + s_cuHandle[computeDevice] = cuHandle = _initCUBLAS(computeDevice); + } + CUBLAS_CALL(cublasSetStream(cuHandle, t_stream)); + + return cuHandle; +} + +template +GPUMatrix& GPUMatrix::AssignTransposeOf(const GPUMatrix& a) +{ + if (this == &a) + LogicError("AssignTransposeOf: a is the same as [this]. 
Does not support inplace transpose."); + + if (a.IsEmpty()) + LogicError("AssignTransposeOf: Matrix a is empty."); + + if (GetNumRows() != a.GetNumCols() || GetNumCols() != a.GetNumRows()) + RequireSize(a.GetNumCols(), a.GetNumRows()); + + cublasHandle_t cuHandle = GetCublasHandle(a.GetComputeDeviceId()); + cublasOperation_t transA = CUBLAS_OP_T; + cublasOperation_t transB = CUBLAS_OP_T; + int m = (int) a.m_numCols; + int n = (int) a.m_numRows; + ElemType alpha = 1; + ElemType beta = 0; + cublasStatus_t st; + if (sizeof(ElemType) == sizeof(float)) + st = cublasSgeam(cuHandle, transA, transB, m, n, reinterpret_cast(&alpha), reinterpret_cast(a.Data()), (int) a.m_numRows, reinterpret_cast(&beta), reinterpret_cast(a.Data()), (int) a.m_numRows, reinterpret_cast(Data()), (int) m_numRows); + else if (sizeof(ElemType) == sizeof(double)) + st = cublasDgeam(cuHandle, transA, transB, m, n, reinterpret_cast(&alpha), reinterpret_cast(a.Data()), (int) a.m_numRows, reinterpret_cast(&beta), reinterpret_cast(a.Data()), (int) a.m_numRows, reinterpret_cast(Data()), (int) m_numRows); + else + RuntimeError("Unsupported template argument in GPUMatrix"); + if (st != CUBLAS_STATUS_SUCCESS) + RuntimeError("AssignTransposeOf failed"); + m_numRows = a.m_numCols; + m_numCols = a.m_numRows; + return *this; +} + +template +__global__ void _doGatherColumnsOf(ElemType* us, size_t usStride, const ElemType beta, const ElemType* idx, size_t idxStride, const ElemType* a, size_t aStride, size_t aCols, const ElemType alpha, CUDA_LONG numElements) +{ + CUDA_LONG id = GridDim::GetLinearThreadId(); + if (id >= numElements) // note: there are no __syncthread() calls inside + return; + + // id = i + jOut * usStride; + // Each thread processes one element of the output matrix. + CUDA_LONG i = id % usStride; // row index into 'us' and 'a' + CUDA_LONG jOut = id / usStride; // col index into 'us' and 'idx' + + auto jInF = idx[jOut * idxStride]; // this is the column we need to get + if (::isnan(jInF) || jInF < 0) // negative index means gap + return; + size_t jIn = (size_t)jInF; + //if (jIn >= aCols) + // return; // actually a failure + + const ElemType& ra = a[ i + jIn * aStride ]; + ElemType& rus = us[id/*i + jOut * usStride*/]; + + ElemType res = ra * alpha; + if (beta != 0) + res += rus * beta; + rus = res; +} + +// *this[:,j] = a[:,idx[j]] * alpha + *this[:,j] * beta +template +GPUMatrix& GPUMatrix::DoGatherColumnsOf(ElemType beta, const GPUMatrix& idx, const GPUMatrix& a, ElemType alpha) +{ + if (idx.GetNumRows() != 1) // index is 1-dimensional only + InvalidArgument("DoGatherColumnsOf: Map must be a row vector."); + + if (beta == 0) + RequireSize(a.GetNumRows(), idx.GetNumCols()); // output has same column format as a, but number of columns comes from idx + else + VerifySize(a.GetNumRows(), idx.GetNumCols()); + + if (idx.GetComputeDeviceId() != a.GetComputeDeviceId() || GetComputeDeviceId() != a.GetComputeDeviceId()) + InvalidArgument("All matrices must be on the same GPU"); + a.PrepareDevice(); + + // launch the kernel + CUDA_LONG NN = (CUDA_LONG)GetNumElements(); // linear space identifying each individual input element + SyncGuard syncGuard; + GridDim grid(NN); + _doGatherColumnsOf<<>>(Data(), GetNumRows(), beta, idx.Data(), idx.GetNumRows(), a.Data(), a.GetNumRows(), a.GetNumCols(), alpha, grid.m_N); + + // Note: The following fails silently (no error, immediate or delayed) for numcols = 10000 under CUDA 7.0. 
+ //_doGatherColumnsOf<<>>(Data(), GetNumRows(), beta, idx.Data(), idx.GetNumRows(), a.Data(), a.GetNumRows(), a.GetNumCols(), alpha); + + return *this; +} + +// little helper for debugging +template +static void Peek(const GPUMatrix& m, const char* which) +{ + size_t rows = m.GetNumRows(); + size_t cols = m.GetNumCols(); + ElemType buf[10000] = { 0 }; + size_t n = min(rows * cols, _countof(buf)); + CUDA_CALL(cudaMemcpy(buf, m.Data(), sizeof(ElemType) * n, cudaMemcpyDeviceToHost)); + UNUSED(which); UNUSED(rows); UNUSED(cols); sin(1.0f); // set breakpoint here + //CUDA_CALL(cudaMemcpy(const_cast(m.Data()), buf, sizeof(ElemType) * n, cudaMemcpyHostToDevice)); +} + +#define ALLOW_ATOMIC_SCATTER // allow to disable this, until we know atomicAdd() works properly here + +template +__global__ void _doScatterColumnsOf(ElemType* us, size_t usStride, size_t usCols, const ElemType* idx, size_t idxStride, const ElemType* a, size_t aStride, const ElemType alpha, CUDA_LONG numElements) +{ + CUDA_LONG id = GridDim::GetLinearThreadId(); + if (id >= numElements) // note: there are no __syncthread() calls inside + return; + + // id = i + jIn * aStride + // Each thread processes one element of a + CUDA_LONG i = id % aStride; // row index into 'a' and 'us' + CUDA_LONG jIn = id / aStride; // col index into 'a' and 'idx' + + auto jOutF = idx[jIn * idxStride]; // this is the column we copy/add into + if (::isnan(jOutF) || jOutF < 0) // negative index means gap + return; + size_t jOut = (size_t)jOutF; + //if (jOut >= usCols) + // return; // actually a failure --TODO: This should not be necessary. Why is it? + + const ElemType& ra = a[id/*i + jIn * aStride*/]; + ElemType& rus = us[ i + jOut * usStride ]; + + ElemType res = ra * alpha; + if (res != 0) // avoid memory conflict if e.g. an entire column has no gradient +#ifdef ALLOW_ATOMIC_SCATTER + atomicAdd(&rus, res); // rus += res; +#else + rus += res; +#endif + // Note: atomicAdd() is supposed to be fast in case of no conflict (the simple case of Scatter()) +} + +// *this[:,idx[j]] = a[:,j] * alpha + *this[:,idx[j]] * beta +template +GPUMatrix& GPUMatrix::DoScatterColumnsOf(ElemType beta, const GPUMatrix& idx, const GPUMatrix& a, ElemType alpha) +{ + if (idx.GetNumRows() != 1) // index is 1-dimensional only + InvalidArgument("DoScatterColumnsOf: Map must be a row vector."); + if (idx.GetNumCols() != a.GetNumCols()) + InvalidArgument("DoScatterColumnsOf: Map must have width of input vector."); + if (a.GetNumRows() != GetNumRows()) + InvalidArgument("DoScatterColumnsOf: Output must have same height as input vector."); + + if (idx.GetComputeDeviceId() != a.GetComputeDeviceId() || GetComputeDeviceId() != a.GetComputeDeviceId()) + InvalidArgument("All matrices must be on the same GPU"); + a.PrepareDevice(); + + auto& us = *this; + +#ifndef ALLOW_ATOMIC_SCATTER // verify that atomicAdd is not needed --this is not efficient + { + vector buf(idx.GetNumRows() * idx.GetNumCols()); // idx(,)are the column(s) we copy/add into + CUDA_CALL(cudaMemcpy(buf.data(), idx.Data(), sizeof(ElemType) * buf.size(), cudaMemcpyDeviceToHost)); + vector writtenTo(GetNumCols(), false); // remember whether an output column is in fact a target + for (size_t i = 0; i < buf.size(); i++) + { + auto colF = buf[i]; + if (std::isnan(colF) || colF < 0) + continue; + size_t col = (size_t)colF; + if (col >= GetNumCols()) + LogicError("DoScatterColumnsOf: Index value out of bounds."); + if (writtenTo[col]) + LogicError("DoScatterColumnsOf: #ifndef ALLOW_ATOMIC_SCATTER then columns must be unique. 
Column idx(%d,%d)=%d is used twice.", (int)(i % idx.GetNumCols()), (int)(i / idx.GetNumCols()), (int)col); + else + writtenTo[col] = true; + } + } +#endif + + // pre-scale with beta upfront + // Scatter may add more than one source column to the same target, so we must pre-scale with beta, and then just keep adding. + Scale(beta, us); // if beta is 0, then this will be a memset() + + // launch the kernel + CUDA_LONG NN = (CUDA_LONG)(a.GetNumElements()); // linear space identifying each individual input element + SyncGuard syncGuard; + GridDim grid(NN); + _doScatterColumnsOf<<>>(Data(), GetNumRows(), GetNumCols(), idx.Data(), idx.GetNumRows(), a.Data(), a.GetNumRows(), alpha, NN); + + //SyncGuard syncGuard; + //_doScatterColumnsOf<<>>(Data(), GetNumRows(), GetNumCols(), idx.Data(), idx.GetNumRows(), a.Data(), a.GetNumRows(), alpha, NN); + + return *this; +} + +template +void GPUMatrix::SetValue(const ElemType v) +{ + if (IsEmpty()) + return; + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + + // Check if value is zero, which can be set using cudaMemset + bool isZero = true; + const char* valArray = reinterpret_cast(&v); + + for (int i = 0; i < sizeof(ElemType); i++) + { + if (valArray[i] != 0) + { + isZero = false; + break; + } + } + + if (isZero) + { + CUDA_CALL(cudaMemset(Data(), 0, N * sizeof(ElemType))); + } + else + { + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _setValue<<>>(Data(), v, N); + } +} + +template +void GPUMatrix::SetValue(const ElemType* d_v) // d_v is pointer to the the value in GPU memory +{ + if (IsEmpty()) + LogicError("SetValue: Matrix is empty."); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _setValue<<>>(Data(), d_v, N); +} + +template +void GPUMatrix::MaskColumnsValue(const GPUMatrix& columnsMask, ElemType val, size_t numColsPerMaskEntry) +{ + if (GetNumCols() != (columnsMask.GetNumCols() * numColsPerMaskEntry)) + RuntimeError("Matrix number of columns must equal 'number of columns in column mask * numColsPerMaskEntry'."); + + if (GetComputeDeviceId() != columnsMask.GetComputeDeviceId()) + RuntimeError("Matrix and column mask must be on the same device"); + + int blocksPerGrid = (int)columnsMask.GetNumCols(); + PrepareDevice(); + SyncGuard syncGuard; + _maskColumnsValue<<>>(Data(), columnsMask.Data(), (CUDA_LONG) GetNumCols(), (CUDA_LONG) GetNumRows(), val, numColsPerMaskEntry); +} + +template +void GPUMatrix::SetColumn(const ElemType* colPointer, size_t colInd) +{ + if (IsEmpty()) + LogicError("SetValue: Matrix is empty."); + if (colPointer == NULL) + return; + CUDA_CALL(cudaMemcpy(Data() + LocateColumn(colInd), colPointer, sizeof(ElemType) * m_numRows, cudaMemcpyHostToDevice)); +} + +template +void GPUMatrix::SetColumn(const GPUMatrix& valMat, size_t colInd) +{ + if (IsEmpty()) + LogicError("SetColumn: Matrix is empty."); + if (valMat.GetNumCols() != 1) + LogicError("SetColumn: only support one column matrix now."); + CUDA_CALL(cudaMemcpy(Data() + LocateColumn(colInd), valMat.Data(), sizeof(ElemType) * m_numRows, cudaMemcpyDeviceToDevice)); +} + +template +void GPUMatrix::SetValue(const GPUMatrix& deepCopyFrom) +{ + if (this == &deepCopyFrom) + return; + + SetValue(deepCopyFrom.GetNumRows(), deepCopyFrom.GetNumCols(), deepCopyFrom.GetComputeDeviceId(), deepCopyFrom.Data(), matrixFlagSetValueOnDevice); +} + +#if 0 +template +void GPUMatrix::SetValue(const CPUMatrix& 
/*deepCopyFrom*/) +{ + NOT_IMPLEMENTED; +} + +template +void GPUMatrix::SetValue(const CPUSparseMatrix& /*deepCopyFrom*/) +{ + NOT_IMPLEMENTED; +} + +template +void GPUMatrix::SetValue(const GPUSparseMatrix& deepCopyFrom) +{ + deepCopyFrom.CopyToDenseMatrix(*this); +} +#endif + +template +void GPUMatrix::SetValue(const size_t numRows, const size_t numCols, int deviceId, ElemType* pArray, size_t matrixFlags, DataTransferer* transferer) +{ + // handle externally managed case + // BUGBUG: This is super super ugly, and needs to be fixed, but if matrixFlags has the right value, then we can't free anything, + // and everything gets wonky. This should be fixed, and would go away if it is made a shared_ptr. + if (matrixFlags & matrixFlagDontOwnBuffer) + { + // free the existing array if it used to be an owned array + if ( Buffer() != NULL) + { + TracingGPUMemoryAllocator::Free(GetComputeDeviceId(), Buffer()); + } + m_numRows = numRows; + m_numCols = numCols; + SetBuffer(pArray, GetNumElements() * sizeof(ElemType), true); + SetSizeAllocated(GetNumElements()); + SetFormat(matrixFormatDense); + SetComputeDeviceId(deviceId); + } + else + { + if (transferer && (matrixFlags & matrixFlagSetValueOnDevice)) + RuntimeError("Asynchronous data copy from device to device is currently not supported."); + + // if the devices are different move it now + if (GetComputeDeviceId() != deviceId && deviceId >= 0) + { + Clear(); + ZeroInit(deviceId); + } + + // now RequireSize/allocate as necessary + RequireSize(numRows, numCols); + + // copy over the content to the buffer + PrepareDevice(); + if (pArray != NULL) + { + if (!(matrixFlags & matrixFormatRowMajor)) + { + if (transferer) + transferer->CopyCPUToGPUAsync(pArray, GetNumElements(), sizeof(ElemType), Data()); + else + CUDA_CALL(cudaMemcpy(Data(), pArray, sizeof(ElemType) * GetNumElements(), (matrixFlags & matrixFlagSetValueOnDevice) ? cudaMemcpyDeviceToDevice : cudaMemcpyHostToDevice)); + } + else // row major: must transpose (this is not meant to be efficient, but very useful for defining inline matrices for test code) + { + vector transposed(GetNumElements()); + for (size_t i = 0; i < numRows; i++) + for (size_t j = 0; j < numCols; j++) + transposed[i + numRows * j] = pArray[j + numCols * i]; + + if (transferer) + transferer->CopyCPUToGPUAsync(transposed.data(), GetNumElements(), sizeof(ElemType), Data()); + else + CUDA_CALL(cudaMemcpy(Data(), transposed.data(), sizeof(ElemType) * GetNumElements(), (matrixFlags & matrixFlagSetValueOnDevice) ? 
cudaMemcpyDeviceToDevice : cudaMemcpyHostToDevice)); + } + } + } + SetFormat(matrixFormatDense); +} + +template +void GPUMatrix::SetDiagonalValue(const ElemType v) +{ + CUDA_LONG N = (CUDA_LONG) GetNumRows(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _setDiagonalValue<<>>(Data(), v, N, (CUDA_LONG) GetNumRows()); +} + +template +void GPUMatrix::SetDiagonalValue(const GPUMatrix& vector) +{ + if (IsEmpty() || vector.IsEmpty()) + LogicError("SetDiagonalValue: Matrix is empty."); + + if (GetNumRows() != GetNumCols()) + LogicError("SetDiagonalValue: NumRows and NumCols do not agree."); + + if (vector.GetNumRows() != 1 && vector.GetNumCols() != 1) + LogicError("SetDiagonalValue: input vector must be a vector."); + + if (vector.GetNumElements() == 1) // reduce to simple form + SetDiagonalValue(vector.Data()[0]); + + else if (vector.GetNumRows() != GetNumRows() && vector.GetNumCols() != GetNumRows()) + LogicError("SetDiagonalValue: input vector's dimension does not agree with [this]."); + else + { + CUDA_LONG N = (CUDA_LONG) GetNumRows(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _setDiagonalValueFromVector<<>>(Data(), vector.Data(), N); + } +} + +template +void GPUMatrix::SetUniformRandomValue(const ElemType low, const ElemType high, unsigned long seed) +{ + PrepareDevice(); + CreateCurandObject(seed, __FUNCTION__); // TODO call ResetCurandObject() instead? + + cudaEvent_t done = nullptr; + CUDA_CALL(cudaEventCreate(&done)); // TODO: why not condition on do_sync, so that we can use SyncGuard? + if (sizeof(ElemType) == sizeof(float)) + CURAND_CALL(curandGenerateUniform(((curandGenerator_t*) s_curandGenerator)[0], reinterpret_cast(Data()), GetNumElements())); + else + CURAND_CALL(curandGenerateUniformDouble(((curandGenerator_t*) s_curandGenerator)[0], reinterpret_cast(Data()), GetNumElements())); + CUDA_CALL(cudaEventRecord(done)); + CUDA_CALL(cudaEventSynchronize(done)); + // CURAND_CALL(curandDestroyGenerator(gen)); + CUDA_CALL(cudaEventDestroy(done)); + + size_t N = GetNumElements(); + size_t blocksPerGrid = (size_t) ceil(N / (double) GridDim::maxThreadsPerBlock); + + SyncGuard syncGuard; + _rescaleToRange<<>>(Data(), N, low, high); +} + +template +void GPUMatrix::SetGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed) +{ + PrepareDevice(); + CreateCurandObject(seed, __FUNCTION__); // TODO call ResetCurandObject() instead? + + // TODO: Why not use SyncGuard? + + // curandGenerateNormal can return the error CURAND_STATUS_LENGTH_NOT_MULTIPLE if GetNumElements() is odd. + // To avoid this we always allocate a buffer of even size and potentially generate one more random element. + auto n = asMultipleOf(GetNumElements(), 2); + if (sizeof(ElemType) == sizeof(float)) + CURAND_CALL(curandGenerateNormal(((curandGenerator_t*) s_curandGenerator)[0], reinterpret_cast(Data()), n, (float) mean, (float) sigma)); + else + CURAND_CALL(curandGenerateNormalDouble(((curandGenerator_t*) s_curandGenerator)[0], reinterpret_cast(Data()), n, (double) mean, (double) sigma)); + // CURAND_CALL(curandDestroyGenerator(gen)); +} + +//maskRate: percentage of values masked out (similar to dropout rate) +//scaleValue: which scale value to set to the left ones (unmasked items). 
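+// Worked example for the two parameters above (the 1/(1-maskRate) choice is the usual
+// inverted-dropout convention and is assumed here; the caller may pass any scale):
+//   with maskRate = 0.25, roughly 25% of the entries are zeroed and, per the comment above,
+//   the remaining entries are set to scaleValue. Choosing scaleValue = 1/(1 - 0.25) = 4/3
+//   makes element-wise multiplication by this mask expectation-preserving:
+//   E[mask * x] = (1 - maskRate) * scaleValue * x = x.
+#if 0 // call sketch only; 'm' and 'rng' are assumed to be a GPUMatrix<float> and a GPU-backed RNGHandle
+m.SetUniformRandomMask(0.25f, 1.0f / (1.0f - 0.25f), rng);
+#endif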
+template +void GPUMatrix::SetUniformRandomMask(const ElemType maskRate, const ElemType scaleValue, RNGHandle& rngHandle) +{ + PrepareDevice(); + + GPURNGHandle* gpuRNGHandle = dynamic_cast(&rngHandle); + assert(gpuRNGHandle != nullptr); + + cudaEvent_t done = nullptr; + CUDA_CALL(cudaEventCreate(&done)); // TODO: why not condition on do_sync, so that we can use SyncGuard? + if (sizeof(ElemType) == sizeof(float)) + CURAND_CALL(curandGenerateUniform(gpuRNGHandle->Generator(), reinterpret_cast(Data()), GetNumElements())); + else + CURAND_CALL(curandGenerateUniformDouble(gpuRNGHandle->Generator(), reinterpret_cast(Data()), GetNumElements())); + CUDA_CALL(cudaEventRecord(done)); + CUDA_CALL(cudaEventSynchronize(done)); + CUDA_CALL(cudaEventDestroy(done)); + + size_t N = GetNumElements(); + size_t blocksPerGrid = (size_t) ceil(N / (double) GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _setMaskAndScale<<>>(Data(), N, maskRate, scaleValue); +} + +template +ElemType GPUMatrix::Adagrad(GPUMatrix& gradients, const bool needAveMultiplier) +{ + size_t numColsNeeded = gradients.GetNumCols(); + if (needAveMultiplier) + numColsNeeded += gradients.GetNumCols(); + + if (IsEmpty() || GetNumCols() < numColsNeeded) + { + RequireSize(gradients.GetNumRows(), numColsNeeded); + SetValue(0.0); + } + + assert(GetNumRows() == gradients.GetNumRows() && GetNumCols() == numColsNeeded); + + size_t n = gradients.GetNumElements(); + + ElemType* multipliers = nullptr; + if (needAveMultiplier) + multipliers = Data() + n; // temp memory used to store multipliers, + + int blocksPerGrid = (n + GridDim::maxThreadsPerBlock - 1) / GridDim::maxThreadsPerBlock; + _adagrad<<>>(Data(), gradients.Data(), n, multipliers); + + if (!needAveMultiplier) + return 1; + + cublasHandle_t cuHandle = GetCublasHandle(GetComputeDeviceId()); + if (sizeof(ElemType) == sizeof(float)) + { + float aveMultiplier = 0; + CUBLAS_CALL(cublasSasum(cuHandle, (CUDA_LONG) n, reinterpret_cast(multipliers), 1, &aveMultiplier)); + return (ElemType) aveMultiplier / n; + } + else + { + double aveMultiplier = 0; + CUBLAS_CALL(cublasDasum(cuHandle, (CUDA_LONG) n, reinterpret_cast(multipliers), 1, &aveMultiplier)); + return (ElemType) aveMultiplier / n; + } +} + +template +void GPUMatrix::FSAdagrad(GPUMatrix& gradients, + GPUMatrix& functionValues, + ElemType learnRatePerSample, + ElemType momentum, + ElemType adaWeight, + ElemType adaMul, + bool unitGainMomentum) +{ + size_t numColsNeeded = 2 * gradients.GetNumCols(); + + if (IsEmpty() || (GetNumCols() < numColsNeeded)) + { + RequireSize(gradients.GetNumRows(), numColsNeeded); + SetValue(0.0); + } + + assert((GetNumRows() == gradients.GetNumRows()) && (GetNumCols() == numColsNeeded)); + + size_t n = gradients.GetNumElements(); + int blocksPerGrid = (n + GridDim::maxThreadsPerBlock - 1) / GridDim::maxThreadsPerBlock; + _fsadagrad<<>>(n, gradients.Data(), Data(), Data()+ n, functionValues.Data(), + learnRatePerSample, momentum, adaWeight, adaMul, unitGainMomentum); +} + +template +void GPUMatrix::Adam(GPUMatrix& gradients, + GPUMatrix& functionValues, + ElemType learnRatePerSample, + ElemType momentum, + ElemType adaWeight, + ElemType adaMul, + bool unitGainMomentum) +{ + size_t numColsNeeded = 2 * gradients.GetNumCols(); + + if (IsEmpty() || (GetNumCols() < numColsNeeded)) + { + RequireSize(gradients.GetNumRows(), numColsNeeded); + SetValue(0.0); + } + + assert((GetNumRows() == gradients.GetNumRows()) && (GetNumCols() == numColsNeeded)); + + size_t n = gradients.GetNumElements(); + int blocksPerGrid = (n + 
GridDim::maxThreadsPerBlock - 1) / GridDim::maxThreadsPerBlock; + _adam << > >(n, gradients.Data(), Data(), Data() + n, functionValues.Data(), + learnRatePerSample, momentum, adaWeight, adaMul, unitGainMomentum); +} + +template +ElemType GPUMatrix::RmsProp(GPUMatrix& gradients, + ElemType RMS_GAMMA, + ElemType RMS_WGT_INC, + ElemType RMS_WGT_MAX, + ElemType RMS_WGT_DEC, + ElemType RMS_WGT_MIN, + const bool needAveMultiplier) +{ + const ElemType floor = 1e-6f; + static ElemType* upd_gpu = (ElemType*) 0; + + size_t n = gradients.GetNumElements(); + int blocksPerGrid = (GetNumElements() + GridDim::maxThreadsPerBlock - 1) / GridDim::maxThreadsPerBlock; + + size_t numColsNeeded = gradients.GetNumCols() * 3; + if (needAveMultiplier) + numColsNeeded += gradients.GetNumCols(); + + if (IsEmpty() || GetNumCols() < numColsNeeded) + { + RequireSize(gradients.GetNumRows(), numColsNeeded); + SetValue(0.0); + + ElemType* avars = Data(); // accumulated variances for RMS scaling + ElemType* signs = Data() + n; // sign of previous gradient + ElemType* steps = Data() + 2 * n; // current step size + // Data()+3*n is temp memory used to store multipliers, no need to initialize + + _rmsprop_init<<>>(avars, signs, steps, gradients.Data(), n); + } + assert(GetNumRows() == gradients.GetNumRows() && GetNumCols() == numColsNeeded); + + ElemType* avars = Data(); // accumulated variances for RMS scaling + ElemType* signs = Data() + n; // sign of previous gradient + ElemType* steps = Data() + 2 * n; // current step size + + ElemType* multipliers = nullptr; + if (needAveMultiplier) + multipliers = Data() + 3 * n; // temp memory used to store multipliers, + + if (!upd_gpu) + { + const ElemType upd[] = { + 2, 2, 0, + 2, 2, 0, + 1, 1, 1, + 2, 2, 0, + 1, 2, 1, + 0, 2, 2, + 1, 1, 1, + 0, 2, 2, + 0, 2, 2, + }; + + upd_gpu = TracingGPUMemoryAllocator::Allocate(GetComputeDeviceId(), 27); + CUDA_CALL(cudaMemcpy(upd_gpu, upd, sizeof(ElemType) * _countof(upd), cudaMemcpyHostToDevice)); + } + + _rmsprop<<>>(avars, signs, steps, gradients.Data(), n, + RMS_GAMMA, RMS_WGT_INC, RMS_WGT_MAX, RMS_WGT_DEC, RMS_WGT_MIN, + floor, upd_gpu, multipliers); + + if (!needAveMultiplier) + return 1; + + cublasHandle_t cuHandle = GetCublasHandle(GetComputeDeviceId()); + if (sizeof(ElemType) == sizeof(float)) + { + float aveMultiplier = 0; + CUBLAS_CALL(cublasSasum(cuHandle, (CUDA_LONG) n, reinterpret_cast(multipliers), 1, &aveMultiplier)); + return aveMultiplier / n; + } + else + { + double aveMultiplier = 0; + CUBLAS_CALL(cublasDasum(cuHandle, (CUDA_LONG) n, reinterpret_cast(multipliers), 1, &aveMultiplier)); + return (ElemType) aveMultiplier / n; + } +} + +template +void GPUMatrix::AdaDelta(GPUMatrix& gradients, GPUMatrix& functionValues, ElemType learningRate, ElemType rho, ElemType epsilon) +{ + size_t numColsNeeded = 2 * gradients.GetNumCols(); + + if (IsEmpty() || (GetNumCols() < numColsNeeded)) + { + RequireSize(gradients.GetNumRows(), numColsNeeded); + SetValue(0.0); + } + + assert((GetNumRows() == gradients.GetNumRows()) && (GetNumCols() == numColsNeeded)); + + size_t n = gradients.GetNumElements(); + int blocksPerGrid = (n + GridDim::maxThreadsPerBlock - 1) / GridDim::maxThreadsPerBlock; + _adadelta << > >(n, gradients.Data(), Data(), Data() + n, functionValues.Data(), learningRate, rho, epsilon); +} + +template +void GPUMatrix::Reshape(const size_t numRows, const size_t numCols) +{ + assert(numRows * numCols == GetNumElements()); + if (numRows * numCols != GetNumElements()) + InvalidArgument("Reshape: total number of elements does not 
match."); + + m_numRows = numRows; + m_numCols = numCols; +} + +template +void GPUMatrix::RequireSize(const size_t numRows, const size_t numCols, bool growOnly) +{ + if (GetNumRows() != numRows || GetNumCols() != numCols) + Resize(numRows, numCols, growOnly); +} + +template +void GPUMatrix::Resize(const size_t numRows, const size_t numCols, bool growOnly) +{ + if (GetNumRows() == numRows && GetNumCols() == numCols) + return; + + VerifyResizable(__FUNCTION__); + + size_t numElements = numRows * numCols; + if (numElements > GetSizeAllocated() || // grow allocation + (!growOnly && numElements != GetSizeAllocated())) // shrink allocation if not growOnly + { + // If the buffer exists, free it before allocate + if (Buffer()) + { + TracingGPUMemoryAllocator::Free(GetComputeDeviceId(), Buffer()); + } + + // reallocate buffer if numElements > 0 + ElemType* pArray = nullptr; + if (numElements > 0) + { + pArray = TracingGPUMemoryAllocator::Allocate(GetComputeDeviceId(), numRows, numCols); + } + + SetBuffer(pArray, numElements * sizeof(ElemType)); + SetSizeAllocated(numElements); + } + + // success + m_sliceViewOffset = 0; + m_numRows = numRows; + m_numCols = numCols; +} + +template +size_t GPUMatrix::LocateElement(const size_t row, const size_t col) const +{ + assert(row < m_numRows && col < m_numCols); + return LocateColumn(col) + row; // matrix in column-wise storage +} + +template +size_t GPUMatrix::LocateColumn(const size_t col) const +{ + assert(col < GetNumCols()); + return col * m_numRows; // matrix in column-wise storage +} + +template +ElemType GPUMatrix::Get00Element() const +{ + ElemType res = 0; + CUDA_CALL(cudaMemcpy(&res, Data(), sizeof(ElemType), cudaMemcpyDeviceToHost)); + return res; +} +#pragma endregion Basic Operators + +#pragma region Member BLAS Functions +template +GPUMatrix& GPUMatrix::operator+=(ElemType alpha) +{ + if (IsEmpty()) + LogicError("operator+=: Matrix is empty."); + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _addValue<<>>(Data(), alpha, N); + return *this; +} + +template +GPUMatrix GPUMatrix::operator+(ElemType alpha) const +{ + if (IsEmpty()) + LogicError("operator+: Matrix is empty."); + + GPUMatrix c(*this); + c += alpha; + return c; +} + +template +GPUMatrix& GPUMatrix::AssignSumOf(const ElemType alpha, const GPUMatrix& a) +{ + SetValue(a); + (*this) += alpha; + return (*this); +} + +template +GPUMatrix& GPUMatrix::operator+=(const GPUMatrix& a) +{ + ScaleAndAdd(1, a, *this); + return *this; +} + +template +GPUMatrix GPUMatrix::operator+(const GPUMatrix& a) const +{ + if (GetNumElements() == 1) + { + GPUMatrix c(a); + c += Get00Element(); + return c; + } + else if (a.GetNumElements() == 1) + { + GPUMatrix c(*this); + c += a.Get00Element(); + return c; + } + else + { + GPUMatrix c(*this); // this implementation will introduce a copy overhead. 
but make resue of the code + c += a; + return c; + } +} + +template +GPUMatrix& GPUMatrix::AssignSumOf(const GPUMatrix& a, const GPUMatrix& b) +{ + SetValue(a); + (*this) += b; + return (*this); +} + +template +GPUMatrix& GPUMatrix::operator-=(ElemType alpha) +{ + if (IsEmpty()) + LogicError("operato-=: Matrix is empty."); + return operator+=(-1 * alpha); +} + +template +GPUMatrix GPUMatrix::operator-(ElemType alpha) const +{ + if (IsEmpty()) + LogicError("operator-: Matrix is empty."); + return operator+(-1 * alpha); +} + +template +GPUMatrix& GPUMatrix::AssignDifferenceOf(const ElemType alpha, const GPUMatrix& a) +{ + RequireSize(a.m_numRows, a.m_numCols); + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _assignDifferenceOf1<<>>(Data(), alpha, a.Data(), N); + return *this; +} + +template +GPUMatrix& GPUMatrix::AssignDifferenceOf(const GPUMatrix& a, const ElemType alpha) +{ + RequireSize(a.m_numRows, a.m_numCols); + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _assignDifferenceOf2<<>>(Data(), alpha, a.Data(), N); + return *this; +} + +template +GPUMatrix& GPUMatrix::operator-=(const GPUMatrix& a) +{ + ScaleAndAdd(-1, a, *this); + + return *this; +} + +template +GPUMatrix GPUMatrix::operator-(const GPUMatrix& a) const +{ + GPUMatrix c(*this); // this implementation will introduce a copy overhead. but make resue of the code + c -= a; + return c; +} + +template +GPUMatrix& GPUMatrix::AssignDifferenceOf(const GPUMatrix& a, const GPUMatrix& b) +{ + if (this != &a) + { + RequireSize(a.GetNumRows(), a.GetNumCols()); + SetValue(a); + } + (*this) -= b; + return *this; +} + +template +GPUMatrix& GPUMatrix::operator*=(ElemType alpha) +{ + Scale(alpha, *this); + return *this; +} + +template +GPUMatrix GPUMatrix::operator*(ElemType alpha) const +{ + GPUMatrix c(GetNumRows(), GetNumCols(), GetComputeDeviceId()); + Scale(alpha, *this, c); + return c; +} + +template +GPUMatrix& GPUMatrix::AssignProductOf(const ElemType alpha, const GPUMatrix& a) +{ + Scale(alpha, a, *this); + return *this; +} + +template +GPUMatrix& GPUMatrix::AssignProductOf(const GPUMatrix& a, const bool transposeA, const GPUMatrix& b, const bool transposeB) +{ + if (a.GetNumElements() == 1) + { + if (transposeB) + AssignTransposeOf(b); + (*this) *= a.Get00Element(); + } + else if (b.GetNumElements() == 1) + { + if (transposeA) + AssignTransposeOf(a); + (*this) *= b.Get00Element(); + } + else + Multiply(a, transposeA, b, transposeB, *this); + return *this; +} + +template +GPUMatrix GPUMatrix::operator*(const GPUMatrix& a) const +{ + const GPUMatrix& us = *this; + if (GetNumElements() == 1) + { + GPUMatrix c(GetComputeDeviceId()); + c.AssignProductOf(Get00Element(), a); + return c; + } + else if (a.GetNumElements() == 1) + { + GPUMatrix c(GetComputeDeviceId()); + c.AssignProductOf(a.Get00Element(), us); + return c; + } + else + { + GPUMatrix c(GetNumRows(), a.GetNumCols(), GetComputeDeviceId()); + Multiply(*this, a, c); + return c; + } +} + +template +GPUMatrix& GPUMatrix::operator/=(ElemType alpha) +{ + (*this) *= 1 / alpha; + return (*this); +} + +template +GPUMatrix GPUMatrix::operator/(ElemType alpha) const +{ + return ((*this) * (1 / alpha)); +} + +//element-wise power +template +GPUMatrix& GPUMatrix::operator^=(ElemType alpha) +{ + GPUMatrix& us = *this; + ElementWisePower(alpha, us, us); + return us; 
+} + +template +GPUMatrix GPUMatrix::operator^(ElemType alpha) const +{ + GPUMatrix c(GetNumRows(), GetNumCols(), GetComputeDeviceId()); + ElementWisePower(alpha, *this, c); + return c; +} + +template +GPUMatrix& GPUMatrix::AssignElementPowerOf(const GPUMatrix& a, const ElemType power) +{ + ElementWisePower(power, a, *this); + return *this; +} + +template +GPUMatrix& GPUMatrix::AddElementProductOf(const GPUMatrix& a, const GPUMatrix& b) +{ + if (a.IsEmpty() || b.IsEmpty()) + LogicError("AddElementProductOf: Matrix is empty."); + + assert(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()); + if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) + InvalidArgument("The input matrix dimensions do not match."); + + if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == GetNumCols())) + InvalidArgument("The input matrix dimensions do not match [this]."); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _addElementProductOf<<>>(Data(), a.Data(), b.Data(), N); + return *this; +} + +template +GPUMatrix& GPUMatrix::ColumnElementMultiplyWith(const GPUMatrix& a) +{ + if (a.IsEmpty() || IsEmpty()) + LogicError("ColumnElementMultiplyWith: Matrix is empty."); + + if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1)) + InvalidArgument("ColumnElementMultiplyWith: The input matrix should be a col vector and match [this]'s rows."); + + CUDA_LONG N = (CUDA_LONG) a.GetNumRows(); + CUDA_LONG M = (CUDA_LONG) GetNumCols(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _columnElementMultiplyWith<<>>(Data(), a.Data(), N, M); + return *this; +} + +template +GPUMatrix& GPUMatrix::RowElementMultiplyWith(const GPUMatrix& a) +{ + if (a.IsEmpty() || IsEmpty()) + LogicError("RowElementMultiplyWith: Matrix is empty."); + + if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols())) + InvalidArgument("RowElementMultiplyWith: The input matrix should be a row vector and match [this]'s columns."); + + CUDA_LONG N = (CUDA_LONG) GetNumRows(); + CUDA_LONG M = (CUDA_LONG) a.GetNumCols(); + int blocksPerGrid = (int) ceil(1.0 * M / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _rowElementMultiplyWith<<>>(Data(), a.Data(), N, M); + return *this; +} + +template +GPUMatrix& GPUMatrix::RowElementDivideBy(const GPUMatrix& a) +{ + if (a.IsEmpty() || IsEmpty()) + LogicError("RowElementDivideBy: Matrix is empty."); + + if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols())) + InvalidArgument("RowElementDivideBy: The input matrix should be a row vector and match [this]'s columns."); + + CUDA_LONG N = (CUDA_LONG) GetNumRows(); + CUDA_LONG M = (CUDA_LONG) a.GetNumCols(); + int blocksPerGrid = (int) ceil(1.0 * M / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _rowElementDivideBy<<>>(Data(), a.Data(), N, M); + return *this; +} + +template +GPUMatrix& GPUMatrix::ColumnElementDivideBy(const GPUMatrix& a) +{ + if (a.IsEmpty() || IsEmpty()) + LogicError("ColumnElementDivideBy: Matrix is empty."); + + if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1)) + InvalidArgument("ColumnElementDivideBy: The input matrix should be a col vector and match [this]'s rows."); + + CUDA_LONG N = (CUDA_LONG) a.GetNumRows(); + CUDA_LONG M = (CUDA_LONG) GetNumCols(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + 
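+    // Launch-shape arithmetic used throughout this file: blocksPerGrid is the ceiling of
+    // N / GridDim::maxThreadsPerBlock, so at least N threads are launched. As a worked example
+    // (values illustrative only): N = 100000 with 1024 threads per block gives
+    // ceil(100000 / 1024) = ceil(97.66) = 98 blocks; surplus threads in the last block are
+    // expected to fall through an "if (id >= N) return;" style guard inside the kernel, as
+    // _doGatherColumnsOf above does.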
a.PrepareDevice(); + SyncGuard syncGuard; + _ColumnElementDivideBy<<>>(Data(), a.Data(), N, M); + return *this; +} + +template +GPUMatrix& GPUMatrix::ElementInverse() +{ + if (IsEmpty()) + LogicError("ElementInverse: Matrix is empty."); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _elemInverse<<>>(Data(), N); + return *this; +} + +template +GPUMatrix& GPUMatrix::AssignElementInverseOf(const GPUMatrix& a) +{ + SetValue(a); + return ElementInverse(); +} + +DEF_ELEMWISE_INPLACE_FUNC(Sigmoid) + +template +GPUMatrix& GPUMatrix::AssignSigmoidOf(const GPUMatrix& a) +{ + RequireSize(a.GetNumRows(), a.GetNumCols()); + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + // _elementWIseSigmoidOnCuda has an implementation that avoids possible overflow errors, but has a slight accuracy regression. +#if 0 + _elementWiseSigmoidOnCuda<<>>(a.Data(), Data(), N); +#else + _assignSigmoidOf<<>>(a.Data(), Data(), N); +#endif + return *this; +} + +DEF_ELEMWISE_INPLACE_FUNC(SigmoidDerivative) +DEF_ELEMWISE_ASSIGN_FUNC(SigmoidDerivative) + +template +void GPUMatrix::AssignNoiseContrastiveEstimation(const GPUMatrix& a, + const GPUMatrix& b, const GPUMatrix& bias, size_t sampleCount, GPUMatrix& tmp, GPUMatrix& c) +//this: samples+probs +// a : hidden +// b : embedding +// tmp: softmax +// c : loglikelihood +{ + UNCONST(ElemType, a, my_a); + UNCONST(ElemType, b, my_b); + UNCONST(ElemType, bias, my_bias); + SyncGuard syncGuard; + // a: dim * minibatch + // b: dim * |vocab| + int p = 512; + int width = a.GetNumRows(); // dimension of hidden vector + + while (p / 2 > width) + p = p / 2; + + // note: kernel has hard-coded dimension of 512 + _computeNceOutputMax512Threads << > >( + Data(), + sampleCount, + m_numRows / 2, + my_a.Data(), // a + a.GetNumRows(), + my_b.Data(), // b + my_bias.Data(), + tmp.Data()); // tmp + + p = 512; + while (p / 2 > GetNumElements() / 2) + p = p / 2; + // summing up objective must be done in one block + // note: kernel has hard-coded dimension of 512 + _assignNoiseContrastiveEstimationMax512Threads << <1, p >> >( + Data(), + sampleCount, + m_numRows / 2, + my_a.Data(), + a.GetNumCols(), + my_b.Data(), + tmp.Data(), + c.Data()); +} + +template +void GPUMatrix::AssignNCEDerivative(GPUMatrix& tmp, const GPUMatrix& a, + const GPUMatrix& b, size_t inputIndex, GPUMatrix& c) +{ + UNCONST(ElemType, a, my_a); + UNCONST(ElemType, b, my_b); + SyncGuard syncGuard; + int p = 512; + int width = a.GetNumRows(); + while (p / 2 > width) + p = p / 2; + + _assignNceDerivativeNew<<<(tmp.GetNumElements() + p - 1) / p, p>>>( + Data(), + tmp.GetNumCols(), + m_numRows / 2, + my_a.Data(), + a.GetNumRows(), + my_b.Data(), + tmp.Data(), + c.Data(), + inputIndex); +} + +template +void GPUMatrix::AssignSoftmaxSum(const GPUMatrix& a, GPUMatrix& c) +{ + UNCONST(ElemType, a, my_a); + SyncGuard syncGuard; + int p = 512; + int width = a.GetNumRows(); + while (p / 2 > width) + p = p / 2; + + // note: kernel has hard-coded dimension of 512 + _assignSoftmaxSumMax512Threads << <1, p >> >( + my_a.Data(), + width, + Data(), + c.Data()); +} + +template +void GPUMatrix::AssignNCEUnnormalizedEval(const GPUMatrix& a, const GPUMatrix& b, GPUMatrix& c) +{ + assert(a.GetComputeDeviceId() == b.GetComputeDeviceId()); + assert(GetNumRows() == a.GetNumRows()); + assert(GetNumCols() == b.GetNumRows()); + 
assert(a.GetNumCols() == b.GetNumRows()); + UNUSED(a); + UNUSED(b); + UNUSED(c); // TODO: this function seems like a stub + /* + EnsureAuxMemory(); + int p = 512; + int width = a.GetNumCols(); + while (p / 2 > width) p = p / 2; + + // this kernel need be launched in nnz blocks + _sparseInnerProductDenseTimesDense << > >( + m_dVal, + m_buf, + m_dCol, + m_nz, + GetNumRows(), + a.Buffer(), + b.Buffer(), + b.GetNumRows(), + m_res); + + // sum up the results + _reductionSum32 << <1, 32 >> >(m_res, c.Buffer(), m_nz);*/ +} + +DEF_ELEMWISE_INPLACE_FUNC(Tanh) +DEF_ELEMWISE_ASSIGN_FUNC(Tanh) + +template +GPUMatrix& GPUMatrix::InplaceLogSoftmax(const bool isColWise) +{ + if (IsEmpty()) + LogicError("InplaceLogSoftmax: Matrix is empty."); + + PrepareDevice(); + if (isColWise) + { + CUDA_LONG N = (CUDA_LONG) GetNumCols(); // one kernel per column + int blocksPerGrid = (int) ceil(N * 1.0 / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _logSoftMaxColWise<<>>(Data(), (CUDA_LONG) m_numCols, (CUDA_LONG) m_numRows); + } + else + { + CUDA_LONG N = (CUDA_LONG) GetNumRows(); // one kernel per column + int blocksPerGrid = (int) ceil(N * 1.0 / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _logSoftMaxRowWise<<>>(Data(), (CUDA_LONG) m_numCols, (CUDA_LONG) m_numRows); + } + return *this; +} + +template +GPUMatrix& GPUMatrix::AssignLogSoftmaxOf(const GPUMatrix& a, const bool isColWise) +{ + RequireSize(a.GetNumRows(), a.GetNumCols()); + if (isColWise) + { + PrepareDevice(); + CUDA_LONG N = (CUDA_LONG) GetNumCols(); + CUDA_LONG M = (CUDA_LONG) GetNumRows(); + SyncGuard syncGuard; + // note: kernel uses hard-coded thread dimension + _assignColumnwiseLogSoftmaxOf512Threads<<>>(a.Data(), Data(), N, M); + } + else + { + NOT_IMPLEMENTED; + } + + return *this; +} + +template +GPUMatrix& GPUMatrix::InplaceHardmax(const bool isColWise) +{ + return AssignHardmaxOf(*this, isColWise); +} + +template +GPUMatrix& GPUMatrix::AssignHardmaxOf(const GPUMatrix& a, const bool isColWise) +{ + RequireSize(a.GetNumRows(), a.GetNumCols()); + if (isColWise) + { + PrepareDevice(); + CUDA_LONG N = (CUDA_LONG) GetNumCols(); + CUDA_LONG M = (CUDA_LONG) GetNumRows(); + SyncGuard syncGuard; + // note: kernel uses hard-coded thread dimension + _assignColumnwiseHardmaxOf512Threads << > >(a.Data(), Data(), N, M); + } + else + { + NOT_IMPLEMENTED; + } + + return *this; +} + +DEF_ELEMWISE_INPLACE_FUNC(Sqrt) +DEF_ELEMWISE_ASSIGN_FUNC(Sqrt) + +DEF_ELEMWISE_INPLACE_FUNC(Exp) +DEF_ELEMWISE_ASSIGN_FUNC(Exp) + +DEF_ELEMWISE_INPLACE_FUNC(Log) +DEF_ELEMWISE_ASSIGN_FUNC(Log) + +DEF_ELEMWISE_INPLACE_FUNC(Abs) +DEF_ELEMWISE_ASSIGN_FUNC(Abs) + +DEF_ELEMWISE_INPLACE_FUNC(LinearRectifierDerivative) +DEF_ELEMWISE_ASSIGN_FUNC(LinearRectifierDerivative) + +DEF_ELEMWISE_INPLACE_FUNC(Cosine) +DEF_ELEMWISE_ASSIGN_FUNC(Cosine) + +DEF_ELEMWISE_INPLACE_FUNC(NegativeSine) +DEF_ELEMWISE_ASSIGN_FUNC(NegativeSine) + +template +GPUMatrix& GPUMatrix::InplaceTruncateBottom(const ElemType threshold) +{ + return AssignTruncateBottomOf(*this, threshold); +} + +template +GPUMatrix& GPUMatrix::AssignTruncateBottomOf(const GPUMatrix& a, const ElemType threshold) +{ + if (a.IsEmpty()) + LogicError("AssignTruncateBottomOf: Matrix a is empty."); + + if (this != &a) + { + RequireSize(a.GetNumRows(), a.GetNumCols()); + } + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(N * 1.0 / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _assignTruncateBottom<<>>(Data(), a.Data(), threshold, N); + return *this; +} + +template 
+GPUMatrix& GPUMatrix::InplaceTruncateTop(const ElemType threshold) +{ + return AssignTruncateTopOf(*this, threshold); +} + +template +GPUMatrix& GPUMatrix::AssignTruncateTopOf(const GPUMatrix& a, const ElemType threshold) +{ + if (a.IsEmpty()) + LogicError("AssignTruncateTopOf: Matrix a is empty."); + + if (this != &a) + { + RequireSize(a.GetNumRows(), a.GetNumCols()); + } + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(N * 1.0 / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _assignTruncateTop<<>>(Data(), a.Data(), threshold, N); + return *this; +} + +template +GPUMatrix& GPUMatrix::InplaceTruncate(const ElemType threshold) +{ + if (IsEmpty()) + LogicError("InplaceTruncate: Matrix is empty."); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(N * 1.0 / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _inplaceTruncate<<>>(Data(), threshold, N); + return *this; +} + +template +GPUMatrix& GPUMatrix::InplaceSoftThreshold(const ElemType threshold) +{ + if (IsEmpty()) + LogicError("InplaceSoftThreshold: Matrix is empty."); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(N * 1.0 / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _inplaceSoftThreshold<<>>(Data(), threshold, N); + return *this; +} +template +GPUMatrix& GPUMatrix::SetToZeroIfAbsLessThan(const ElemType threshold) +{ + if (IsEmpty()) + LogicError("SetToZeroIfAbsLessThan: Matrix is empty."); + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(N * 1.0 / GridDim::maxThreadsPerBlock); + PrepareDevice(); + SyncGuard syncGuard; + _setToZeroIfAbsLessThan<<>>(Data(), threshold, N); + return *this; +} + +template +ElemType GPUMatrix::SumOfAbsElements() const +{ + if (IsEmpty()) + LogicError("SumOfAbsElements: Matrix is empty"); + + cublasHandle_t cuHandle = GetCublasHandle(GetComputeDeviceId()); + if (sizeof(ElemType) == sizeof(float)) + { + float res = 0; + CUBLAS_CALL(cublasSasum(cuHandle, (CUDA_LONG) GetNumElements(), reinterpret_cast(Data()), 1, &res)); + return res; + } + else + { + double res = 0; + CUBLAS_CALL(cublasDasum(cuHandle, (CUDA_LONG) GetNumElements(), reinterpret_cast(Data()), 1, &res)); + return ElemType(res); + } +} + +template +ElemType GPUMatrix::SumOfElements() const +{ + if (IsEmpty()) + LogicError("SumOfElements: Matrix is empty"); + + ElemType* d_sum = TracingGPUMemoryAllocator::Allocate(GetComputeDeviceId(), 1); + ElemType h_sum; + + // WARNING: THIS kernel is not the most efficient way! + // note: kernel has hard-coded dimension of 1024 + _reductionSum1024Threads << <1, 1024, 0, t_stream >> >(Data(), d_sum, (CUDA_LONG)GetNumElements()); + CUDA_CALL(cudaMemcpy(&h_sum, d_sum, sizeof(ElemType), cudaMemcpyDeviceToHost)); + TracingGPUMemoryAllocator::Free(GetComputeDeviceId(), d_sum); + return h_sum; +} + +template +GPUMatrix& GPUMatrix::AssignSumOfElements(const GPUMatrix& a) +{ + if (a.IsEmpty()) + LogicError("AssignSumOfElements: Matrix a is empty"); + + RequireSize(1, 1); + + PrepareDevice(); + SyncGuard syncGuard; + // WARNING: THIS kernel is not the most efficient way! 
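+    // The reduction below is launched as a single 1024-thread block: each thread presumably
+    // strides over the input accumulating a partial sum, and the partials are then combined in
+    // shared memory (a tree reduction). A multi-block two-pass reduction (or cub::DeviceReduce::Sum,
+    // since cub is already used elsewhere in this file) would scale better for large matrices,
+    // which is what the warning above refers to.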
+ // note: kernel has hard-coded dimension of 1024 + _reductionSumAndAssign1024Threads << <1, 1024 >> >(Data(), a.Data(), (CUDA_LONG)a.GetNumElements(), (CUDA_LONG)GetNumElements()); + return (*this); +} + +template +DeviceBoundNumber GPUMatrix::Sum_AsDeviceBoundNum() const +{ + if (IsEmpty()) + LogicError("Matrix is empty"); + ElemType* d_sum = TracingGPUMemoryAllocator::Allocate(GetComputeDeviceId(), 1); + + // WARNING: THIS kernel is not the most efficient way! + // note: kernel has hard-coded dimension of 1024 + _reductionSum1024Threads << <1, 1024, 0, t_stream >> >(Data(), d_sum, (CUDA_LONG)GetNumElements()); + DeviceBoundNumber result; + result.ShallowCopyFrom(d_sum, GetComputeDeviceId()); + return result; +} + +template +ElemType GPUMatrix::AbsoluteMax() const +{ + cublasHandle_t cuHandle = GetCublasHandle(GetComputeDeviceId()); + ElemType res; + if (sizeof(ElemType) == sizeof(float)) + { + int resInd = 0; + cublasIsamax(cuHandle, (CUDA_LONG)GetNumElements(), reinterpret_cast(Data()), 1, &resInd); + resInd--; + CUDA_CALL(cudaMemcpy(reinterpret_cast(&res), reinterpret_cast(Data() + resInd), sizeof(float), cudaMemcpyDeviceToHost)); + return res; + } + else + { + int resInd = 0; + cublasIdamax(cuHandle, (CUDA_LONG)GetNumElements(), reinterpret_cast(Data()), 1, &resInd); + resInd--; + + CUDA_CALL(cudaMemcpy(reinterpret_cast(&res), Data() + resInd, sizeof(double), cudaMemcpyDeviceToHost)); + + return res; + } +} + +template +GPUMatrix& GPUMatrix::ElementMultiplyWith(const GPUMatrix& a) +{ + if (IsEmpty() || a.IsEmpty()) + LogicError("ElementMultiplyWith: Matrix is empty."); + + GPUMatrix& us = *this; + assert(us.GetNumRows() == a.GetNumRows() && us.GetNumCols() == a.GetNumCols()); + if (us.GetNumRows() != a.GetNumRows() || us.GetNumCols() != a.GetNumCols()) + InvalidArgument("The matrix dimensions do not match."); + + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(((double) N) / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _elemMul<<>>(Data(), a.Data(), N); + return *this; +} + +template +GPUMatrix& GPUMatrix::AssignElementProductOf(const GPUMatrix& a, const GPUMatrix& b) +{ + if (a.IsEmpty() || b.IsEmpty()) + LogicError("AssignElementProductOf: Matrix is empty."); + + assert(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()); + if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) + InvalidArgument("The input matrix dimensions do not match."); + + RequireSize(a.GetNumRows(), a.GetNumCols()); + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(((double) N) / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _assignElementProductOf<<>>(Data(), a.Data(), b.Data(), N); + return *this; +} + +template +GPUMatrix& GPUMatrix::ElementDivideBy(const GPUMatrix& a) +{ + return AssignElementDivisionOf(*this, a); +} + +template +GPUMatrix& GPUMatrix::AssignElementDivisionOf(const GPUMatrix& a, const GPUMatrix& b) +{ + if (a.IsEmpty() || b.IsEmpty()) + LogicError("AssignElementDivisionOf: Matrix is empty."); + + assert(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()); + if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) + InvalidArgument("The input matrix dimensions do not match."); + + RequireSize(a.GetNumRows(), a.GetNumCols()); + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(((double) N) / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + 
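+    // Element-wise division: after the launch below, (*this)(i,j) = a(i,j) / b(i,j) for every
+    // element (this matrix was resized to match a above). Zeros in b, if any, are not checked
+    // on the host here; the result in that case depends on the kernel's floating-point behavior.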
_assignElementDivisionOf<<>>(Data(), a.Data(), b.Data(), N); + return *this; +} + +template +bool GPUMatrix::IsEqualTo(const GPUMatrix& a, const ElemType threshold /*= 1e-8*/) const +{ + return AreEqual(*this, a, threshold); +} + +template +void GPUMatrix::VectorSum(const GPUMatrix& a, GPUMatrix& c, const bool isColWise) +{ + if (a.GetComputeDeviceId() != c.GetComputeDeviceId()) + { + InvalidArgument("All matrices must be on the same GPU"); + } + + a.PrepareDevice(); + + if (a.IsEmpty()) + LogicError("VectorSum: Input matrix is empty."); + + const CUDA_LONG n = (CUDA_LONG) a.GetNumRows(); + const CUDA_LONG m = (CUDA_LONG) a.GetNumCols(); + assert(m > 0 && n > 0); // converting from size_t to int may cause overflow + + int blocksPerGrid = 0; + if (isColWise) // col-wise + { + c.RequireSize(1, m); + blocksPerGrid = (int) ceil(1.0 * m / GridDim::maxThreadsPerBlock); + } + else + { + c.RequireSize(n, 1); + blocksPerGrid = (int) ceil(1.0 * n / GridDim::maxThreadsPerBlock); + } + + SyncGuard syncGuard; + _vectorSum<<>>(c.Data(), a.Data(), n, m, isColWise); +} +template +void GPUMatrix::VectorNorm1(GPUMatrix& c, const bool isColWise) const +{ + if (IsEmpty()) + LogicError("VectorNorm1: Matrix is empty."); + + const CUDA_LONG n = (CUDA_LONG) GetNumRows(); + const CUDA_LONG m = (CUDA_LONG) GetNumCols(); + assert(m > 0 && n > 0); // converting from size_t to int may cause overflow + + PrepareDevice(); + c.ChangeDeviceTo(GetComputeDeviceId()); + + int blocksPerGrid = 0; + if (isColWise) // col-wise + { + c.RequireSize(1, m); + blocksPerGrid = (int) ceil(1.0 * m / GridDim::maxThreadsPerBlock); + } + else + { + c.RequireSize(n, 1); + blocksPerGrid = (int) ceil(1.0 * n / GridDim::maxThreadsPerBlock); + } + + SyncGuard syncGuard; + _vectorNorm1<<>>(c.Data(), Data(), n, m, isColWise); +} + +template +GPUMatrix& GPUMatrix::AssignVectorNorm1Of(GPUMatrix& a, const bool isColWise) +{ + a.VectorNorm1(*this, isColWise); + return *this; +} + +template +void GPUMatrix::VectorNorm2(GPUMatrix& c, const bool isColWise) const +{ + if (IsEmpty()) + LogicError("VectorNorm2: Matrix is empty."); + + const CUDA_LONG n = (CUDA_LONG) GetNumRows(); + const CUDA_LONG m = (CUDA_LONG) GetNumCols(); + assert(m > 0 && n > 0); // converting from size_t to int may cause overflow + + PrepareDevice(); + c.ChangeDeviceTo(GetComputeDeviceId()); + + int blocksPerGrid = 0; + if (isColWise) // col-wise + { + c.RequireSize(1, m); + blocksPerGrid = (int) ceil(1.0 * m / GridDim::maxThreadsPerBlock); + } + else + { + c.RequireSize(n, 1); + c.ChangeDeviceTo(GetComputeDeviceId()); + blocksPerGrid = (int) ceil(1.0 * n / GridDim::maxThreadsPerBlock); + } + + SyncGuard syncGuard; + _vectorNorm2<<>>(c.Data(), Data(), n, m, isColWise); +} + +template +GPUMatrix& GPUMatrix::AssignVectorNorm2Of(GPUMatrix& a, const bool isColWise) +{ + a.VectorNorm2(*this, isColWise); + return *this; +} + +template +void GPUMatrix::VectorNormInf(GPUMatrix& c, const bool isColWise) const +{ + if (IsEmpty()) + LogicError("VectorMax: Matrix is empty."); + + // this implementation is not efficient + GPUMatrix tmp(GetComputeDeviceId()); + GPUMatrix tmp1(GetComputeDeviceId()); + tmp.AssignAbsOf((*this)); + tmp.VectorMax(tmp1, c, isColWise); +} + +template +GPUMatrix& GPUMatrix::AssignVectorNormInfOf(GPUMatrix& a, const bool isColWise) +{ + a.VectorNormInf(*this, isColWise); + return *this; +} + +template +GPUMatrix& GPUMatrix::AssignInnerProductOf(const GPUMatrix& a, const GPUMatrix& b, const bool isColWise) +{ + InnerProduct(a, b, *this, isColWise); + return *this; +} + 
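+// Khatri-Rao product (column-wise Kronecker product), as used by AssignKhatriRaoProductOf below:
+// for a of size (rowsA x cols) and b of size (rowsB x cols) the result is (rowsA*rowsB x cols),
+// with column j equal to kron(a(:,j), b(:,j)). E.g. a(:,j) = [1; 2] and b(:,j) = [3; 4; 5] gives
+// output column [3; 4; 5; 6; 8; 10], assuming the usual ordering in which each entry of a(:,j)
+// scales a full copy of b(:,j).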
+template +GPUMatrix& GPUMatrix::AssignKhatriRaoProductOf(const GPUMatrix& a, const GPUMatrix& b) +{ + if (a.IsEmpty() || b.IsEmpty()) + LogicError("AssignKhatriRaoProductOf: Matrix is empty."); + + CUDA_LONG cols = a.GetNumCols(); + assert(cols == b.GetNumCols()); + if (!(cols == b.GetNumCols())) + InvalidArgument("AssignKhatriRaoProductOf: The input matrix dimensions do not match."); + + CUDA_LONG rowsA = (CUDA_LONG) a.GetNumRows(); + CUDA_LONG rowsB = (CUDA_LONG) b.GetNumRows(); + RequireSize(rowsA * rowsB, cols); + float N = (float) GetNumElements(); + int blocksPerGrid = (int) ceil(N / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _assignKhatriRaoProductOf<<>>(Data(), a.Data(), b.Data(), rowsA, rowsB, cols); + return *this; +} + +//column-wise reshaped product. Used to compute KhatriRaoProduct Gradient +// this = reshape each column of a from (K1xK2,1) to (K1, K2) +// if each column of a is not transposed, each (K1, K2) times each column of b (K2, frames). +// the output is a (K1, frames) matrix +// if each column of a is tranposed, each (K1, K2)^T times each column of b(K1, frames) and output is (K2, frames) +template +GPUMatrix& GPUMatrix::AddColumnReshapeProductOf(const GPUMatrix& a, const GPUMatrix& b, const bool transposeAColumn) +{ + if (a.IsEmpty() || b.IsEmpty()) + LogicError("AddColumnReshapeProductOf: Matrix is empty."); + + CUDA_LONG cols = a.GetNumCols(); + assert(cols == b.GetNumCols()); + if (!(cols == b.GetNumCols())) + InvalidArgument("AddColumnReshapeProductOf: The input matrix dimensions do not match."); + + CUDA_LONG rowsA = (CUDA_LONG) a.GetNumRows(); + CUDA_LONG rowsB = (CUDA_LONG) b.GetNumRows(); + if (rowsA % rowsB != 0) + InvalidArgument("AddColumnReshapeProductOf: number of rows in a should be multiples of that in b."); + + CUDA_LONG rowsC = rowsA / rowsB; + if (rowsC != GetNumRows() || cols != GetNumCols()) + InvalidArgument("AddColumnReshapeProductOf: This matrix does not have the right size."); + + float N = (float) GetNumElements(); + int blocksPerGrid = (int) ceil(N / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _addColumnReshapeProductOf<<>>(Data(), a.Data(), b.Data(), rowsB, rowsC, cols, transposeAColumn); + return *this; +} + +template +GPUMatrix& GPUMatrix::AddWithScaleOf(ElemType alpha, const GPUMatrix& a) +{ + ScaleAndAdd(alpha, a, *this); + return *this; +} + +template +ElemType GPUMatrix::FrobeniusNorm() const +{ + if (IsEmpty()) + LogicError("FrobeniusNorm: Matrix is empty."); + + ElemType* d_sum = TracingGPUMemoryAllocator::Allocate(GetComputeDeviceId(), 1); + + ElemType h_sum = 0; + // WARNING: THIS kernel is not the most efficient way! + // note: kernel has hard-coded dimension of 1024 + _reductionSum21024Threads << <1, 1024, 0, t_stream >> >(Data(), d_sum, (CUDA_LONG)GetNumElements(), true); + CUDA_CALL(cudaMemcpy(&h_sum, d_sum, sizeof(ElemType), cudaMemcpyDeviceToHost)); + TracingGPUMemoryAllocator::Free(GetComputeDeviceId(), d_sum); + + return (h_sum); +} + +template +GPUMatrix& GPUMatrix::AssignFrobeniusNormOf(const GPUMatrix& a) +{ + if (a.IsEmpty()) + LogicError("AssignFrobeniusNormOf: Matrix a is empty."); + + RequireSize(1, 1); + + PrepareDevice(); + // WARNING: THIS kernel is not the most efficient way! 
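+    // Frobenius norm: ||a||_F = sqrt( sum_{i,j} a(i,j)^2 ). The reduction kernel below sums the
+    // squared elements (its trailing 'true' argument presumably requests the final square root)
+    // and writes the scalar result directly into this 1x1 matrix on the device.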
+ // note: kernel has hard-coded dimension of 1024 + _reductionSum21024Threads << <1, 1024, 0, t_stream >> >(a.Data(), Data(), (CUDA_LONG)a.GetNumElements(), true); + + return *this; +} + +template +ElemType GPUMatrix::MatrixNormInf() const +{ + if (IsEmpty()) + LogicError("MatrixNormInf: Matrix is empty."); + + ElemType* d_maxAbs = TracingGPUMemoryAllocator::Allocate(GetComputeDeviceId(), 1); + + ElemType h_maxAbs = 0; + // WARNING: THIS kernel is not the most efficient way! + // note: kernel has hard-coded dimension of 1024 + _reductionMatrixNormInf1024Threads << <1, 1024, 0, t_stream >> >(Data(), d_maxAbs, (CUDA_LONG)GetNumElements()); + CUDA_CALL(cudaMemcpy(&h_maxAbs, d_maxAbs, sizeof(ElemType), cudaMemcpyDeviceToHost)); + TracingGPUMemoryAllocator::Free(GetComputeDeviceId(), d_maxAbs); + return h_maxAbs; +} + +template +ElemType GPUMatrix::MatrixNorm1() const +{ + if (IsEmpty()) + LogicError("MatrixNorm1: Matrix is empty."); + return SumOfAbsElements(); +} + +template +ElemType GPUMatrix::MatrixNorm0() const +{ + if (IsEmpty()) + LogicError("MatrixNorm0: Matrix is empty."); + + ElemType* d_nz = TracingGPUMemoryAllocator::Allocate(GetComputeDeviceId(), 1); + ElemType h_nz = 0; + // WARNING: THIS kernel is not the most efficient way! + // note: kernel has hard-coded dimension of 1024 + _reductionMatrixNorm01024Threads << <1, 1024, 0, t_stream >> >(Data(), d_nz, (CUDA_LONG)GetNumElements()); + CUDA_CALL(cudaMemcpy(&h_nz, d_nz, sizeof(ElemType), cudaMemcpyDeviceToHost)); + TracingGPUMemoryAllocator::Free(GetComputeDeviceId(), d_nz); + return h_nz; +} + +template +GPUMatrix& GPUMatrix::AssignSignOf(const GPUMatrix& a) +{ + if (a.IsEmpty()) + LogicError("AssignSignOf: Matrix a is empty."); + + if (this != &a) + RequireSize(a.GetNumRows(), a.GetNumCols()); + + PrepareDevice(); + int blocksPerGrid = (int) ceil(1.0 * GetNumElements() / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _assignSignOf<<>>(Data(), a.Data(), (CUDA_LONG) GetNumElements()); + return *this; +} + +template +GPUMatrix& GPUMatrix::AddSignOf(const GPUMatrix& a) +{ + if (a.IsEmpty()) + LogicError("AddSignOf: Matrix a is empty."); + + if (this != &a) + RequireSize(a.GetNumRows(), a.GetNumCols()); + + PrepareDevice(); + int blocksPerGrid = (int) ceil(1.0 * GetNumElements() / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _addSignOf<<>>(Data(), a.Data(), (CUDA_LONG) GetNumElements()); + return *this; +} + +template +void GPUMatrix::VectorMax(GPUMatrix& maxIndexes, GPUMatrix& maxValues, const bool isColWise) const +{ + if (IsEmpty()) + LogicError("VectorMax: Matrix is empty."); + + const GPUMatrix& us = *this; + const CUDA_LONG m = (CUDA_LONG) GetNumRows(); + const CUDA_LONG n = (CUDA_LONG) GetNumCols(); + assert(m > 0 && n > 0); // converting from size_t to int may cause overflow + + PrepareDevice(); + SyncGuard syncGuard; + if (isColWise) + { + maxValues.RequireSize(1, n); + maxIndexes.RequireSize(1, n); + + int blocksPerGrid = n; // we'll have 1 block processing 1 column + // note: kernel has hard-coded dimension of 512 + _vectorMaxMinReduce512Threads<<>>(us.Data(), maxIndexes.Data(), maxValues.Data(), m, n); + + /*int blocksPerGrid=(int)ceil(1.0*n/GridDim::maxThreadsPerBlock); + _vectorMax<<>>(us.Data(),maxIndexes.Data(),maxValues.Data(),m,n,isColWise);*/ + } + else + { + maxValues.RequireSize(m, 1); + maxIndexes.RequireSize(m, 1); + int blocksPerGrid = (int) ceil(1.0 * m / GridDim::maxThreadsPerBlock); + _vectorMax<<>>(us.Data(), maxIndexes.Data(), maxValues.Data(), m, n, isColWise); + } +} + +__global__ void 
_initIndicesForSort(uint64_t* indexes, CUDA_LONG crow, CUDA_LONG ccol) +{ + CUDA_LONG id = blockDim.x * blockIdx.x + threadIdx.x; + if (id >= crow * ccol) + return; + uint32_t irow = id % crow; + uint32_t icol = id / crow; + indexes[id] = (static_cast(irow) << 32) | icol; +} + +template +void GPUMatrix::VectorMax(GPUMatrix& maxIndexes, GPUMatrix& maxValues, const bool isColWise, int topK) const +{ + if (IsEmpty()) + LogicError("VectorMax: Matrix is empty."); + + if (topK == 1) + { + VectorMax(maxIndexes, maxValues, isColWise); + return; + } + + if (!isColWise) + RuntimeError("Row-wise TopK max is not supported."); + + const GPUMatrix& us = *this; + const CUDA_LONG m = (CUDA_LONG) GetNumRows(); + const CUDA_LONG n = (CUDA_LONG) GetNumCols(); + assert(topK <= m); + assert(m > 0 && n > 0); // converting from size_t to int may cause overflow + + PrepareDevice(); + SyncGuard syncGuard; + maxValues.RequireSize(topK, n); + maxIndexes.RequireSize(topK, n); + + // To sort matrix columns we use 2-pass _stable_ sort algorithm: + // 1. Sort by values (descending) with corresponding row/col indexes. + // 2. Sort by col indices (ascending) with corresponding values/row indices. + // Indices are stored as 64-bit ints where low 32 bits represent column and high 32 bits - row index. + // On the second pass only first 32 bits of the index are used in sorting, so SortPairs has + // begin_bit and end_bit set accordingly. + + CUDA_LONG celt = static_cast(GetNumElements()); + ElemType* inVal = us.Data(); + ElemType* outVal1 = nullptr; + ElemType* outVal2 = nullptr; + uint64_t* inIdx = nullptr; + uint64_t* outIdx = nullptr; + // Determine temp buffer size needed for SortPairsDescending to sort values on the first pass. + size_t cbtemp = 0; + // If first param is nullptr then no actual work is done except writing result to cbtemp. + CUDA_CALL(cub::DeviceRadixSort::SortPairsDescending(nullptr, cbtemp, inVal, outVal1, inIdx, outIdx, celt, 0, sizeof(ElemType) * 8, t_stream)); + size_t ctemp1 = (cbtemp + sizeof(ElemType) - 1) / sizeof(ElemType); + // Determine temp buffer size needed for SortPairs to sort indices on the second pass. + cbtemp = 0; + CUDA_CALL(cub::DeviceRadixSort::SortPairs(nullptr, cbtemp, outIdx, inIdx, outVal1, outVal2, celt, 0, 32, t_stream)); + size_t ctemp2 = (cbtemp + sizeof(ElemType) - 1) / sizeof(ElemType); + size_t ctemp = std::max(ctemp1, ctemp2); + cbtemp = ctemp * sizeof(ElemType); + // ElemType count needed to store indices, accounting for natural alignment for uint64_t type. + size_t cidx = ((celt + 1) * sizeof(uint64_t) - 1 + sizeof(ElemType) - 1) / sizeof(ElemType); + // Get temp workspace. + auto workspace = GetOrCreateWorkspace(); + // RequireSize to store: output values for the 1st and 2nd passes, input indices, output indices, and temp storage. + workspace->RequireSize(m, 2 * n + (2 * cidx + ctemp + m - 1) / m); + outVal1 = workspace->Data(); + outVal2 = outVal1 + celt; + inIdx = reinterpret_cast(outVal2 + celt); + // Align indices pointer if needed. + size_t cbAlign = reinterpret_cast(inIdx) % sizeof(uint64_t); + if (cbAlign != 0) + reinterpret_cast(inIdx) += sizeof(uint64_t) - cbAlign; + outIdx = inIdx + celt; + void* ptmp = outIdx + celt; + assert(reinterpret_cast(reinterpret_cast(ptmp) + cbtemp) <= workspace->Data() + workspace->GetNumElements()); + + // Initialize indices. + const int ThreadsPerBlock = 128; + int cblock = (celt + ThreadsPerBlock - 1) / ThreadsPerBlock; + _initIndicesForSort<<>>(inIdx, m, n); + // Sort by values. 
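+    // The two passes below implement a segmented (per-column) top-K on top of a flat radix sort:
+    // pass 1 sorts all (value, packedIndex) pairs by value in descending order; pass 2 then
+    // stable-sorts by the low 32 bits of the packed index (the column), which groups each column's
+    // entries together while preserving the descending value order within each column. The first
+    // topK entries of every group are that column's top-K values, which _copyTopKResults extracts.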
+ CUDA_CALL(cub::DeviceRadixSort::SortPairsDescending(ptmp, cbtemp, inVal, outVal1, inIdx, outIdx, celt, 0, sizeof(ElemType) * 8, t_stream)); + // Sort by column indices. outIdx contains indices after the first pass so it's used as an input. + CUDA_CALL(cub::DeviceRadixSort::SortPairs(ptmp, cbtemp, outIdx, inIdx, outVal1, outVal2, celt, 0, 32, t_stream)); + // Copy results. + cblock = (topK * n + ThreadsPerBlock - 1) / ThreadsPerBlock; + _copyTopKResults<<>>(inIdx, outVal2, maxIndexes.Data(), maxValues.Data(), m, n, topK); + + ReleaseWorkspace(std::move(workspace)); + +} + +template +void GPUMatrix::VectorMin(GPUMatrix& minIndexes, GPUMatrix& minValues, const bool isColWise) const +{ + if (IsEmpty()) + LogicError("VectorMax: Matrix is empty."); + + const GPUMatrix& us = *this; + const int m = (int) GetNumRows(); + const int n = (int) GetNumCols(); + + assert(m > 0 && n > 0); // converting from size_t to int may cause overflow + PrepareDevice(); + SyncGuard syncGuard; + if (isColWise) + { + minValues.RequireSize(1, n); + minIndexes.RequireSize(1, n); + + int blocksPerGrid = n; // we'll have 1 block processing 1 column + // note: kernel has hard-coded dimension of 512 + _vectorMaxMinReduce512Threads << > >(us.Data(), minIndexes.Data(), minValues.Data(), m, n); + + /* + int blocksPerGrid=(int)ceil(1.0*n/GridDim::maxThreadsPerBlock); + _vectorMin<<>>(us.Data(),minIndexes.Data(),minValues.Data(),m,n,isColWise);*/ + } + else + { + minValues.RequireSize(m, 1); + minIndexes.RequireSize(m, 1); + int blocksPerGrid = (int) ceil(1.0 * m / GridDim::maxThreadsPerBlock); + _vectorMin<<>>(us.Data(), minIndexes.Data(), minValues.Data(), m, n, isColWise); + } +} + +template +GPUMatrix& GPUMatrix::AssignNumOfDiff(const GPUMatrix& a, const GPUMatrix& b, bool searchInCol) +{ + if (a.GetNumCols() != b.GetNumCols()) + InvalidArgument("AssignNumOfDiff: a and b must have the same number of columns."); + if (!searchInCol && a.GetNumRows() != b.GetNumRows()) + InvalidArgument("AssignNumOfDiff: a and b must have the same number of rows."); + + RequireSize(1, 1); // result should be one element + + PrepareDevice(); + SyncGuard syncGuard; + if (!searchInCol) + { + // int blocksPerGrid=(int)ceil(1.0*a.GetNumElements()/GridDim::maxThreadsPerBlock); + // _assignNumOfDiff1024Threads<<>>(a.Data(), b.Data(), Data(), a.GetNumElements()); + // note: kernel has hard-coded dimension of 1024 + _assignNumOfDiff1024Threads << <1, 1024, 0, t_stream >> >(a.Data(), b.Data(), Data(), (CUDA_LONG)a.GetNumElements()); + } + else + { + const int blockSize = 1024; + _assignNumOfDiffCol<<<1, blockSize, 0, t_stream>>>(a.Data(), b.Data(), Data(), + static_cast(b.GetNumRows()), static_cast(a.GetNumCols())); + } + return *this; +} + +#pragma endregion Member BLAS Functions + +#pragma region Other helper functions +template +void GPUMatrix::Print(const char* /*matrixName*/, size_t /*rowStart*/, size_t /*rowEnd*/, size_t /*colStart*/, size_t /*colEnd*/) const +{ + NOT_IMPLEMENTED; +} + +template +void GPUMatrix::Print(const char* matrixName /*=nullptr*/) const +{ + size_t elemCount = GetNumRows() * GetNumCols(); + vector localCopy(elemCount); + cudaMemcpy(localCopy.data(), Data(), elemCount * sizeof(ElemType), cudaMemcpyDeviceToHost); + + fprintf(stderr, "\n###### "); + if (matrixName != nullptr) + fprintf(stderr, "%s ", matrixName); + fprintf(stderr, "(%lu, %lu) ######\n\n", (unsigned long)GetNumRows(), (unsigned long)GetNumCols()); + + if (IsEmpty()) + { + fprintf(stderr, "(empty)\n"); + return; + } + + // CNTK is using column-major storage + 
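+    // i.e. element (i, j) lives at localCopy[i + j * GetNumRows()]. For a 3x2 matrix, for
+    // instance, (row 2, col 1) sits at index 2 + 1 * 3 = 5 in the flat host copy.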
for (size_t i = 0; i < GetNumRows(); i++) + { + for (size_t j = 0; j < GetNumCols(); j++) + { + fprintf(stderr, "%.10f\t", localCopy[i + j * GetNumRows()]); + } + fprintf(stderr, "\n"); + } +} + +//helpfer function used for convolution neural network +template +GPUMatrix& GPUMatrix::AssignPackedConvolutionInput(const GPUMatrix& inputSubBatch, + const size_t inputWidth, const size_t inputHeight, const size_t inputChannels, + const size_t outputWidth, const size_t outputHeight, const size_t outputChannels, + const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample, + const bool zeroPadding) +{ + assert(verticalSubsample <= kernelHeight && horizontalSubsample <= kernelWidth); + + size_t packedInputRows = kernelWidth * kernelHeight * inputChannels; + size_t packedInputColsPerSample = outputWidth * outputHeight; + size_t smallBatchSize = inputSubBatch.GetNumCols(); + RequireSize(packedInputRows, packedInputColsPerSample * smallBatchSize); + if (zeroPadding) + SetValue((ElemType) 0); + + PrepareDevice(); + int numThreadPerBlock = GridDim::maxThreadsPerBlock; +#if 1 + int blocksPerGrid = (smallBatchSize * inputWidth * inputHeight * inputChannels + numThreadPerBlock - 1) / numThreadPerBlock; +#else + dim3 blocksPerGrid((inputWidth * inputHeight * inputChannels + numThreadPerBlock - 1) / numThreadPerBlock, smallBatchSize); +#endif + SyncGuard syncGuard; + _assignPackedConvolutionInput<<>>(Data(), + inputSubBatch.Data(), + smallBatchSize, + inputWidth, inputHeight, inputChannels, + outputWidth, outputHeight, outputChannels, + kernelWidth, kernelHeight, horizontalSubsample, verticalSubsample, zeroPadding); + + return *this; +} + +//helpfer function used for convolution neural network +template +GPUMatrix& GPUMatrix::UnpackConvolutionInput(GPUMatrix& inputSubBatch, + const size_t inputWidth, const size_t inputHeight, const size_t inputChannels, + const size_t outputWidth, const size_t outputHeight, const size_t outputChannels, + const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample, + const bool zeroPadding) const +{ + assert(verticalSubsample <= kernelHeight && horizontalSubsample <= kernelWidth); + + size_t smallBatchSize = inputSubBatch.GetNumCols(); + + PrepareDevice(); + int numThreadPerBlock = GridDim::maxThreadsPerBlock; +#if 1 + int blocksPerGrid = (smallBatchSize * inputWidth * inputHeight * inputChannels + numThreadPerBlock - 1) / numThreadPerBlock; +#else + dim3 blocksPerGrid((inputWidth * inputHeight * inputChannels + numThreadPerBlock - 1) / numThreadPerBlock, smallBatchSize); +#endif + SyncGuard syncGuard; + _unpackConvolutionInput<<>>(Data(), + inputSubBatch.Data(), + smallBatchSize, + inputWidth, inputHeight, inputChannels, + outputWidth, outputHeight, outputChannels, + kernelWidth, kernelHeight, horizontalSubsample, verticalSubsample, zeroPadding); + + return inputSubBatch; +} + +template +GPUMatrix& GPUMatrix::AssignMaxPoolingResult(const GPUMatrix& inputBatch, const size_t channels, + const size_t inputWidth, const size_t inputHeight, const size_t inputSizePerSample, + const size_t outputWidth, const size_t outputHeight, const size_t outputSizePerSample, + const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample) +{ + assert(verticalSubsample <= windowHeight && horizontalSubsample <= windowWidth); + + unsigned int batchSize = inputBatch.GetNumCols(); + RequireSize(outputSizePerSample, batchSize); + + 
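+    // Output layout: one column per sample, with outputSizePerSample rows (typically
+    // channels * outputWidth * outputHeight). The launch below uses one thread per output
+    // element, i.e. ceil(batchSize * outputSizePerSample / maxThreadsPerBlock) blocks.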
int numThreadPerBlock = GridDim::maxThreadsPerBlock; + int blocksPerGrid = (batchSize * outputSizePerSample + numThreadPerBlock - 1) / numThreadPerBlock; + + PrepareDevice(); + SyncGuard syncGuard; + _assignMaxPoolingResult<<>>(Data(), inputBatch.Data(), batchSize, channels, + inputWidth, inputHeight, inputSizePerSample, + outputWidth, outputHeight, outputSizePerSample, + windowWidth, windowHeight, horizontalSubsample, verticalSubsample); + + return *this; +} + +template +GPUMatrix& GPUMatrix::AddMaxPoolingGradient(const GPUMatrix& outputGradientBatch, const GPUMatrix& inputBatch, const GPUMatrix& outputBatch, + const size_t channels, + const size_t inputWidth, const size_t inputHeight, const size_t inputSizePerSample, + const size_t outputWidth, const size_t outputHeight, const size_t outputSizePerSample, + const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample) +{ + assert(verticalSubsample <= windowHeight && horizontalSubsample <= windowWidth); + + unsigned int batchSize = outputGradientBatch.GetNumCols(); + int numThreadPerBlock = GridDim::maxThreadsPerBlock; + + PrepareDevice(); + SyncGuard syncGuard; + + int blocksPerGrid = (batchSize * inputSizePerSample + numThreadPerBlock - 1) / numThreadPerBlock; + _addMaxPoolingGradient<<>>(Data(), outputGradientBatch.Data(), inputBatch.Data(), outputBatch.Data(), batchSize, channels, + inputWidth, inputHeight, inputSizePerSample, + outputWidth, outputHeight, outputSizePerSample, + windowWidth, windowHeight, horizontalSubsample, verticalSubsample); + + return *this; +} + +template +GPUMatrix& GPUMatrix::AssignAveragePoolingResult(const GPUMatrix& inputBatch, const size_t channels, + const size_t inputWidth, const size_t inputHeight, const size_t inputSizePerSample, + const size_t outputWidth, const size_t outputHeight, const size_t outputSizePerSample, + const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample) +{ + assert(verticalSubsample <= windowHeight && horizontalSubsample <= windowWidth); + + unsigned int batchSize = inputBatch.GetNumCols(); + RequireSize(outputSizePerSample, batchSize); + + int numThreadPerBlock = GridDim::maxThreadsPerBlock; + int blocksPerGrid = (batchSize * outputSizePerSample + numThreadPerBlock - 1) / numThreadPerBlock; + + PrepareDevice(); + SyncGuard syncGuard; + _assignAveragePoolingResult<<>>(Data(), inputBatch.Data(), batchSize, channels, + inputWidth, inputHeight, inputSizePerSample, + outputWidth, outputHeight, outputSizePerSample, + windowWidth, windowHeight, horizontalSubsample, verticalSubsample); + + return *this; +} + +template +GPUMatrix& GPUMatrix::AddAveragePoolingGradient(const GPUMatrix& outputGradientBatch, + const size_t channels, + const size_t inputWidth, const size_t inputHeight, const size_t inputSizePerSample, + const size_t outputWidth, const size_t outputHeight, const size_t outputSizePerSample, + const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample) +{ + assert(verticalSubsample <= windowHeight && horizontalSubsample <= windowWidth); + + size_t batchSize = outputGradientBatch.GetNumCols(); + int numThreadPerBlock = GridDim::maxThreadsPerBlock; + + PrepareDevice(); + SyncGuard syncGuard; + size_t blocksPerGrid = (batchSize * inputSizePerSample + numThreadPerBlock - 1) / numThreadPerBlock; + _addAveragePoolingGradient<<>>(Data(), outputGradientBatch.Data(), (CUDA_LONG) batchSize, channels, + inputWidth, 
inputHeight, inputSizePerSample, + outputWidth, outputHeight, outputSizePerSample, + windowWidth, windowHeight, horizontalSubsample, verticalSubsample); + + return *this; +} + +#pragma endregion Other helper functions + +template +void GPUMatrix::ConvolutionForward(const GPUMatrix& kernel, const GPUMatrix& mpRowCol, const GPUMatrix& mpRowIwht, + const GPUMatrix& mpRowRun, const GPUMatrix& runs, GPUMatrix& output) const +{ + const int BlockSize = 128; + auto gdim = dim3((output.GetNumRows() + BlockSize - 1)/ BlockSize, std::min((int)GetNumCols(), 65535)); + PrepareDevice(); + SyncGuard syncGuard; + kConvolutionForward<<>>((int)GetNumCols(), kernel.Data(), mpRowCol.Data(), mpRowIwht.Data(), mpRowRun.Data(), + runs.Data(), Data(), (int)GetNumRows(), output.Data(), (int)output.GetNumRows()); +} + +template +void GPUMatrix::ConvolutionBackwardData(const GPUMatrix& kernel, const GPUMatrix& mpRowCol, const GPUMatrix& mpRowIwht, + const GPUMatrix& mpRowRun, const GPUMatrix& runs, GPUMatrix& grad) const +{ + const int BlockSize = 128; + auto gdim = dim3((GetNumRows() + BlockSize - 1)/ BlockSize, std::min((int)GetNumCols(), 65535)); + PrepareDevice(); + SyncGuard syncGuard; + kConvolutionBackwardData<<>>((int)GetNumCols(), kernel.Data(), mpRowCol.Data(), mpRowIwht.Data(), mpRowRun.Data(), + runs.Data(), Data(), (int)GetNumRows(), grad.Data(), (int)grad.GetNumRows()); +} + +template +void GPUMatrix::ConvolutionBackwardKernel(const GPUMatrix& in, const GPUMatrix& mpRowCol, const GPUMatrix& mpRowIwht, + const GPUMatrix& mpRowRun, const GPUMatrix& runs, GPUMatrix& kernelGrad) const +{ + const int BlockSize = 128; + auto gdim = dim3((GetNumRows() + BlockSize - 1)/ BlockSize, std::min((int)GetNumCols(), 65535)); + PrepareDevice(); + SyncGuard syncGuard; + kConvolutionBackwardKernel<<>>((int)GetNumCols(), (int)in.GetNumRows(), (int)GetNumRows(), + in.Data(), mpRowCol.Data(), mpRowIwht.Data(), mpRowRun.Data(), + runs.Data(), Data(), kernelGrad.Data()); +} + +template +void GPUMatrix::MaxPoolingForward(const GPUMatrix& mpRowCol, const GPUMatrix& mpRowIndices, const GPUMatrix& indices, GPUMatrix& output) const +{ + const int BlockSize = 128; + auto gdim = dim3((output.GetNumRows() + BlockSize - 1)/ BlockSize, std::min((int)GetNumCols(), 65535)); + PrepareDevice(); + SyncGuard syncGuard; + kMaxPoolingForward<<>>((int)GetNumCols(), mpRowCol.Data(), mpRowIndices.Data(), indices.Data(), + Data(), (int)GetNumRows(), output.Data(), (int)output.GetNumRows()); +} + +template +void GPUMatrix::MaxPoolingBackward(const GPUMatrix& out, const GPUMatrix& in, + const GPUMatrix& mpRowCol, const GPUMatrix& mpRowIndices, const GPUMatrix& indices, + GPUMatrix& grad) const +{ + const int BlockSize = 128; + auto gdim = dim3((GetNumRows() + BlockSize - 1)/ BlockSize, std::min((int)GetNumCols(), 65535)); + PrepareDevice(); + SyncGuard syncGuard; + kMaxPoolingBackward<<>>((int)GetNumCols(), out.Data(), in.Data(), + mpRowCol.Data(), mpRowIndices.Data(), indices.Data(), + Data(), (int)GetNumRows(), grad.Data(), (int)grad.GetNumRows()); +} + +template +void GPUMatrix::ROIPoolingForward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height, + const size_t pooledWidth, const size_t pooledHeight, const GPUMatrix& roiData, GPUMatrix& output, + GPUMatrix& argmax) const +{ + PrepareDevice(); + SyncGuard syncGuard; + + int count = numRois * numImg * channels * pooledHeight * pooledWidth; + const int blockSize = GridDim::maxThreadsPerBlock; + auto numThreads = dim3((int)floor((double)(count + 
blockSize - 1) / blockSize)); + kROIPoolingForward<<>>(count, numRois, numImg, channels, width, height, + pooledWidth, pooledHeight, Data(), roiData.Data(), output.Data(), argmax.Data()); +} + +template +void GPUMatrix::ROIPoolingBackward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height, + const size_t pooledWidth, const size_t pooledHeight, const GPUMatrix& roiData, GPUMatrix& grad, + GPUMatrix& argmax) const +{ + PrepareDevice(); + SyncGuard syncGuard; + + int count = numImg * channels * height * width; + const int blockSize = GridDim::maxThreadsPerBlock; + auto numThreads = dim3((int)floor((double)(count + blockSize - 1) / blockSize)); + kROIPoolingBackward<<>>(count, numRois, numImg, channels, width, height, + pooledWidth, pooledHeight, Data(), roiData.Data(), grad.Data(), argmax.Data()); +} + +template +void GPUMatrix::MaxUnpooling(const GPUMatrix& mpRowCol, const GPUMatrix& mpRowIndices, const GPUMatrix& indices, const GPUMatrix& poolInput, GPUMatrix& input) const +{ + const int BlockSize = 128; + auto gdim = dim3((GetNumRows() + BlockSize - 1)/ BlockSize, std::min((int)GetNumCols(), 65535)); + PrepareDevice(); + SyncGuard syncGuard; + kMaxUnpooling<<>>((int)GetNumCols(), mpRowCol.Data(), mpRowIndices.Data(), indices.Data(), + Data(), poolInput.Data(), (int)GetNumRows(), input.Data(), (int)input.GetNumRows()); +} + +template +void GPUMatrix::AveragePoolingForward(const GPUMatrix& mpRowCol, const GPUMatrix& mpRowIndices, const GPUMatrix& indices, GPUMatrix& output) const +{ + const int BlockSize = 128; + auto gdim = dim3((output.GetNumRows() + BlockSize - 1)/ BlockSize, std::min((int)GetNumCols(), 65535)); + PrepareDevice(); + SyncGuard syncGuard; + kAveragePoolingForward<<>>((int)GetNumCols(), mpRowCol.Data(), mpRowIndices.Data(), indices.Data(), + Data(), (int)GetNumRows(), output.Data(), (int)output.GetNumRows()); +} + +template +void GPUMatrix::AveragePoolingBackward(const GPUMatrix& mpRowCol, const GPUMatrix& mpRowIndices, const GPUMatrix& indices, GPUMatrix& grad) const +{ + const int BlockSize = 128; + auto gdim = dim3((GetNumRows() + BlockSize - 1)/ BlockSize, std::min((int)GetNumCols(), 65535)); + PrepareDevice(); + SyncGuard syncGuard; + kAveragePoolingBackward<<>>((int)GetNumCols(), mpRowCol.Data(), mpRowIndices.Data(), indices.Data(), + Data(), (int)GetNumRows(), grad.Data(), (int)grad.GetNumRows()); +} + +// returns savedMean/savedInvStdDev which are the actual values used to perform the normalization, except for blendFactor 1, in which case they are unused and set to empty +template +void GPUMatrix::BatchNormalizationForward(const GPUMatrix& scale, const GPUMatrix& bias, bool inferenceOnly, double expAvgFactor, double blendFactor, + GPUMatrix& runMean, GPUMatrix& runVariance, GPUMatrix& out, double epsilon, + GPUMatrix& savedMean, GPUMatrix& savedInvStdDev) const +{ + assert((GetNumRows() % scale.GetNumRows()) == 0); + + bool spatial = GetNumRows() != scale.GetNumRows(); + size_t vectorSize = GetNumRows(); + size_t spatialSize = spatial ? (GetNumRows() / scale.GetNumRows()) : 1; + size_t batchSize = GetNumCols(); + bool normalizeRunningStats; + + assert(0 < vectorSize && vectorSize <= std::numeric_limits::max()); + assert(0 < batchSize && batchSize <= std::numeric_limits::max()); + + SyncGuard syncGuard; + if (inferenceOnly) + { + // Pick running statistics for normalizing. No update reuqired, and + // saved statistics do not need to be produced. 
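+        // In inference mode the output is computed purely from the running statistics, roughly
+        //     out = scale * (in - runMean) / sqrt(runVariance + epsilon) + bias
+        // applied per row (per channel in spatial mode, where one statistic covers spatialSize
+        // consecutive rows), so no batch mean/variance needs to be saved for backprop.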
+ assert(expAvgFactor == 0 && blendFactor == 1); + normalizeRunningStats = true; + savedMean.RequireSize(0, 0); + savedInvStdDev.RequireSize(0, 0); + } + else + { + // Compute data mean and inverse standard deviation (into savedMean and + // savedInvStdDev), and update running mean and variance. + // TODO expAvgFactor == 0 && blendFactor == 1 can be optimized (no need for update). + normalizeRunningStats = false; + savedMean.RequireSize(runMean); + savedInvStdDev.RequireSize(runMean); + if (spatial) + { + Call(spatialSize, vectorSize, spatialSize, batchSize, Data(), + expAvgFactor, blendFactor, + runMean.Data(), runVariance.Data(), epsilon, + savedMean.Data(), savedInvStdDev.Data(), GetStream()); + } + else + { + Call(vectorSize, vectorSize, batchSize, Data(), + expAvgFactor, blendFactor, + runMean.Data(), runVariance.Data(), epsilon, + savedMean.Data(), savedInvStdDev.Data(), GetStream()); + } + } + + Call(spatial ? spatialSize : vectorSize, vectorSize, spatialSize, batchSize, spatial, + normalizeRunningStats, epsilon, + Data(), out.Data(), + scale.Data(), bias.Data(), + runMean.Data(), runVariance.Data(), + savedMean.Data(), savedInvStdDev.Data(), + GetStream()); +} + +// savedMean/savedInvStdDev are the interpolated mean/inverse standard deviation as used in ForwardProp(). +// For blendFactor=1, they are not used and can be uninitialized or empty. +template +void GPUMatrix::BatchNormalizationBackward(const GPUMatrix& in, GPUMatrix& grad, const GPUMatrix& scale, double blendFactor, + const GPUMatrix& savedMean, const GPUMatrix& savedInvStdDev, + GPUMatrix& scaleGrad, GPUMatrix& biasGrad) const +{ + assert((GetNumRows() % scale.GetNumRows()) == 0); + + bool spatial = GetNumRows() != scale.GetNumRows(); + size_t vectorSize = GetNumRows(); + size_t spatialSize = spatial ? (GetNumRows() / scale.GetNumRows()) : 1; + size_t batchSize = GetNumCols(); + + assert(0 < vectorSize && vectorSize <= std::numeric_limits::max()); + assert(0 < batchSize && batchSize <= std::numeric_limits::max()); + + SyncGuard syncGuard; + if (spatial) + { + Call(spatialSize, vectorSize, spatialSize, batchSize, in.Data(), Data(), scaleGrad.Data(), biasGrad.Data(), + savedMean.Data(), savedInvStdDev.Data(), GetStream()); + } + else + { + Call(vectorSize, vectorSize, batchSize, in.Data(), Data(), scaleGrad.Data(), biasGrad.Data(), + savedMean.Data(), savedInvStdDev.Data(), GetStream()); + } + ElemType mbStatsWeight = (ElemType)(1 - blendFactor); // weight for contribution from actual MB stats (0 if none, e.g. locked BN node) + Call(spatial ? 
spatialSize : vectorSize, vectorSize, spatialSize, batchSize, spatial, + in.Data(), Data(), grad.Data(), scale.Data(), mbStatsWeight, scaleGrad.Data(), biasGrad.Data(), savedMean.Data(), savedInvStdDev.Data(), GetStream()); +} + +#pragma region RNN Functions + +template +void GPUMatrix::RNNForward(const GPUMatrix &inputX, const GPUMatrix ¶mW, size_t xDim, size_t yDim, const vector& numSequencesForFrame, const RnnAttributes& rnnAttributes, GPUMatrix& reserve, GPUMatrix& workspace) +{ + // numLayers, hiddenSize are input parameters + if (!m_rnnExecutor) + m_rnnExecutor = std::make_unique>(xDim, yDim, rnnAttributes); + m_rnnExecutor->ForwardCore(paramW, inputX, *this, numSequencesForFrame, rnnAttributes, reserve, workspace); +} + +template +void GPUMatrix::RNNBackwardData(const GPUMatrix& outputDY, const GPUMatrix& paramW, GPUMatrix& outputDX, const RnnAttributes& rnnAttributes, GPUMatrix& reserve, GPUMatrix& workspace) +{ + if (!m_rnnExecutor) + LogicError("RNNBackwardData called, but RNNWrapper object is not yet initialized"); + m_rnnExecutor->BackwardDataCore(*this, outputDY, paramW, outputDX, rnnAttributes, reserve, workspace); +} + +template +void GPUMatrix::RNNBackwardWeights(const GPUMatrix& inputX, const GPUMatrix& outputY, GPUMatrix& dw, const RnnAttributes& rnnAttributes, GPUMatrix& reserve, GPUMatrix& workspace) +{ + if (!m_rnnExecutor) + LogicError("RNNBackwardWeights called, but RNNWrapper object is not yet initialized"); + m_rnnExecutor->BackwardWeightsCore(inputX, outputY, dw, rnnAttributes, reserve, workspace); +} + +#pragma region Static BLAS Functions +// float/double overloads of cublasSgemm()/cublasDgemm() +static cublasStatus_t cublas_gemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float* alpha, const float* A, int lda, const float* B, int ldb, const float* beta, float* C, int ldc) +{ + return cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); +} +static cublasStatus_t cublas_gemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double* alpha, const double* A, int lda, const double* B, int ldb, const double* beta, double* C, int ldc) +{ + return cublasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); +} +static cublasStatus_t cublas_axpy(cublasHandle_t handle, int n, const float* alpha, const float* x, int incx, float* y, int incy) +{ + return cublasSaxpy(handle, n, alpha, x, incx, y, incy); +} +static cublasStatus_t cublas_axpy(cublasHandle_t handle, int n, const double* alpha, const double* x, int incx, double* y, int incy) +{ + return cublasDaxpy(handle, n, alpha, x, incx, y, incy); +} + +template +void GPUMatrix::MultiplyAndWeightedAdd(ElemType alpha, const GPUMatrix& a, const bool transposeA, const GPUMatrix& b, const bool transposeB, + ElemType beta, GPUMatrix& c) +{ + a.PrepareDevice(); + if ((a.GetComputeDeviceId() != b.GetComputeDeviceId()) || (b.GetComputeDeviceId() != c.GetComputeDeviceId())) // different GPUs + InvalidArgument("All matrices must be on the same GPU"); + + cublasHandle_t cuHandle = GetCublasHandle(b.GetComputeDeviceId()); + cublasOperation_t transA = transposeA ? CUBLAS_OP_T : CUBLAS_OP_N; + cublasOperation_t transB = transposeB ? CUBLAS_OP_T : CUBLAS_OP_N; + int m = int(transposeA ? a.m_numCols : a.m_numRows); + int n = int(transposeB ? b.m_numRows : b.m_numCols); + int k = int(transposeA ? a.m_numRows : a.m_numCols); + int l = int(transposeB ? 
b.m_numCols : b.m_numRows); + + if (beta == 0) + c.RequireSize(m, n); + else + c.VerifySize(m, n); // Can't resize if beta != 0 + + if (!(m > 0 && k > 0 && l > 0 && n > 0)) + RuntimeError("!(m>0 && k>0 && l>0 && n>0)"); // converting from size_t to int may cause overflow + if (k != l) + RuntimeError("matrix dim mismatch in MultiplyAndWeightedAdd"); + CUBLAS_CALL(cublas_gemm(cuHandle, transA, transB, m, n, k, &alpha, a.Data(), (int) a.m_numRows, b.Data(), (int) b.m_numRows, &beta, c.Data(), (int) c.m_numRows)); +} + +template +void GPUMatrix::Multiply1x1AndWeightedAdd(ElemType alpha, const GPUMatrix& a, const GPUMatrix& b, ElemType beta, GPUMatrix& c) +{ + a.PrepareDevice(); + if ((a.GetComputeDeviceId() != b.GetComputeDeviceId()) || (b.GetComputeDeviceId() != c.GetComputeDeviceId())) // different GPUs + InvalidArgument("All matrices must be on the same GPU"); + CUDA_LONG N = (CUDA_LONG) c.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _multiply1x1AndWeightedAdd<<>>(alpha, a.Data(), b.Data(), beta, c.Data(), N); +} + +template +void GPUMatrix::MultiplyAndAdd(const GPUMatrix& a, const bool transposeA, const GPUMatrix& b, const bool transposeB, GPUMatrix& c) +{ + return GPUMatrix::MultiplyAndWeightedAdd(1, a, transposeA, b, transposeB, 1, c); +} + +template +void GPUMatrix::Multiply(const GPUMatrix& a, const bool transposeA, const GPUMatrix& b, const bool transposeB, GPUMatrix& c) +{ + return GPUMatrix::MultiplyAndWeightedAdd(1, a, transposeA, b, transposeB, 0, c); +} + +template +void GPUMatrix::Multiply(const GPUMatrix& a, const GPUMatrix& b, GPUMatrix& c) +{ + return GPUMatrix::MultiplyAndWeightedAdd(1, a, false, b, false, 0, c); +} + +template +void GPUMatrix::ColumnwiseScaleAndWeightedAdd(ElemType alpha, const GPUMatrix& a, const GPUMatrix& v, ElemType beta, GPUMatrix& c) +{ + if (v.GetNumRows() != 1 && v.GetNumCols() != 1) + InvalidArgument("the argument v must be a vector"); // v is a vector + + if (beta == 0) + c.RequireSize(a.GetNumRows(), a.GetNumCols()); + else + c.VerifySize(a.GetNumRows(), a.GetNumCols()); // Can't resize if beta != 0 + + int blocksPerGrid = (int)ceil(1.0 * c.GetNumElements() / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _columnwiseScaleAndWeightedAdd<<>>(alpha, a.Data(), v.Data(), beta, c.Data(), a.GetNumRows(), a.GetNumCols()); +} + +/// Matrix-scalar multiply with col-major matrices: c = alpha * a + c +/// if a is a column vector, add to all columns of c +/// if a is a row vector, add to all rows of c +/// if a is a scalar, add to all elements of c +/// Scalar +/// Input matrix +/// Resulting matrix, user is responsible for allocating this +template +/*static*/ void GPUMatrix::ScaleAndAdd(ElemType alpha, const GPUMatrix& a, GPUMatrix& c) +{ + if (a.GetComputeDeviceId() != c.GetComputeDeviceId()) + { + InvalidArgument("All matrices must be on the same GPU"); + } + else + { + if (a.IsEmpty() && c.IsEmpty()) + return; + a.PrepareDevice(); + if (a.IsEmpty() || c.IsEmpty()) + LogicError("ScaleAndAdd: one of the input matrices is empty."); + // if (a.GetNumRows() != 1 && a.GetNumCols() != 1) // a is not a col or row vector + if (a.GetNumRows() == c.GetNumRows() && a.GetNumCols() == c.GetNumCols()) // dimensions match + { + const int m = (int) a.GetNumRows(); + const int n = (int) a.GetNumCols(); + const int len = m * n; + const int incx = 1; + const int incy = 1; + + assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow + assert((int) c.GetNumRows() == m 
&& (int) c.GetNumCols() == n); + if ((int) c.GetNumRows() != m || (int) c.GetNumCols() != n) + InvalidArgument("dimension of matrix c does not match dimension of matrix a."); + + cublasHandle_t cuHandle = GetCublasHandle(a.GetComputeDeviceId()); + // TODO: Overload the call to cublas_axpy to remove these ugly if/else statements. + if (sizeof(ElemType) == sizeof(float)) + { + CUBLAS_CALL(cublasSaxpy(cuHandle, len, reinterpret_cast(&alpha), reinterpret_cast(a.Data()), incx, reinterpret_cast(c.Data()), incy)); + } + else if (sizeof(ElemType) == sizeof(double)) + { + CUBLAS_CALL(cublasDaxpy(cuHandle, len, reinterpret_cast(&alpha), reinterpret_cast(a.Data()), incx, reinterpret_cast(c.Data()), incy)); + } + else + { + RuntimeError("Unsupported template argument in GPUMatrix"); + } + } + else if (a.GetNumElements() == 1) + { + CUDA_LONG N = (CUDA_LONG) c.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + c.PrepareDevice(); + SyncGuard syncGuard; + _scaleAndAddScalar<<>>(c.Data(), N, alpha, a.Data(), c.Data()); + } + else if (a.GetNumCols() == 1) // col vector, add it to all columns + { + CUDA_LONG m = (CUDA_LONG) c.GetNumRows(); + CUDA_LONG n = (CUDA_LONG) c.GetNumCols(); + if (m != (CUDA_LONG) a.GetNumRows()) + InvalidArgument("To add column vector, rows should match."); + + int blocksPerGrid = (int) (ceil(1.0 * m * n / GridDim::maxThreadsPerBlock)); + SyncGuard syncGuard; +#ifdef VALIDATION + printf(">>>> CUDA compute device is %d\n", a.GetComputeDeviceId()); + printf(">>>> a.Data()= %p, c.Data()= %p, alpha = %f, m = %ld, n = %ld\n", a.Data(), c.Data(), alpha, m, n); + for (int i = 0; i < 2; i++) + { + ElemType buffer[10] = {-1.234f}; + cudaError_t error = cudaMemcpy(buffer, !i ? a.Data(): c.Data(), sizeof(buffer), cudaMemcpyKind::cudaMemcpyDeviceToHost); + if (error == cudaError::cudaSuccess) + printf("buffer valid\n"); + } +#endif + + _matrixVectorColumnWiseAddWithThreadPerElem<<>>(a.Data(), c.Data(), c.Data(), alpha, m, n); + } + else if (a.GetNumRows() == 1) // row vector, add it to all rows + { + cublasHandle_t cuHandle = GetCublasHandle(a.GetComputeDeviceId()); + int m = (int) c.GetNumRows(); + int n = (int) c.GetNumCols(); + assert(n == (int) a.GetNumCols()); + if (n != (int) a.GetNumCols()) + InvalidArgument("To add row vector, cols should match."); + + // TODO: Overload the call to cublas_axpy to remove these ugly if/else statements. 
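+            // cublas?axpy computes y := alpha * x + y over n elements with strides incx/incy.
+            // Here x is the row vector a (stride 1) and y is row i of the column-major matrix c,
+            // whose consecutive elements are m apart in memory, hence the stride of m and the
+            // starting offset c.Data() + i in the per-row loop below.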
+ if (sizeof(ElemType) == sizeof(double)) + { + foreach_row (i, c) + { + CUBLAS_CALL(cublasDaxpy(cuHandle, n, reinterpret_cast(&alpha), reinterpret_cast(a.Data()), 1, reinterpret_cast(c.Data()+ i), m)); + } + } + else + { + foreach_row (i, c) + { + CUBLAS_CALL(cublasSaxpy(cuHandle, n, reinterpret_cast(&alpha), reinterpret_cast(a.Data()), 1, reinterpret_cast(c.Data()+ i), m)); + } + } + } + else + InvalidArgument("dimension of matrix c does not match dimension of matrix a."); + } +} + +/// Matrix-scalar multiply with col-major matrices: c = alpha * a + b +/// if a is a column vector, add to all columns of b +/// if a is a row vector, add to all rows of b +/// if a is a scalar, add to all elements of b +/// Scalar +/// Input matrix +/// Input matrix +/// Resulting matrix, user is responsible for allocating this +template +/*static*/ void GPUMatrix::ScaleAndAdd(ElemType alpha, const GPUMatrix& a, const GPUMatrix& b, GPUMatrix& c) +{ + if (a.GetComputeDeviceId() != c.GetComputeDeviceId() || a.GetComputeDeviceId() != b.GetComputeDeviceId()) + { + InvalidArgument("All matrices must be on the same GPU"); + } + else + { + if (a.IsEmpty() && b.IsEmpty()) + return; + a.PrepareDevice(); + if (a.IsEmpty() || b.IsEmpty()) + LogicError("ScaleAndAdd: One of the input matrices is empty."); + c.RequireSize(b.GetNumRows(), b.GetNumCols()); + // if (a.GetNumRows() != 1 && a.GetNumCols() != 1) // a is not a col or row vector + if (a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()) // dimensions match + { + /* + const int m = (int)a.GetNumRows(); + const int n = (int)a.GetNumCols(); + const int len = m * n; + const int incx = 1; + const int incy = 1; + assert (m>0 && n>0 && len>0); // converting from size_t to int may cause overflow + */ + CUDA_LONG N = (CUDA_LONG) c.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + c.PrepareDevice(); + SyncGuard syncGuard; + _matrixMatrixAddOnCuda<<>>(alpha, a.Data(), b.Data(), c.Data(), N); + } + else if (a.GetNumElements() == 1) + { + CUDA_LONG N = (CUDA_LONG) c.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + c.PrepareDevice(); + SyncGuard syncGuard; + _scaleAndAddScalar<<>>(c.Data(), N, alpha, a.Data(), b.Data()); + } + else if (a.GetNumCols() == 1) // col vector, add it to all columns + { + CUDA_LONG m = (CUDA_LONG) c.GetNumRows(); + CUDA_LONG n = (CUDA_LONG) c.GetNumCols(); + if (m != (CUDA_LONG) a.GetNumRows()) + InvalidArgument("To add column vector, rows should match."); + + int blocksPerGrid = (int) (ceil(1.0 * m * n / GridDim::maxThreadsPerBlock)); + SyncGuard syncGuard; + _matrixVectorColumnWiseAddWithThreadPerElem<<>>(a.Data(), b.Data(), c.Data(), alpha, m, n); + + } + else if (a.GetNumRows() == 1) // row vector, add it to all rows + { + CUDA_LONG m = (CUDA_LONG) c.GetNumRows(); + CUDA_LONG n = (CUDA_LONG) c.GetNumCols(); + if (m != (CUDA_LONG) a.GetNumRows()) + InvalidArgument("To add column vector, rows should match."); + + int blocksPerGrid = (int) (ceil(1.0 * m * n / GridDim::maxThreadsPerBlock)); + SyncGuard syncGuard; + _matrixVectorRowWiseAddWithThreadPerElem<<>>(a.Data(), b.Data(), c.Data(), alpha, m, n); + } + else + InvalidArgument("Dimension of matrix c does not match dimension of matrix a."); + } +} + +/// c += alpha * (a-b) +/// if a, b, c must have same dim +/// Scalar +/// Input matrix +/// Input matrix +/// Resulting matrix, user is responsible for allocating this +template +void GPUMatrix::AddScaledDifference(const ElemType alpha, const 
GPUMatrix& a, const GPUMatrix& b, GPUMatrix& c) +{ + if (a.GetComputeDeviceId() != c.GetComputeDeviceId()) + { + InvalidArgument("All matrices must be on the same GPU"); + } + else + { + a.PrepareDevice(); + + assert(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() && + a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols()); + + if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() && + a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols())) + { + InvalidArgument("AddScaledDifference: a, b, and c must have same dimension."); + } + + if (a.IsEmpty()) + LogicError("AddScaledDifference: Input matrix a is empty."); + + CUDA_LONG n = (CUDA_LONG) a.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * n / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _addScaledDifference<<>>(alpha, a.Data(), b.Data(), c.Data(), n); + } +} + +/// c = alpha * (a-b) +/// if a, b, c must have same dim +/// Scalar +/// Input matrix +/// Input matrix +/// Resulting matrix, user is responsible for allocating this +template +void GPUMatrix::AssignScaledDifference(const ElemType alpha, const GPUMatrix& a, const GPUMatrix& b, GPUMatrix& c) +{ + if (a.GetComputeDeviceId() != c.GetComputeDeviceId()) + { + InvalidArgument("All matrices must be on the same GPU"); + } + else + { + a.PrepareDevice(); + + assert(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()); + + if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) + InvalidArgument("AssignScaledDifference: a, b must have same dimension."); + + if (a.IsEmpty()) + LogicError("AssignScaledDifference: Input matrix a is empty."); + + if (&c != &a && &c != &b) + c.RequireSize(a.GetNumRows(), a.GetNumCols()); + + CUDA_LONG n = (CUDA_LONG) a.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * n / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _assignScaledDifference<<>>(alpha, a.Data(), b.Data(), c.Data(), n); + } +} + +/// c += alpha * (a-b) +/// if a, b, c must have same dim +/// 1X1 matrix +/// Input matrix +/// Input matrix +/// Resulting matrix, user is responsible for allocating this +template +void GPUMatrix::AddScaledDifference(const GPUMatrix& alpha, const GPUMatrix& a, const GPUMatrix& b, GPUMatrix& c) +{ + assert(alpha.GetNumElements() == 1); + if (!(alpha.GetNumElements() == 1)) + InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix."); + + if (a.GetComputeDeviceId() != c.GetComputeDeviceId()) + { + InvalidArgument("All matrices must be on the same GPU"); + } + else + { + a.PrepareDevice(); + + assert(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() && + a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols()); + + if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() && + a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols())) + { + InvalidArgument("AddScaledDifference: a, b, and c must have same dimension."); + } + + if (a.IsEmpty()) + LogicError("AddScaledDifference: Input matrix a is empty."); + + CUDA_LONG n = (CUDA_LONG) a.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * n / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _addScaledDifference<<>>(alpha.Data(), a.Data(), b.Data(), c.Data(), n); + } +} + +/// c = alpha * (a-b) +/// if a, b, c must have same dim +/// Scalar +/// Input matrix +/// Input matrix +/// Resulting matrix, user is responsible for allocating this +template +void GPUMatrix::AssignScaledDifference(const GPUMatrix& 
alpha, const GPUMatrix& a, const GPUMatrix& b, GPUMatrix& c) +{ + assert(alpha.GetNumElements() == 1); + if (!(alpha.GetNumElements() == 1)) + InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix."); + + if (a.GetComputeDeviceId() != c.GetComputeDeviceId()) + { + InvalidArgument("All matrices must be on the same GPU"); + } + else + { + a.PrepareDevice(); + + assert(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()); + + if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) + { + InvalidArgument("AssignScaledDifference: a, b must have same dimension."); + } + + if (a.IsEmpty()) + LogicError("AssignScaledDifference: Input matrix a is empty."); + + c.RequireSize(a.GetNumRows(), a.GetNumCols()); + + CUDA_LONG n = (CUDA_LONG) a.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * n / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _assignScaledDifference<<>>(alpha.Data(), a.Data(), b.Data(), c.Data(), n); + } +} + +//c[ci,cj] += a[ai,aj] +template +void GPUMatrix::AddElementToElement(ElemType beta, const GPUMatrix& a, const size_t ai, const size_t aj, GPUMatrix& c, const size_t ci, const size_t cj) +{ + if (ai >= a.GetNumRows() || aj >= a.GetNumCols() || + ci >= c.GetNumRows() || cj >= c.GetNumCols()) + InvalidArgument("AddElementToElement: Index out of range."); + + a.PrepareDevice(); + SyncGuard syncGuard; + _addElementToElement<<<1, 1, 0, t_stream>>>(beta, a.Data(), (CUDA_LONG) a.LocateElement(ai, aj), c.Data(), (CUDA_LONG) c.LocateElement(ci, cj)); +} + +template +/*static*/ void GPUMatrix::Scale(ElemType alpha, GPUMatrix& a) +{ + if (alpha == 0) // if 0 then do not access the value, so that we can use this to multiply uninitialized matrices with beta=0 + { + CUDA_CALL(cudaMemset(a.Data(), 0, a.m_numRows * a.m_numCols * sizeof(ElemType))); + return; + } + + cublasHandle_t cuHandle = GetCublasHandle(a.GetComputeDeviceId()); + // TODO: Overload the call to cublas_axpy to remove these ugly if/else statements. 
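+ // The TODO above names the axpy case; for this function the analogous cleanup would be a pair of
+ // cublas_scal overloads. A minimal sketch (hypothetical helper, shown commented out because it
+ // would live in a shared header rather than inside this function):
+ //   static cublasStatus_t cublas_scal(cublasHandle_t handle, int n, const float* alpha, float* x, int incx)
+ //   { return cublasSscal(handle, n, alpha, x, incx); }
+ //   static cublasStatus_t cublas_scal(cublasHandle_t handle, int n, const double* alpha, double* x, int incx)
+ //   { return cublasDscal(handle, n, alpha, x, incx); }
+ // which would reduce the if/else below to:
+ //   CUBLAS_CALL(cublas_scal(cuHandle, int(a.m_numRows * a.m_numCols), &alpha, a.Data(), 1));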
+ if (sizeof(ElemType) == sizeof(float)) + { + float alph = (float) alpha; + CUBLAS_CALL(cublasSscal(cuHandle, int(a.m_numRows * a.m_numCols), &alph, (float*) a.Data(), 1)); + } + else if (sizeof(ElemType) == sizeof(double)) + { + double alph = alpha; + CUBLAS_CALL(cublasDscal(cuHandle, int(a.m_numRows * a.m_numCols), &alph, (double*) a.Data(), 1)); + } + else + { + RuntimeError("Unsupported template argument in GPUMatrix"); + } +} + +template +/*static*/ void GPUMatrix::Scale(GPUMatrix& alpha, GPUMatrix& a) +{ + if (alpha.GetNumElements() != 1) + { + RuntimeError("Matrix alpha must be 1x1"); + } + cublasHandle_t cuHandle = GetCublasHandle(a.GetComputeDeviceId()); + cublasSetPointerMode(cuHandle, CUBLAS_POINTER_MODE_DEVICE); + if (sizeof(ElemType) == sizeof(float)) + { + CUBLAS_CALL(cublasSscal(cuHandle, int(a.m_numRows * a.m_numCols), (float*) alpha.Data(), (float*) a.Data(), 1)); + } + else if (sizeof(ElemType) == sizeof(double)) + { + CUBLAS_CALL(cublasDscal(cuHandle, int(a.m_numRows * a.m_numCols), (double*) alpha.Data(), (double*) a.Data(), 1)); + } + else + { + cublasSetPointerMode(cuHandle, CUBLAS_POINTER_MODE_HOST); + RuntimeError("Unsupported template argument in GPUMatrix"); + } + cublasSetPointerMode(cuHandle, CUBLAS_POINTER_MODE_HOST); +} + +template // c = alpha * a +/*static*/ void GPUMatrix::Scale(ElemType alpha, const GPUMatrix& a, GPUMatrix& c) +{ + c = a; + Scale(alpha, c); +} + +template +void GPUMatrix::InnerProduct(const GPUMatrix& a, const GPUMatrix& b, GPUMatrix& c, const bool isColWise) +{ + if (a.GetComputeDeviceId() != b.GetComputeDeviceId() || b.GetComputeDeviceId() != c.GetComputeDeviceId()) // different GPUs + InvalidArgument("All matrices must be on the same GPU"); + + if (a.IsEmpty() || b.IsEmpty()) + LogicError("Scale: one of the input matrices is empty."); + + const int m = (int) a.GetNumRows(); + const int n = (int) a.GetNumCols(); + const int k = (int) b.GetNumRows(); + const int l = (int) b.GetNumCols(); + + assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow + assert(m == k && n == l); // converting from size_t to int may cause overflow + if (m != k || n != l) + InvalidArgument("Matrices a and b should have same dimension."); + + if (isColWise) + c.RequireSize(1, n); + else + c.RequireSize(m, 1); + + if ((isColWise && m == 1) || (!isColWise && n == 1)) // in this case it's equivalent to element-wise product + { + c.AssignElementProductOf(a, b); + } + else + { + c.PrepareDevice(); + + int blocksPerGrid = 0; + if (isColWise) // col-wise + { + c.RequireSize(1, n); + blocksPerGrid = (int) ceil(1.0 * n / GridDim::maxThreadsPerBlock); + } + else + { + c.RequireSize(m, 1); + blocksPerGrid = (int) ceil(1.0 * m / GridDim::maxThreadsPerBlock); + } + + SyncGuard syncGuard; + _innerProduct<<>>(c.Data(), a.Data(), b.Data(), m, n, isColWise); + } +} + +template +ElemType GPUMatrix::InnerProductOfMatrices(const GPUMatrix& a, const GPUMatrix& b) +{ + if (a.IsEmpty() || b.IsEmpty()) + LogicError("InnerProductOfMatrices: one of the input matrices is empty."); + + const int m = (int) a.GetNumRows(); + const int n = (int) a.GetNumCols(); + const int k = (int) b.GetNumRows(); + const int l = (int) b.GetNumCols(); + + assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow + assert(m == k && n == l); // converting from size_t to int may cause overflow + if (m != k || n != l) + InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension."); + + cublasHandle_t cuHandle = 
GetCublasHandle(a.GetComputeDeviceId()); + if (sizeof(ElemType) == sizeof(double)) + { + double tmp = 0; + CUBLAS_CALL(cublasDdot(cuHandle, m * n, reinterpret_cast(a.Data()), 1, reinterpret_cast(b.Data()), 1, &tmp)); + return ElemType(tmp); + // return (ElemType)ddot((int)a.GetNumElements(), reinterpret_cast (a.Data()), 1, reinterpret_cast (b.Data()), 1); + } + else + { + float tmp = 0; + CUBLAS_CALL(cublasSdot(cuHandle, m * n, reinterpret_cast(a.Data()), 1, reinterpret_cast(b.Data()), 1, &tmp)); + return tmp; + // return (ElemType)sdot((int)a.GetNumElements(), reinterpret_cast (a.Data()), 1, reinterpret_cast (b.Data()), 1); + } +} + +template +GPUMatrix& GPUMatrix::AssignInnerProductOfMatrices(const GPUMatrix& a, const GPUMatrix& b) +{ + if (a.IsEmpty() || b.IsEmpty()) + LogicError("InnerProductOfMatrices: one of the input matrices is empty."); + + RequireSize(1, 1); + + const int m = (int) a.GetNumRows(); + const int n = (int) a.GetNumCols(); + const int k = (int) b.GetNumRows(); + const int l = (int) b.GetNumCols(); + + assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow + assert(m == k && n == l); // converting from size_t to int may cause overflow + if (m != k || n != l) + InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension."); + + cublasHandle_t cuHandle = GetCublasHandle(a.GetComputeDeviceId()); + cublasSetPointerMode(cuHandle, CUBLAS_POINTER_MODE_DEVICE); + if (sizeof(ElemType) == sizeof(double)) + { + CUBLAS_CALL(cublasDdot(cuHandle, m * n, reinterpret_cast(a.Data()), 1, reinterpret_cast(b.Data()), 1, reinterpret_cast(Data()))); + } + else + { + CUBLAS_CALL(cublasSdot(cuHandle, m * n, reinterpret_cast(a.Data()), 1, reinterpret_cast(b.Data()), 1, reinterpret_cast(Data()))); + } + cublasSetPointerMode(cuHandle, CUBLAS_POINTER_MODE_HOST); + return *this; +} + +template +void GPUMatrix::ElementWisePower(ElemType alpha, const GPUMatrix& a, GPUMatrix& c) +{ + if (a.GetComputeDeviceId() != c.GetComputeDeviceId()) + { + InvalidArgument("All matrices must be on the same GPU"); + } + else + { + if (a.IsEmpty()) + LogicError("ElementWisePower: The input matrix a is empty."); + + c.RequireSize(a.GetNumRows(), a.GetNumCols()); + + a.PrepareDevice(); + SyncGuard syncGuard; + CUDA_LONG N = (CUDA_LONG) a.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + _elementWisePowerOnCuda<<>>(alpha, a.Data(), c.Data(), N); + } +} + +template +bool GPUMatrix::AreEqual(const GPUMatrix& a, const GPUMatrix& b, const ElemType threshold /*= 1e-8*/) +{ + if (a.IsEmpty() || b.IsEmpty()) + LogicError("AreEqual: one of the input matrices is empty."); + + if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols()) + return false; + + bool bResult = false; + + long* res = new long[1]; + res[0] = 1; + long* d_res = TracingGPUMemoryAllocator::Allocate(a.GetComputeDeviceId(), 1); + CUDA_CALL(cudaMemcpy(d_res, res, sizeof(long) * 1, cudaMemcpyHostToDevice)); + CUDA_LONG N = (CUDA_LONG) a.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + _areEqual<<>>(a.Data(), b.Data(), N, threshold, d_res); + CUDA_CALL(cudaMemcpy(res, d_res, sizeof(long) * 1, cudaMemcpyDeviceToHost)); + TracingGPUMemoryAllocator::Free(a.GetComputeDeviceId(), d_res); + if (res[0] != 0) + bResult = true; + delete[] res; + return bResult; +} + +// see Matrix::TensorShuffleScaleAndAdd() for comments +template +void GPUMatrix::TensorShuffleScaleAndAdd(ElemType keepWeight, const GPUMatrix& 
a, size_t D, size_t S, size_t M, size_t K, size_t T, ElemType scaleFactor, const GPUMatrix& b, GPUMatrix& c) +{ + CUDA_LONG N = (CUDA_LONG) c.GetNumElements(); + assert(N == (CUDA_LONG) a.GetNumElements() && N == (CUDA_LONG) b.GetNumElements()); + assert(a.GetComputeDeviceId() == c.GetComputeDeviceId() && b.GetComputeDeviceId() == c.GetComputeDeviceId()); + a.PrepareDevice(); + SyncGuard syncGuard; + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + _tensorShuffleScaleAndAdd<<>>(keepWeight, a.Data(), D, S, M, K, T, scaleFactor, b.Data(), c.Data()); +} + +template +bool GPUMatrix::HasElement(const GPUMatrix& a, const ElemType v) +{ + if (a.IsEmpty()) + LogicError("HasElement: the input matrix is empty."); + + bool bResult = false; + ElemType* res = new ElemType[2]; + res[0] = v; + res[1] = 0; + ElemType* d_res = TracingGPUMemoryAllocator::Allocate(a.GetComputeDeviceId(), 2); + CUDA_CALL(cudaMemcpy(d_res, res, sizeof(ElemType) * 2, cudaMemcpyHostToDevice)); + CUDA_LONG N = (CUDA_LONG) a.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + _hasElement<<>>(a.Data(), N, d_res); + CUDA_CALL(cudaMemcpy(res, d_res, sizeof(ElemType) * 2, cudaMemcpyDeviceToHost)); + TracingGPUMemoryAllocator::Free(a.GetComputeDeviceId(), d_res); + if (res[1] != 0) + bResult = true; + else + bResult = false; + + delete[] res; + return bResult; +} + +template +void GPUMatrix::CreateCurandObject(unsigned long seed, const char* caller) +{ + assert(caller != nullptr); + + if (s_curandGenerator == NULL) + { + unsigned long long cudaSeed = (seed == USE_TIME_BASED_SEED) ? time(NULL) : seed; + if (GetMathLibTraceLevel() > 0) + { + fprintf(stderr, "%s (GPU): creating curand object with seed %llu, sizeof(ElemType)==%lu\n", + caller, cudaSeed, (unsigned long)sizeof(ElemType)); + } + s_curandGenerator = new curandGenerator_t; + // Create pseudo-random number generator + CURAND_CALL(curandCreateGenerator(&(((curandGenerator_t*) s_curandGenerator)[0]), CURAND_RNG_PSEUDO_XORWOW)); + CURAND_CALL(curandSetPseudoRandomGeneratorSeed(((curandGenerator_t*) s_curandGenerator)[0], cudaSeed)); + CURAND_CALL(curandSetGeneratorOrdering(((curandGenerator_t*) s_curandGenerator)[0], CURAND_ORDERING_PSEUDO_SEEDED)); + } +} + +template +void GPUMatrix::ResetCurandObject(unsigned long seed, const char* caller) +{ + assert(caller != nullptr); + + if (s_curandGenerator && (seed != USE_TIME_BASED_SEED)) + { + // Note: this might be slow. 
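+ // Re-seeding and resetting the generator offset restarts the pseudo-random stream from its
+ // beginning, which is what makes runs with an explicit seed reproducible. A small usage sketch,
+ // assuming SetUniformRandomValue() reseeds through ResetCurandObject() when an explicit seed is
+ // passed (deviceId stands for any valid CUDA device id):
+ //   GPUMatrix<float> r1 = GPUMatrix<float>::RandomUniform(3, 4, deviceId, 0.0f, 1.0f, /*seed*/ 42);
+ //   GPUMatrix<float> r2 = GPUMatrix<float>::RandomUniform(3, 4, deviceId, 0.0f, 1.0f, /*seed*/ 42);
+ //   assert(GPUMatrix<float>::AreEqual(r1, r2, 1e-6f)); // identical seeds -> identical streams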
+ CURAND_CALL(curandSetPseudoRandomGeneratorSeed(((curandGenerator_t*) s_curandGenerator)[0], seed)); + CURAND_CALL(curandSetGeneratorOffset(((curandGenerator_t*) s_curandGenerator)[0], 0)); + } + else + { + CreateCurandObject(seed, caller); + } +} + +template +GPUMatrix GPUMatrix::Ones(const size_t rows, const size_t cols, int deviceId) +{ + GPUMatrix c(rows, cols, deviceId); // will initialize to 0 + c.SetValue(1); + return c; +} + +template +GPUMatrix GPUMatrix::Zeros(const size_t rows, const size_t cols, int deviceId) +{ + GPUMatrix c(rows, cols, deviceId); // will initialize to 0 + // c.SetValue(0); + return c; +} + +template +GPUMatrix GPUMatrix::Eye(const size_t rows, int deviceId) +{ + GPUMatrix c(rows, rows, deviceId); // will initialize to 0 + c.SetDiagonalValue(1); + return c; +} + +template +GPUMatrix GPUMatrix::RandomUniform(const size_t rows, const size_t cols, int deviceId, const ElemType low, const ElemType high, unsigned long seed) +{ + GPUMatrix c(rows, cols, deviceId); // will initialize to 0 + c.SetUniformRandomValue(low, high, seed); + return c; +} + +template +GPUMatrix GPUMatrix::RandomGaussian(const size_t rows, const size_t cols, int deviceId, const ElemType mean, const ElemType sigma, unsigned long seed) +{ + GPUMatrix c(rows, cols, deviceId); // will initialize to 0 + c.SetGaussianRandomValue(mean, sigma, seed); + return c; +} + +template +ElemType GPUMatrix::GetLearnRateForBlock_Helper(const GPUMatrix& Gradients, const GPUMatrix& SmoothedGradients) +{ + ElemType* d_res = TracingGPUMemoryAllocator::Allocate(Gradients.GetComputeDeviceId(), 1); + + // Compute inner product of matrices and keep it on device + const int m = (int) Gradients.GetNumRows(); + const int n = (int) Gradients.GetNumCols(); + const int k = (int) SmoothedGradients.GetNumRows(); + const int l = (int) SmoothedGradients.GetNumCols(); + assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow + assert(m == k && n == l); // converting from size_t to int may cause overflow + if (m != k || n != l) + InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension."); + + if (sizeof(ElemType) == sizeof(double)) + { + cublasHandle_t cuHandle = GetCublasHandle(Gradients.GetComputeDeviceId()); + cublasSetPointerMode(cuHandle, CUBLAS_POINTER_MODE_DEVICE); + CUBLAS_CALL(cublasDdot(cuHandle, m * n, reinterpret_cast(Gradients.Data()), 1, reinterpret_cast(SmoothedGradients.Data()), 1, reinterpret_cast(d_res))); + cublasSetPointerMode(cuHandle, CUBLAS_POINTER_MODE_HOST); + } + else + { + cublasHandle_t cuHandle = GetCublasHandle(Gradients.GetComputeDeviceId()); + cublasSetPointerMode(cuHandle, CUBLAS_POINTER_MODE_DEVICE); + CUBLAS_CALL(cublasSdot(cuHandle, m * n, reinterpret_cast(Gradients.Data()), 1, reinterpret_cast(SmoothedGradients.Data()), 1, reinterpret_cast(d_res))); + cublasSetPointerMode(cuHandle, CUBLAS_POINTER_MODE_HOST); + } + // d_res[0] should now contain inner product of matrices + // Compute squared Frobenius norms (squared sums of elements) + // note: kernel has hard-coded dimension of 512 + _lrHelper512Threads << <1, 512, 0, t_stream >> >(Gradients.Data(), SmoothedGradients.Data(), (CUDA_LONG)Gradients.GetNumElements(), d_res); + ElemType res; + CUDA_CALL(cudaMemcpy(&res, d_res, sizeof(ElemType), cudaMemcpyDeviceToHost)); + TracingGPUMemoryAllocator::Free(Gradients.GetComputeDeviceId(), d_res); + return res; +} +// The inputs are two row vectors [a1 a2 a3 a4] [b1 b2 b3 b4] +// The outputs are one matrix of size (nt+1)*4 +// The first 
row is just element multiplication +// The rest rows will be with shift +template +GPUMatrix& GPUMatrix::AssignElementProductOfWithShiftNeg(const GPUMatrix& a, const GPUMatrix& b, const size_t shift, const size_t nt) +{ + if (a.IsEmpty() || b.IsEmpty()) + LogicError("AssignElementProductOf: Matrix is empty."); + + assert(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()); + if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) + InvalidArgument("The input matrix dimensions do not match."); + + if (!(a.GetNumRows() == 1)) + InvalidArgument("The input matrix must be a row vector."); + + RequireSize(nt + 1, a.GetNumCols()); + int BS = a.GetNumCols(); + + // the output matrix is of size (nt+1, BS) + dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); + dim3 block_tail((nt + 1 + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (BS + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); + + a.PrepareDevice(); + SyncGuard syncGuard; + _assignElementProductOfWithShiftNeg<<>>(Data(), a.Data(), b.Data(), shift, nt + 1, BS); + // _assignElementProductOf << > >(Data(), a.Data(), b.Data(), nt); + + return *this; +} + +template +GPUMatrix& GPUMatrix::AssignOneHot(const GPUMatrix& a, vector& shape, size_t axis) +{ + if(a.IsEmpty()) + LogicError("AssignOneHot: Matrix a is empty."); + + if (axis >= shape.size()) + LogicError("AssignOneHot: axis is not correct"); + + size_t item_size = 1; + for (size_t i = 0; i < shape.size() && i < axis; i++) + item_size *= shape[i]; + + size_t num_class = shape[axis]; + + auto nCols = a.GetNumCols(); + auto nRows = num_class * a.GetNumRows(); + this->RequireSize(nRows, nCols); + this->PrepareDevice(); + + CUDA_CALL(cudaMemset(Data(), 0, nCols * nRows * sizeof(ElemType))); + + + CUDA_LONG N = (CUDA_LONG)a.GetNumElements(); + int blocksPerGrid = (int)ceil(((double)N) / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _assignOneHot<<>>(a.Data(), Data(), num_class, item_size, N); + + return *this; +} + +template +void GPUMatrix::InnerProductWithShiftNeg(const GPUMatrix& a, const GPUMatrix& b, GPUMatrix& c, const size_t shift, const size_t nt) +{ + if (a.GetComputeDeviceId() != b.GetComputeDeviceId() || b.GetComputeDeviceId() != c.GetComputeDeviceId()) // different GPUs + InvalidArgument("All matrices must be on the same GPU"); + + if (a.IsEmpty() || b.IsEmpty()) + LogicError("Scale: one of the input matrices is empty."); + + const int m = (int) a.GetNumRows(); + const int n = (int) a.GetNumCols(); + const int k = (int) b.GetNumRows(); + const int l = (int) b.GetNumCols(); + + assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow + assert(m == k && n == l); // converting from size_t to int may cause overflow + if (m != k || n != l) + InvalidArgument("Matrices a and b should have same dimension."); + + c.RequireSize(nt + 1, n); + + if (true) + { + c.PrepareDevice(); + + dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); + dim3 block_tail((nt + 1 + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (n + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); + + SyncGuard syncGuard; + _innerProductWithShiftNeg<<>>(c.Data(), a.Data(), b.Data(), m, n, shift, nt + 1); + } +} + +template +GPUMatrix& GPUMatrix::GetARowByIndex(const GPUMatrix& a, const size_t m) +{ + if (a.IsEmpty()) + LogicError("GetARowByIndex: Matrix is empty."); + + RequireSize(1, a.GetNumCols()); + + int n = a.GetNumRows(); + int P = a.GetNumCols(); + + if (m >= n) + 
LogicError("GetARowByIndex: m is out of range."); + + int blocksPerGrid = (int) ceil(((double) P) / GridDim::maxThreadsPerBlock); + + a.PrepareDevice(); + SyncGuard syncGuard; + _getARowByIndex<<>>(Data(), a.Data(), n, P, m); + // _assignElementProductOf << > >(Data(), a.Data(), b.Data(), nt); + return *this; +} + +// Calculate CTC score +// prob (input): the posterior output from the network +// alpha, beta (output): alpha and beta for forward-backward calculation. +// phoneSeq (input): phone ID sequence for each utterance in this minibatch, each col is one utterance +// phoneBoundary (input): phone boundary (frame index) of each phone for each utterance in this minibatch, each col is one utterance +// totalScore (output): total CTC score +// uttToChanInd (input): map from utterance ID to minibatch channel ID. We need this because each channel may contain more than one utterance. +// uttBeginFrame(input): the positon of the first frame of each utterance in the minibatch channel. We need this because each channel may contain more than one utterance. +// uttFrameNum (input): the frame number of each utterance. The size of this vector = the number of all utterances in this minibatch +// uttPhoneNum (input): the phone number of each utterance. The size of this vector = the number of all utterances in this minibatch +// numParallelSequences (input): channel number in this minibatch +// maxFrameNum (input): the maximum channel frame number +// delayConstraint -- label output delay constraint introduced during training that allows to have shorter delay during inference. +// Alpha and Beta scores outside of the delay boundary are set to zero. +// Setting this parameter smaller will result in shorted delay between label output during decoding, yet may hurt accuracy +// delayConstraint=-1 means no constraint +template +GPUMatrix& GPUMatrix::AssignCTCScore(const GPUMatrix& prob, + GPUMatrix& alpha, + GPUMatrix& beta, + const GPUMatrix phoneSeq, + const GPUMatrix phoneBoundary, + ElemType &totalScore, + const std::vector& uttToChanInd, + const std::vector & uttBeginFrame, + const std::vector & uttFrameNum, + const std::vector & uttPhoneNum, + const size_t numParallelSequences, + const size_t maxFrameNum, + const size_t blankTokenId, + const int delayConstraint, + const bool isColWise) +{ + if (isColWise) + { + PrepareDevice(); + // Total number of phones + long totalPhoneNum = prob.GetNumRows(); + size_t uttNum = uttFrameNum.size(); + + // Max number of phones in utterances in this minibatch + size_t maxPhoneNum = phoneSeq.GetNumRows(); + + size_t *gpuFrameNum; + CUDA_CALL(cudaMalloc((void **)&gpuFrameNum, uttNum * sizeof(size_t))); + CUDA_CALL(cudaMemcpy(gpuFrameNum, uttFrameNum.data(), uttNum * sizeof(size_t), cudaMemcpyHostToDevice)); + + size_t *gpuPhoneNum; + CUDA_CALL(cudaMalloc((void **)&gpuPhoneNum, uttNum * sizeof(size_t))); + CUDA_CALL(cudaMemcpy(gpuPhoneNum, uttPhoneNum.data(), uttNum * sizeof(size_t), cudaMemcpyHostToDevice)); + + size_t *gpuBeginFrame; + CUDA_CALL(cudaMalloc((void **)&gpuBeginFrame, uttNum * sizeof(size_t))); + CUDA_CALL(cudaMemcpy(gpuBeginFrame, uttBeginFrame.data(), uttNum * sizeof(size_t), cudaMemcpyHostToDevice)); + + size_t *gpuUttToChanInd; + CUDA_CALL(cudaMalloc((void **)&gpuUttToChanInd, uttNum * sizeof(size_t))); + CUDA_CALL(cudaMemcpy(gpuUttToChanInd, uttToChanInd.data(), uttNum * sizeof(size_t), cudaMemcpyHostToDevice)); + + ElemType *gpuScores; + CUDA_CALL(cudaMalloc((void **)&gpuScores, uttNum * sizeof(ElemType))); + + cudaEvent_t done = nullptr; + 
CUDA_CALL(cudaEventCreate(&done)); + dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); + // x dimension is for utterances + // y dimention is for phone sequence in each utterance + // Ensure that we allocate correct number of blocks for given number of utterances and max number of phones in those utterances + dim3 block_tail((uttNum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (maxPhoneNum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); + for (long t = 0; t < maxFrameNum; t++) + { + _assignAlphaScore << > >(prob.Data(), alpha.Data(), phoneSeq.Data(), phoneBoundary.Data(), gpuUttToChanInd, + gpuFrameNum, gpuBeginFrame, gpuPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint); + } + + for (long t = maxFrameNum - 1; t >= 0; t--) + { + _assignBetaScore << > >(prob.Data(), beta.Data(), phoneSeq.Data(), phoneBoundary.Data(), gpuUttToChanInd, + gpuFrameNum, gpuBeginFrame, gpuPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint); + } + + _assignTotalScore << > > (beta.Data(), gpuScores, uttNum, gpuUttToChanInd, gpuBeginFrame, numParallelSequences, maxPhoneNum); + + dim3 block_tail_2((uttNum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (maxFrameNum + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); + + _assignCTCScore << < block_tail_2, thread_tail, 0, t_stream >> >(Data(), prob.Data(), alpha.Data(), beta.Data(), phoneSeq.Data(), uttNum, gpuUttToChanInd, + gpuBeginFrame, gpuPhoneNum, gpuFrameNum, numParallelSequences, maxPhoneNum, totalPhoneNum); + + vectorscores(uttNum); + CUDA_CALL(cudaMemcpyAsync(scores.data(), gpuScores, sizeof(ElemType) * uttNum, cudaMemcpyDeviceToHost, t_stream)); + + for (size_t utt = 0; utt < uttFrameNum.size(); utt++) + { + totalScore += scores[utt]; + } + + CUDA_CALL(cudaFree(gpuFrameNum)); + CUDA_CALL(cudaFree(gpuPhoneNum)); + CUDA_CALL(cudaFree(gpuBeginFrame)); + CUDA_CALL(cudaFree(gpuUttToChanInd)); + CUDA_CALL(cudaFree(gpuScores)); + + CUDA_CALL(cudaEventRecord(done)); + CUDA_CALL(cudaEventSynchronize(done)); + CUDA_CALL(cudaEventDestroy(done)); + } + else + { + NOT_IMPLEMENTED; + } + + return *this; +} + +template +void GPUMatrix::ConductRowElementMultiplyWithShift(const GPUMatrix& a, const GPUMatrix& b, GPUMatrix& c, const size_t shift, const bool isafixed) +{ + if (a.GetComputeDeviceId() != b.GetComputeDeviceId() || b.GetComputeDeviceId() != c.GetComputeDeviceId()) // different GPUs + InvalidArgument("All matrices must be on the same GPU"); + + if (a.IsEmpty() || b.IsEmpty()) + LogicError("Scale: one of the input matrices is empty."); + + const int m = (int) a.GetNumRows(); + const int n = (int) a.GetNumCols(); + const int O = (int) b.GetNumRows(); + const int P = (int) b.GetNumCols(); + + assert(m > 0 && n > 0 && O > 0 && P > 0); // converting from size_t to int may cause overflow + if (m != 1 || n != P) + InvalidArgument("Matrices a and b should have same dimension."); + + c.RequireSize(O, P); + + if (true) + { + c.PrepareDevice(); + + dim3 thread_tail(DEFAULT_THREAD_PER_DIM, DEFAULT_THREAD_PER_DIM); + dim3 block_tail((O + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM, (P + DEFAULT_THREAD_PER_DIM - 1) / DEFAULT_THREAD_PER_DIM); + + SyncGuard syncGuard; + _conductRowElementMultiplyWithShift<<>>(c.Data(), a.Data(), b.Data(), O, P, shift, isafixed); + } +} + +template +GPUMatrix& GPUMatrix::AssignElementProductOfWithShift(const GPUMatrix& a, const GPUMatrix& b, const size_t shift) +{ + if (a.IsEmpty() || 
b.IsEmpty()) + LogicError("AssignElementProductOfWithShift: Matrix is empty."); + + assert(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()); + if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) + InvalidArgument("The input matrix dimensions do not match."); + + // int O = a.GetNumRows(); + int P = a.GetNumCols(); + + RequireSize(1, P); + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(((double) N) / GridDim::maxThreadsPerBlock); + a.PrepareDevice(); + SyncGuard syncGuard; + _assignElementProductOfWithShift<<>>(Data(), a.Data(), b.Data(), shift, N); + return *this; +} + +//sequence training +template +GPUMatrix& GPUMatrix::DropFrame(const GPUMatrix& label, const GPUMatrix& gamma, const ElemType& threshhold) +{ + if (IsEmpty()) + LogicError("DropFrame: Matrix is empty."); + + PrepareDevice(); + + long N = (long) GetNumCols(); // one kernel per column + int blocksPerGrid = (int) ceil(N * 1.0 / GridDim::maxThreadsPerBlock); + SyncGuard syncGuard; + _DropFrame<<>>(Data(), label.Data(), gamma.Data(), threshhold, (long) m_numCols, (long) m_numRows); + return *this; +} + +template +GPUMatrix& GPUMatrix::AssignSequenceError(const ElemType hsmoothingWeight, const GPUMatrix& label, + const GPUMatrix& dnnoutput, const GPUMatrix& gamma, ElemType alpha) +{ + if (IsEmpty()) + LogicError("AssignSequenceError: Matrix is empty."); + + PrepareDevice(); + + SyncGuard syncGuard; + long N = (LONG64) label.GetNumElements(); + int blocksPerGrid = (int) ceil(1.0 * N / GridDim::maxThreadsPerBlock); + _AssignSequenceError<<>>(hsmoothingWeight, Data(), label.Data(), dnnoutput.Data(), gamma.Data(), alpha, N); + return *this; +} + +#pragma endregion Static BLAS Functions + +/// f = logadd(f, vec) to get the logadd sum of vector elments +template +ElemType GPUMatrix::LogSumOfElements() const +{ + if (IsEmpty()) + LogicError("SumOfElements: Matrix is empty"); + + ElemType* d_sum = TracingGPUMemoryAllocator::Allocate(GetComputeDeviceId(), 1); + + ElemType h_sum; + CUDA_LONG N = (CUDA_LONG) GetNumElements(); + int blocksPerGrid = (int) ceil(((double) N) / GridDim::maxThreadsPerBlock); + + _reductionLogAddSum<<>>(Data(), + d_sum, 1, N); + CUDA_CALL(cudaMemcpy(&h_sum, d_sum, sizeof(ElemType), cudaMemcpyDeviceToHost)); + TracingGPUMemoryAllocator::Free(GetComputeDeviceId(), d_sum); + + return h_sum; +} + +template +void GPUMatrix::RCRFBackwardCompute( + const GPUMatrix& alpha, GPUMatrix& beta, + const GPUMatrix& /*lbls*/, + const GPUMatrix& pos_scores, const GPUMatrix& pair_scores, const int shift) +{ + if (alpha.IsEmpty() || pos_scores.IsEmpty() || pair_scores.IsEmpty()) + LogicError("RCRFBackwardCompute: one of the input matrices is empty."); + + if (alpha.GetNumRows() != pos_scores.GetNumRows() || alpha.GetNumCols() != pos_scores.GetNumCols()) + LogicError("RCRFBackwardCompute: matrix dimensions mismatched."); + + size_t iNumLab = alpha.GetNumRows(); + size_t iNumPos = alpha.GetNumCols(); + + alpha.PrepareDevice(); + beta.RequireSize(iNumLab, iNumPos); + + ElemType* d_zeta = TracingGPUMemoryAllocator::Allocate(alpha.GetComputeDeviceId(), iNumLab); + + CUDA_LONG N = iNumLab; + // TODO: change all three '512' to 'GridDim::maxThreadsPerBlock' (not doing this now since I cannot test it) + int blocksPerGrid = (int) ceil(1.0 * N / 512); + size_t szMemSize; + for (int t = iNumPos - 1; t >= 0; t--) + { + szMemSize = sizeof(ElemType) * iNumLab; + // This function assumes iNumLab <= 1024 and that shared memory == total (!) number of threads == iNumLab. 
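+ // For reference, the per-step quantity evaluated by the two kernels below is the usual
+ // linear-chain CRF backward recursion in log space (up to the transition-index convention of
+ // pair_scores; d_zeta appears to carry the per-step log-normalization term):
+ //   beta_t(i) = logadd_j( pair_scores(j, i) + pos_scores(j, t+1) + beta_{t+1}(j) )
+ // computed for t = iNumPos-1 down to 0, with iNumLab <= 1024 labels per the assertion below.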
+ assert(iNumLab <= 1024); + _rcrfBackwardComputeZetaMax1024Labels << > >(t, iNumPos, alpha.Data(), d_zeta, pair_scores.Data(), iNumLab, shift); + szMemSize = iNumLab * 3; + szMemSize *= sizeof(ElemType); + // This function assumes iNumLab <= 1024 and that shared memory == total (!) number of threads == 3 * iNumLab. + assert(iNumLab <= 1024); + _rcrfBackwardComputeMax1024Labels << > >(t, iNumPos, alpha.Data(), beta.Data(), + d_zeta, pair_scores.Data(), iNumLab, shift); + } + /* + error = cudaGetErrorString(cudaPeekAtLastError()); + printf("%s\n", error); + error = cudaGetErrorString(cudaThreadSynchronize()); + printf("%s\n", error); + */ + + TracingGPUMemoryAllocator::Free(alpha.GetComputeDeviceId(), d_zeta); +} + +/** + Compute the gradient for the first order Markov transition probabilities + It uses equations derived in R. Collobert's paper "Natural language processing (almost) from scratch" + */ +template +void GPUMatrix::RCRFTransGrdCompute(const GPUMatrix& lbls, + const GPUMatrix& alpha, + const GPUMatrix& beta, + const GPUMatrix& pair_scores, + GPUMatrix& grd, + const int startLbl, + const int shift) +{ + assert(shift == 1); + int iNumPos = alpha.GetNumCols(); + int iNumLab = alpha.GetNumRows(); + + ElemType* d_zeta = TracingGPUMemoryAllocator::Allocate(alpha.GetComputeDeviceId(), iNumLab); + + CUDA_LONG N = iNumLab; + // TODO: change all three '512' to 'GridDim::maxThreadsPerBlock' (not doing this now since I cannot test it) + int blocksPerGrid = (int)ceil(1.0 * N / 512); + size_t szMemSize; + for (int t = 0; t < iNumPos; t++) + { + szMemSize = sizeof(ElemType) * iNumLab; + // This function assumes iNumLab <= 1024 and that shared memory == total (!) number of threads == iNumLab. + assert(iNumLab <= 1024); + // BUGBUG: This is launched with 512 threads per block, but allocates shared mem as if there is only one block. Likewise for all 4 of these functions. + _rcrfTransGrdComputeZetaMax1024Labels << > >(t - 1, iNumPos, alpha.Data(), d_zeta, pair_scores.Data(), iNumLab, startLbl, shift); + szMemSize = iNumLab * 3; + szMemSize *= sizeof(ElemType); + // This function assumes iNumLab <= 1024 and that shared memory == total (!) number of threads == iNumLab. + assert(iNumLab <= 1024); + _rcrfTransGrdComputeMax1024Labels << > >(t, startLbl, alpha.Data(), beta.Data(), + d_zeta, pair_scores.Data(), lbls.Data(), grd.Data(), iNumPos, iNumLab, shift); + } + TracingGPUMemoryAllocator::Free(alpha.GetComputeDeviceId(), d_zeta); +}; + +// ----------------------------------------------------------------------- +// TensorView entry points from Matrix.cpp +// ----------------------------------------------------------------------- + +// helper to provide a vector of ones of at least the given number of elements +// TODO: Use this to implement ComputationNode::ConstOnes? Or do we even need that anymore? +template +static shared_ptr> GetOnesVector(size_t N, DEVICEID_TYPE deviceId) +{ + // using a dynamically allocated array so this will never get freed, avoiding free-after-DLL-unload issues. 
+ // and using shared_ptrs since we don't want to leak more than CacheSize elements + // when using a plain array we would have to control lifetime of the object and destructor would be called for every element in the array at the end + const int CacheSize = 32; + static shared_ptr> * onesCache = new shared_ptr>[CacheSize]; // cache of objects + + if (deviceId >= CacheSize){ + LogicError("GetOnesVector: onesCache[] too small (%d entries), increase (you need %d) and recompile.", CacheSize, (int)deviceId + 1); + } + + auto p = onesCache[deviceId]; + if (!p || p->GetNumRows() < N) // must (re-)allocate + { + p = make_shared>(GPUMatrix::Ones(N, 1, deviceId)); + onesCache[deviceId] = p; // this will replace the pointer thread-safely (although weird race conditions may happen where a larger entry is overwritten by a smaller one; will still run correctly) + } + return p; +} + +// perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides +// This binds the N-ariness to a template parameter N, and gets the data pointers out from the matrix objects. +template +void GPUMatrix::TensorOp(ElemType beta, const GPUMatrix& a, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 2>& regularStrides, + const SmallVector& reducingOpDims, const array, 2>& reducingStrides) +{ + if (reductionOp != ElementWiseOperator::opSum && + reductionOp != ElementWiseOperator::opLogSum && + reductionOp != ElementWiseOperator::opMin && + reductionOp != ElementWiseOperator::opMax && + reductionOp != ElementWiseOperator::opElementwiseProduct) + InvalidArgument("TensorOp: Unary reduction operations other than opMax, opMin, opSum, and opLogSum are not implemented."); + + a.PrepareDevice(); + if (a.GetComputeDeviceId() != GetComputeDeviceId()) + InvalidArgument("All matrices must be on the same GPU"); + + // special case: linear processing + // The case statement has measurable impact for unary ops (but not for binary ops it seems, due to double mem access). + // Linear gap-free unary ops happen so regularly that we will eliminate the case statement from the CUDA kernel, and instead expand all. + if (regularOpDims.size() == 1 && regularStrides[0][0] == 1 && regularStrides[1][0] == 1 && reducingOpDims.size() == 0) + { + // special case: for copy, use cudaMemcpy() instead, or cublas_axpy() + // TODO: We should observe if these actually make a speed difference, and if not, remove these special cases. + if (op == ElementWiseOperator::opCopy && beta == 0 && alpha == 1) + return CUDA_CALL(cudaMemcpy(Data()+ offsets[1], a.Data()+ offsets[0], sizeof(ElemType) * regularOpDims[0], cudaMemcpyDeviceToDevice)); + else if (op == ElementWiseOperator::opCopy && beta == 1) + return CUBLAS_CALL(cublas_axpy(GetCublasHandle(GetComputeDeviceId()), (int) regularOpDims[0], &alpha, a.Data()+ offsets[0], 1, Data()+ offsets[1], 1)); + else + return LaunchUnaryTensorOp(beta, a.Data()+ offsets[0], Data()+ offsets[1], alpha, op, regularOpDims[0]); + } + + // special case: sum-reducing a matrix onto a column vector; can be done with SGEMM + // Note: A minor risk is that with this, our own reduction function will rarely be used. + // That function was tested to give the same results with 'double', and nearly the same with 'float' (different summation order matters). 
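+ // Worked form of the shortcut taken in the branch below: summing A across its columns is just a
+ // matrix-vector product with a ones vector, issued here as a GEMM,
+ //   C (ARows x 1) = alpha * A (ARows x ACols) * ones (ACols x 1) + beta * C,
+ // where ALd (= reducingStrides[0][0]) is the leading dimension, i.e. the column stride, of A.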
+ else if (op == ElementWiseOperator::opCopy && // we are just adding to target without any further operation + reductionOp == ElementWiseOperator::opSum && +#ifdef _DEBUG + sizeof(ElemType) == sizeof(float) && // in debug don't shortcut 'double' so we have some test of our own codepath +#endif + regularOpDims.size() == 1 && regularStrides[0][0] == 1 && regularStrides[1][0] == 1 && // we are processing a column + reducingOpDims.size() == 1 && reducingStrides[0][0] >= (ptrdiff_t) regularOpDims[0]) // reducing across columns and no overlap + { + assert(reducingStrides[1][0] == 0); + auto ARows = regularOpDims[0]; // vertical steps + auto ACols = reducingOpDims[0]; // horizontal steps (reduction) + auto ALd = reducingStrides[0][0]; // horizontal step width through matrix + cublasHandle_t cuHandle = GetCublasHandle(a.GetComputeDeviceId()); + CUBLAS_CALL(cublas_gemm(cuHandle, CUBLAS_OP_N, CUBLAS_OP_N, (int) /*CRows=*/ARows, /*CCols=*/1, (int) ACols, &alpha, + /*A00=*/a.Data()+ offsets[0], (int) ALd, + /*B00=*/GetOnesVector(ACols, a.GetComputeDeviceId())->Data(), (int) /*BRows=*/ACols, &beta, + /*C00=*/Data()+ offsets[1], (int) /*CRows=*/ARows)); + return; + } + + // TODO: Add a special case for tensor bias reduction. cudnn is ~7% faster on Image/QuickE2E. + + // regular case + else + return TensorOpN(beta, array{a.Data(), Data()}, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides); +} + +// perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides +template +void GPUMatrix::TensorOp(ElemType beta, const GPUMatrix& a, const GPUMatrix& b, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 3>& regularStrides, + const SmallVector& reducingOpDims, const array, 3>& reducingStrides) +{ + if (reductionOp != ElementWiseOperator::opSum) + InvalidArgument("TensorOp: The only permitted binary reduction operation is opSum."); + + a.PrepareDevice(); + if (a.GetComputeDeviceId() != GetComputeDeviceId() || b.GetComputeDeviceId() != GetComputeDeviceId()) + InvalidArgument("All matrices must be on the same GPU"); + + return TensorOpN(beta, array{a.Data(), b.Data(), Data()}, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides); +} + +// perform ternary operation 'op' on a, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides +template +void GPUMatrix::TensorOp(ElemType beta, const GPUMatrix& a, const GPUMatrix& b, const GPUMatrix& c, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 4>& regularStrides, + const SmallVector& reducingOpDims, const array, 4>& reducingStrides) +{ + if (reductionOp != ElementWiseOperator::opSum) + InvalidArgument("TensorOp: The only permitted ternary reduction operation is opSum."); + + a.PrepareDevice(); + if (a.GetComputeDeviceId() != GetComputeDeviceId() || b.GetComputeDeviceId() != GetComputeDeviceId() || c.GetComputeDeviceId() != GetComputeDeviceId()) + InvalidArgument("All matrices must be on the same GPU"); + return TensorOpN(beta, array{a.Data(), b.Data(), c.Data(), Data()}, alpha, op, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides); +} + +template +void GPUMatrix::TensorArgOp(const GPUMatrix& a, ElementWiseOperator reductionOp, + const array& 
offsets, + const SmallVector& regularOpDims, const array, 2>& regularStrides, + const SmallVector& reducingOpDims, const array, 2>& reducingStrides) +{ + if (reductionOp != ElementWiseOperator::opArgmin && + reductionOp != ElementWiseOperator::opArgmax) + InvalidArgument("TensorOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented."); + + a.PrepareDevice(); + if (a.GetComputeDeviceId() != GetComputeDeviceId()) + InvalidArgument("All matrices must be on the same GPU"); + return TensorOpN((ElemType) 0, array{a.Data(), Data()}, (ElemType) 1, ElementWiseOperator::opCopy, reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides); +} + +// ======================================================================= +// explicit instantiations business +// ======================================================================= + +template class GPUMatrix; +template class GPUMatrix; +template class DeviceBoundNumber; +template class DeviceBoundNumber; + +template +cublasHandle_t GPUMatrix::s_cuHandle[GPUMatrix::MaxGpus] = {0}; + +template +void* GPUMatrix::s_curandGenerator = NULL; + +// We use Matrix as the backing store for QuantizedMatrix +// Let's explicitly instantiate the methods we need for that purpose +template GPUMatrix::GPUMatrix(const size_t numRows, const size_t numCols, int deviceId); +template GPUMatrix::GPUMatrix(const size_t numRows, const size_t numCols, int deviceId, char* pArray, const size_t matrixFlags); +template GPUMatrix::GPUMatrix(const GPUMatrix&); +template GPUMatrix::GPUMatrix(GPUMatrix&&); +template char* GPUMatrix::CopyToArray() const; +template void GPUMatrix::ChangeDeviceTo(int); +template void GPUMatrix::Resize(size_t, size_t, bool); +template void GPUMatrix::RequireSize(size_t, size_t, bool); + +template GPUMatrix::~GPUMatrix(); +template GPUMatrix GPUMatrix::ColumnSlice(size_t startColumn, size_t numCols) const; +template GPUMatrix& GPUMatrix::operator=(GPUMatrix&&); +template GPUMatrix::GPUMatrix(int); +template void GPUMatrix::SetValue(const char); +template void GPUMatrix::SetValue(const size_t numRows, const size_t numCols, int deviceId, char* pArray, size_t matrixFlags, DataTransferer* transferer); +//template void GPUMatrix::SetValue(CPUMatrix const&); +template void GPUMatrix::SetValue(GPUMatrix const&); +//template void GPUMatrix::SetValue(CPUSparseMatrix const&); +//template void GPUMatrix::SetValue(GPUSparseMatrix const&); +template void GPUMatrix::CopySection(size_t numRows, size_t numCols, char* dst, size_t colStride) const; +template void GPUMatrix::Reshape(const size_t, const size_t); +template GPUMatrix& GPUMatrix::operator*=(char); +template DEVICEID_TYPE GPUMatrix::PrepareDevice(DEVICEID_TYPE deviceId) const; + +// Support +template GPUMatrix::GPUMatrix(const size_t numRows, const size_t numCols, int deviceId); +template GPUMatrix::GPUMatrix(const size_t numRows, const size_t numCols, int deviceId, short* pArray, const size_t matrixFlags); +template GPUMatrix::GPUMatrix(const GPUMatrix&); +template GPUMatrix::GPUMatrix(GPUMatrix&&); +template short* GPUMatrix::CopyToArray() const; +template void GPUMatrix::ChangeDeviceTo(int); +template void GPUMatrix::Resize(size_t, size_t, bool); +template void GPUMatrix::RequireSize(size_t, size_t, bool); + +template GPUMatrix::~GPUMatrix(); +template GPUMatrix GPUMatrix::ColumnSlice(size_t startColumn, size_t numCols) const; +template GPUMatrix& GPUMatrix::operator=(GPUMatrix&&); +template GPUMatrix::GPUMatrix(int); +template void GPUMatrix::SetValue(const 
short); +template void GPUMatrix::SetValue(const size_t numRows, const size_t numCols, int deviceId, short* pArray, size_t matrixFlags, DataTransferer* transferer); +//template void GPUMatrix::SetValue(CPUMatrix const&); +template void GPUMatrix::SetValue(GPUMatrix const&); +//template void GPUMatrix::SetValue(CPUSparseMatrix const&); +//template void GPUMatrix::SetValue(GPUSparseMatrix const&); +template void GPUMatrix::CopySection(size_t numRows, size_t numCols, short* dst, size_t colStride) const; +template void GPUMatrix::Reshape(const size_t, const size_t); +template GPUMatrix& GPUMatrix::operator*=(short); +template DEVICEID_TYPE GPUMatrix::PrepareDevice(DEVICEID_TYPE deviceId) const; + +template GPUMatrix::GPUMatrix(const size_t, const size_t, int, int*, const size_t); +template GPUMatrix::~GPUMatrix(); + +template int* TracingGPUMemoryAllocator::Allocate(int, size_t); +template size_t* TracingGPUMemoryAllocator::Allocate(int, size_t); +template long* TracingGPUMemoryAllocator::Allocate(int, size_t); +template short* TracingGPUMemoryAllocator::Allocate(int, size_t); +template char* TracingGPUMemoryAllocator::Allocate(int, size_t); +template float* TracingGPUMemoryAllocator::Allocate(int, size_t); +template double* TracingGPUMemoryAllocator::Allocate(int, size_t); + +template void TracingGPUMemoryAllocator::Free(int, int*, bool); +template void TracingGPUMemoryAllocator::Free(int, size_t*, bool); +template void TracingGPUMemoryAllocator::Free(int, short*, bool); +template void TracingGPUMemoryAllocator::Free(int, char*, bool); +template void TracingGPUMemoryAllocator::Free(int, float*, bool); +template void TracingGPUMemoryAllocator::Free(int, double*, bool); + +}}} + +// !!!!This is from helper_cuda.h which comes with CUDA samples!!!! Consider if it is beneficial to just include all helper_cuda.h +// TODO: This is duplicated in BestGpu.cpp +// Beginning of GPU Architecture definitions +int _ConvertSMVer2Cores(int major, int minor) +{ + // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM + typedef struct + { + int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version + int Cores; + } sSMtoCores; + + sSMtoCores nGpuArchCoresPerSM[] = + { + {0x10, 8}, // Tesla Generation (SM 1.0) G80 class + {0x11, 8}, // Tesla Generation (SM 1.1) G8x class + {0x12, 8}, // Tesla Generation (SM 1.2) G9x class + {0x13, 8}, // Tesla Generation (SM 1.3) GT200 class + {0x20, 32}, // Fermi Generation (SM 2.0) GF100 class + {0x21, 48}, // Fermi Generation (SM 2.1) GF10x class + {0x30, 192}, // Kepler Generation (SM 3.0) GK10x class + {0x35, 192}, // Kepler Generation (SM 3.5) GK11x class + {-1, -1}}; + + int index = 0; + + while (nGpuArchCoresPerSM[index].SM != -1) + { + if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) + { + return nGpuArchCoresPerSM[index].Cores; + } + + index++; + } + return nGpuArchCoresPerSM[7].Cores; +}; +// end of GPU Architecture definitions + +//inline CUDA_LONG _GetFreeMemoryOnCUDADevice(int devId) +//{ +// CUdevice cudaDevice; +// CUresult result = cuDeviceGet(&cudaDevice, devId); +// if(result!= CUDA_SUCCESS) +// { +// return 0; +// } +// +// // create cuda context +// CUcontext cudaContext; +// result = cuCtxCreate(&cudaContext, CU_CTX_SCHED_AUTO, cudaDevice); +// if(result != CUDA_SUCCESS) +// { +// return 0; +// } +// +// // get the amount of free memory on the graphics card +// size_t free; +// size_t total; +// result = cuMemGetInfo(&free, &total); +// if (result!=CUDA_SUCCESS) +// { +// return 
0; +// } +// else +// return (CUDA_LONG)free; +//} + +#endif // CPUONLY diff --git a/cuda_code/GPUSimulator_1.cu b/cuda_code/GPUSimulator_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..914f50db2fd38af7ecc5481d2faff33acb60f8f9 --- /dev/null +++ b/cuda_code/GPUSimulator_1.cu @@ -0,0 +1,435 @@ +/* Copyright (c) 2013-2020, MUSEN Development Team. All rights reserved. + This file is part of MUSEN framework http://msolids.net/musen. + See LICENSE file for license and warranty information. */ + +#include "GPUSimulator.cuh" +#include + +CGPU::CGPU(const CCUDADefines* _cudaDefines) : + m_cudaDefines{ _cudaDefines } +{ + CUDAKernels::SetThreadsNumber(m_cudaDefines->CUDA_THREADS_PER_BLOCK); +} + +void CGPU::SetExternalAccel(const CVector3& _acceleration) +{ + CUDAKernels::SetExternalAccel(_acceleration); +} + +void CGPU::SetSimulationDomain(const SVolumeType& _domain) +{ + CUDAKernels::SetSimulationDomain(_domain); +} + +void CGPU::SetPBC(const SPBC& _PBCInfo) +{ + m_PBCEnabled = _PBCInfo.bEnabled; + CUDAKernels::SetPBC(_PBCInfo); +} + +void CGPU::SetCompoundsNumber(size_t _nCompounds) +{ + CUDAKernels::SetCompoundsNumber(_nCompounds); +} + +void CGPU::SetAnisotropyFlag(bool _enabled) +{ + CUDAKernels::SetAnisotropyFlag(_enabled); +} + +void CGPU::InitializeWalls(const std::vector>& _vvWallsInGeom, const std::vector>& _adjacentWalls) +{ + /// set walls in geometries + m_vvWallsInGeom.resize(_vvWallsInGeom.size()); + for (size_t i = 0; i < _vvWallsInGeom.size(); ++i) + { + // NOTE: conventional assignment copy leads to warnings in debug + m_vvWallsInGeom[i].resize(_vvWallsInGeom[i].size()); + for (size_t j = 0; j < _vvWallsInGeom[i].size(); ++j) + m_vvWallsInGeom[i][j] = _vvWallsInGeom[i][j]; + } + + /// set adjacent walls + h_vec_u hostStartIndices; + h_vec_u hostAdjacentWalls; + + size_t number = 0; // total number of elements in the matrix + for (const auto& list : _adjacentWalls) + number += list.size(); + + hostAdjacentWalls.resize(number); + hostStartIndices.resize(_adjacentWalls.size() + 1); + + if (!hostStartIndices.empty()) hostStartIndices.front() = 0; // for easier access + for (size_t i = 1; i < _adjacentWalls.size(); ++i) + hostStartIndices[i] = hostStartIndices[i - 1] + _adjacentWalls[i - 1].size(); + if (!hostStartIndices.empty()) hostStartIndices.back() = number - 1; // for easier access + + ParallelFor(_adjacentWalls.size(), [&](size_t i) + { + std::copy(_adjacentWalls[i].begin(), _adjacentWalls[i].end(), hostAdjacentWalls.begin() + hostStartIndices[i]); + }); + + m_adjacentWalls.startIndices = hostStartIndices; + m_adjacentWalls.adjacentWalls = hostAdjacentWalls; +} + +void CGPU::InitializeCollisions() +{ + m_CollisionsPP.vVerletDst.clear(); + m_CollisionsPP.vVerletPartInd.clear(); + m_CollisionsPP.vVerletSrc.clear(); + m_CollisionsPP.collisions.Clear(); + + m_CollisionsPW.vVerletDst.clear(); + m_CollisionsPW.vVerletPartInd.clear(); + m_CollisionsPW.vVerletSrc.clear(); + m_CollisionsPW.collisions.Clear(); +} + + +void CGPU::Flags2IndicesList(const size_t _size, bool _flags[], d_vec_u& _sequence, d_vec_i8& _storage, unsigned* _listLength, unsigned _list[]) +{ + if (!_size) return; + + // sequence [ 0; _size - 1 ] + if (_sequence.size() != _size) + { + _sequence.resize(_size); + thrust::sequence(_sequence.begin(), _sequence.end()); + } + // Determine temporary device storage requirements + void *pTempStorage = nullptr; + size_t nTempStorageSize = 0; + thrust::cuda_cub::cub::DeviceSelect::Flagged(pTempStorage, nTempStorageSize, _sequence.data().get(), _flags, 
_list, _listLength, static_cast(_size)); + // Allocate temporary storage + if (_storage.size() < nTempStorageSize) + _storage.resize(nTempStorageSize); + // Run selection + thrust::cuda_cub::cub::DeviceSelect::Flagged(_storage.data().get(), nTempStorageSize, _sequence.data().get(), _flags, _list, _listLength, static_cast(_size)); +} + +void CGPU::ApplyExternalAcceleration(SGPUParticles& _particles) +{ + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::ApplyExternalAcceleration_kernel, static_cast(_particles.nElements), _particles.Masses, _particles.Forces); +} + +double CGPU::CalculateNewTimeStep(double _currTimeStep, double _initTimeStep, double _partMoveLimit, double _timeStepFactor, SGPUParticles& _particles) const +{ + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::GatherForFlexibleTimeStep_kernel, static_cast(_particles.nElements), + _particles.Masses, _particles.Forces, _particles.TempDouble1); + + static d_vec_d maxTimeStep; + if (maxTimeStep.empty()) + maxTimeStep.resize(1); + CUDA_REDUCE_CALLER(CUDAKernels::ReduceMin_kernel, _particles.nElements, _particles.TempDouble1, _particles.TempDouble2, maxTimeStep.data().get()); + + double maxStep; + CUDA_MEMCPY_D2H(&maxStep, maxTimeStep.data().get(), sizeof(double)); + maxStep = std::sqrt(std::sqrt(maxStep) * _partMoveLimit); + + if (_currTimeStep > maxStep) + return maxStep; + if (_currTimeStep < _initTimeStep) + return std::min(_currTimeStep * _timeStepFactor, _initTimeStep); + return _currTimeStep; +} + +void CGPU::MoveParticles(double& _currTimeStep, double _initTimeStep, SGPUParticles& _particles, bool _bFlexibleTimeStep) +{ + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::MoveParticles_kernel, _currTimeStep, static_cast(_particles.nElements), _particles.Masses, _particles.InertiaMoments, + _particles.Moments, _particles.Forces, _particles.Vels, _particles.AnglVels, _particles.Coords, _particles.Quaternions); +} + +void CGPU::MoveParticlesPrediction(double _timeStep, SGPUParticles& _particles) +{ + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::MoveParticlesPrediction_kernel, _timeStep, static_cast(_particles.nElements), + _particles.Masses, _particles.InertiaMoments, _particles.Moments, _particles.Forces, _particles.Vels, _particles.AnglVels, _particles.Quaternions); +} + +void CGPU::CalculateTotalForceOnWall(size_t _iGeom, SGPUWalls& _walls, d_vec_v3& _vTotalForce) +{ + static d_vec_v3 forces, temp; + if (forces.size() != m_vvWallsInGeom[_iGeom].size()) + forces.resize(m_vvWallsInGeom[_iGeom].size()); + if (temp.size() != m_vvWallsInGeom[_iGeom].size()) + temp.resize(m_vvWallsInGeom[_iGeom].size()); + //calculate force + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::GatherForcesFromWalls_kernel, static_cast(m_vvWallsInGeom[_iGeom].size()), + m_vvWallsInGeom[_iGeom].data().get(), _walls.Forces, forces.data().get()); + CUDA_REDUCE_CALLER(CUDAKernels::ReduceSum_kernel, m_vvWallsInGeom[_iGeom].size(), forces.data().get(), temp.data().get(), _vTotalForce.data().get()); +} + +CVector3 CGPU::CalculateTotalForceOnWall(size_t _iGeom, SGPUWalls & _walls) +{ + static d_vec_v3 vTotalForce(1); + CVector3 vResult; + CalculateTotalForceOnWall(_iGeom, _walls, vTotalForce); + CUDA_MEMCPY_D2H(&vResult, vTotalForce.data().get(), sizeof(CVector3)); + return vResult; +} + +void CGPU::MoveWalls(double _timeStep, size_t _iGeom, const CVector3& _vel, const CVector3& _rotVel, const CVector3& _rotCenter, const CMatrix3& _rotMatrix, + const CVector3& _freeMotion, bool _bForceDependentMotion, bool _bRotateAroundCenter, double _dMass, SGPUWalls& _walls, const CVector3& _vExternalAccel) +{ + static 
d_vec_v3 vTotalForce(1); + static d_vec_v3 vRotCenter(1); // used in case when rotation around center is defined + + if (_bRotateAroundCenter || _bForceDependentMotion || !_freeMotion.IsZero()) + CalculateTotalForceOnWall(_iGeom, _walls, vTotalForce); + if (_bRotateAroundCenter) // precalculate rotation center + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::CalculateGeometryCenter_kernel, static_cast(m_vvWallsInGeom[_iGeom].size()), m_vvWallsInGeom[_iGeom].data().get(), + _walls.Vertices1, _walls.Vertices2, _walls.Vertices3, vRotCenter.data().get()); + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::MoveWalls_kernel, _timeStep, + static_cast(m_vvWallsInGeom[_iGeom].size()), _vel, _rotVel, _rotCenter, _rotMatrix, + _freeMotion, vTotalForce.data().get(), _dMass, _bRotateAroundCenter, _vExternalAccel, + vRotCenter.data().get(), m_vvWallsInGeom[_iGeom].data().get(), + _walls.Vertices1, _walls.Vertices2, _walls.Vertices3, _walls.MinCoords, + _walls.MaxCoords, _walls.NormalVectors, _walls.Vels, _walls.RotCenters, _walls.RotVels); +} + +void CGPU::UpdateVerletLists(bool _bPPVerlet, const SGPUParticles& _particles, const SGPUWalls& _walls, const h_vec_u& _vVerListSrcNew, const h_vec_u& _vVerListDstNew, + const h_vec_u& _vVerListIndNew, const h_vec_u8& _vVirtShifts, d_vec_u& _vVerListSrcOld, d_vec_u& _vVerListDstOld, d_vec_u& _vVerListIndOld, SGPUCollisions& _collisions) const +{ + const d_vec_u dvVerlSrcNew(_vVerListSrcNew); + const d_vec_u dvVerlDstNew(_vVerListDstNew); + const d_vec_u dvVerlIndNew(_vVerListIndNew); + const size_t collNum = dvVerlDstNew.size(); + static SGPUCollisions newCollisions; + newCollisions.Resize(collNum); + + CUDA_MEMSET(newCollisions.TangOverlaps, 0, collNum * sizeof(*newCollisions.TangOverlaps)); + CUDA_MEMSET(newCollisions.TotalForces, 0, collNum * sizeof(*newCollisions.TotalForces)); + CUDA_MEMSET(newCollisions.NormalOverlaps, 0, collNum * sizeof(*newCollisions.NormalOverlaps)); + CUDA_MEMSET(newCollisions.ActivityFlags, 0, collNum * sizeof(*newCollisions.ActivityFlags)); + + if (m_PBCEnabled) + CUDA_MEMCPY_H2D(newCollisions.VirtualShifts, _vVirtShifts.data(), collNum * sizeof(*newCollisions.VirtualShifts)); + else + CUDA_MEMSET(newCollisions.VirtualShifts, 0, collNum * sizeof(*newCollisions.VirtualShifts)); + + if (_bPPVerlet) + { + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::InitializePPCollisions_kernel, static_cast(collNum), dvVerlSrcNew.data().get(), dvVerlDstNew.data().get(), + _particles.ContactRadii, _particles.Masses, _particles.CompoundIndices, + newCollisions.SrcIDs, newCollisions.DstIDs, newCollisions.EquivMasses, newCollisions.EquivRadii, newCollisions.SumRadii, newCollisions.InteractPropIDs); + if (!_vVerListDstOld.empty()) + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::CopyCollisionsPP_kernel, + static_cast(_vVerListDstOld.size()), + _vVerListSrcOld.data().get(), _vVerListDstOld.data().get(), dvVerlDstNew.data().get(), dvVerlIndNew.data().get(), _collisions.ActivityFlags, + _collisions.NormalOverlaps, _collisions.TangOverlaps, _collisions.ContactVectors, _collisions.TotalForces, + newCollisions.NormalOverlaps, newCollisions.TangOverlaps, newCollisions.ContactVectors, newCollisions.TotalForces); + } + else + { + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::InitializePWCollisions_kernel, static_cast(collNum), dvVerlSrcNew.data().get(), dvVerlDstNew.data().get(), + _particles.CompoundIndices, _walls.CompoundIndices, + newCollisions.SrcIDs, newCollisions.DstIDs, newCollisions.InteractPropIDs); + if (!_vVerListDstOld.empty()) + 
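+			// carry over stored contact state (activity flags, normal and tangential overlaps, contact vectors, total forces) for PW collisions that also exist in the previous Verlet list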
CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::CopyCollisionsPW_kernel, + static_cast(_vVerListDstOld.size()), + _vVerListSrcOld.data().get(), _vVerListDstOld.data().get(), dvVerlDstNew.data().get(), dvVerlIndNew.data().get(), _collisions.ActivityFlags, + _collisions.NormalOverlaps, _collisions.TangOverlaps, _collisions.ContactVectors, _collisions.TotalForces, + newCollisions.ActivityFlags, newCollisions.NormalOverlaps, newCollisions.TangOverlaps, newCollisions.ContactVectors, newCollisions.TotalForces); + } + + _collisions.CopyFrom(newCollisions); + + _vVerListDstOld = dvVerlDstNew; + _vVerListIndOld = dvVerlIndNew; + _vVerListSrcOld = dvVerlSrcNew; +} + +void CGPU::SortByDst(unsigned _nPart, const d_vec_u& _vVerListSrc, const d_vec_u& _vVerListDst, d_vec_u& _vVerCollInd_DstSorted, d_vec_u& _vVerPartInd_DstSorted) const +{ + unsigned nCollisions = _vVerListSrc.size(); + static d_vec_u vVerListDstTemp, vTemp; + vVerListDstTemp = _vVerListDst; + _vVerCollInd_DstSorted.resize(nCollisions); + _vVerPartInd_DstSorted.resize(_nPart + 1); + vTemp.resize(_nPart + 1); + thrust::fill(vTemp.begin(), vTemp.end(), nCollisions + 1); // fill initially with impossible values to indicate later what was not filled + + thrust::sequence(thrust::device, _vVerCollInd_DstSorted.begin(), _vVerCollInd_DstSorted.end()); + thrust::sort_by_key(thrust::device, vVerListDstTemp.begin(), vVerListDstTemp.end(), _vVerCollInd_DstSorted.begin()); + + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::FillUniqueIndexes_kernel, nCollisions, vVerListDstTemp.data().get(), vTemp.data().get()); + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::FillNonExistendIndexes_kernel, _nPart, nCollisions, vTemp.data().get(), _vVerPartInd_DstSorted.data().get()); +} + +void CGPU::UpdateActiveCollisionsPP(const SGPUParticles& _particles) +{ + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::UpdateActiveCollisionsPP_kernel, static_cast(m_CollisionsPP.vVerletSrc.size()), m_CollisionsPP.vVerletSrc.data().get(), m_CollisionsPP.vVerletDst.data().get(), + _particles.Coords, m_CollisionsPP.collisions.VirtualShifts, m_CollisionsPP.collisions.SumRadii, m_CollisionsPP.collisions.ActivityFlags, + m_CollisionsPP.collisions.NormalOverlaps, m_CollisionsPP.collisions.ContactVectors, m_CollisionsPP.collisions.TangOverlaps); + + static d_vec_u sequence; // temporal vector for indices needed internally in Flags2IndicesList + static d_vec_i8 tempStorage; // temporal storage needed internally in Flags2IndicesList + Flags2IndicesList(static_cast(m_CollisionsPP.vVerletSrc.size()), m_CollisionsPP.collisions.ActivityFlags, sequence, tempStorage, m_CollisionsPP.collisions.ActiveCollisionsNum, m_CollisionsPP.collisions.ActivityIndices); +} + +void CGPU::UpdateActiveCollisionsPW(const SGPUParticles& _particles, const SGPUWalls& _walls) +{ + static d_vec_IT vTempIntersectType; + + static d_vec_b vActivePart; + static d_vec_u vActivePartIndexes; + static d_vec_u nActivePartIndexesNumber(1); + vTempIntersectType.resize(m_CollisionsPW.vVerletDst.size()); + vActivePart.resize(_particles.nElements); + vActivePartIndexes.resize(_particles.nElements); + thrust::fill(thrust::device, vActivePart.begin(), vActivePart.end(), false); + + const unsigned nCollisions = static_cast(m_CollisionsPW.vVerletDst.size()); + + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::GetIntersectTypePW_kernel, nCollisions, + m_CollisionsPW.vVerletSrc.data().get(), m_CollisionsPW.vVerletDst.data().get(), + _particles.ContactRadii, _particles.Coords, _walls.Vertices1, _walls.Vertices2, _walls.Vertices3, _walls.MinCoords, _walls.MaxCoords, 
_walls.NormalVectors, + m_CollisionsPW.collisions.VirtualShifts, vTempIntersectType.data().get(), m_CollisionsPW.collisions.ContactVectors, vActivePart.data().get()); + + static d_vec_u sequence; + static d_vec_i8 tempStorage; + Flags2IndicesList(vActivePart.size(), vActivePart.data().get(), sequence, tempStorage, nActivePartIndexesNumber.data().get(), vActivePartIndexes.data().get()); + + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::CombineIntersectionsPW_kernel, + nActivePartIndexesNumber.data().get(), vActivePartIndexes.data().get(), + static_cast(m_CollisionsPW.vVerletPartInd.size()), nCollisions, + m_CollisionsPW.vVerletDst.data().get(), m_CollisionsPW.vVerletPartInd.data().get(), _walls.NormalVectors, + vTempIntersectType.data().get(), m_CollisionsPW.collisions.VirtualShifts); + + // treat contact transition between adjacent triangles + static d_vec_b collActivated, collDeactivated; + collActivated.resize(nCollisions); + collDeactivated.resize(nCollisions); + thrust::fill(thrust::device, collActivated.begin(), collActivated.end(), false); + thrust::fill(thrust::device, collDeactivated.begin(), collDeactivated.end(), false); + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::UpdateActiveCollisionsPW_kernel, nCollisions, vTempIntersectType.data().get(), + m_CollisionsPW.collisions.ActivityFlags, collActivated.data().get(), collDeactivated.data().get()); + static d_vec_u sequence2; + static d_vec_i8 tempStorage2; + static d_vec_u activatedCollIndices; + static d_vec_u nActivatedColls(1); + activatedCollIndices.resize(nCollisions); + Flags2IndicesList(nCollisions, collActivated.data().get(), sequence2, tempStorage2, nActivatedColls.data().get(), activatedCollIndices.data().get()); + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::CopyCollisionsForAdjacentWalls, + nActivatedColls.data().get(), activatedCollIndices.data().get(), collDeactivated.data().get(), + m_CollisionsPW.vVerletSrc.data().get(), m_CollisionsPW.vVerletDst.data().get(), m_CollisionsPW.vVerletPartInd.data().get(), + m_adjacentWalls.adjacentWalls.data().get(), m_adjacentWalls.startIndices.data().get(), + m_CollisionsPW.collisions.TangOverlaps); + + Flags2IndicesList(nCollisions, m_CollisionsPW.collisions.ActivityFlags, sequence2, tempStorage2, m_CollisionsPW.collisions.ActiveCollisionsNum, m_CollisionsPW.collisions.ActivityIndices); +} + +void CGPU::GatherForcesFromPWCollisions(SGPUParticles& _particles, SGPUWalls& _walls) const +{ + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::GatherForcesFromPWCollisions_kernel, _particles.Forces, _walls.Forces, + m_CollisionsPW.collisions.ActiveCollisionsNum, m_CollisionsPW.collisions.ActivityIndices, + m_CollisionsPW.collisions.SrcIDs, m_CollisionsPW.collisions.DstIDs, m_CollisionsPW.collisions.TotalForces); +} + +void CGPU::CheckParticlesInDomain(const double _currTime, const SGPUParticles& _particles, unsigned* _bufActivePartsNum) const +{ + if (!_particles.nElements) + { + unsigned nTemp = static_cast(_particles.nElements); + CUDA_MEMCPY_H2D(_bufActivePartsNum, &nTemp, sizeof(unsigned)); + return; + } + + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::CheckParticlesInDomain_kernel, _currTime, static_cast(_particles.nElements), + _particles.Activities, _particles.EndActivities, _particles.Coords); + CUDA_REDUCE_CALLER(CUDAKernels::ReduceSum_kernel, _particles.nElements, _particles.Activities, _particles.TempUInt, _bufActivePartsNum); +} + +void CGPU::CheckBondsActivity(const double _currTime, const SGPUParticles& _particles, SGPUSolidBonds& _bonds) +{ + if (!_particles.nElements) + return; + + 
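+	// update bond activity from the current activity of the particles referenced by LeftIDs/RightIDs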
CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::CheckBondsActivity_kernel, _currTime, static_cast(_bonds.nElements), + _particles.Activities, _bonds.Activities, _bonds.LeftIDs, _bonds.RightIDs, _bonds.EndActivities); +} + +void CGPU::MoveParticlesOverPBC(const SGPUParticles& _particles) +{ + static d_vec_u8 vCrossingShifts; // shifts for particles, which crossed PBC boundaries + static d_vec_b vCrossingFlags; // indicates that particle crossed PBC boundaries + vCrossingShifts.resize(_particles.nElements); + vCrossingFlags.resize(_particles.nElements); + thrust::fill(thrust::device, vCrossingShifts.begin(), vCrossingShifts.end(), 0); + thrust::fill(thrust::device, vCrossingFlags.begin(), vCrossingFlags.end(), false); + + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::MoveVirtualParticlesBox, static_cast(_particles.nElements), _particles.Activities, + _particles.Coords, _particles.CoordsVerlet, vCrossingShifts.data().get(), vCrossingFlags.data().get()); + + // turn crossing shifts flags to particles' indices + static d_vec_u sequence; // temporal vector for indices needed internally in Flags2IndicesList + static d_vec_i8 tempStorage; // temporal storage needed internally in Flags2IndicesList + static d_vec_u nCrossed(1); // [0] - number of crossed particles + static d_vec_u dvCrossedIndices; // indices of crossed particles + dvCrossedIndices.resize(_particles.nElements); + Flags2IndicesList(static_cast(_particles.nElements), vCrossingFlags.data().get(), sequence, tempStorage, nCrossed.data().get(), dvCrossedIndices.data().get()); + + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::AddShiftsToCollisions, static_cast(_particles.nElements), nCrossed.data().get(), dvCrossedIndices.data().get(), vCrossingShifts.data().get(), + m_CollisionsPP.vVerletPartInd.data().get(), m_CollisionsPP.vVerletPartInd_DstSorted.data().get(), m_CollisionsPP.vVerletCollInd_DstSorted.data().get(), + static_cast(m_CollisionsPP.vVerletSrc.size()), m_CollisionsPP.collisions.SrcIDs, m_CollisionsPP.collisions.DstIDs, m_CollisionsPP.collisions.VirtualShifts); +} + +void CGPU::CopyCollisionsGPU2CPU(SGPUCollisions& _PPCollisionsHost, SGPUCollisions& _PWCollisionsHost) const +{ + _PPCollisionsHost.CopyFrom(m_CollisionsPP.collisions); + _PWCollisionsHost.CopyFrom(m_CollisionsPW.collisions); +} + +void CGPU::GetOverlapsInfo(const SGPUParticles& _particles, size_t _maxParticleID, double& _maxOverlap, double& _avrOverlap) const +{ + const unsigned collNumberPP = m_CollisionsPP.collisions.nElements; + const unsigned collNumberPW = m_CollisionsPW.collisions.nElements; + + static d_vec_d overlapsPP, overlapsPW, tempPP, tempPW; + static d_vec_u8 flagsPP, flagsPW; + overlapsPP.resize(collNumberPP); + overlapsPW.resize(collNumberPW); + tempPP.resize(collNumberPP); + tempPW.resize(collNumberPW); + flagsPP.resize(collNumberPP); + flagsPW.resize(collNumberPW); + thrust::fill(overlapsPP.begin(), overlapsPP.end(), 0.0); + thrust::fill(overlapsPW.begin(), overlapsPW.end(), 0.0); + thrust::fill(flagsPP.begin(), flagsPP.end(), 0); + thrust::fill(flagsPW.begin(), flagsPW.end(), 0); + d_vec_d res(4, 0); // {maxPP, maxPW, sumPP, sumPW} + size_t numberPP{ 0 }, numberPW{ 0 }; + + // for PP collisions + if (collNumberPP) + { + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::GetPPOverlaps_kernel, + m_CollisionsPP.collisions.ActiveCollisionsNum, m_CollisionsPP.collisions.ActivityIndices, m_CollisionsPP.collisions.SrcIDs, m_CollisionsPP.collisions.DstIDs, + m_CollisionsPP.collisions.NormalOverlaps, _maxParticleID, + overlapsPP.data().get(), flagsPP.data().get()); + 
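+		// reduce the gathered PP overlaps on the device: maximum into res[0], sum into res[2]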
CUDA_REDUCE_CALLER(CUDAKernels::ReduceMax_kernel, collNumberPP, overlapsPP.data().get(), tempPP.data().get(), thrust::device_pointer_cast(&res[0]).get()); + CUDA_REDUCE_CALLER(CUDAKernels::ReduceSum_kernel, collNumberPP, overlapsPP.data().get(), tempPP.data().get(), thrust::device_pointer_cast(&res[2]).get()); + numberPP = thrust::count(flagsPP.begin(), flagsPP.end(), size_t(1)); + } + + // for PW collisions + if (collNumberPW) + { + CUDA_KERNEL_ARGS2_DEFAULT(CUDAKernels::GetPWOverlaps_kernel, + m_CollisionsPW.collisions.ActiveCollisionsNum, m_CollisionsPW.collisions.ActivityIndices, m_CollisionsPW.collisions.DstIDs, + m_CollisionsPW.collisions.VirtualShifts, m_CollisionsPW.collisions.ContactVectors, + _particles.Coords, _particles.ContactRadii, + _maxParticleID, overlapsPW.data().get(), flagsPW.data().get()); + + CUDA_REDUCE_CALLER(CUDAKernels::ReduceMax_kernel, collNumberPW, overlapsPW.data().get(), tempPW.data().get(), thrust::device_pointer_cast(&res[1]).get()); + CUDA_REDUCE_CALLER(CUDAKernels::ReduceSum_kernel, collNumberPW, overlapsPW.data().get(), tempPW.data().get(), thrust::device_pointer_cast(&res[3]).get()); + numberPW = thrust::count(flagsPW.begin(), flagsPW.end(), size_t(1)); + } + + // copy to CPU + h_vec_d cpu_res = res; + // calculate results + _maxOverlap = std::max(cpu_res[0], cpu_res[1]); + _avrOverlap = numberPP + numberPW ? (cpu_res[2] + cpu_res[3]) / (numberPP + numberPW) : 0; +} diff --git a/cuda_code/GPUTensor_2.cu b/cuda_code/GPUTensor_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..389107167e69300805653c47610dcbe84af6641e --- /dev/null +++ b/cuda_code/GPUTensor_2.cu @@ -0,0 +1,1003 @@ +// +// Copyright (c) Microsoft. All rights reserved. +// Licensed under the MIT license. See LICENSE.md file in the project root for full license information. +// + +#include "stdafx.h" +#include "Basics.h" +#include "BestGpu.h" + +#ifndef CPUONLY + +#include "GPUTensor.h" +#include "GPUMatrix.h" +#include "GPUMatrixCUDAKernels.cuh" +#include "CommonMatrix.h" +#define TENSOR_OPS_DECL __device__ __host__ +#include "TensorOps.h" +#include +#include +#include "cublas_v2.h" +#include +#include + +#ifndef let +#define let const auto +#endif + +#pragma comment(lib, "cudart.lib") // instruct linker to reference these libs +#pragma comment(lib, "cublas.lib") + +#pragma warning(disable : 4267) // conversion from 'size_t' to 'unsigned int'; happens in CUDA <<>> syntax if a and b are size_t +#pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this +#pragma warning(disable : 4702) // unreachable code; triggered for unknown reasons + +#ifdef _WIN32 +// thread local storage to access the current stream, initalize to default stream +__declspec(thread) +#endif +extern cudaStream_t t_stream; + +namespace Microsoft { namespace MSR { namespace CNTK { + +// ======================================================================= +// TensorView support +// ======================================================================= + +// TensorView computes element-wise tensor operations. +// - supports general strides +// - input broadcasting is supported by stride=0 +// - the operation is denoted by an opCode +// - reduction is supported, including summation, min, max (dual to broadcasting when computing gradients) +// - reduction operation is given by an opCode: opSum, opMin, opMax and opLogSum. +// +// This library makes extensive use of templates and macros. 
+// Specifically, templates are used recursively to recurse over tensor dimensions.
+// For example, a tensor op of rank K is computed by looping over the last dimension
+// and then calling the same function template recursively with K-1.
+// Template specializations exist in order to:
+//  - terminate recursion
+//  - optimize for thread-parallel reduction where elements are consecutive in memory
+//
+// The general algorithm is very straightforward:
+//
+// for all output dimensions [###]:                   // TensorOp()
+//     output[###] *= beta
+//     for all reduction dimensions [***]:            // TensorOpWithReduction()
+//         output[###] += op(input1[###,***], input2[###,***], ...) * alpha
+//
+// Indices and dimensions used throughout this code:
+//  - N = arity; number of arguments *including output* (binary op: N=3)
+//  - K = rank of output elements, regularOpDims.size(). K=0 means scalar.
+//  - k = -1..K-1 = recursion index
+//  - M = reduction rank, reducingOpDims.size(). M=0 means no reduction.
+//  - m = -1..M-1 = recursion index
+//
+// Other frequently used variable names:
+//  - alpha, beta: BLAS-style weights: outVal = beta * outVal + alpha * f(inVals)
+//    where beta=0 is an assignment (0 * outVal := 0, even e.g. if outVal = NaN)
+//  - pointers[N]: pointer to the first element, for each argument
+//  - regularOpDims[K]: tensor dimensions of output elements to produce
+//  - regularStrides[N,K]: strides; multiply index[k] with strides[n,k] to get the element offset for this dimension.
+//    Broadcasting of inputs is implemented by a stride being 0.
+//  - reducingOpDims[M]: tensor dimensions of input elements to reduce over
+//  - reducingStrides[N,M]: strides for input reduction. Always 0 for the output argument.
+//
+// This code uses two custom structs, FixedArray<> and FixedMatrix<>, which
+// are templated equivalents of vector<> and vector<vector<>> for CUDA code.
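As a point of reference for the loop structure sketched above, the following CPU-side fragment spells out the simple case N=3 (binary op), K=1, M=1. It is an illustration only: the function and parameter names below do not occur in this file, and the real implementation runs the same loops on the GPU via the templates that follow.

    #include <cstddef>

    // out[i] = beta * out[i] + alpha * sum_j op(in0[i*s0 + j*r0], in1[i*s1 + j*r1])
    // A stride of 0 broadcasts the corresponding argument along that dimension.
    template <typename ElemType, typename BinaryOp>
    void TensorOpReferenceCPU(ElemType beta, ElemType alpha, BinaryOp op,
                              const ElemType* in0, const ElemType* in1, ElemType* out,
                              std::ptrdiff_t K0,                                         // regularOpDims[0]
                              std::ptrdiff_t s0, std::ptrdiff_t s1, std::ptrdiff_t sOut, // regularStrides[n][0]
                              std::ptrdiff_t M0,                                         // reducingOpDims[0]
                              std::ptrdiff_t r0, std::ptrdiff_t r1)                      // reducingStrides[n][0] (0 for the output)
    {
        for (std::ptrdiff_t i = 0; i < K0; i++)         // loop over output elements [###]
        {
            ElemType aggregate = 0;                     // neutral element of opSum
            for (std::ptrdiff_t j = 0; j < M0; j++)     // loop over reduction dimension [***]
                aggregate += op(in0[i * s0 + j * r0], in1[i * s1 + j * r1]);
            ElemType val = alpha * aggregate;
            if (beta != 0)                              // beta == 0 means plain assignment, even if *out is NaN
                val += beta * out[i * sOut];
            out[i * sOut] = val;
        }
    }

With M0 = 1 and r0 = r1 = 0 this degenerates to a plain element-wise binary op, which is the no-reduction path handled by LaunchTensorOp further below.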
+ +// ----------------------------------------------------------------------- +// simple fixed-size arrays for passing dimension information by value +// since CUDA can't just take our std::array and std::vector +// ----------------------------------------------------------------------- + +template +struct FixedArray +{ + T m_data[N]; + __device__ __host__ size_t size() const + { + return N; + } + __device__ __host__ T& operator[](size_t n) + { + return m_data[n]; + } + __device__ __host__ T operator[](size_t n) const + { + return m_data[n]; + } + template + FixedArray(const VEC& data) // construct from CPU-side STL array or vector + { + assert(data.size() == N); + for (size_t n = 0; n < N; n++) + { + m_data[n] = (T) data[n]; + if (m_data[n] != data[n]) // overflow check + InvalidArgument("FixedArray: Dimensions out of range, too few bits."); + } + } +}; +template // specialized version for 0 elements +struct FixedArray +{ + __device__ __host__ size_t size() const + { + return 0; + } + template + FixedArray(const VEC& data) + { + assert(data.size() == 0); + UNUSED(data); + } + FixedArray() + { + } +}; + +template // N = which input/output; K = index depth +struct FixedMatrix +{ + T m_data[N][K]; + __device__ __host__ size_t getNumRows() const + { + return N; + } + __device__ __host__ size_t getNumCols() const + { + return K; + } + __device__ __host__ T& operator()(size_t n, size_t k) + { + return m_data[n][k]; + } + __device__ __host__ T operator()(size_t n, size_t k) const + { + return m_data[n][k]; + } + template + FixedMatrix(const array, N>& data) // construct from CPU-side array of vectors + { + assert(data.size() == N); + for (size_t n = 0; n < N; n++) + { + assert(data[n].size() == K); + for (size_t k = 0; k < K; k++) + { + m_data[n][k] = (T) data[n][k]; + if (m_data[n][k] != data[n][k]) // overflow check + InvalidArgument("FixedArray: Dimensions out of range, too few bits."); + } + } + } +}; +template // specialized version for 0 elements +struct FixedMatrix +{ + __device__ __host__ size_t getNumRows() const + { + return N; + } + __device__ __host__ size_t getNumCols() const + { + return 0; + } + template + FixedMatrix(const array, N>& data) + { + assert(data.size() == N); + for (size_t n = 0; n < N; n++) + assert(data[n].size() == 0); + UNUSED(data); + } + FixedMatrix() + { + } +}; + +// ----------------------------------------------------------------------- +// function to actually compute a function of (N-1) inputs based on the opcode +// ----------------------------------------------------------------------- + +template +struct TensorOps +{ + static __device__ ElemType Compute(const FixedArray& pointers, ElementWiseOperator op) + { +#define CaseNullaryTensorOp(oper) \ + case ElementWiseOperator::op##oper: \ + return Op##oper() + switch (op) + { + ForAllNullaryOps(CaseNullaryTensorOp); + default: + return OpConstOne(); // (failure--we only have one nullary op, so use the same, maybe it will eliminate the switch altogether) + } + } + static __device__ ElemType Compute(const FixedArray& pointers, ElementWiseOperator op) + { + ElemType a = *(pointers[0]); +#define CaseUnaryTensorOp(oper) \ + case ElementWiseOperator::op##oper: \ + return Op##oper(a) + switch (op) + { + ForAllUnaryOps(CaseUnaryTensorOp); + default: + return 0; // (failure) + } + } + static __device__ ElemType Compute(const FixedArray& pointers, ElementWiseOperator op) + { + // const ElemType & a = *(pointers[0]); // const & for opIndex--costs quite some code bloat + ElemType a = *(pointers[0]); + ElemType b = 
*(pointers[1]); +#define CaseBinaryTensorOp(oper) \ + case ElementWiseOperator::op##oper: \ + return Op##oper(a, b) + switch (op) + { + ForAllBinaryOps(CaseBinaryTensorOp); // note: this costs about 6% compared to having only a single case + default: + return 0; // (failure) + } + } + static __device__ ElemType Compute(const FixedArray& pointers, ElementWiseOperator op) + { +#define CaseTernaryTensorOp(oper) \ + case ElementWiseOperator::op##oper: \ + return Op##oper(*(pointers[0]), *(pointers[1]), *(pointers[2])) // reading each time, which saves mem accesses for OpCond + switch (op) + { + ForAllTernaryOps(CaseTernaryTensorOp); + default: + return 0; // (failure) + } + } +}; + +//---------------------------------------------------------------------------- +// For reductions we need the neutral elements of the corresponding binary ops +//---------------------------------------------------------------------------- +template __device__ ElemType NeutralValue(ElementWiseOperator op) +{ + return 0; // error, only the explicit instantiations below should be used. +}; + +template<> __device__ float NeutralValue(ElementWiseOperator op) +{ + switch (op) + { + case ElementWiseOperator::opSum: return 0; + case ElementWiseOperator::opLogSum: return -INFINITY; + case ElementWiseOperator::opMin: return FLT_MAX; + case ElementWiseOperator::opMax: return FLT_MIN; + default: return 0; // error + } +}; + +template<> __device__ double NeutralValue(ElementWiseOperator op) +{ + switch (op) + { + case ElementWiseOperator::opSum: return 0; + case ElementWiseOperator::opLogSum: return -INFINITY; + case ElementWiseOperator::opMin: return DBL_MAX; + case ElementWiseOperator::opMax: return DBL_MIN; + default: return 0; // error + } +}; + + +// ---------------------------------------------------------------------------- +// Function to update an aggregate value for the specifed reduction operation +// ---------------------------------------------------------------------------- + +template __device__ void UpdateAggregate(ReductionType& aggregate, ElemType val, ElementWiseOperator reductionOp) +{ + switch (reductionOp) + { + case ElementWiseOperator::opSum: + aggregate += val; + break; + case ElementWiseOperator::opLogSum: + aggregate = OpLogSum(aggregate, val); + break; + case ElementWiseOperator::opMin: + if (val < aggregate) + aggregate = val; + break; + case ElementWiseOperator::opMax: + if (val > aggregate) + aggregate = val; + break; + } +}; + + +// ----------------------------------------------------------------------- +// function to compute the value for a given output location (including reduction) +// ----------------------------------------------------------------------- + +//#define ReduceElemType double +#define ReduceElemType ElemType // (note: we could use 'double' here, but that would cause problems with CUDA cards that don't support double) + +template +struct TensorOpReduce +{ + // this version for m >= 0 + static __device__ ElemType Compute(FixedArray pointers, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides) + { + // start with index 0 + // We may use 'double' since we are memory-bound anyway. 
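+        // take slice 0 of dimension m via the recursive call below, then advance the input pointers by reducingStrides(i, m) and fold in the remaining reducingOpDims[m] - 1 slices, recursing into dimension m-1 each time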
+ ReduceElemType aggregate = TensorOpReduce::Compute(pointers, op, reductionOp, reducingOpDims, reducingStrides); + // apply this index to the pointers + C_size_t dim = reducingOpDims[m]; + for (C_size_t k = 1 /*done with k=0 already*/; k < dim; k++) + { + // bump the pointers + for (C_size_t i = 0; i < N - 1; i++) // N-1 because output is not used here + pointers[i] += reducingStrides(i, (C_size_t) m); + ElemType val = TensorOpReduce::Compute(pointers, op, reductionOp, reducingOpDims, reducingStrides); + UpdateAggregate(aggregate, val, reductionOp); + } + return (ElemType) aggregate; + } +}; + +// this one terminates the template recursion over reduction dimensions +// The pointers are pointing to the input element. +template +struct TensorOpReduce +{ + // this version for m = -1 + // the pointers are pointing to the right location(s) to take the operation over + static __device__ ElemType Compute(FixedArray pointers, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& /*reducingOpDims*/, const FixedMatrix& /*reducingStrides*/) + { + return TensorOps::Compute(pointers, op); // finally computing something! + } +}; + +// ----------------------------------------------------------------------- +// function to compute one constituent of the value for a given output location +// (reduction is not done here, but by calling into here multiple times) +// ----------------------------------------------------------------------- + +template +struct TensorOpParallelReduce +{ + // this version for m >= 0 + static __device__ ElemType Compute(CUDA_LONG id, FixedArray pointers, ElementWiseOperator op, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides) + { + // map id (location on grid) to index[k] + C_size_t stride = 1; // compute the stride. This seems expensive, but since we we only currently support M <= 2, this is just compile-time selection between 1 and reducingOpDims[0]. + for (int i = 0; i < m; i++) + stride *= reducingOpDims[(C_size_t) i]; + C_size_t index = id / stride; // this dimension. For m=0, the stride is 1 and hence the division will be removed at compile time. + id = id % stride; // remaining dimensions inside this. For m=0 this value is ignored and hence not even computed. + // apply this index to the pointers + for (C_size_t i = 0; i < N - 1; i++) + pointers[i] += index * reducingStrides(i, (C_size_t) m); // now this dimension is taken care of + return TensorOpParallelReduce::Compute(id, pointers, op, reducingOpDims, reducingStrides); + } +}; + +// this one terminates the template recursion over reduction dimensions +// The pointers are pointing to the input element. +template +struct TensorOpParallelReduce +{ + // this version for m = -1 + // the pointers are pointing to the right location(s) to take the operation over + static __device__ ElemType Compute(CUDA_LONG /*id*/, FixedArray pointers, ElementWiseOperator op, + const FixedArray& /*reducingOpDims*/, const FixedMatrix& /*reducingStrides*/) + { + return TensorOps::Compute(pointers, op); // finally computing something! + } +}; + +// ----------------------------------------------------------------------- +// perform loop over regular index k for N-nary operations (N counting the output) +// ----------------------------------------------------------------------- + +// The 'pointers' only refer to a single element, so we will bump them in-place to perform indexing. 
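+// For example, with K = 3 and output dimensions D0 x D1 x D2, regularOpStrides = { 1, D0, D0*D1 },
+// and a linear thread id decomposes (top-down through the recursion, k = 2..0) as
+//   index[2] = id / (D0*D1); id %= D0*D1;
+//   index[1] = id / D0;      id %= D0;
+//   index[0] = id;
+// with each pointer n then advanced by index[k] * regularStrides(n, k).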
+template +struct TensorOpElement +{ + // template-recursive version loops over indices + static __device__ void Compute(CUDA_LONG id, ElemType beta, FixedArray& pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& regularOpStrides, const FixedMatrix& regularStrides, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides, + CUDA_LONG reductionBegin, CUDA_LONG reductionChunkSize) + { + // map id (location on grid) to index[k] + C_size_t stride = regularOpStrides[(C_size_t) k]; + C_size_t index = id / stride; // this dimension + id = id % stride; // remaining dimensions inside this + // apply this index to the pointers + for (C_size_t i = 0; i < N; i++) + pointers[i] += index * regularStrides(i, (C_size_t) k); // now this dimension is taken care of + // process the previous index + TensorOpElement::Compute(id, beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, reducingOpDims, reducingStrides, reductionBegin, reductionChunkSize); + } +}; + +// specialization for k=0 where op stride is guaranteed to be 1 +template +struct TensorOpElement +{ + // template-recursive version loops over indices + static __device__ void Compute(CUDA_LONG id, ElemType beta, FixedArray& pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& regularOpStrides, const FixedMatrix& regularStrides, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides, + CUDA_LONG reductionBegin, CUDA_LONG reductionChunkSize) + { + // map id (location on grid) to index[k] + C_size_t index = id; // this dimension + // apply this index to the pointers + for (C_size_t i = 0; i < N; i++) + pointers[i] += index * regularStrides(i, 0); // now this dimension is taken care of + // process the previous index + TensorOpElement::Compute(/*id*/ 0, beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, reducingOpDims, reducingStrides, reductionBegin, reductionChunkSize); + } +}; + +// specialization for k = -1 terminates the template recursion, and computes reductions in a for loop +template +struct TensorOpElement +{ + // template-recursion-teminating version computes the actual value for this output location + // now the output pointers point to the right element (input pointers may still iterate for reduction) + static __device__ void Compute(CUDA_LONG /*id*/, ElemType beta, FixedArray& pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& /*regularOpStrides*/, const FixedMatrix& /*regularStrides*/, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides, CUDA_LONG /*reductionBegin*/, CUDA_LONG /*reductionChunkSize*/) + { + // compute the operation for this output coordinate + // This may still involve a reduction over inverse-broadcasting dimensions. 
+ ElemType val = TensorOpReduce::Compute(pointers, op, reductionOp, reducingOpDims, reducingStrides); + // scale + val *= alpha; + // combine with previous value in target matrix, then write it out + if (N < 4 || val != 0 || beta != 1) // (skip memory access if not needed) (N<4: skip this test) + { + auto* pout = pointers[pointers.size() - 1]; + if (beta != 0) // (skip memory access if not needed, and allow for ignoring NaNs) + val += beta * *pout; + // save + *pout = val; + } + } +}; + +#undef ALLOW_ATOMIC_REDUCTION // undefine to disable use of atomicAdd() below, for testing it + +// specialization for k = -1 terminates the template recursion, and computes reductions in parallel +template +struct TensorOpElement +{ + // template-recursion-teminating version computes the actual value for this output location + // now the output pointers point to the right element (input pointers may still iterate for reduction) + static __device__ void Compute(CUDA_LONG /*id*/, ElemType beta, FixedArray& pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& /*regularOpStrides*/, const FixedMatrix& /*regularStrides*/, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides, CUDA_LONG reductionBegin, CUDA_LONG reductionChunkSize) + { + CUDA_LONG reductionBlock = blockIdx.z; // reduction-block index --larger reductions are split into blocks + CUDA_LONG tid = threadIdx.x; // thread index + CUDA_LONG tids = blockDim.x; // out of how many threads --note: last block is partial + + // determine our range --this is a single int mul, we can stomach it (we could alternatively pass in yet another parameter) + CUDA_LONG reductionDim = (CUDA_LONG) reducingOpDims[0]; + for (C_size_t i = 1; i < reducingOpDims.size(); i++) + reductionDim *= reducingOpDims[i]; + + // determine the redId range that we operate on + // Each thread takes a stride tid + (multiples of tids) within this range. + reductionBegin += reductionChunkSize * reductionBlock; + CUDA_LONG reductionEnd = min(reductionBegin + reductionChunkSize, reductionDim); + + // compute the operation for this input coordinate + ReduceElemType aggregate = NeutralValue(reductionOp); + + for (CUDA_LONG redId = reductionBegin + tid; redId < reductionEnd; redId += tids) + { + auto val = TensorOpParallelReduce::Compute(redId, pointers, op, reducingOpDims, reducingStrides); + UpdateAggregate(aggregate, val, reductionOp); + } + + // reduce --cf https://docs.nvidia.com/cuda/samples/6_Advanced/reduction/doc/reduction.pdf + __shared__ ReduceElemType volatile accumulators[GridDim::maxThreadsPerBlock /*tids*/]; + accumulators[tid] = aggregate; + __syncthreads(); + static_assert(GridDim::maxThreadsPerBlock <= 1024, "GridDim::maxThreadsPerBlock too large, need to add manually unrolled steps"); + for (CUDA_LONG i = 512; i; i >>= 1) + { + if (tid < i && tid + i < tids) + UpdateAggregate(accumulators[tid], accumulators[tid + i], reductionOp); + + if (0 + i < tids) + __syncthreads(); // sync if condition true for at least one thread + // TODO: use volatile* and then we can skip the __syncthreads() for the last 32 values. See Amit's allreduce() function implementation in MatrixQuantizer_kernel.cu. 
+ } + + // now set final value to output coordinate + if (tid == 0) + { + ElemType val = (ElemType) accumulators[0]; + // scale + val *= alpha; + // combine with previous value in target matrix, then write it out + if (N < 4 || val != 0 || beta != 1) // (skip memory access if not needed) (N<4: skip this test) + { + auto* pout = pointers[pointers.size() - 1]; +#ifdef ALLOW_ATOMIC_REDUCTION + CUDA_LONG reductionBlocks = gridDim.z; // number of reduction blocks. If >1 we need atomicAdd + if (reductionBlocks > 1) // multiple blocks: need to use atomicAdd() + { + // in this case, outer calling code must pass beta = 1 + atomicAdd(pout, val); + } + else +#endif + { + if (beta != 0) + val += beta * *pout; + // save + *pout = val; + } + } + } + } +}; + +// ----------------------------------------------------------------------- +// kernel and launch --no reduction +// ----------------------------------------------------------------------- + +// launch tensor op with CUDA +template +__global__ void _launchTensorOp(ElemType beta, FixedArray pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + FixedArray regularOpStrides, FixedMatrix regularStrides, CUDA_LONG numElements, + FixedArray reducingOpDims, FixedMatrix reducingStrides) +{ + CUDA_LONG id = GridDim::GetLinearThreadId(); + if (id < numElements) // note: there are no __syncthread() calls inside + TensorOpElement::Compute(id, beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, reducingOpDims, reducingStrides, 0, 0); +} + +template +static void LaunchTensorOp(ElemType beta, array pointerVector, ElemType alpha, ElementWiseOperator op, + const SmallVector& regularOpDims, const array, N>& regularStrideVectors) +{ + // copy all parameters to CUDA-compatible data structures + FixedArray pointers(pointerVector); + SmallVector regularOpStrideVector; // kernel needs the strides for converting thread index back to multi-dimensional tensor index + C_size_t numElements = 1; + for (C_size_t k = 0; k < regularOpDims.size(); k++) + { + regularOpStrideVector.push_back(numElements); + numElements *= (C_size_t) regularOpDims[k]; + } + FixedArray regularOpStrides(regularOpStrideVector); + FixedMatrix regularStrides(regularStrideVectors); + FixedArray reducingOpDims; // empty reduction dimensions + FixedMatrix reducingStrides; + + // launch the kernel + CUDA_LONG NN = (CUDA_LONG) numElements; // linear space identifying each individual input element + SyncGuard syncGuard; + GridDim grid(NN); + _launchTensorOp <<>>(beta, pointers, alpha, op, (ElementWiseOperator)(-1) /* dummy reductionOp */, regularOpStrides, regularStrides, grid.m_N, reducingOpDims, reducingStrides); +} + +// ----------------------------------------------------------------------- +// kernel and launch --with reduction +// ----------------------------------------------------------------------- + +template +__global__ void _launchTensorOpWithReduction(ElemType beta, FixedArray pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + FixedArray regularOpStrides, FixedMatrix regularStrides, CUDA_LONG numElements, + FixedArray reducingOpDims, FixedMatrix reducingStrides, + CUDA_LONG reductionBegin, CUDA_LONG reductionChunkSize) +{ + CUDA_LONG id = gridDim.x * blockIdx.y + blockIdx.x; // input dimensions are Y dimension of blocks in this case, so we can use thread dim for shared-memory/parallelization +#ifndef ALLOW_ATOMIC_REDUCTION + CUDA_LONG reductionBlock = blockIdx.z; // reduction-block index --larger reductions are 
split into blocks + pointers[pointers.size() - 1] += numElements * reductionBlock; // the output tensor is dense (no gaps); and there is one copy for each reduction block (those get further reduced into one later) +#endif + if (id < numElements) // note: we have __syncthread() calls but only entire blocks in sync, so this is OK + TensorOpElement::Compute(id, beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, reducingOpDims, reducingStrides, reductionBegin, reductionChunkSize); +} + +// helper function to provide a reduction buffer +template +static shared_ptr AllocateReductionBuffer(size_t N) +{ + ElemType* deviceBufferPtr; + CUDA_CALL(cudaMalloc((void**)&deviceBufferPtr, sizeof(ElemType) * N)); + return shared_ptr(deviceBufferPtr, [](ElemType* deviceBufferPtr){ cudaFree((void*)deviceBufferPtr); }); +} + +template +static shared_ptr GetReductionBuffer(size_t N) +{ + bool dontCache = false; // (for debugging only) + if (t_stream != 0 || dontCache) // we cache for the NULL stream but don't bother for others, since we only ever use the NULL stream currently + return AllocateReductionBuffer(N); + + static shared_ptr reductionBuffersCache[32]; // cache of objects --TODO: Do we have a #define the the max somewhere? Then also use it in CPUMatrix.cu GetOnesTensor() + static size_t reductionBuffersCacheSize[_countof(reductionBuffersCache)] = { 0 }; + let deviceId = GridDim::GetCurrentDeviceId(); + if (deviceId >= _countof(reductionBuffersCache)) // index check w.r.t. our hard-coded dimensions + return AllocateReductionBuffer(N); // out of bounds: don't cache + if (!reductionBuffersCache[deviceId]) + { + reductionBuffersCache[deviceId] = AllocateReductionBuffer(N); + reductionBuffersCacheSize[deviceId] = N; + } + if (N > reductionBuffersCacheSize[deviceId]) // buffer size check + LogicError("GetReductionBuffer: Must be called with the number of multiprocs, which may not change."); + return reductionBuffersCache[deviceId]; +} + +// All dimensions (N-ariness, number of input dimensions K and number of reduction dimensions M) are bound to template parameters now. 
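+// (For example, the bias-gradient reduction mentioned below --a [512 x T] gradient summed over T into a [512] bias-- arrives here with N = 2 arguments, K = 1 output dimension and M = 1 reduction dimension.)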
+template +static void LaunchTensorOpWithReduction(ElemType beta, array pointerVector, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const SmallVector& regularOpDims, const array, N>& regularStrideVectors, + const SmallVector& reducingOpDimVector, const array, N>& reducingStrideVectors) +{ + // copy all parameters to CUDA-compatible data structures + FixedArray pointers(pointerVector); + SmallVector regularOpStrideVector; // kernel needs the strides for converting thread index back to multi-dimensional tensor index + C_size_t numElements = 1; + for (C_size_t k = 0; k < regularOpDims.size(); k++) + { + regularOpStrideVector.push_back(numElements); // stride for dense representation of our output elements (if they were flattened) + numElements *= (C_size_t) regularOpDims[k]; + } + FixedArray regularOpStrides(regularOpStrideVector); + FixedMatrix regularStrides(regularStrideVectors); + FixedArray reducingOpDims(reducingOpDimVector); + FixedMatrix reducingStrides(reducingStrideVectors); + + // launch the kernel + CUDA_LONG NN = (CUDA_LONG) numElements; // linear space identifying each individual output element + SyncGuard syncGuard; + + // do some optimization for reductions + // - example: 30 GPU procs, warp size 32 --> 960 GPU cores + // - NN elements must be computed, each involving a reduction over reductionDim elements + // Cases: + // - #output elements NN >= GPU cores --> use one proc per element, do reduction in inner loop + // E.g. if >=960 elements are computed, each gets its own GPU thread. + // - reduction dimension would benefit from multiple blocks --> multiple blocks work on a single output element + // E.g. + // - gradient of adding a bias: reducing to a bias, e.g. 512-dim + // - gradient of scalar multiplication: big elementwise product reduced to a scalar (big dot product, e.g. [1024 x 1024] = 1M elements) + // - softmax in seq-2-seq attention model: reduce over length of attention window (e.g. 20) + // - summation of criterion value: scalar reduction over a few hundred or thousand samples in the minibatch + C_size_t reductionDim = 1; // number of elements to reduce over + for (C_size_t k = 0; k < reducingOpDimVector.size(); k++) + reductionDim *= (C_size_t) reducingOpDimVector[k]; + GridDim grid(NN); + let& props = GridDim::GetDeviceProps(); + // === simple case: NN large, one thread per output element + bool disableParallelReduction = false; // (for debugging) + if (reductionDim == 1 || // no reduction + grid.m_blocksPerGrid >= props.multiProcessorCount || // enough output elements to fill all multiprocs + reductionDim * numElements <= 2 * props.warpSize || // trivial operation not worth the trouble (2* because the more complex one also needs 2 kernel launches) + disableParallelReduction || // (for debugging) + reductionDim * numElements <= props.multiProcessorCount) // recursive call from reduction below + { + // we got enough elements to generate: do one element per thread, and reduction inside + _launchTensorOp<<>>( + beta, pointers, alpha, op, reductionOp, + regularOpStrides, regularStrides, grid.m_N, + reducingOpDims, reducingStrides); + } + // === optimization: simple case would not use all multiprocs + else + { + // m_blocksPerGrid can be thought of NN / 512, with appropriate rounding + + // we are reducing and are underutilizing the multiprocs we have: get more parallelism by doing reduction in parallel + // If we get here, then + // - the total number of outputs to produce is < #multiprocs * warpSize, e.g. 
< 960 + // - each output has at least two inputs, but possibly millions + // Examples: + // (a1) NN=900 + // - each multiproc processes multiple elements concurrently, each reducing over its inputs inside + // - use one block per output element + // (a2) NN=30 + // - same as (a1) except 30 multiprocs run only a single block each + // (a3) NN=16 + // - same as (a1) except only 16 multiproc run one block + // (b1) NN=15 + // - 2 blocks work together on a single output element + // (b2) NN=1 (NN < #multiprocs, e.g. NN < 30) + // - multiple blocks work together on a single output element + // - only this case requires memory, and only K * NN + // where K = blocks that work together, + // both K and NN < #multiprocs, + // and K * NN = on the order of NN, but generally a bit larger due to rounding. + + // By how much do we underutilize? + // We increase #blocks by that factor by breaking reduction into that many chunks. + let numReductionChunks = max(props.multiProcessorCount / NN, 1); // only >1 for NN < multiProcessorCount + + // distribute NN over block X and Y + let blockXOverBy = CeilDiv(NN, props.maxGridSize[0]); + let numBlocksX = CeilDiv(NN, blockXOverBy); + let numBlocksY = CeilDiv(NN, numBlocksX); + // while block Z is for multiple blocks working together on a single output element + let numBlocksZ = numReductionChunks; + // Block dim is now: + // - X, Y: such that X*Y covers NN + // - Z: reduction chunks + + // reduction goes into thread dim X + let reductionChunkSize = CeilDiv(reductionDim, numReductionChunks); + let numThreadsX = min(reductionChunkSize, GridDim::maxThreadsPerBlock); // any that's over will be done by looping inside the kernel + + // --- cases (a1) and (a2) + // This involves no reduction across blocks. + if (numReductionChunks == 1) + { + _launchTensorOpWithReduction<<>>( + beta, pointers, alpha, op, reductionOp, + regularOpStrides, regularStrides, NN, + reducingOpDims, reducingStrides, /*reductionBegin*/ 0, reductionChunkSize); + } + // --- case (b) + // Reduction across blocks. This is the difficult one. +#ifndef ALLOW_ATOMIC_REDUCTION // temporarily disabled to ensure it is not causing the non-reproducability + else + { + // we get here if NN <= #multiprocs + assert(NN <= props.multiProcessorCount && numBlocksX == NN && numBlocksY == 1); + // dims are: + // - numBlocksZ = numReductionChunks = how many multiprocs work together to produce one output element + // - numBlocksX = NN = number of output elements + // - numThreadsX = reductionChunkSize clipped to 512; reductionChunkSize > 512 is handled by an inner for loop inside of the kernel + + // we need memory for block outputs of dimension [numBlocksX x numBlocksZ] + // - total elements = NN * Floor(#multiprocs / NN) = <= #multiprocs + let reductionBufferSize = props.multiProcessorCount; + assert(reductionBufferSize >= NN * numBlocksZ); + shared_ptr reductionBuffer = GetReductionBuffer(reductionBufferSize); + + // 'pointers', 'regularOpStrides', and 'regularStrides' are set up to point to the target memory. + // We need to reroute them to point to our reductionBuffer. 
+ // - pointer[N-1] -> replace by reductionBuffer + // - regularStrides -> replace [N-1] by regularOpStrides which already represent the NN elements for a dense memory layout + // - beta -> 0 since we write into temp memory + // - kernel must use block.z as second index into the output buffer; add (block.z * NN) to the pointer + FixedArray pointers1 = pointers; + pointers1[N - 1] = reductionBuffer.get(); + auto regularStrideVectors1 = regularStrideVectors; + for (size_t k = 0; k < regularOpStrides.size(); k++) + regularStrideVectors1[N - 1][k] = (ptrdiff_t)regularOpStrideVector[k]; + FixedMatrix regularStrides1(regularStrideVectors1); + ElemType beta1 = 0; + ElemType alpha1 = 1; + _launchTensorOpWithReduction << > >( + beta1, pointers1, alpha1, op, reductionOp, + regularOpStrides, regularStrides1, NN, + reducingOpDims, reducingStrides, /*reductionBegin*/0, reductionChunkSize); + +#if 1 + // now reduce and redistribute + // Create a new tensor task, and execute it recursively: + // - input = reductionBuffer + // - output = true output + // - op dims/strides = output elements + // - reduce dims/strides = numBlocksZ + // - op = opCopy + array pointerVector2{ reductionBuffer.get(), pointerVector[N - 1] }; + const array, 2> regularStrideVectors2{ regularStrideVectors1[N - 1], regularStrideVectors[N - 1] }; + const array, 2> reducingStrideVectors2{ SmallVector{ NN }, SmallVector{ 0 } }; + const SmallVector reducingOpDimVector2{ (size_t)numReductionChunks }; + LaunchTensorOpWithReduction( + beta, pointerVector2, alpha, ElementWiseOperator::opCopy, reductionOp, + regularOpDims, regularStrideVectors2, + reducingOpDimVector2, reducingStrideVectors2); + // (note: ^^this will have a nested syncGuard, which is fine) + +#else + _launchTensorOp<<>>( + beta, pointers, alpha, op, reductionOp, + regularOpStrides, regularStrides, grid.m_N, + reducingOpDims, reducingStrides); + //for (size_t z = 0; z < numBlocksZ; z++) + // _launchTensorOpWithReduction<<>>(z == 0 ? beta : 1, pointers, alpha, op, + // regularOpStrides, regularStrides, NN, + // reducingOpDims, reducingStrides, reductionChunkSize * z, reductionChunkSize); + vector peekPartial(NN * numBlocksZ, -42); + vector peekFinal(NN, -42); + CUDA_CALL(cudaMemcpy(peekPartial.data(), reductionBuffer, sizeof(ElemType) * peekPartial.size(), cudaMemcpyDeviceToHost)); + CUDA_CALL(cudaMemcpy(peekFinal.data(), pointers[pointers.size()-1], sizeof(ElemType) * peekFinal.size(), cudaMemcpyDeviceToHost)); + double s1 = 0, s2 = 0; + for (auto v : peekPartial) + s1 += v; + for (auto v : peekFinal) + s2 += v; + sin(1.0); +#endif + } +#else + else if (beta == 1) + { + // no need to pre-scale; just add (common for gradients) + _launchTensorOpWithReduction<<>>(beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, NN, reducingOpDims, reducingStrides, 0, reductionChunkSize); + return; + } + else + { + // We need more than one chunk, we will use atomicAdd(). + // First reset/pre-multiply input; then do the remaining chunks using atomicAdd(). + _launchTensorOpWithReduction<<>>(beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, NN, reducingOpDims, reducingStrides, 0, reductionChunkSize); + // We will leave it like this for a while, but eventually need to revisit using temporary memory. 
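+            // the remaining chunks start at reductionBegin = reductionChunkSize and use beta = 1 so that they accumulate onto the result of the first launch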
+ _launchTensorOpWithReduction<<>>(/*beta=*/1, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, NN, reducingOpDims, reducingStrides, reductionChunkSize, reductionChunkSize); + } +#endif + } +} + +// ----------------------------------------------------------------------- +// kernel and launch --linear unary +// ----------------------------------------------------------------------- + +// for linear unary ops, we need to define a functor for every function for use as a template parameter (lambda syntax doesn't work in CUDA 7) +#define DefineUnaryTensorFunctor(oper) \ + struct Functor##oper \ + { \ + template \ + static __device__ ElemType f(ElemType a) \ + { \ + return Op##oper(a); \ + } \ + }; +ForAllUnaryOps(DefineUnaryTensorFunctor); + +// the top-level kernel for linear unary ops +// Note: If we have a beta, we have 2 memory accesses, so this optimization may no longer be needed as we are memory-bound. +template +__global__ void _launchUnaryTensorOp(ElemType beta, const ElemType* pa, ElemType* pb, ElemType alpha, CUDA_LONG numElements) +{ + CUDA_LONG id = GridDim::GetLinearThreadId(); + if (id >= numElements) + return; + ElemType a = pa[id]; + ElemType val = FN::f(a); + val *= alpha; + if (beta != 0) + val += beta * pb[id]; + pb[id] = val; +} +// version without beta and alpha +template +__global__ void _launchUnaryTensorOp(const ElemType* pa, ElemType* pb, CUDA_LONG numElements) +{ + CUDA_LONG id = GridDim::GetLinearThreadId(); + if (id >= numElements) + return; + ElemType a = pa[id]; + ElemType val = FN::f(a); + pb[id] = val; +} + +// special case of linear unary operation +template +void LaunchUnaryTensorOp(ElemType beta, const ElemType* pa, ElemType* pb, ElemType alpha, ElementWiseOperator op, size_t regularOpDim) +{ + CUDA_LONG NN = (CUDA_LONG) regularOpDim; + +#define CaseLaunchUnaryTensorOp(oper) \ + case ElementWiseOperator::op##oper: \ + if (beta == 0 && alpha == 1) \ + _launchUnaryTensorOp<<>>(pa, pb, NN); \ + else \ + _launchUnaryTensorOp<<>>(beta, pa, pb, alpha, NN);\ + break; + + SyncGuard syncGuard; + GridDim grid(NN); + switch (op) + { + ForAllUnaryOps(CaseLaunchUnaryTensorOp); + default: + LogicError("LaunchTensorOp1: Unknown op code %d.", (int) op); + } +} + +// ----------------------------------------------------------------------- +// map runtime parameters N to template parameters +// ----------------------------------------------------------------------- + +// tensor operation with k+1 dimensions (-1 means scalar) +template +static void TensorOpWithRegularLoop(ElemType beta, const array& pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const SmallVector& regularOpDims, const array, N>& regularStrides, + const SmallVector& reducingOpDims, const array, N>& reducingStrides) +{ + size_t dims = reducingOpDims.size(); + switch (dims) + { + case 2: + return LaunchTensorOpWithReduction(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 1: + return LaunchTensorOpWithReduction(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 0: + return LaunchTensorOp(beta, pointers, alpha, op, regularOpDims, regularStrides); + default: + LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (C_int) dims); + } +} + +// tensor operation, generalized in number of arguments +// This function now expands into different k. It also eliminates the offsets by adding them to the pointers. 
+template +void TensorOpN(ElemType beta, array pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, N>& regularStrides, + const SmallVector& reducingOpDims, const array, N>& reducingStrides) +{ + for (C_size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled + pointers[i] += offsets[i]; + size_t dims = regularOpDims.size(); + switch (dims) + { + case 4: + return TensorOpWithRegularLoop(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 3: + return TensorOpWithRegularLoop(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 2: + return TensorOpWithRegularLoop(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 1: + return TensorOpWithRegularLoop(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 0: + return TensorOpWithRegularLoop(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + default: + LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (C_int) dims); + } +} + +//------------------------------------------------------------------------ +// explicit instantiations--these are being called from GPUMatrix.cu +//------------------------------------------------------------------------ + +template void TensorOpN(float beta, array pointers, float alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 2>& regularStrides, + const SmallVector& reducingOpDims, const array, 2>& reducingStrides); +template void TensorOpN(float beta, array pointers, float alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 3>& regularStrides, + const SmallVector& reducingOpDims, const array, 3>& reducingStrides); +template void TensorOpN(float beta, array pointers, float alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 4>& regularStrides, + const SmallVector& reducingOpDims, const array, 4>& reducingStrides); +template void TensorOpN(double beta, array pointers, double alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 2>& regularStrides, + const SmallVector& reducingOpDims, const array, 2>& reducingStrides); +template void TensorOpN(double beta, array pointers, double alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 3>& regularStrides, + const SmallVector& reducingOpDims, const array, 3>& reducingStrides); +template void TensorOpN(double beta, array pointers, double alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 4>& regularStrides, + const SmallVector& reducingOpDims, const array, 4>& reducingStrides); + +template void LaunchUnaryTensorOp(float beta, const float* pa, float* pb, float alpha, ElementWiseOperator op, size_t regularOpDim); +template void LaunchUnaryTensorOp(double beta, const double* pa, double* pb, double alpha, ElementWiseOperator op, size_t regularOpDim); + 
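+// ----------------------------------------------------------------------------
+// Illustrative sketch (not part of the original file): the zero-stride
+// broadcasting convention used throughout this file, boiled down to a
+// self-contained kernel. It computes C[i,j] = A[i,j] + b[i] on a column-major
+// [rows x cols] matrix by giving the vector b a column stride of 0, so every
+// column reads the same data. All names below are ours, not CNTK's.
+// Launch e.g. as:
+//   exampleBroadcastAdd<<<dim3((rows+15)/16, (cols+15)/16), dim3(16,16)>>>
+//       (a, b, c, rows, cols, /*aColStride=*/rows, /*bColStride=*/0, /*cColStride=*/rows);
+// ----------------------------------------------------------------------------
+__global__ void exampleBroadcastAdd(const float* a, const float* b, float* c,
+                                    int rows, int cols,
+                                    int aColStride, int bColStride, int cColStride)
+{
+    int i = blockIdx.x * blockDim.x + threadIdx.x; // row index
+    int j = blockIdx.y * blockDim.y + threadIdx.y; // column index
+    if (i < rows && j < cols)
+        c[i + j * cColStride] = a[i + j * aColStride] + b[i + j * bColStride];
+}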
+}}} + +#endif // CPUONLY diff --git a/cuda_code/GPUTensor_7.cu b/cuda_code/GPUTensor_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..711182c14d25756d3f50da087772640c1d1272d8 --- /dev/null +++ b/cuda_code/GPUTensor_7.cu @@ -0,0 +1,1090 @@ +// +// Copyright (c) Microsoft. All rights reserved. +// Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved +// Licensed under the MIT license. See LICENSE.md file in the project root for full license information. +// + +#include "stdafx.h" +#include "Basics.h" +#include "BestGpu.h" + +#ifndef CPUONLY + +#include "GPUTensor.h" +#include "GPUMatrix.h" +#include "GPUMatrixCUDAKernels.cuh" +#include "CommonMatrix.h" +#define TENSOR_OPS_DECL __device__ __host__ +#include "TensorOps.h" +#include "fast_divmod.h" +#include +#include +#include "cublas_v2.h" +#include +#include + +// use fast divisor +#define USE_FAST_DIVMOD + +#ifndef let +#define let const auto +#endif + +#pragma comment(lib, "cudart.lib") // instruct linker to reference these libs +#pragma comment(lib, "cublas.lib") + +#pragma warning(disable : 4267) // conversion from 'size_t' to 'unsigned int'; happens in CUDA <<>> syntax if a and b are size_t +#pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this +#pragma warning(disable : 4702) // unreachable code; triggered for unknown reasons + +#ifdef _WIN32 +// thread local storage to access the current stream, initalize to default stream +__declspec(thread) +#endif +extern cudaStream_t t_stream; + +namespace Microsoft { namespace MSR { namespace CNTK { + +// ======================================================================= +// TensorView support +// ======================================================================= + +// TensorView computes element-wise tensor operations. +// - supports general strides +// - input broadcasting is supported by stride=0 +// - the operation is denoted by an opCode +// - reduction is supported, including summation, min, max (dual to broadcasting when computing gradients) +// - reduction operation is given by an opCode: opSum, opMin, opMax and opLogSum. +// +// This library makes extensive use of templates and macros. +// Specifically, templates are used recursively to recurse over tensor dimensions. +// For example, a tensor op of rank K is computed by looping over the last dimension +// and then calling the same function template recursively with K-1. +// Template specializations exist in order to: +// - terminate recursion +// - optimize for thread-parallel reduction where elements are consecutive in memory +// +// The general algorithm is very straight forward: +// +// for all output dimensions [###]: // TensorOp() +// output[###] *= beta +// for all reduction dimensions [***]: // TensorOpWithReduction() +// output[###] += op(input1[###,***], input1[###,***], ...) * alpha +// +// Indices and dimensions used throughout this code: +// - N = ariness; number of arguments *including output* (binary op: N=3) +// - K = rank of output elements, regularOpDims.size(). K=0 means scalar. +// - k = -1..K-1 = recursion index +// - M = reduction rank, reducingOpDims.size(). M=0 means no reduction. +// - m = -1..M-1 = recursion index +// +// Other frequently used variable names: +// - alpha, beta: BLAS-style weights: outVal = beta * outVal + alpha * f(inVals) +// where beta=0 is an assignment (0 * outVal := 0, even e.g. 
if outVal = NaN) +// - pointers[N]: pointer to first element, for each argument +// - regularOpDims[K]: tensor dimensions of output elements to produce +// - regularStrides[N,K]: strides; multiply index[k] with strides[n,k] to get element offset for this dimension +// Broadcasting of inputs is implemented by a stride being 0. +// - reducingOpDims[M]: tensor dimensions of input elements to reduce over +// - reducingStrides[N,M]: strides for input reduction. Always 0 for output argument. +// +// This code uses two custom structs, FixedArray<> and FixedMatrix<>, which +// are templated equivalents to vector<> and vector> for CUDA code. + +// ----------------------------------------------------------------------- +// simple fixed-size arrays for passing dimension information by value +// since CUDA can't just take our std::array and std::vector +// ----------------------------------------------------------------------- + +template +struct FixedArray +{ + T m_data[N]; + __device__ __host__ size_t size() const + { + return N; + } + __device__ __host__ T& operator[](size_t n) + { + return m_data[n]; + } + __device__ __host__ T operator[](size_t n) const + { + return m_data[n]; + } + template + FixedArray(const VEC& data) // construct from CPU-side STL array or vector + { + assert(data.size() == N); + for (size_t n = 0; n < N; n++) + { + m_data[n] = (T) data[n]; + if (m_data[n] != data[n]) // overflow check + InvalidArgument("FixedArray: Dimensions out of range, too few bits."); + } + } +}; +template // specialized version for 0 elements +struct FixedArray +{ + __device__ __host__ size_t size() const + { + return 0; + } + template + FixedArray(const VEC& data) + { + assert(data.size() == 0); + UNUSED(data); + } + FixedArray() + { + } +}; + +template // N = which input/output; K = index depth +struct FixedMatrix +{ + T m_data[N][K]; + __device__ __host__ size_t getNumRows() const + { + return N; + } + __device__ __host__ size_t getNumCols() const + { + return K; + } + __device__ __host__ T& operator()(size_t n, size_t k) + { + return m_data[n][k]; + } + __device__ __host__ T operator()(size_t n, size_t k) const + { + return m_data[n][k]; + } + template + FixedMatrix(const array, N>& data) // construct from CPU-side array of vectors + { + assert(data.size() == N); + for (size_t n = 0; n < N; n++) + { + assert(data[n].size() == K); + for (size_t k = 0; k < K; k++) + { + m_data[n][k] = (T) data[n][k]; + if (m_data[n][k] != data[n][k]) // overflow check + InvalidArgument("FixedArray: Dimensions out of range, too few bits."); + } + } + } +}; +template // specialized version for 0 elements +struct FixedMatrix +{ + __device__ __host__ size_t getNumRows() const + { + return N; + } + __device__ __host__ size_t getNumCols() const + { + return 0; + } + template + FixedMatrix(const array, N>& data) + { + assert(data.size() == N); + for (size_t n = 0; n < N; n++) + assert(data[n].size() == 0); + UNUSED(data); + } + FixedMatrix() + { + } +}; + +// ----------------------------------------------------------------------- +// function to actually compute a function of (N-1) inputs based on the opcode +// ----------------------------------------------------------------------- + +template +struct TensorOps +{ + static __device__ ElemType Compute(const FixedArray& pointers, ElementWiseOperator op) + { +#define CaseNullaryTensorOp(oper) \ + case ElementWiseOperator::op##oper: \ + return Op##oper() + switch (op) + { + ForAllNullaryOps(CaseNullaryTensorOp); + default: + return OpConstOne(); // (failure--we only have one 
nullary op, so use the same, maybe it will eliminate the switch altogether) + } + } + static __device__ ElemType Compute(const FixedArray& pointers, ElementWiseOperator op) + { + ElemType a = *(pointers[0]); +#define CaseUnaryTensorOp(oper) \ + case ElementWiseOperator::op##oper: \ + return Op##oper(a) + switch (op) + { + ForAllUnaryOps(CaseUnaryTensorOp); + default: + return 0; // (failure) + } + } + static __device__ ElemType Compute(const FixedArray& pointers, ElementWiseOperator op) + { + // const ElemType & a = *(pointers[0]); // const & for opIndex--costs quite some code bloat + ElemType a = *(pointers[0]); + ElemType b = *(pointers[1]); +#define CaseBinaryTensorOp(oper) \ + case ElementWiseOperator::op##oper: \ + return Op##oper(a, b) + switch (op) + { + ForAllBinaryOps(CaseBinaryTensorOp); // note: this costs about 6% compared to having only a single case + default: + return 0; // (failure) + } + } + static __device__ ElemType Compute(const FixedArray& pointers, ElementWiseOperator op) + { +#define CaseTernaryTensorOp(oper) \ + case ElementWiseOperator::op##oper: \ + return Op##oper(*(pointers[0]), *(pointers[1]), *(pointers[2])) // reading each time, which saves mem accesses for OpCond + switch (op) + { + ForAllTernaryOps(CaseTernaryTensorOp); + default: + return 0; // (failure) + } + } +}; + +//---------------------------------------------------------------------------- +// For reductions we need the neutral elements of the corresponding binary ops +//---------------------------------------------------------------------------- + +// NeutralValue seems to be dead code +template __device__ ElemType NeutralValue(ElementWiseOperator op) +{ + return 0; // error, only the explicit instantiations below should be used. +}; + +template<> __device__ float NeutralValue(ElementWiseOperator op) +{ + switch (op) + { + case ElementWiseOperator::opSum: return 0; + case ElementWiseOperator::opLogSum: return -INFINITY; + case ElementWiseOperator::opMin: return FLT_MAX; + case ElementWiseOperator::opMax: return -FLT_MAX; + case ElementWiseOperator::opElementwiseProduct: return 1.0f; + default: return 0; // error + } +}; + +template<> __device__ double NeutralValue(ElementWiseOperator op) +{ + switch (op) + { + case ElementWiseOperator::opSum: return 0; + case ElementWiseOperator::opLogSum: return -INFINITY; + case ElementWiseOperator::opMin: return DBL_MAX; + case ElementWiseOperator::opMax: return -DBL_MAX; + case ElementWiseOperator::opElementwiseProduct: return 1.0; + default: return 0; // error + } +}; + + +// ---------------------------------------------------------------------------- +// Function to update an aggregate value for the specifed reduction operation +// ---------------------------------------------------------------------------- + +template __device__ void UpdateAggregate(ReductionType& aggregate, ElemType val, ElementWiseOperator reductionOp) +{ + switch (reductionOp) + { + case ElementWiseOperator::opSum: + aggregate += val; + break; + case ElementWiseOperator::opLogSum: + aggregate = OpLogSum(aggregate, val); + break; + case ElementWiseOperator::opMin: + if (val < aggregate) + aggregate = val; + break; + case ElementWiseOperator::opMax: + if (val > aggregate) + aggregate = val; + break; + case ElementWiseOperator::opElementwiseProduct: + aggregate *= val; + break; + } +}; + + +// ----------------------------------------------------------------------- +// function to compute the value for a given output location (including reduction) +// 
----------------------------------------------------------------------- + +//#define ReduceElemType double +#define ReduceElemType ElemType // (note: we could use 'double' here, but that would cause problems with CUDA cards that don't support double) + +template +struct TensorOpReduce +{ + // this version for m >= 0 + static __device__ ElemType Compute(FixedArray pointers, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides) + { + // start with index 0 + // We may use 'double' since we are memory-bound anyway. + ReduceElemType aggregate = TensorOpReduce::Compute(pointers, op, reductionOp, reducingOpDims, reducingStrides); + // apply this index to the pointers + C_size_t dim = reducingOpDims[m]; + for (C_size_t k = 1 /*done with k=0 already*/; k < dim; k++) + { + // bump the pointers + #pragma unroll + for (C_size_t i = 0; i < N - 1; i++) { // N-1 because output is not used here + pointers[i] += reducingStrides(i, (C_size_t) m); + } + ElemType val = TensorOpReduce::Compute(pointers, op, reductionOp, reducingOpDims, reducingStrides); + UpdateAggregate(aggregate, val, reductionOp); + } + return (ElemType) aggregate; + } +}; + +// this one terminates the template recursion over reduction dimensions +// The pointers are pointing to the input element. +template +struct TensorOpReduce +{ + // this version for m = -1 + // the pointers are pointing to the right location(s) to take the operation over + static __device__ ElemType Compute(FixedArray pointers, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& /*reducingOpDims*/, const FixedMatrix& /*reducingStrides*/) + { + return TensorOps::Compute(pointers, op); // finally computing something! + } +}; + +// ----------------------------------------------------------------------- +// function to compute one constituent of the value for a given output location +// (reduction is not done here, but by calling into here multiple times) +// ----------------------------------------------------------------------- + +template +struct TensorOpParallelReduce +{ + // this version for m >= 0 + static __device__ ElemType Compute(CUDA_LONG id, FixedArray pointers, ElementWiseOperator op, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides, + FixedArray reducingOpDimDivmod) + { + // map id (location on grid) to index[k] + C_size_t stride = 1; // compute the stride. This seems expensive, but since we we only currently support M <= 2, this is just compile-time selection between 1 and reducingOpDims[0]. + #pragma unroll + for (int i = 0; i < m; i++) { + stride *= reducingOpDims[(C_size_t) i]; + } + + C_size_t index; +#ifndef USE_FAST_DIVMOD + index = id / stride; // this dimension. For m=0, the stride is 1 and hence the division will be removed at compile time. + // id = id % stride; // remaining dimensions inside this. For m=0 this value is ignored and hence not even computed. + id = id - stride*index; // remaining dimensions inside this. For m=0 this value is ignored and hence not even computed. 
+#else + if (m == 0) { + index = id; + id = 0; + } else { + reducingOpDimDivmod[m].divmod(id, index, id); + } +#endif + // apply this index to the pointers + #pragma unroll + for (C_size_t i = 0; i < N - 1; i++) { + pointers[i] += index * reducingStrides(i, (C_size_t) m); // now this dimension is taken care of + } + return TensorOpParallelReduce::Compute(id, pointers, op, reducingOpDims, reducingStrides, reducingOpDimDivmod); + } +}; + +// this one terminates the template recursion over reduction dimensions +// The pointers are pointing to the input element. +template +struct TensorOpParallelReduce +{ + // this version for m = -1 + // the pointers are pointing to the right location(s) to take the operation over + static __device__ ElemType Compute(CUDA_LONG /*id*/, FixedArray pointers, ElementWiseOperator op, + const FixedArray& /*reducingOpDims*/, const FixedMatrix& /*reducingStrides*/, + FixedArray reducingOpDimDivmod) + { + return TensorOps::Compute(pointers, op); // finally computing something! + } +}; + +// ----------------------------------------------------------------------- +// perform loop over regular index k for N-nary operations (N counting the output) +// ----------------------------------------------------------------------- + +// The 'pointers' only refer to a single element, so we will bump them in-place to perform indexing. +template +struct TensorOpElement +{ + // template-recursive version loops over indices + static __device__ void Compute(CUDA_LONG id, ElemType beta, FixedArray& pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& regularOpStrides, const FixedMatrix& regularStrides, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides, + CUDA_LONG reductionBegin, CUDA_LONG reductionChunkSize, + FixedArray regularOpStrideDivmod, FixedArray reducingOpDimDivmod) + { + // map id (location on grid) to index[k] +#ifndef USE_FAST_DIVMOD + C_size_t stride = regularOpStrides[(C_size_t) k]; + C_size_t index = id / stride; // this dimension + // id = id % stride; // remaining dimensions inside this + id = id - stride*index; // remaining dimensions inside this +#else + C_size_t index; + regularOpStrideDivmod[k].divmod(id, index, id); +#endif + // apply this index to the pointers + #pragma unroll + for (C_size_t i = 0; i < N; i++) { + pointers[i] += index * regularStrides(i, (C_size_t) k); // now this dimension is taken care of + } + // process the previous index + TensorOpElement::Compute(id, beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, reducingOpDims, reducingStrides, reductionBegin, reductionChunkSize, + regularOpStrideDivmod, reducingOpDimDivmod); + } +}; + +// specialization for k=0 where op stride is guaranteed to be 1 +template +struct TensorOpElement +{ + // template-recursive version loops over indices + static __device__ void Compute(CUDA_LONG id, ElemType beta, FixedArray& pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& regularOpStrides, const FixedMatrix& regularStrides, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides, + CUDA_LONG reductionBegin, CUDA_LONG reductionChunkSize, + FixedArray regularOpStrideDivmod, FixedArray reducingOpDimDivmod) + { + // map id (location on grid) to index[k] + C_size_t index = id; // this dimension + // apply this index to the pointers + #pragma unroll + for (C_size_t i = 0; i < N; i++) { + pointers[i] += index * regularStrides(i, 0); // now this dimension is taken 
care of + } + // process the previous index + TensorOpElement::Compute(/*id*/ 0, beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, reducingOpDims, reducingStrides, reductionBegin, reductionChunkSize, + regularOpStrideDivmod, reducingOpDimDivmod); + } +}; + +// specialization for k = -1 terminates the template recursion, and computes reductions in a for loop +template +struct TensorOpElement +{ + // template-recursion-teminating version computes the actual value for this output location + // now the output pointers point to the right element (input pointers may still iterate for reduction) + static __device__ void Compute(CUDA_LONG /*id*/, ElemType beta, FixedArray& pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& /*regularOpStrides*/, const FixedMatrix& /*regularStrides*/, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides, CUDA_LONG /*reductionBegin*/, CUDA_LONG /*reductionChunkSize*/, + FixedArray regularOpStrideDivmod, FixedArray reducingOpDimDivmod) + { + // compute the operation for this output coordinate + // This may still involve a reduction over inverse-broadcasting dimensions. + ElemType val = TensorOpReduce::Compute(pointers, op, reductionOp, reducingOpDims, reducingStrides); + // scale + val *= alpha; + // combine with previous value in target matrix, then write it out + if (N < 4 || val != 0 || beta != 1) // (skip memory access if not needed) (N<4: skip this test) + { + auto* pout = pointers[pointers.size() - 1]; + if (beta != 0) // (skip memory access if not needed, and allow for ignoring NaNs) + val += beta * *pout; + // save + *pout = val; + } + } +}; + +#undef ALLOW_ATOMIC_REDUCTION // undefine to disable use of atomicAdd() below, for testing it + +// specialization for k = -1 terminates the template recursion, and computes reductions in parallel +template +struct TensorOpElement +{ + // template-recursion-teminating version computes the actual value for this output location + // now the output pointers point to the right element (input pointers may still iterate for reduction) + static __device__ void Compute(CUDA_LONG /*id*/, ElemType beta, FixedArray& pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const FixedArray& /*regularOpStrides*/, const FixedMatrix& /*regularStrides*/, + const FixedArray& reducingOpDims, const FixedMatrix& reducingStrides, CUDA_LONG reductionBegin, CUDA_LONG reductionChunkSize, + FixedArray regularOpStrideDivmod, FixedArray reducingOpDimDivmod) + { + CUDA_LONG reductionBlock = blockIdx.z; // reduction-block index --larger reductions are split into blocks + CUDA_LONG tid = threadIdx.x; // thread index + CUDA_LONG tids = blockDim.x; // out of how many threads --note: last block is partial + + // determine our range --this is a single int mul, we can stomach it (we could alternatively pass in yet another parameter) + CUDA_LONG reductionDim = (CUDA_LONG) reducingOpDims[0]; + for (C_size_t i = 1; i < reducingOpDims.size(); i++) + reductionDim *= reducingOpDims[i]; + + // determine the redId range that we operate on + // Each thread takes a stride tid + (multiples of tids) within this range. 
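+ // (Worked example, not in the original: with reductionChunkSize = 2048 and
+ //  tids = 512 threads, thread 3 of reduction block 0 visits redId 3, 515,
+ //  1027 and 1539, accumulating them into its private aggregate before the
+ //  shared-memory tree reduction below combines the 512 per-thread values.)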
+ reductionBegin += reductionChunkSize * reductionBlock; + CUDA_LONG reductionEnd = min(reductionBegin + reductionChunkSize, reductionDim); + + // compute the operation for this input coordinate + ReduceElemType aggregate = NeutralValue(reductionOp); + + for (CUDA_LONG redId = reductionBegin + tid; redId < reductionEnd; redId += tids) + { + auto val = TensorOpParallelReduce::Compute(redId, pointers, op, reducingOpDims, reducingStrides, reducingOpDimDivmod); + UpdateAggregate(aggregate, val, reductionOp); + } + + // reduce --cf https://docs.nvidia.com/cuda/samples/6_Advanced/reduction/doc/reduction.pdf + __shared__ ReduceElemType volatile accumulators[GridDim::maxThreadsPerBlock /*tids*/]; + accumulators[tid] = aggregate; + __syncthreads(); + static_assert(GridDim::maxThreadsPerBlock <= 1024, "GridDim::maxThreadsPerBlock too large, need to add manually unrolled steps"); + for (CUDA_LONG i = 512; i; i >>= 1) + { + if (tid < i && tid + i < tids) + UpdateAggregate(accumulators[tid], accumulators[tid + i], reductionOp); + + if (0 + i < tids) + __syncthreads(); // sync if condition true for at least one thread + // TODO: use volatile* and then we can skip the __syncthreads() for the last 32 values. See Amit's allreduce() function implementation in MatrixQuantizer_kernel.cu. + } + + // now set final value to output coordinate + if (tid == 0) + { + ElemType val = (ElemType) accumulators[0]; + // scale + val *= alpha; + // combine with previous value in target matrix, then write it out + if (N < 4 || val != 0 || beta != 1) // (skip memory access if not needed) (N<4: skip this test) + { + auto* pout = pointers[pointers.size() - 1]; +#ifdef ALLOW_ATOMIC_REDUCTION + CUDA_LONG reductionBlocks = gridDim.z; // number of reduction blocks. If >1 we need atomicAdd + if (reductionBlocks > 1) // multiple blocks: need to use atomicAdd() + { + // in this case, outer calling code must pass beta = 1 + atomicAdd(pout, val); + } + else +#endif + { + if (beta != 0) + val += beta * *pout; + // save + *pout = val; + } + } + } + } +}; + +// ----------------------------------------------------------------------- +// kernel and launch --no reduction +// ----------------------------------------------------------------------- + +// launch tensor op with CUDA +template +__global__ void _launchTensorOp(ElemType beta, FixedArray pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + FixedArray regularOpStrides, FixedMatrix regularStrides, CUDA_LONG numElements, + FixedArray reducingOpDims, FixedMatrix reducingStrides, + FixedArray regularOpStrideDivmod, FixedArray reducingOpDimDivmod) +{ + CUDA_LONG id = GridDim::GetLinearThreadId(); + if (id < numElements) // note: there are no __syncthread() calls inside + TensorOpElement::Compute(id, beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, reducingOpDims, reducingStrides, 0, 0, + regularOpStrideDivmod, reducingOpDimDivmod); +} + +template +static void LaunchTensorOp(ElemType beta, array pointerVector, ElemType alpha, ElementWiseOperator op, + const SmallVector& regularOpDims, const array, N>& regularStrideVectors) +{ + // copy all parameters to CUDA-compatible data structures + FixedArray pointers(pointerVector); + SmallVector regularOpStrideVector; // kernel needs the strides for converting thread index back to multi-dimensional tensor index + C_size_t numElements = 1; + // input divisors + SmallVector regularOpStrideDivmodVector; + for (C_size_t k = 0; k < regularOpDims.size(); k++) + { + 
regularOpStrideVector.push_back(numElements); + // create fast division objects + regularOpStrideDivmodVector.push_back(fast_divmod(numElements)); + numElements *= (C_size_t) regularOpDims[k]; + } + + SmallVector reducingOpDimDivmodVector; + + FixedArray regularOpStrides(regularOpStrideVector); + FixedMatrix regularStrides(regularStrideVectors); + FixedArray reducingOpDims; // empty reduction dimensions + FixedMatrix reducingStrides; + // reduced divisors + FixedArray regularOpStrideDivmod(regularOpStrideDivmodVector); + FixedArray reducingOpDimDivmod; + + // launch the kernel + CUDA_LONG NN = (CUDA_LONG) numElements; // linear space identifying each individual input element + SyncGuard syncGuard; + GridDim grid(NN); + _launchTensorOp <<>>(beta, pointers, alpha, op, (ElementWiseOperator)(-1) /* dummy reductionOp */, regularOpStrides, regularStrides, + grid.m_N, reducingOpDims, reducingStrides, + regularOpStrideDivmod, reducingOpDimDivmod); +} + +// ----------------------------------------------------------------------- +// kernel and launch --with reduction +// ----------------------------------------------------------------------- + +template +__global__ void _launchTensorOpWithReduction(ElemType beta, FixedArray pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + FixedArray regularOpStrides, FixedMatrix regularStrides, CUDA_LONG numElements, + FixedArray reducingOpDims, FixedMatrix reducingStrides, + CUDA_LONG reductionBegin, CUDA_LONG reductionChunkSize, + FixedArray regularOpStrideDivmod, FixedArray reducingOpDimDivmod) +{ + CUDA_LONG id = gridDim.x * blockIdx.y + blockIdx.x; // input dimensions are Y dimension of blocks in this case, so we can use thread dim for shared-memory/parallelization +#ifndef ALLOW_ATOMIC_REDUCTION + CUDA_LONG reductionBlock = blockIdx.z; // reduction-block index --larger reductions are split into blocks + pointers[pointers.size() - 1] += numElements * reductionBlock; // the output tensor is dense (no gaps); and there is one copy for each reduction block (those get further reduced into one later) +#endif + if (id < numElements) { // note: we have __syncthread() calls but only entire blocks in sync, so this is OK + TensorOpElement::Compute(id, beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, reducingOpDims, reducingStrides, reductionBegin, reductionChunkSize, + regularOpStrideDivmod, reducingOpDimDivmod); + } +} + +// helper function to provide a reduction buffer +template +static shared_ptr AllocateReductionBuffer(size_t N) +{ + ElemType* deviceBufferPtr; + CUDA_CALL(cudaMalloc((void**)&deviceBufferPtr, sizeof(ElemType) * N)); + return shared_ptr(deviceBufferPtr, [](ElemType* deviceBufferPtr){ cudaFree((void*)deviceBufferPtr); }); +} + +template +static shared_ptr GetReductionBuffer(size_t N) +{ + bool dontCache = false; // (for debugging only) + if (t_stream != 0 || dontCache) // we cache for the NULL stream but don't bother for others, since we only ever use the NULL stream currently + return AllocateReductionBuffer(N); + + static shared_ptr reductionBuffersCache[32]; // cache of objects --TODO: Do we have a #define the the max somewhere? Then also use it in CPUMatrix.cu GetOnesTensor() + static size_t reductionBuffersCacheSize[_countof(reductionBuffersCache)] = { 0 }; + let deviceId = GridDim::GetCurrentDeviceId(); + if (deviceId >= _countof(reductionBuffersCache)) // index check w.r.t. 
our hard-coded dimensions + return AllocateReductionBuffer(N); // out of bounds: don't cache + if (!reductionBuffersCache[deviceId]) + { + reductionBuffersCache[deviceId] = AllocateReductionBuffer(N); + reductionBuffersCacheSize[deviceId] = N; + } + if (N > reductionBuffersCacheSize[deviceId]) // buffer size check + LogicError("GetReductionBuffer: Must be called with the number of multiprocs, which may not change."); + return reductionBuffersCache[deviceId]; +} + +// All dimensions (N-ariness, number of input dimensions K and number of reduction dimensions M) are bound to template parameters now. +template +static void LaunchTensorOpWithReduction(ElemType beta, array pointerVector, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const SmallVector& regularOpDims, const array, N>& regularStrideVectors, + const SmallVector& reducingOpDimVector, const array, N>& reducingStrideVectors) +{ + // copy all parameters to CUDA-compatible data structures + FixedArray pointers(pointerVector); + SmallVector regularOpStrideVector; // kernel needs the strides for converting thread index back to multi-dimensional tensor index + C_size_t numElements = 1; + // input divisors + SmallVector regularOpStrideDivmodVector; + for (C_size_t k = 0; k < regularOpDims.size(); k++) + { + regularOpStrideVector.push_back(numElements); // stride for dense representation of our output elements (if they were flattened) + regularOpStrideDivmodVector.push_back(fast_divmod((unsigned int)numElements)); + numElements *= (C_size_t) regularOpDims[k]; + } + // output divisors + SmallVector reducingOpDimDivmodVector; + C_size_t stride = 1; + for (C_size_t k = 0; k < reducingOpDimVector.size(); ++k) { + reducingOpDimDivmodVector.push_back(fast_divmod(stride)); + stride *= (C_size_t) reducingOpDimVector[k]; + } + + FixedArray regularOpStrides(regularOpStrideVector); + FixedMatrix regularStrides(regularStrideVectors); + FixedArray reducingOpDims(reducingOpDimVector); + FixedMatrix reducingStrides(reducingStrideVectors); + // reduced divisors + FixedArray regularOpStrideDivmod(regularOpStrideDivmodVector); + FixedArray reducingOpDimDivmod(reducingOpDimDivmodVector); + + // launch the kernel + CUDA_LONG NN = (CUDA_LONG) numElements; // linear space identifying each individual output element + SyncGuard syncGuard; + + // do some optimization for reductions + // - example: 30 GPU procs, warp size 32 --> 960 GPU cores + // - NN elements must be computed, each involving a reduction over reductionDim elements + // Cases: + // - #output elements NN >= GPU cores --> use one proc per element, do reduction in inner loop + // E.g. if >=960 elements are computed, each gets its own GPU thread. + // - reduction dimension would benefit from multiple blocks --> multiple blocks work on a single output element + // E.g. + // - gradient of adding a bias: reducing to a bias, e.g. 512-dim + // - gradient of scalar multiplication: big elementwise product reduced to a scalar (big dot product, e.g. [1024 x 1024] = 1M elements) + // - softmax in seq-2-seq attention model: reduce over length of attention window (e.g. 
20) + // - summation of criterion value: scalar reduction over a few hundred or thousand samples in the minibatch + C_size_t reductionDim = 1; // number of elements to reduce over + for (C_size_t k = 0; k < reducingOpDimVector.size(); k++) + reductionDim *= (C_size_t) reducingOpDimVector[k]; + GridDim grid(NN); + let& props = GridDim::GetDeviceProps(); + // === simple case: NN large, one thread per output element + bool disableParallelReduction = false; // (for debugging) + if (reductionDim == 1 || // no reduction + grid.m_blocksPerGrid >= props.multiProcessorCount || // enough output elements to fill all multiprocs + reductionDim * numElements <= 2 * props.warpSize || // trivial operation not worth the trouble (2* because the more complex one also needs 2 kernel launches) + disableParallelReduction || // (for debugging) + reductionDim * numElements <= props.multiProcessorCount) // recursive call from reduction below + { + // we got enough elements to generate: do one element per thread, and reduction inside + _launchTensorOp<<>>( + beta, pointers, alpha, op, reductionOp, + regularOpStrides, regularStrides, grid.m_N, + reducingOpDims, reducingStrides, + regularOpStrideDivmod, reducingOpDimDivmod); + } + // === optimization: simple case would not use all multiprocs + else + { + // m_blocksPerGrid can be thought of NN / 512, with appropriate rounding + + // we are reducing and are underutilizing the multiprocs we have: get more parallelism by doing reduction in parallel + // If we get here, then + // - the total number of outputs to produce is < #multiprocs * warpSize, e.g. < 960 + // - each output has at least two inputs, but possibly millions + // Examples: + // (a1) NN=900 + // - each multiproc processes multiple elements concurrently, each reducing over its inputs inside + // - use one block per output element + // (a2) NN=30 + // - same as (a1) except 30 multiprocs run only a single block each + // (a3) NN=16 + // - same as (a1) except only 16 multiproc run one block + // (b1) NN=15 + // - 2 blocks work together on a single output element + // (b2) NN=1 (NN < #multiprocs, e.g. NN < 30) + // - multiple blocks work together on a single output element + // - only this case requires memory, and only K * NN + // where K = blocks that work together, + // both K and NN < #multiprocs, + // and K * NN = on the order of NN, but generally a bit larger due to rounding. + + // By how much do we underutilize? + // We increase #blocks by that factor by breaking reduction into that many chunks. + let numReductionChunks = max(props.multiProcessorCount / NN, 1); // only >1 for NN < multiProcessorCount + + // distribute NN over block X and Y + let blockXOverBy = CeilDiv(NN, props.maxGridSize[0]); + let numBlocksX = CeilDiv(NN, blockXOverBy); + let numBlocksY = CeilDiv(NN, numBlocksX); + // while block Z is for multiple blocks working together on a single output element + let numBlocksZ = numReductionChunks; + // Block dim is now: + // - X, Y: such that X*Y covers NN + // - Z: reduction chunks + + // reduction goes into thread dim X + let reductionChunkSize = CeilDiv(reductionDim, numReductionChunks); + let numThreadsX = min(reductionChunkSize, GridDim::maxThreadsPerBlock); // any that's over will be done by looping inside the kernel + + // --- cases (a1) and (a2) + // This involves no reduction across blocks. 
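+ // (Worked example, not in the original: on a GPU with 30 multiprocessors,
+ //  NN = 512 output elements gives numReductionChunks = max(30/512, 1) = 1, so we
+ //  stay in this branch and each output element gets its own block; NN = 4 gives
+ //  numReductionChunks = 30/4 = 7 and falls through to case (b) below, where 7
+ //  blocks cooperate on each of the 4 outputs and reductionChunkSize becomes
+ //  CeilDiv(reductionDim, 7).)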
+ if (numReductionChunks == 1) + { + _launchTensorOpWithReduction<<>>( + beta, pointers, alpha, op, reductionOp, + regularOpStrides, regularStrides, NN, + reducingOpDims, reducingStrides, /*reductionBegin*/ 0, reductionChunkSize, + regularOpStrideDivmod, reducingOpDimDivmod); + } + // --- case (b) + // Reduction across blocks. This is the difficult one. +#ifndef ALLOW_ATOMIC_REDUCTION // temporarily disabled to ensure it is not causing the non-reproducability + else + { + // we get here if NN <= #multiprocs + assert(NN <= props.multiProcessorCount && numBlocksX == NN && numBlocksY == 1); + // dims are: + // - numBlocksZ = numReductionChunks = how many multiprocs work together to produce one output element + // - numBlocksX = NN = number of output elements + // - numThreadsX = reductionChunkSize clipped to 512; reductionChunkSize > 512 is handled by an inner for loop inside of the kernel + + // we need memory for block outputs of dimension [numBlocksX x numBlocksZ] + // - total elements = NN * Floor(#multiprocs / NN) = <= #multiprocs + let reductionBufferSize = props.multiProcessorCount; + assert(reductionBufferSize >= NN * numBlocksZ); + shared_ptr reductionBuffer = GetReductionBuffer(reductionBufferSize); + + // 'pointers', 'regularOpStrides', and 'regularStrides' are set up to point to the target memory. + // We need to reroute them to point to our reductionBuffer. + // - pointer[N-1] -> replace by reductionBuffer + // - regularStrides -> replace [N-1] by regularOpStrides which already represent the NN elements for a dense memory layout + // - beta -> 0 since we write into temp memory + // - kernel must use block.z as second index into the output buffer; add (block.z * NN) to the pointer + FixedArray pointers1 = pointers; + pointers1[N - 1] = reductionBuffer.get(); + auto regularStrideVectors1 = regularStrideVectors; + for (size_t k = 0; k < regularOpStrides.size(); k++) + regularStrideVectors1[N - 1][k] = (ptrdiff_t)regularOpStrideVector[k]; + FixedMatrix regularStrides1(regularStrideVectors1); + ElemType beta1 = 0; + ElemType alpha1 = 1; + _launchTensorOpWithReduction << > >( + beta1, pointers1, alpha1, op, reductionOp, + regularOpStrides, regularStrides1, NN, + reducingOpDims, reducingStrides, /*reductionBegin*/0, reductionChunkSize, + regularOpStrideDivmod, reducingOpDimDivmod); + +#if 1 + // now reduce and redistribute + // Create a new tensor task, and execute it recursively: + // - input = reductionBuffer + // - output = true output + // - op dims/strides = output elements + // - reduce dims/strides = numBlocksZ + // - op = opCopy + array pointerVector2{ reductionBuffer.get(), pointerVector[N - 1] }; + const array, 2> regularStrideVectors2{ regularStrideVectors1[N - 1], regularStrideVectors[N - 1] }; + const array, 2> reducingStrideVectors2{ SmallVector{ NN }, SmallVector{ 0 } }; + const SmallVector reducingOpDimVector2{ (size_t)numReductionChunks }; + LaunchTensorOpWithReduction( + beta, pointerVector2, alpha, ElementWiseOperator::opCopy, reductionOp, + regularOpDims, regularStrideVectors2, + reducingOpDimVector2, reducingStrideVectors2); + + // (note: ^^this will have a nested syncGuard, which is fine) + +#else + _launchTensorOp<<>>( + beta, pointers, alpha, op, reductionOp, + regularOpStrides, regularStrides, grid.m_N, + reducingOpDims, reducingStrides); + //for (size_t z = 0; z < numBlocksZ; z++) + // _launchTensorOpWithReduction<<>>(z == 0 ? 
beta : 1, pointers, alpha, op, + // regularOpStrides, regularStrides, NN, + // reducingOpDims, reducingStrides, reductionChunkSize * z, reductionChunkSize); + vector peekPartial(NN * numBlocksZ, -42); + vector peekFinal(NN, -42); + CUDA_CALL(cudaMemcpy(peekPartial.data(), reductionBuffer, sizeof(ElemType) * peekPartial.size(), cudaMemcpyDeviceToHost)); + CUDA_CALL(cudaMemcpy(peekFinal.data(), pointers[pointers.size()-1], sizeof(ElemType) * peekFinal.size(), cudaMemcpyDeviceToHost)); + double s1 = 0, s2 = 0; + for (auto v : peekPartial) + s1 += v; + for (auto v : peekFinal) + s2 += v; + sin(1.0); +#endif + } +#else + else if (beta == 1) + { + // no need to pre-scale; just add (common for gradients) + _launchTensorOpWithReduction<<>>(beta, pointers, alpha, op, reductionOp, regularOpStrides, + regularStrides, NN, reducingOpDims, reducingStrides, 0, reductionChunkSize, + regularOpStrideDivmod, reducingOpDimDivmod); + return; + } + else + { + // We need more than one chunk, we will use atomicAdd(). + // First reset/pre-multiply input; then do the remaining chunks using atomicAdd(). + _launchTensorOpWithReduction<<>>(beta, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, NN, reducingOpDims, reducingStrides, 0, reductionChunkSize, + regularOpStrideDivmod, reducingOpDimDivmod); + // We will leave it like this for a while, but eventually need to revisit using temporary memory. + _launchTensorOpWithReduction<<>>(/*beta=*/1, pointers, alpha, op, reductionOp, regularOpStrides, regularStrides, NN, reducingOpDims, reducingStrides, reductionChunkSize, reductionChunkSize, + regularOpStrideDivmod, reducingOpDimDivmod); + } +#endif + } +} + +// ----------------------------------------------------------------------- +// kernel and launch --linear unary +// ----------------------------------------------------------------------- + +// for linear unary ops, we need to define a functor for every function for use as a template parameter (lambda syntax doesn't work in CUDA 7) +#define DefineUnaryTensorFunctor(oper) \ + struct Functor##oper \ + { \ + template \ + static __device__ ElemType f(ElemType a) \ + { \ + return Op##oper(a); \ + } \ + }; +ForAllUnaryOps(DefineUnaryTensorFunctor); + +// the top-level kernel for linear unary ops +// Note: If we have a beta, we have 2 memory accesses, so this optimization may no longer be needed as we are memory-bound. 
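+// (Clarifying note, not in the original: per element the kernel below computes
+//  pb[id] = alpha * FN::f(pa[id]) + beta * pb[id], and skips the read of pb[id]
+//  entirely when beta == 0; the second overload is the beta == 0, alpha == 1 fast path.)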
+template +__global__ void _launchUnaryTensorOp(ElemType beta, const ElemType* pa, ElemType* pb, ElemType alpha, CUDA_LONG numElements) +{ + CUDA_LONG id = GridDim::GetLinearThreadId(); + if (id >= numElements) + return; + ElemType a = pa[id]; + ElemType val = FN::f(a); + val *= alpha; + if (beta != 0) + val += beta * pb[id]; + pb[id] = val; +} +// version without beta and alpha +template +__global__ void _launchUnaryTensorOp(const ElemType* pa, ElemType* pb, CUDA_LONG numElements) +{ + CUDA_LONG id = GridDim::GetLinearThreadId(); + if (id >= numElements) + return; + ElemType a = pa[id]; + ElemType val = FN::f(a); + pb[id] = val; +} + +// special case of linear unary operation +template +void LaunchUnaryTensorOp(ElemType beta, const ElemType* pa, ElemType* pb, ElemType alpha, ElementWiseOperator op, size_t regularOpDim) +{ + CUDA_LONG NN = (CUDA_LONG) regularOpDim; + +#define CaseLaunchUnaryTensorOp(oper) \ + case ElementWiseOperator::op##oper: \ + if (beta == 0 && alpha == 1) \ + _launchUnaryTensorOp<<>>(pa, pb, NN); \ + else \ + _launchUnaryTensorOp<<>>(beta, pa, pb, alpha, NN);\ + break; + + SyncGuard syncGuard; + GridDim grid(NN); + switch (op) + { + ForAllUnaryOps(CaseLaunchUnaryTensorOp); + default: + LogicError("LaunchTensorOp1: Unknown op code %d.", (int) op); + } +} + +// ----------------------------------------------------------------------- +// map runtime parameters N to template parameters +// ----------------------------------------------------------------------- + +// tensor operation with k+1 dimensions (-1 means scalar) +template +static void TensorOpWithRegularLoop(ElemType beta, const array& pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const SmallVector& regularOpDims, const array, N>& regularStrides, + const SmallVector& reducingOpDims, const array, N>& reducingStrides) +{ + size_t dims = reducingOpDims.size(); + switch (dims) + { + case 2: + return LaunchTensorOpWithReduction(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 1: + return LaunchTensorOpWithReduction(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 0: + return LaunchTensorOp(beta, pointers, alpha, op, regularOpDims, regularStrides); + default: + LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (C_int) dims); + } +} + +// tensor operation, generalized in number of arguments +// This function now expands into different k. It also eliminates the offsets by adding them to the pointers. 
+template +void TensorOpN(ElemType beta, array pointers, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, N>& regularStrides, + const SmallVector& reducingOpDims, const array, N>& reducingStrides) +{ + for (C_size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled + pointers[i] += offsets[i]; + size_t dims = regularOpDims.size(); + switch (dims) + { + case 4: + return TensorOpWithRegularLoop(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 3: + return TensorOpWithRegularLoop(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 2: + return TensorOpWithRegularLoop(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 1: + return TensorOpWithRegularLoop(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + case 0: + return TensorOpWithRegularLoop(beta, pointers, alpha, op, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); + default: + LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (C_int) dims); + } +} + +//------------------------------------------------------------------------ +// explicit instantiations--these are being called from GPUMatrix.cu +//------------------------------------------------------------------------ + +template void TensorOpN(float beta, array pointers, float alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 2>& regularStrides, + const SmallVector& reducingOpDims, const array, 2>& reducingStrides); +template void TensorOpN(float beta, array pointers, float alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 3>& regularStrides, + const SmallVector& reducingOpDims, const array, 3>& reducingStrides); +template void TensorOpN(float beta, array pointers, float alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 4>& regularStrides, + const SmallVector& reducingOpDims, const array, 4>& reducingStrides); +template void TensorOpN(double beta, array pointers, double alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 2>& regularStrides, + const SmallVector& reducingOpDims, const array, 2>& reducingStrides); +template void TensorOpN(double beta, array pointers, double alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 3>& regularStrides, + const SmallVector& reducingOpDims, const array, 3>& reducingStrides); +template void TensorOpN(double beta, array pointers, double alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, + const array& offsets, + const SmallVector& regularOpDims, const array, 4>& regularStrides, + const SmallVector& reducingOpDims, const array, 4>& reducingStrides); + +template void LaunchUnaryTensorOp(float beta, const float* pa, float* pb, float alpha, ElementWiseOperator op, size_t regularOpDim); +template void LaunchUnaryTensorOp(double beta, const double* pa, double* pb, double alpha, ElementWiseOperator op, size_t regularOpDim); + 
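+// ----------------------------------------------------------------------------
+// Illustrative sketch (not part of the original file): the two-pass reduction
+// scheme used in case (b) of LaunchTensorOpWithReduction above, boiled down to a
+// self-contained sum reduction. Pass 1 lets each block reduce its slice of the
+// input into one slot of a small temp buffer (no atomics); pass 2 folds the
+// per-block partials into the final value and applies alpha/beta exactly once.
+// All names below are ours, not CNTK's. Launch pass 1 with a power-of-two block
+// size <= 256, e.g.:
+//   examplePartialSum<<<numBlocks, 256>>>(in, partial, n);
+//   exampleFinalSum<<<1, 1>>>(partial, out, numBlocks, beta, alpha);
+// ----------------------------------------------------------------------------
+__global__ void examplePartialSum(const float* in, float* partial, int n)
+{
+    __shared__ float acc[256];
+    int tid = threadIdx.x;
+    float sum = 0;
+    // grid-stride loop: each block accumulates a strided slice of the input
+    for (int i = blockIdx.x * blockDim.x + tid; i < n; i += blockDim.x * gridDim.x)
+        sum += in[i];
+    acc[tid] = sum;
+    __syncthreads();
+    // shared-memory tree reduction over the threads of this block
+    for (int s = blockDim.x / 2; s > 0; s >>= 1)
+    {
+        if (tid < s)
+            acc[tid] += acc[tid + s];
+        __syncthreads();
+    }
+    if (tid == 0)
+        partial[blockIdx.x] = acc[0]; // one partial sum per block
+}
+__global__ void exampleFinalSum(const float* partial, float* out, int numPartials, float beta, float alpha)
+{
+    if (threadIdx.x != 0 || blockIdx.x != 0)
+        return;
+    float sum = 0;
+    for (int i = 0; i < numPartials; i++)
+        sum += partial[i];
+    *out = alpha * sum + (beta != 0 ? beta * *out : 0.0f); // beta/alpha applied only in the final pass
+}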
+}}} + +#endif // CPUONLY diff --git a/cuda_code/GameOfLifeLogic.cu b/cuda_code/GameOfLifeLogic.cu new file mode 100644 index 0000000000000000000000000000000000000000..9f7bd7ef2ce4464a4a7c8415569b6949287db2e1 --- /dev/null +++ b/cuda_code/GameOfLifeLogic.cu @@ -0,0 +1,92 @@ +extern "C" +__global__ void calculate(int width, int height, int *board, int *board_result) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + int size = width * height; + if (i < size) + { + int cell_width = i % width; + int cell_height = i / width; + int cell_state = board[width*cell_height+cell_width]; + + int alive_cells = 0; + for(int x=-1; x<=1; x++){ + for(int y=-1; y<=1; y++){ + if(x==0 && y==0) + continue; + int neighbour_cell = width*(cell_height+y)+cell_width+x; + if(board[neighbour_cell] != 0) + alive_cells++; + } + } + + if(cell_state != 0 && (alive_cells == 2 || alive_cells == 3)){ + board_result[i] = board[width*cell_height+cell_width]; + } + + else if(alive_cells == 3 && cell_state == 0){ + int red_count = 0; + int green_count = 0; + int blue_count = 0; + int yellow_count = 0; + int max_count = 0; + int max_color = 0; + + for(int x=-1; x<=1; x++){ + for(int y=-1; y<=1; y++){ + if(x==0 && y==0) + continue; + int neighbour_cell = width*(cell_height+y)+cell_width+x; + int color_neighbour_cell = board[neighbour_cell]; + if(color_neighbour_cell == 2){ + red_count++; + if(max_count < red_count){ + max_count = red_count; + max_color = color_neighbour_cell; + } + } + else if(color_neighbour_cell == 3){ + green_count++; + if(max_count < green_count){ + max_count = green_count; + max_color = color_neighbour_cell; + } + } + else if(color_neighbour_cell == 4){ + blue_count++; + if(max_count < blue_count){ + max_count = blue_count; + max_color = color_neighbour_cell; + } + } + else if(color_neighbour_cell == 5){ + yellow_count++; + if(max_count < yellow_count){ + max_count = yellow_count; + max_color = color_neighbour_cell; + } + } + + } + } + int zero_color = 0; + if(red_count == 0) + zero_color = 2; + if(green_count == 0) + zero_color = 3; + if(blue_count == 0) + zero_color = 4; + if(yellow_count == 0) + zero_color = 5; + + if(max_count != 1) + board_result[i] = max_color; + else + board_result[i] = zero_color; + } + else{ + board_result[i] = 0; + } + } + +} diff --git a/cuda_code/Gather_4.cu b/cuda_code/Gather_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..cb4627cd3b664db18f8581ee158ddabc48c2d944 --- /dev/null +++ b/cuda_code/Gather_4.cu @@ -0,0 +1,189 @@ +/* NiuTrans.Tensor - an open-source tensor library + * Copyright (C) 2017, Natural Language Processing Lab, Northestern University. + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * $Created by: Xu Chen (email: hello_master1954@163.com) 2018-11-27 + */ + +#include "Gather.cuh" +#include "CopyBlocksSelected.cuh" +#include "../../XDevice.h" +#include "../../XUtility.h" + +namespace nts { // namespace nts(NiuTrans.Tensor) + +#ifdef USE_CUDA + +/* +gather indexed sub-tensors(cuda version) + +>> sData - the data pointer of the source tensor +>> tData - the data pointer of the target tensor +>> sIndex - the index of the source tensor +>> indexSize - the size of the srcIndex +>> stride - stride of a data block +*/ +__global__ +void KernelGather(DTYPE * sData, DTYPE * tData, int * sIndex, int indexSize, int stride) +{ + __shared__ DTYPE * sp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + __shared__ DTYPE * tp[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + + /* block id */ + int i = blockDim.x * blockIdx.x + threadIdx.x; + + /* offset in each block */ + int offset = blockDim.y * blockIdx.y + threadIdx.y; + + if(i >= indexSize || offset >= stride) + return; + + if(threadIdx.y == 0){ + sp[threadIdx.x] = sData + sIndex[i] * stride; + tp[threadIdx.x] = tData + i * stride; + } + + __syncthreads(); + + DTYPE * s = sp[threadIdx.x]; + DTYPE * t = tp[threadIdx.x]; + + t[offset] = s[offset]; +} + +/* +gather indexed sub-tensors(cuda version) + +>> sData - the data pointer of the source tensor +>> tData - the data pointer of the target tensor +>> sIndex - the index of the source tensor +>> indexSize - the size of the srcIndex +>> stride - stride of a data block +>> strideNum - strideNum of a data block +>> blockNum - block size of data +*/ +__global__ +void KernelGather(DTYPE * sData, DTYPE * tData, int * sIndex, int stride, int strideNum, int blockNum) +{ + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int idy = blockDim.y * blockIdx.y + threadIdx.y; + int blockIndex = idy / stride; + int offsetInBlock = idy % stride; + + int size = stride * strideNum * blockNum; + +#pragma unroll + for (int i = idx * stride + stride * strideNum * blockIndex + offsetInBlock; + i < stride * strideNum * blockIndex + offsetInBlock + stride * strideNum && i < size; + i += stride * blockDim.x) { + tData[i] = sData[sIndex[i]]; + } +} + +/* +gather indexed sub-tensors(cuda version) + +>> s - the source tensor +>> t - the target tensor +>> srcIndex - the tensor to save the index of the source tensor +*/ +void _CudaGather(const XTensor * s, XTensor * t, XTensor * srcIndex) +{ + int devID = s->devID; + XMem * mem = s->mem; + + int stride = s->GetDim(1); + int indexSize = srcIndex->unitNum; + + int cudaGrids[3]; + int cudaBlocks[3]; + + int devIDBackup; + ProtectCudaDev(devID, devIDBackup); + + GDevs.GetCudaThread2D(devID, indexSize, stride, MAX_INT, cudaGrids, cudaBlocks); + + dim3 blocks(cudaGrids[0], cudaGrids[1]); + dim3 threads(cudaBlocks[0], cudaBlocks[1]); + + DTYPE * sData = (DTYPE*)s->data; + DTYPE * tData = (DTYPE*)t->data; + + int * sIndex = NULL; + + if (srcIndex->devID < 0) { + sIndex = mem != NULL ? 
+ (int*)mem->AllocBuf(mem->devID, sizeof(int) * indexSize) : + (int*)XMemAlloc(mem->devID, sizeof(int) * indexSize); + XMemCopy(sIndex, devID, srcIndex, -1, sizeof(int) * indexSize); + } + else + sIndex = (int *)srcIndex->data; + + KernelGather<<>>(sData, tData, sIndex, indexSize, stride); + + if (srcIndex->devID < 0) { + if(mem != NULL) + mem->ReleaseBuf(mem->devID, sizeof(int) * indexSize); + else + XMemFree(mem->devID, sIndex); + } + + BacktoCudaDev(devID, devIDBackup); +} + +/* +gather indexed sub-tensors(cuda version) + +>> s - the source tensor +>> t - the target tensor +>> srcIndex - the tensor to save the index of the source tensor +>> dim - the leading dimension to define "sub-tensors" +*/ +void _CudaGather(const XTensor * s, XTensor * t, XTensor * srcIndex, int dim) +{ + int devID = srcIndex->devID; + XMem * mem = s->mem; + + int stride = 1; + int blockNum = 1; + int indexSize = srcIndex->unitNum; + int strideNum = srcIndex->dimSize[dim]; + for (int i = 0; i < dim; i++) + blockNum *= srcIndex->dimSize[i]; + for (int i = dim + 1; i < srcIndex->order; i++) + stride *= srcIndex->dimSize[i]; + + int * sIndex = NULL; + if (srcIndex->devID < 0) { + sIndex = mem != NULL ? + (int*)mem->AllocBuf(mem->devID, sizeof(int) * indexSize) : + (int*)XMemAlloc(mem->devID, sizeof(int) * indexSize); + XMemCopy(sIndex, devID, srcIndex, -1, sizeof(int) * indexSize); + } + else + sIndex = (int *)srcIndex->data; + + int cudaGrids[3]; + int cudaBlocks[3]; + GDevs.GetCudaThread2D(devID, max(32, strideNum), stride*blockNum, MAX_INT, cudaGrids, cudaBlocks); + + KernelGather << > > ((DTYPE *)s->data, (DTYPE *)t->data, sIndex, stride, strideNum, blockNum); +} +#endif // USE_CUDA + +} // namespace nts(NiuTrans.Tensor) \ No newline at end of file diff --git a/cuda_code/GcdLcmKernel_5.cu b/cuda_code/GcdLcmKernel_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..cc5b9cba208ce12b3157c03baff53c23180188f4 --- /dev/null +++ b/cuda_code/GcdLcmKernel_5.cu @@ -0,0 +1,46 @@ +#define TORCH_ASSERT_NO_OPERATORS +#include +#include +#include +#include +#include +#include +#include + +// NOTE: CUDA on Windows requires that the enclosing function +// of a __device__ lambda not have internal linkage. + +namespace at { namespace native { + +// See note [Jiterator] +const char gcd_name[] = "gcd"; +void gcd_kernel_cuda(TensorIteratorBase& iter) { + #ifdef USE_JITERATOR + AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_cuda", [&]() { + jitted_gpu_kernel(iter, gcd_string); + }); + #else + AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "gcd_cuda", [&]() { + gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t { + return calc_gcd(a, b); + }); + }); + #endif // USE_JITERATOR +} + +void lcm_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "lcm_cuda", [&]() { + gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t { + scalar_t g = calc_gcd(a, b); + return (g == 0) ? 
0 : ::abs(a / g * b); + }); + }); +} + +REGISTER_DISPATCH(gcd_stub, &gcd_kernel_cuda); +REGISTER_DISPATCH(lcm_stub, &lcm_kernel_cuda); + +}} // namespace at::native diff --git a/cuda_code/GeantTrack.cu b/cuda_code/GeantTrack.cu new file mode 100644 index 0000000000000000000000000000000000000000..0662bfad469777b856fda341cf271eb3fcf9891a --- /dev/null +++ b/cuda_code/GeantTrack.cu @@ -0,0 +1,41 @@ + +#include "GeantCudaUtils.h" +#include "backend/cuda/Interface.h" + +#include "Geant/CoprocessorBrokerKernel.h" +#include "GeantTaskData.h" +#include "GeantTrack.h" + +namespace geant { + +inline namespace cuda { +template void MakeInstanceArrayAt(GeantTaskData *addr, size_t nElements, size_t sizeOf, size_t, int, GeantPropagator *); + +template void MakeInstanceAt(GeantTrack_v *addr, unsigned int, int); + +__global__ void Clear(GeantTrack_v *tracks) +{ + tracks->Clear(); +} + +int Clear_gpu(vecgeom::cxx::DevicePtr &tracks, int blocksPerGrid, int threadsPerBlock, + cudaStream_t stream) +{ + Clear<<>>(tracks); + GEANT_CUDA_ERROR(cudaGetLastError()); + return 1; +} + +} // cuda +} // Geant + +namespace vecgeom { +namespace cxx { +template void DevicePtr::Construct() const; +template size_t DevicePtr::SizeOf(); +template void DevicePtr::Construct(int) const; +template size_t DevicePtr::SizeOf(); +template size_t DevicePtr::SizeOf(); +template size_t DevicePtr::SizeOf(); +} // cxx +} // vecgeom diff --git a/cuda_code/Genz2_8D_2.cu b/cuda_code/Genz2_8D_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..ebcc67502b32b148ba29df501efd65857221cc33 --- /dev/null +++ b/cuda_code/Genz2_8D_2.cu @@ -0,0 +1,55 @@ +#include "demo_utils.cuh" +#include "function.cuh" +#include +#include +#include +#include +#include + +using namespace quad; + +namespace detail { + class GENZ_3_8D { + public: + __device__ __host__ double + operator()(double x, + double y, + double z, + double w, + double v, + double u, + double t, + double s) + { + return pow(1 + 8 * s + 7 * t + 6 * u + 5 * v + 4 * w + 3 * x + 2 * y + z, + -9) / + 2.2751965817917756076e-10; + } + }; +} + +int +main() +{ + double epsrel = 2.56e-09; // 1.e-3; // starting error tolerance. + double const epsrel_min = 1.0e-13; + double true_value = 3.015702399795044e+17; + GENZ_2_8D integrand; + constexpr int ndim = 8; + Config configuration; + configuration.outfileVerbosity = 0; + // configuration.heuristicID = 0; + PrintHeader(); + + while (cu_time_and_call("GENZ_2_8D", + integrand, + epsrel, + true_value, + "gpucuhre", + std::cout, + configuration) == true && + epsrel > epsrel_min) { + epsrel /= 5.0; + break; + } +} diff --git a/cuda_code/GpuIndexFlat_1.cu b/cuda_code/GpuIndexFlat_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..442c633563cfecb349137a00fc6769f95f0d7bc0 --- /dev/null +++ b/cuda_code/GpuIndexFlat_1.cu @@ -0,0 +1,597 @@ +/** + * Copyright (c) 2015-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD+Patents license found in the + * LICENSE file in the root directory of this source tree. + */ + +// Copyright 2004-present Facebook. All Rights Reserved. 
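+
+// Illustrative usage sketch for the flat GPU index defined below (a minimal
+// example, assuming the StandardGpuResources helper shipped with this
+// library; distances/labels are caller-allocated host buffers):
+//
+//   faiss::gpu::StandardGpuResources res;          // streams + scratch memory
+//   faiss::gpu::GpuIndexFlatConfig cfg;            // device, float16 options, ...
+//   faiss::gpu::GpuIndexFlatL2 index(&res, d, cfg);
+//   index.add(n, xb);                              // xb: n * d row-major floats
+//   index.search(nq, xq, k, distances, labels);    // k nearest neighbours per query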
+ +#include "GpuIndexFlat.h" +#include "../IndexFlat.h" +#include "GpuResources.h" +#include "impl/FlatIndex.cuh" +#include "utils/CopyUtils.cuh" +#include "utils/DeviceUtils.h" +#include "utils/Float16.cuh" +#include "utils/StaticUtils.h" + +#include +#include +#include + +namespace faiss { namespace gpu { + +/// Default CPU search size for which we use paged copies +constexpr size_t kMinPageSize = (size_t) 256 * 1024 * 1024; + +/// Size above which we page copies from the CPU to GPU (non-paged +/// memory usage) +constexpr size_t kNonPinnedPageSize = (size_t) 256 * 1024 * 1024; + +GpuIndexFlat::GpuIndexFlat(GpuResources* resources, + const faiss::IndexFlat* index, + GpuIndexFlatConfig config) : + GpuIndex(resources, index->d, index->metric_type, config), + minPagedSize_(kMinPageSize), + config_(config), + data_(nullptr) { + verifySettings_(); + + // Flat index doesn't need training + this->is_trained = true; + + copyFrom(index); +} + +GpuIndexFlat::GpuIndexFlat(GpuResources* resources, + int dims, + faiss::MetricType metric, + GpuIndexFlatConfig config) : + GpuIndex(resources, dims, metric, config), + minPagedSize_(kMinPageSize), + config_(config), + data_(nullptr) { + verifySettings_(); + + // Flat index doesn't need training + this->is_trained = true; + + // Construct index + DeviceScope scope(device_); + data_ = new FlatIndex(resources, + dims, + metric == faiss::METRIC_L2, + config_.useFloat16, + config_.useFloat16Accumulator, + config_.storeTransposed, + memorySpace_); +} + +GpuIndexFlat::~GpuIndexFlat() { + delete data_; +} + +void +GpuIndexFlat::setMinPagingSize(size_t size) { + minPagedSize_ = size; +} + +size_t +GpuIndexFlat::getMinPagingSize() const { + return minPagedSize_; +} + +void +GpuIndexFlat::copyFrom(const faiss::IndexFlat* index) { + DeviceScope scope(device_); + + this->d = index->d; + this->metric_type = index->metric_type; + + // GPU code has 32 bit indices + FAISS_THROW_IF_NOT_FMT(index->ntotal <= + (faiss::Index::idx_t) std::numeric_limits::max(), + "GPU index only supports up to %zu indices; " + "attempting to copy CPU index with %zu parameters", + (size_t) std::numeric_limits::max(), + (size_t) index->ntotal); + this->ntotal = index->ntotal; + + delete data_; + data_ = new FlatIndex(resources_, + this->d, + index->metric_type == faiss::METRIC_L2, + config_.useFloat16, + config_.useFloat16Accumulator, + config_.storeTransposed, + memorySpace_); + + // The index could be empty + if (index->ntotal > 0) { + data_->add(index->xb.data(), + index->ntotal, + resources_->getDefaultStream(device_)); + } +} + +void +GpuIndexFlat::copyTo(faiss::IndexFlat* index) const { + DeviceScope scope(device_); + + index->d = this->d; + index->ntotal = this->ntotal; + index->metric_type = this->metric_type; + + FAISS_ASSERT(data_->getSize() == this->ntotal); + index->xb.resize(this->ntotal * this->d); + + auto stream = resources_->getDefaultStream(device_); + + if (this->ntotal > 0) { + if (config_.useFloat16) { + auto vecFloat32 = data_->getVectorsFloat32Copy(stream); + fromDevice(vecFloat32, index->xb.data(), stream); + } else { + fromDevice(data_->getVectorsFloat32Ref(), index->xb.data(), stream); + } + } +} + +size_t +GpuIndexFlat::getNumVecs() const { + return this->ntotal; +} + +void +GpuIndexFlat::reset() { + DeviceScope scope(device_); + + // Free the underlying memory + data_->reset(); + this->ntotal = 0; +} + +void +GpuIndexFlat::train(Index::idx_t n, const float* x) { + // nothing to do +} + +void +GpuIndexFlat::add(Index::idx_t n, const float* x) { + DeviceScope 
scope(device_); + + // To avoid multiple re-allocations, ensure we have enough storage + // available + data_->reserve(n, resources_->getDefaultStream(device_)); + + // If we're not operating in float16 mode, we don't need the input + // data to be resident on our device; we can add directly. + if (!config_.useFloat16) { + addImpl_(n, x, nullptr); + } else { + // Otherwise, perform the paging + GpuIndex::add(n, x); + } +} + +void +GpuIndexFlat::addImpl_(Index::idx_t n, + const float* x, + const Index::idx_t* ids) { + // Device is already set in GpuIndex::addInternal_ + + // We do not support add_with_ids + FAISS_THROW_IF_NOT_MSG(!ids, "add_with_ids not supported"); + FAISS_THROW_IF_NOT(n > 0); + + // Due to GPU indexing in int32, we can't store more than this + // number of vectors on a GPU + FAISS_THROW_IF_NOT_FMT(this->ntotal + n <= + (faiss::Index::idx_t) std::numeric_limits::max(), + "GPU index only supports up to %zu indices", + (size_t) std::numeric_limits::max()); + + data_->add(x, n, resources_->getDefaultStream(device_)); + this->ntotal += n; +} + +struct IntToLong { + __device__ long operator()(int v) const { return (long) v; } +}; + +void +GpuIndexFlat::search(faiss::Index::idx_t n, + const float* x, + faiss::Index::idx_t k, + float* distances, + faiss::Index::idx_t* labels) const { + if (n == 0) { + return; + } + + // For now, only support <= max int results + FAISS_THROW_IF_NOT_FMT(n <= + (faiss::Index::idx_t) std::numeric_limits::max(), + "GPU index only supports up to %zu indices", + (size_t) std::numeric_limits::max()); + FAISS_THROW_IF_NOT_FMT(k <= 1024, + "GPU only supports k <= 1024 (requested %d)", + (int) k); // select limitation + + DeviceScope scope(device_); + auto stream = resources_->getDefaultStream(device_); + + // The input vectors may be too large for the GPU, but we still + // assume that the output distances and labels are not. + // Go ahead and make space for output distances and labels on the + // GPU. + // If we reach a point where all inputs are too big, we can add + // another level of tiling. + auto outDistances = toDevice(resources_, + device_, + distances, + stream, + {(int) n, (int) k}); + + // FlatIndex only supports an interface returning int indices + DeviceTensor outIntIndices( + resources_->getMemoryManagerCurrentDevice(), + {(int) n, (int) k}, stream); + + bool usePaged = false; + + if (getDeviceForAddress(x) == -1) { + // It is possible that the user is querying for a vector set size + // `x` that won't fit on the GPU. + // In this case, we will have to handle paging of the data from CPU + // -> GPU. + // Currently, we don't handle the case where the output data won't + // fit on the GPU (e.g., n * k is too large for the GPU memory). 
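+    // The decision below is purely size-based: a query batch whose host-side
+    // footprint (n * d * sizeof(float)) reaches minPagedSize_ is streamed
+    // through pinned staging buffers (searchFromCpuPaged_); anything smaller
+    // is copied to the device in one shot (searchNonPaged_). For example,
+    // with d = 128 and the default 256 MiB threshold, paging starts at about
+    // n >= 524288 queries, since 524288 * 128 * 4 bytes = 256 MiB.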
+ size_t dataSize = (size_t) n * this->d * sizeof(float); + + if (dataSize >= minPagedSize_) { + searchFromCpuPaged_(n, x, k, + outDistances.data(), + outIntIndices.data()); + usePaged = true; + } + } + + if (!usePaged) { + searchNonPaged_(n, x, k, + outDistances.data(), + outIntIndices.data()); + } + + // Convert and copy int indices out + auto outIndices = toDevice(resources_, + device_, + labels, + stream, + {(int) n, (int) k}); + + // Convert int to long + thrust::transform(thrust::cuda::par.on(stream), + outIntIndices.data(), + outIntIndices.end(), + outIndices.data(), + IntToLong()); + + // Copy back if necessary + fromDevice(outDistances, distances, stream); + fromDevice(outIndices, labels, stream); +} + +void +GpuIndexFlat::searchImpl_(faiss::Index::idx_t n, + const float* x, + faiss::Index::idx_t k, + float* distances, + faiss::Index::idx_t* labels) const { + FAISS_ASSERT_MSG(false, "Should not be called"); +} + +void +GpuIndexFlat::searchNonPaged_(int n, + const float* x, + int k, + float* outDistancesData, + int* outIndicesData) const { + Tensor outDistances(outDistancesData, {n, k}); + Tensor outIndices(outIndicesData, {n, k}); + + auto stream = resources_->getDefaultStream(device_); + + // Make sure arguments are on the device we desire; use temporary + // memory allocations to move it if necessary + auto vecs = toDevice(resources_, + device_, + const_cast(x), + stream, + {n, (int) this->d}); + + data_->query(vecs, k, outDistances, outIndices, true); +} + +void +GpuIndexFlat::searchFromCpuPaged_(int n, + const float* x, + int k, + float* outDistancesData, + int* outIndicesData) const { + Tensor outDistances(outDistancesData, {n, k}); + Tensor outIndices(outIndicesData, {n, k}); + + // Is pinned memory available? + auto pinnedAlloc = resources_->getPinnedMemory(); + int pageSizeInVecs = + (int) ((pinnedAlloc.second / 2) / (sizeof(float) * this->d)); + + if (!pinnedAlloc.first || pageSizeInVecs < 1) { + // Just page without overlapping copy with compute + int batchSize = utils::nextHighestPowerOf2( + (int) ((size_t) kNonPinnedPageSize / + (sizeof(float) * this->d))); + + for (int cur = 0; cur < n; cur += batchSize) { + int num = std::min(batchSize, n - cur); + + auto outDistancesSlice = outDistances.narrowOutermost(cur, num); + auto outIndicesSlice = outIndices.narrowOutermost(cur, num); + + searchNonPaged_(num, + x + (size_t) cur * this->d, + k, + outDistancesSlice.data(), + outIndicesSlice.data()); + } + + return; + } + + // + // Pinned memory is available, so we can overlap copy with compute. + // We use two pinned memory buffers, and triple-buffer the + // procedure: + // + // 1 CPU copy -> pinned + // 2 pinned copy -> GPU + // 3 GPU compute + // + // 1 2 3 1 2 3 ... (pinned buf A) + // 1 2 3 1 2 ... (pinned buf B) + // 1 2 3 1 ... 
(pinned buf A) + // time -> + // + auto defaultStream = resources_->getDefaultStream(device_); + auto copyStream = resources_->getAsyncCopyStream(device_); + + FAISS_ASSERT((size_t) pageSizeInVecs * this->d <= + (size_t) std::numeric_limits::max()); + + float* bufPinnedA = (float*) pinnedAlloc.first; + float* bufPinnedB = bufPinnedA + (size_t) pageSizeInVecs * this->d; + float* bufPinned[2] = {bufPinnedA, bufPinnedB}; + + // Reserve space on the GPU for the destination of the pinned buffer + // copy + DeviceTensor bufGpuA( + resources_->getMemoryManagerCurrentDevice(), + {(int) pageSizeInVecs, (int) this->d}, + defaultStream); + DeviceTensor bufGpuB( + resources_->getMemoryManagerCurrentDevice(), + {(int) pageSizeInVecs, (int) this->d}, + defaultStream); + DeviceTensor* bufGpus[2] = {&bufGpuA, &bufGpuB}; + + // Copy completion events for the pinned buffers + std::unique_ptr eventPinnedCopyDone[2]; + + // Execute completion events for the GPU buffers + std::unique_ptr eventGpuExecuteDone[2]; + + // All offsets are in terms of number of vectors; they remain within + // int bounds (as this function only handles max in vectors) + + // Current start offset for buffer 1 + int cur1 = 0; + int cur1BufIndex = 0; + + // Current start offset for buffer 2 + int cur2 = -1; + int cur2BufIndex = 0; + + // Current start offset for buffer 3 + int cur3 = -1; + int cur3BufIndex = 0; + + while (cur3 < n) { + // Start async pinned -> GPU copy first (buf 2) + if (cur2 != -1 && cur2 < n) { + // Copy pinned to GPU + int numToCopy = std::min(pageSizeInVecs, n - cur2); + + // Make sure any previous execution has completed before continuing + auto& eventPrev = eventGpuExecuteDone[cur2BufIndex]; + if (eventPrev.get()) { + eventPrev->streamWaitOnEvent(copyStream); + } + + CUDA_VERIFY(cudaMemcpyAsync(bufGpus[cur2BufIndex]->data(), + bufPinned[cur2BufIndex], + (size_t) numToCopy * this->d * sizeof(float), + cudaMemcpyHostToDevice, + copyStream)); + + // Mark a completion event in this stream + eventPinnedCopyDone[cur2BufIndex] = + std::move(std::unique_ptr(new CudaEvent(copyStream))); + + // We pick up from here + cur3 = cur2; + cur2 += numToCopy; + cur2BufIndex = (cur2BufIndex == 0) ? 1 : 0; + } + + if (cur3 != -1 && cur3 < n) { + // Process on GPU + int numToProcess = std::min(pageSizeInVecs, n - cur3); + + // Make sure the previous copy has completed before continuing + auto& eventPrev = eventPinnedCopyDone[cur3BufIndex]; + FAISS_ASSERT(eventPrev.get()); + + eventPrev->streamWaitOnEvent(defaultStream); + + // Create tensor wrappers + DeviceTensor input(bufGpus[cur3BufIndex]->data(), + {numToProcess, this->d}); + auto outDistancesSlice = outDistances.narrowOutermost(cur3, numToProcess); + auto outIndicesSlice = outIndices.narrowOutermost(cur3, numToProcess); + + data_->query(input, k, + outDistancesSlice, + outIndicesSlice, true); + + // Create completion event + eventGpuExecuteDone[cur3BufIndex] = + std::move(std::unique_ptr(new CudaEvent(defaultStream))); + + // We pick up from here + cur3BufIndex = (cur3BufIndex == 0) ? 
1 : 0; + cur3 += numToProcess; + } + + if (cur1 < n) { + // Copy CPU mem to CPU pinned + int numToCopy = std::min(pageSizeInVecs, n - cur1); + + // Make sure any previous copy has completed before continuing + auto& eventPrev = eventPinnedCopyDone[cur1BufIndex]; + if (eventPrev.get()) { + eventPrev->cpuWaitOnEvent(); + } + + memcpy(bufPinned[cur1BufIndex], + x + (size_t) cur1 * this->d, + (size_t) numToCopy * this->d * sizeof(float)); + + // We pick up from here + cur2 = cur1; + cur1 += numToCopy; + cur1BufIndex = (cur1BufIndex == 0) ? 1 : 0; + } + } +} + +void +GpuIndexFlat::reconstruct(faiss::Index::idx_t key, + float* out) const { + DeviceScope scope(device_); + + FAISS_THROW_IF_NOT_MSG(key < this->ntotal, "index out of bounds"); + auto stream = resources_->getDefaultStream(device_); + + if (config_.useFloat16) { + auto vec = data_->getVectorsFloat32Copy(key, 1, stream); + fromDevice(vec.data(), out, this->d, stream); + } else { + auto vec = data_->getVectorsFloat32Ref()[key]; + fromDevice(vec.data(), out, this->d, stream); + } +} + +void +GpuIndexFlat::reconstruct_n(faiss::Index::idx_t i0, + faiss::Index::idx_t num, + float* out) const { + DeviceScope scope(device_); + + FAISS_THROW_IF_NOT_MSG(i0 < this->ntotal, "index out of bounds"); + FAISS_THROW_IF_NOT_MSG(i0 + num - 1 < this->ntotal, "num out of bounds"); + auto stream = resources_->getDefaultStream(device_); + + if (config_.useFloat16) { + auto vec = data_->getVectorsFloat32Copy(i0, num, stream); + fromDevice(vec.data(), out, num * this->d, stream); + } else { + auto vec = data_->getVectorsFloat32Ref()[i0]; + fromDevice(vec.data(), out, this->d * num, stream); + } +} + +void +GpuIndexFlat::verifySettings_() const { + // If we want Hgemm, ensure that it is supported on this device + if (config_.useFloat16Accumulator) { +#ifdef FAISS_USE_FLOAT16 + FAISS_THROW_IF_NOT_MSG(config_.useFloat16, + "useFloat16Accumulator can only be enabled " + "with useFloat16"); + + FAISS_THROW_IF_NOT_FMT(getDeviceSupportsFloat16Math(config_.device), + "Device %d does not support Hgemm " + "(useFloat16Accumulator)", + config_.device); +#else + FAISS_THROW_IF_NOT_MSG(false, "not compiled with float16 support"); +#endif + } +} + +// +// GpuIndexFlatL2 +// + +GpuIndexFlatL2::GpuIndexFlatL2(GpuResources* resources, + faiss::IndexFlatL2* index, + GpuIndexFlatConfig config) : + GpuIndexFlat(resources, index, config) { +} + +GpuIndexFlatL2::GpuIndexFlatL2(GpuResources* resources, + int dims, + GpuIndexFlatConfig config) : + GpuIndexFlat(resources, dims, faiss::METRIC_L2, config) { +} + +void +GpuIndexFlatL2::copyFrom(faiss::IndexFlatL2* index) { + GpuIndexFlat::copyFrom(index); +} + +void +GpuIndexFlatL2::copyTo(faiss::IndexFlatL2* index) { + GpuIndexFlat::copyTo(index); +} + +// +// GpuIndexFlatIP +// + +GpuIndexFlatIP::GpuIndexFlatIP(GpuResources* resources, + faiss::IndexFlatIP* index, + GpuIndexFlatConfig config) : + GpuIndexFlat(resources, index, config) { +} + +GpuIndexFlatIP::GpuIndexFlatIP(GpuResources* resources, + int dims, + GpuIndexFlatConfig config) : + GpuIndexFlat(resources, dims, faiss::METRIC_INNER_PRODUCT, config) { +} + +void +GpuIndexFlatIP::copyFrom(faiss::IndexFlatIP* index) { + GpuIndexFlat::copyFrom(index); +} + +void +GpuIndexFlatIP::copyTo(faiss::IndexFlatIP* index) { + GpuIndexFlat::copyTo(index); +} + +} } // namespace diff --git a/cuda_code/GpuUtils.cu b/cuda_code/GpuUtils.cu new file mode 100644 index 0000000000000000000000000000000000000000..94f94500361b9a2d9da342caa4a2aa72fca459fd --- /dev/null +++ b/cuda_code/GpuUtils.cu @@ 
-0,0 +1,46 @@ +/*************************************************************************************** + GpuShareSat -- Copyright (c) 2020, Nicolas Prevot + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + **************************************************************************************************/ +#include +#include + +#include +#include "GpuUtils.cuh" + +__device__ int forSyncOut; + +namespace GpuShare { + +StreamPointer::StreamPointer() { + cudaStreamCreate(&stream); +} + +StreamPointer::~StreamPointer() { + cudaStreamDestroy(stream); +} + +EventPointer::EventPointer() { + cudaEventCreate(&event); +} + +EventPointer::~EventPointer() { + cudaEventDestroy(event); +} + + +} diff --git a/cuda_code/GravityKernel.cu b/cuda_code/GravityKernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..cee3b45837d904828231277b50aa907952bd8222 --- /dev/null +++ b/cuda_code/GravityKernel.cu @@ -0,0 +1,214 @@ +#include "GravityKernel.cuh" + +#include +#include +#include +#include + +#include "cuda_runtime.h" +#include "device_launch_parameters.h" + +#define THREADS_PER_BLOCK 64 + +__global__ void acceleration_kernel(Particle* particle, float* acc_sum, Particle* particles, int count) { + + int positionID = blockDim.x * blockIdx.x + threadIdx.x; + + float gConstant = 10; + + if (positionID < count) { + Particle ref = particles[positionID]; + + float distX = ref.position[0] - particle->position[0]; + float distY = ref.position[1] - particle->position[1]; + + float dist2 = distX * distX + distY * distY; + + float force = 0; + + if (dist2 > 100) { + force = gConstant * ref.mass * particle->mass / (float)dist2; + + float rad = atan2(distY, distX); + + acc_sum[positionID] = force * cos(rad)/particle->mass; + acc_sum[positionID + Constant::NUM_PARTICLES] = force * sin(rad)/particle->mass; + } + else { + acc_sum[positionID] = 0; + acc_sum[positionID + Constant::NUM_PARTICLES] = 0; + } + + } + +} + +__global__ void sum_kernel(float* acc_sum) { + + const int tid = threadIdx.x; + + auto step_size = 1; + int number_of_threads = blockDim.x; + + if (tid < Constant::NUM_PARTICLES) { + + while (number_of_threads > 0) + { + if (tid < number_of_threads) // still alive? 
+ { + const auto fst = tid * step_size * 2; + const auto snd = fst + step_size; + acc_sum[fst] += acc_sum[snd]; + } + + step_size <<= 1; + number_of_threads >>= 1; + } + } + +} + +__global__ void gravity_kernel(Particle* particles, float* acc_sum, int count) { + + float dt = .10 + ; + float drag = 0; + + int id = blockDim.x * blockIdx.x + threadIdx.x; + + if (id < count) { + + const int blocks = Constant::NUM_PARTICLES / THREADS_PER_BLOCK + 1; + const int threads = Constant::NUM_PARTICLES / 2; + + acceleration_kernel << > > (&particles[id], (acc_sum + 2 * id * Constant::NUM_PARTICLES), particles, count); + + sum_kernel << <1, threads >> > ((acc_sum + 2 * id * Constant::NUM_PARTICLES)); + sum_kernel << <1, threads >> > ((acc_sum + (2 * id + 1) * Constant::NUM_PARTICLES)); + + float x = 0, y = 0; + + /*for (int i = 0; i < Constant::NUM_PARTICLES; i++) { + x += acc_sum[2 * id * Constant::NUM_PARTICLES]; + y += acc_sum[2 * id * Constant::NUM_PARTICLES + Constant::NUM_PARTICLES]; + }*/ + + x = acc_sum[2 * id * Constant::NUM_PARTICLES]; + y = acc_sum[(2 * id + 1) * Constant::NUM_PARTICLES]; + + Particle ref = particles[id]; + + float* vel = ref.velocity; + float* pos = ref.position; + + vel[0] += x * dt; + vel[1] += y * dt; + + vel[0] -= vel[0] * dt * drag; + vel[1] -= vel[1] * dt * drag; + + pos[0] += vel[0] * dt; + pos[1] += vel[1] * dt; + + } +} + + +GravityKernel::GravityKernel() { + this->_world = new World(); +} + +GravityKernel::GravityKernel(World* world) { + this->_world = world; +} + +__host__ void GravityKernel::cudaPrep() { + Particle* particles = this->_world->particles; + + Particle* d_particles; + float* d_acc_sum; + + float** d_positions = new float* [Constant::NUM_PARTICLES]; + float** d_velocities = new float* [Constant::NUM_PARTICLES]; + + if (cudaMalloc(&d_particles, sizeof(Particle) * Constant::NUM_PARTICLES) != cudaSuccess) { + std::cout << "Particle Device Allocation Error" << std::endl; + return; + } + + if (cudaMalloc(&d_acc_sum, sizeof(float) * Constant::NUM_PARTICLES * Constant::NUM_PARTICLES * Constant::DIMENSIONS) != cudaSuccess) { + std::cout << "Particle Accelerations Allocation Error" << std::endl; + return; + } + + + for (int i = 0; i < Constant::NUM_PARTICLES; i++) { + + if (cudaMalloc(&(d_positions[i]), sizeof(float) * Constant::DIMENSIONS) != cudaSuccess) { + std::cout << "Position Mapping Failure" << std::endl; + } + + if (cudaMalloc(&(d_velocities[i]), sizeof(float) * Constant::DIMENSIONS) != cudaSuccess) { + std::cout << "Velocity Mapping Failure" << std::endl; + } + + + if (cudaMemcpy(&(d_particles[i].position), &(d_positions[i]), sizeof(float*), cudaMemcpyHostToDevice) != cudaSuccess){ + std::cout << "Particle Position Allocation Error" << std::endl; + } + + if (cudaMemcpy(&(d_particles[i].velocity), &(d_velocities[i]), sizeof(float*), cudaMemcpyHostToDevice) != cudaSuccess) { + std::cout << "Particle Velocity Allocation Error" << std::endl; + } + + } + + for (int i = 0; i < Constant::NUM_PARTICLES; i++) { + if (cudaMemcpy(d_positions[i], particles[i].position, sizeof(float) * Constant::DIMENSIONS, cudaMemcpyHostToDevice) != cudaSuccess) { + std::cout << "Particle Position Allocation Error" << std::endl; + } + + if (cudaMemcpy(d_velocities[i], particles[i].velocity, sizeof(float) * Constant::DIMENSIONS, cudaMemcpyHostToDevice) != cudaSuccess) { + std::cout << "Particle Velocity Allocation Error" << std::endl; + } + + if (cudaMemcpy(&(d_particles[i].mass), &(particles[i].mass), sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) { + std::cout << "Particle 
Mass Allocation Error" << std::endl; + } + } + + this->d_particles = d_particles; + this->d_acc_sum = d_acc_sum; + this->d_positions = d_positions; + this->d_velocities = d_velocities; +} + +__host__ void GravityKernel::runKernel() { + + int blocks = Constant::NUM_PARTICLES / THREADS_PER_BLOCK + 1; + + gravity_kernel<<>>(d_particles, d_acc_sum, Constant::NUM_PARTICLES); + cudaDeviceSynchronize(); + + Particle* ref = this->_world->particles; + + for (int i = 0; i < Constant::NUM_PARTICLES; i++) { + cudaMemcpy(ref[i].position, d_positions[i], sizeof(float) * Constant::DIMENSIONS, cudaMemcpyDeviceToHost); + cudaMemcpy(ref[i].velocity, d_velocities[i], sizeof(float) * Constant::DIMENSIONS, cudaMemcpyDeviceToHost); + } + +} + + +__host__ void GravityKernel::cudaClear() { + + std::cout << "Clearing memory" << std::endl; + + for (int i = 0; i < Constant::NUM_PARTICLES; i++) { + cudaFree(&(d_particles[i].position)); + cudaFree(&(d_particles[i].velocity)); + } + + cudaFree(d_particles); +} + diff --git a/cuda_code/GridSampler_18.cu b/cuda_code/GridSampler_18.cu new file mode 100644 index 0000000000000000000000000000000000000000..b6d225dbbac04195579228bea9303cf9959f342f --- /dev/null +++ b/cuda_code/GridSampler_18.cu @@ -0,0 +1,937 @@ +#define TORCH_ASSERT_NO_OPERATORS +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +using namespace at::cuda::detail; + +using at::native::detail::GridSamplerInterpolation; +using at::native::detail::GridSamplerPadding; + +namespace { + template + C10_LAUNCH_BOUNDS_1(256) + __global__ void grid_sampler_2d_kernel( + const index_t nthreads, + TensorInfo input, + TensorInfo grid, + TensorInfo output, + const GridSamplerInterpolation interpolation_mode, + const GridSamplerPadding padding_mode, + bool align_corners) { + index_t C = input.sizes[1]; + index_t inp_H = input.sizes[2]; + index_t inp_W = input.sizes[3]; + index_t out_H = grid.sizes[1]; + index_t out_W = grid.sizes[2]; + index_t inp_sN = input.strides[0]; + index_t inp_sC = input.strides[1]; + index_t inp_sH = input.strides[2]; + index_t inp_sW = input.strides[3]; + index_t grid_sN = grid.strides[0]; + index_t grid_sH = grid.strides[1]; + index_t grid_sW = grid.strides[2]; + index_t grid_sCoor = grid.strides[3]; + index_t out_sN = output.strides[0]; + index_t out_sC = output.strides[1]; + index_t out_sH = output.strides[2]; + index_t out_sW = output.strides[3]; + + CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { + const index_t w = index % out_W; + const index_t h = (index / out_W) % out_H; + const index_t n = index / (out_H * out_W); + const index_t grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; + + // get the corresponding input x, y co-ordinates from grid + scalar_t x = grid.data[grid_offset]; + scalar_t y = grid.data[grid_offset + grid_sCoor]; + + scalar_t ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners); + scalar_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners); + + if (interpolation_mode == GridSamplerInterpolation::Bilinear) { + // get NE, NW, SE, SW pixel values from (x, y) + index_t ix_nw = static_cast(::floor(ix)); + index_t iy_nw = static_cast(::floor(iy)); + index_t ix_ne = ix_nw + 1; + index_t iy_ne = iy_nw; + index_t ix_sw = ix_nw; + index_t iy_sw = iy_nw + 1; + index_t ix_se = ix_nw + 1; + index_t iy_se = iy_nw + 1; + + // get surfaces to each neighbor: + scalar_t nw = (ix_se - ix) * (iy_se - iy); + scalar_t ne = (ix - ix_sw) * (iy_sw - iy); 
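+      // (each of these weights is the area of the cell between (ix, iy) and
+      // the diagonally opposite corner, so the four weights sum to 1)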
+ scalar_t sw = (ix_ne - ix) * (iy - iy_ne); + scalar_t se = (ix - ix_nw) * (iy - iy_nw); + + // calculate bilinear weighted pixel value and set output pixel + auto inp_ptr_NC = input.data + n * inp_sN; + auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; + for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { + *out_ptr_NCHW = static_cast(0); + if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { + *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; + } + if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { + *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; + } + if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { + *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; + } + if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { + *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; + } + } + } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { + index_t ix_nearest = static_cast(::round(ix)); + index_t iy_nearest = static_cast(::round(iy)); + + // assign nearest neighor pixel value to output pixel + auto inp_ptr_NC = input.data + n * inp_sN; + auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; + for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { + if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { + *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; + } else { + *out_ptr_NCHW = static_cast(0); + } + } + } else if (interpolation_mode == GridSamplerInterpolation::Bicubic) { + + ix = grid_sampler_unnormalize(x, inp_W, align_corners); + iy = grid_sampler_unnormalize(y, inp_H, align_corners); + + scalar_t ix_nw = ::floor(ix); + scalar_t iy_nw = ::floor(iy); + + const scalar_t tx = ix - ix_nw; + const scalar_t ty = iy - iy_nw; + + auto inp_ptr_NC = input.data + n * inp_sN; + auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; + for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { + scalar_t coefficients[4]; + + #pragma unroll 4 + for (index_t i = 0; i < 4; ++i) { + coefficients[i] = cubic_interp1d( + get_value_bounded(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), + get_value_bounded(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), + get_value_bounded(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), + get_value_bounded(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), + tx); + } + + *out_ptr_NCHW = cubic_interp1d( + coefficients[0], + coefficients[1], + coefficients[2], + coefficients[3], + ty); + } + } + } + } + + template + C10_LAUNCH_BOUNDS_1(512) + __global__ void grid_sampler_3d_kernel( + const index_t nthreads, + TensorInfo input, + TensorInfo grid, + TensorInfo output, + const GridSamplerInterpolation interpolation_mode, + const GridSamplerPadding padding_mode, + bool align_corners) { + + index_t C = input.sizes[1]; + index_t inp_D = input.sizes[2]; + index_t inp_H = input.sizes[3]; + index_t inp_W = input.sizes[4]; + index_t out_D = grid.sizes[1]; + index_t out_H = grid.sizes[2]; + index_t out_W = grid.sizes[3]; + index_t inp_sN = input.strides[0]; + index_t inp_sC = input.strides[1]; + index_t inp_sD = input.strides[2]; + index_t inp_sH = input.strides[3]; + index_t inp_sW = input.strides[4]; + index_t grid_sN = grid.strides[0]; + index_t grid_sD = 
grid.strides[1]; + index_t grid_sH = grid.strides[2]; + index_t grid_sW = grid.strides[3]; + index_t grid_sCoor = grid.strides[4]; + index_t out_sN = output.strides[0]; + index_t out_sC = output.strides[1]; + index_t out_sD = output.strides[2]; + index_t out_sH = output.strides[3]; + index_t out_sW = output.strides[4]; + + CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { + const index_t w = index % out_W; + const index_t h = (index / out_W) % out_H; + const index_t d = (index / (out_H * out_W)) % out_D; + const index_t n = index / (out_D * out_H * out_W); + const index_t grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; + + // get the corresponding input x, y, z co-ordinates from grid + scalar_t ix = grid.data[grid_offset]; + scalar_t iy = grid.data[grid_offset + grid_sCoor]; + scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; + + ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); + iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); + iz = grid_sampler_compute_source_index(iz, inp_D, padding_mode, align_corners); + + if (interpolation_mode == GridSamplerInterpolation::Bilinear) { + // get corner pixel values from (x, y, z) + // for 4d, we used north-east-south-west + // for 5d, we add top-bottom + index_t ix_tnw = static_cast(::floor(ix)); + index_t iy_tnw = static_cast(::floor(iy)); + index_t iz_tnw = static_cast(::floor(iz)); + + index_t ix_tne = ix_tnw + 1; + index_t iy_tne = iy_tnw; + index_t iz_tne = iz_tnw; + + index_t ix_tsw = ix_tnw; + index_t iy_tsw = iy_tnw + 1; + index_t iz_tsw = iz_tnw; + + index_t ix_tse = ix_tnw + 1; + index_t iy_tse = iy_tnw + 1; + index_t iz_tse = iz_tnw; + + index_t ix_bnw = ix_tnw; + index_t iy_bnw = iy_tnw; + index_t iz_bnw = iz_tnw + 1; + + index_t ix_bne = ix_tnw + 1; + index_t iy_bne = iy_tnw; + index_t iz_bne = iz_tnw + 1; + + index_t ix_bsw = ix_tnw; + index_t iy_bsw = iy_tnw + 1; + index_t iz_bsw = iz_tnw + 1; + + index_t ix_bse = ix_tnw + 1; + index_t iy_bse = iy_tnw + 1; + index_t iz_bse = iz_tnw + 1; + + // get surfaces to each neighbor: + scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); + scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); + scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); + scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); + scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); + scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); + scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); + scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); + + auto inp_ptr_NC = input.data + n * inp_sN; + auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; + for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { + // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne + // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse + // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne + // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse + *out_ptr_NCDHW = static_cast(0); + if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { + *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; + } + if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { + *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; + } + if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, 
inp_D, inp_H, inp_W)) { + *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; + } + if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { + *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; + } + if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { + *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; + } + if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { + *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; + } + if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { + *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; + } + if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { + *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; + } + } + } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { + index_t ix_nearest = static_cast(::round(ix)); + index_t iy_nearest = static_cast(::round(iy)); + index_t iz_nearest = static_cast(::round(iz)); + + // assign nearest neighor pixel value to output pixel + auto inp_ptr_NC = input.data + n * inp_sN; + auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; + for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { + if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { + *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; + } else { + *out_ptr_NCDHW = static_cast(0); + } + } + } + } + } + +// Note [Passing pointer and offset to fastAtomicAdd] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// For its internal bounds checking, fastAtomicAdd needs to know where the destination address +// lies relative to the entire tensor, so we pass the base grad_input.data and full offset information, +// including batch * channel offset (NC_offset). + + template + C10_LAUNCH_BOUNDS_1(256) + __global__ void grid_sampler_2d_backward_kernel( + const index_t nthreads, + TensorInfo grad_output, + TensorInfo input, + TensorInfo grid, + TensorInfo grad_input, // initialized to zeros (or unused if input_requires_grad is false) + TensorInfo grad_grid, // initialized to empty + const GridSamplerInterpolation interpolation_mode, + const GridSamplerPadding padding_mode, + bool align_corners, + const index_t grad_input_memory_span, + const bool input_requires_grad) { + + index_t C = input.sizes[1]; + index_t inp_H = input.sizes[2]; + index_t inp_W = input.sizes[3]; + index_t out_H = grid.sizes[1]; + index_t out_W = grid.sizes[2]; + index_t inp_sN = input.strides[0]; + index_t inp_sC = input.strides[1]; + index_t inp_sH = input.strides[2]; + index_t inp_sW = input.strides[3]; + index_t grid_sN = grid.strides[0]; + index_t grid_sH = grid.strides[1]; + index_t grid_sW = grid.strides[2]; + index_t grid_sCoor = grid.strides[3]; + index_t gOut_sN = grad_output.strides[0]; + index_t gOut_sC = grad_output.strides[1]; + index_t gOut_sH = grad_output.strides[2]; + index_t gOut_sW = grad_output.strides[3]; + // gInp_* (and NC_offset below) are not really needed if input_requires_grad is false. 
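+    // When input_requires_grad is false they are left unset; the offsets
+    // derived from them only feed the safe_add_2d calls below, which are
+    // themselves guarded by input_requires_grad.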
+ index_t gInp_sN; + index_t gInp_sC; + index_t gInp_sH; + index_t gInp_sW; + if (input_requires_grad) { + gInp_sN = grad_input.strides[0]; + gInp_sC = grad_input.strides[1]; + gInp_sH = grad_input.strides[2]; + gInp_sW = grad_input.strides[3]; + } + index_t gGrid_sW = grad_grid.strides[2]; + + CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { + const index_t w = index % out_W; + const index_t h = (index / out_W) % out_H; + const index_t n = index / (out_H * out_W); + const auto grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; + + // get the corresponding input x, y co-ordinates from grid + scalar_t x = grid.data[grid_offset]; + scalar_t y = grid.data[grid_offset + grid_sCoor]; + + // multipliers for gradients on ix and iy + scalar_t gix_mult, giy_mult; + scalar_t ix = grid_sampler_compute_source_index_set_grad(x, inp_W, padding_mode, align_corners, &gix_mult); + scalar_t iy = grid_sampler_compute_source_index_set_grad(y, inp_H, padding_mode, align_corners, &giy_mult); + + if (interpolation_mode == GridSamplerInterpolation::Bilinear) { + // get NE, NW, SE, SW pixel values from (x, y) + index_t ix_nw = static_cast(::floor(ix)); + index_t iy_nw = static_cast(::floor(iy)); + index_t ix_ne = ix_nw + 1; + index_t iy_ne = iy_nw; + index_t ix_sw = ix_nw; + index_t iy_sw = iy_nw + 1; + index_t ix_se = ix_nw + 1; + index_t iy_se = iy_nw + 1; + + // get surfaces to each neighbor: + scalar_t nw = (ix_se - ix) * (iy_se - iy); + scalar_t ne = (ix - ix_sw) * (iy_sw - iy); + scalar_t sw = (ix_ne - ix) * (iy - iy_ne); + scalar_t se = (ix - ix_nw) * (iy - iy_nw); + + scalar_t gix = static_cast(0), giy = static_cast(0); + scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; + index_t NC_offset = n * gInp_sN; + scalar_t *inp_ptr_NC = input.data + n * inp_sN; + for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) { + scalar_t gOut = *gOut_ptr_NCHW; + + if (input_requires_grad) { + // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. + safe_add_2d(grad_input.data, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut, NC_offset, grad_input_memory_span); + safe_add_2d(grad_input.data, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut, NC_offset, grad_input_memory_span); + safe_add_2d(grad_input.data, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut, NC_offset, grad_input_memory_span); + safe_add_2d(grad_input.data, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut, NC_offset, grad_input_memory_span); + } + + // calculate grad_grid + if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { + scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; + gix -= nw_val * (iy_se - iy) * gOut; + giy -= nw_val * (ix_se - ix) * gOut; + } + if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { + scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; + gix += ne_val * (iy_sw - iy) * gOut; + giy -= ne_val * (ix - ix_sw) * gOut; + } + if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { + scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; + gix -= sw_val * (iy - iy_ne) * gOut; + giy += sw_val * (ix_ne - ix) * gOut; + } + if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { + scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; + gix += se_val * (iy - iy_nw) * gOut; + giy += se_val * (ix - ix_nw) * gOut; + } + } + + // assuming grad_grid is contiguous + // thus we can + // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW + // 2. 
directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] + scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; + gGrid_ptr_NHW[0] = gix_mult * gix; + gGrid_ptr_NHW[1] = giy_mult * giy; + } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { + if (input_requires_grad) { + index_t ix_nearest = static_cast(::round(ix)); + index_t iy_nearest = static_cast(::round(iy)); + + // assign nearest neighor pixel value to output pixel + scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; + index_t NC_offset = n * gInp_sN; + for (index_t c = 0; c < C; ++c, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) { + // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. + safe_add_2d(grad_input.data, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW, NC_offset, grad_input_memory_span); + } + } + + // assuming grad_grid is contiguous + // thus we can + // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW + // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] + scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; + gGrid_ptr_NHW[0] = static_cast(0); + gGrid_ptr_NHW[1] = static_cast(0); + } else if (interpolation_mode == GridSamplerInterpolation::Bicubic) { + + ix = grid_sampler_unnormalize_set_grad(x, inp_W, align_corners, &gix_mult); + iy = grid_sampler_unnormalize_set_grad(y, inp_H, align_corners, &giy_mult); + + scalar_t ix_nw = ::floor(ix); + scalar_t iy_nw = ::floor(iy); + + const scalar_t tx = ix - ix_nw; + const scalar_t ty = iy - iy_nw; + + scalar_t x_coeffs[4]; + scalar_t y_coeffs[4]; + scalar_t x_coeffs_grad[4]; + scalar_t y_coeffs_grad[4]; + + get_cubic_upsampling_coefficients(x_coeffs, tx); + get_cubic_upsampling_coefficients(y_coeffs, ty); + get_cubic_coefficients_grad(x_coeffs_grad, tx); + get_cubic_coefficients_grad(y_coeffs_grad, ty); + + scalar_t gix = static_cast(0); + scalar_t giy = static_cast(0); + + scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; + index_t NC_offset = n * gInp_sN; + scalar_t *inp_ptr_NC = input.data + n * inp_sN; + + for (index_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC+= inp_sC) { + scalar_t gOut = *gOut_ptr_NCHW; + + #pragma unroll 4 + for (index_t i = 0; i < 4; ++i) { + #pragma unroll 4 + for (index_t j = 0; j < 4; ++j) { + + if (input_requires_grad) { + // set input gradient. See Note [Passing pointer and offset to fastAtomicAdd]. 
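+              // The 4x4 bicubic footprint may extend past the border;
+              // add_value_bounded resolves such taps with the same
+              // padding_mode handling as the forward read before atomically
+              // accumulating into grad_input.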
+ add_value_bounded(grad_input.data, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, gInp_sW, gInp_sH, + gOut * x_coeffs[i] * y_coeffs[j], + padding_mode, + align_corners, + NC_offset, + grad_input_memory_span); + } + + // set grid gradient + scalar_t val = get_value_bounded(inp_ptr_NC, ix_nw - 1 + i, iy_nw - 1 + j, + inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners); + + gix -= val * x_coeffs_grad[i] * y_coeffs[j] * gOut; + giy -= val * y_coeffs_grad[j] * x_coeffs[i] * gOut; + } + } + } + + scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; + gGrid_ptr_NHW[0] = gix_mult * gix; + gGrid_ptr_NHW[1] = giy_mult * giy; + } + } + } + + template + C10_LAUNCH_BOUNDS_1(256) + __global__ void grid_sampler_3d_backward_kernel( + const index_t nthreads, + TensorInfo grad_output, + TensorInfo input, + TensorInfo grid, + TensorInfo grad_input, // initialized to zeros (or unused if input_requires_grad is false) + TensorInfo grad_grid, // initialized to empty + const GridSamplerInterpolation interpolation_mode, + const GridSamplerPadding padding_mode, + bool align_corners, + const index_t grad_input_memory_span, + const bool input_requires_grad) { + + index_t C = input.sizes[1]; + index_t inp_D = input.sizes[2]; + index_t inp_H = input.sizes[3]; + index_t inp_W = input.sizes[4]; + index_t out_D = grid.sizes[1]; + index_t out_H = grid.sizes[2]; + index_t out_W = grid.sizes[3]; + index_t inp_sN = input.strides[0]; + index_t inp_sC = input.strides[1]; + index_t inp_sD = input.strides[2]; + index_t inp_sH = input.strides[3]; + index_t inp_sW = input.strides[4]; + index_t grid_sN = grid.strides[0]; + index_t grid_sD = grid.strides[1]; + index_t grid_sH = grid.strides[2]; + index_t grid_sW = grid.strides[3]; + index_t grid_sCoor = grid.strides[4]; + index_t gOut_sN = grad_output.strides[0]; + index_t gOut_sC = grad_output.strides[1]; + index_t gOut_sD = grad_output.strides[2]; + index_t gOut_sH = grad_output.strides[3]; + index_t gOut_sW = grad_output.strides[4]; + // gInp_* (and NC_offset below) are not really needed if input_requires_grad is false. 
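+    // Zero-initialized here (unlike the 2D kernel above) so that the
+    // stride-based offset increments in the loops below are no-ops when no
+    // input gradient is requested.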
+ int64_t gInp_sN = 0; + int64_t gInp_sC = 0; + int64_t gInp_sD = 0; + int64_t gInp_sH = 0; + int64_t gInp_sW = 0; + if (input_requires_grad) { + gInp_sN = grad_input.strides[0]; + gInp_sC = grad_input.strides[1]; + gInp_sD = grad_input.strides[2]; + gInp_sH = grad_input.strides[3]; + gInp_sW = grad_input.strides[4]; + } + index_t gGrid_sW = grad_grid.strides[3]; + + CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { + const index_t w = index % out_W; + const index_t h = (index / out_W) % out_H; + const index_t d = (index / (out_H * out_W)) % out_D; + const index_t n = index / (out_D * out_H * out_W); + const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; + + // get the corresponding input x, y, z co-ordinates from grid + scalar_t ix = grid.data[grid_offset]; + scalar_t iy = grid.data[grid_offset + grid_sCoor]; + scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; + + // multipliers for gradients on ix, iy, and iz + scalar_t gix_mult, giy_mult, giz_mult; + ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult); + iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult); + iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult); + + if (interpolation_mode == GridSamplerInterpolation::Bilinear) { + // get corner pixel values from (x, y, z) + // for 4d, we used north-east-south-west + // for 5d, we add top-bottom + index_t ix_tnw = static_cast(::floor(ix)); + index_t iy_tnw = static_cast(::floor(iy)); + index_t iz_tnw = static_cast(::floor(iz)); + + index_t ix_tne = ix_tnw + 1; + index_t iy_tne = iy_tnw; + index_t iz_tne = iz_tnw; + + index_t ix_tsw = ix_tnw; + index_t iy_tsw = iy_tnw + 1; + index_t iz_tsw = iz_tnw; + + index_t ix_tse = ix_tnw + 1; + index_t iy_tse = iy_tnw + 1; + index_t iz_tse = iz_tnw; + + index_t ix_bnw = ix_tnw; + index_t iy_bnw = iy_tnw; + index_t iz_bnw = iz_tnw + 1; + + index_t ix_bne = ix_tnw + 1; + index_t iy_bne = iy_tnw; + index_t iz_bne = iz_tnw + 1; + + index_t ix_bsw = ix_tnw; + index_t iy_bsw = iy_tnw + 1; + index_t iz_bsw = iz_tnw + 1; + + index_t ix_bse = ix_tnw + 1; + index_t iy_bse = iy_tnw + 1; + index_t iz_bse = iz_tnw + 1; + + // get surfaces to each neighbor: + scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); + scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); + scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); + scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); + scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); + scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); + scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); + scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); + + scalar_t gix = static_cast(0), giy = static_cast(0), giz = static_cast(0); + scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; + index_t NC_offset; + if (input_requires_grad) { + NC_offset = n * gInp_sN; + } + scalar_t *inp_ptr_NC = input.data + n * inp_sN; + // calculate bilinear weighted pixel value and set output pixel + for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC += inp_sC) { + scalar_t gOut = *gOut_ptr_NCDHW; + + // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. 
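+          // Each of the eight trilinear corners receives gOut scaled by its
+          // weight; safe_add_3d bounds-checks the corner and accumulates with
+          // an atomic add, since neighbouring output locations can touch the
+          // same input voxel.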
+ if (input_requires_grad) { + safe_add_3d(grad_input.data, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut, + NC_offset, grad_input_memory_span); + safe_add_3d(grad_input.data, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut, + NC_offset, grad_input_memory_span); + safe_add_3d(grad_input.data, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut, + NC_offset, grad_input_memory_span); + safe_add_3d(grad_input.data, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut, + NC_offset, grad_input_memory_span); + safe_add_3d(grad_input.data, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut, + NC_offset, grad_input_memory_span); + safe_add_3d(grad_input.data, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut, + NC_offset, grad_input_memory_span); + safe_add_3d(grad_input.data, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut, + NC_offset, grad_input_memory_span); + safe_add_3d(grad_input.data, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut, + NC_offset, grad_input_memory_span); + } + // calculate grad_grid + if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { + scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; + gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; + giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; + giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; + } + if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { + scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; + gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; + giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; + giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; + } + if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { + scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; + gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; + giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; + giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; + } + if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { + scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; + gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; + giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; + giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; + } + if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { + scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; + gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; + giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; + giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; + } + if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { + scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; + gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; + giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; + giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; + } + if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { + scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; + gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; + giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; + giz += 
bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; + } + if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { + scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; + gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; + giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; + giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; + } + } + + // assuming grad_grid is contiguous + // thus we can + // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW + // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] + scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; + gGrid_ptr_NDHW[0] = gix_mult * gix; + gGrid_ptr_NDHW[1] = giy_mult * giy; + gGrid_ptr_NDHW[2] = giz_mult * giz; + } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { + if (input_requires_grad) { + auto ix_nearest = static_cast(::round(ix)); + auto iy_nearest = static_cast(::round(iy)); + auto iz_nearest = static_cast(::round(iz)); + + // assign nearest neighor pixel value to output pixel + scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; + index_t NC_offset = n * gInp_sN; + for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC) { + // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. + safe_add_3d(grad_input.data, iz_nearest, iy_nearest, ix_nearest, + gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW, + NC_offset, grad_input_memory_span); + } + } + // assuming grad_grid is contiguous + // thus we can + // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW + // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] + scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; + gGrid_ptr_NDHW[0] = static_cast(0); + gGrid_ptr_NDHW[1] = static_cast(0); + gGrid_ptr_NDHW[2] = static_cast(0); + } + } + } +} // namespace + +// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. +void launch_grid_sampler_2d_forward_kernel( + const TensorBase &output, const TensorBase &input, const TensorBase &grid, + int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + auto N = input.size(0); + auto H = grid.size(1); + auto W = grid.size(2); + int64_t count = N * H * W; + if (count > 0) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] { + if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && + canUse32BitIndexMath(output)) { + grid_sampler_2d_kernel + <<>>( + static_cast(count), + getTensorInfo(input), + getTensorInfo(grid), + getTensorInfo(output), + static_cast(interpolation_mode), + static_cast(padding_mode), + align_corners); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } else { + grid_sampler_2d_kernel + <<>>( + count, + getTensorInfo(input), + getTensorInfo(grid), + getTensorInfo(output), + static_cast(interpolation_mode), + static_cast(padding_mode), + align_corners); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + }); + } +} + +// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
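+// The 3D launcher below follows the same pattern as the 2D one above: count
+// the output elements, dispatch over the floating-point dtypes, use the int
+// specialization when every tensor passes canUse32BitIndexMath, fall back to
+// int64_t otherwise, and verify the launch with C10_CUDA_KERNEL_LAUNCH_CHECK().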
+void launch_grid_sampler_3d_forward_kernel( + const TensorBase &output, const TensorBase &input, const TensorBase &grid, + int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + auto N = input.size(0); + auto D = grid.size(1); + auto H = grid.size(2); + auto W = grid.size(3); + int64_t count = N * D * H * W; + if (count > 0) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_cuda", [&] { + if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && + canUse32BitIndexMath(output)) { + grid_sampler_3d_kernel + <<>>( + static_cast(count), + getTensorInfo(input), + getTensorInfo(grid), + getTensorInfo(output), + static_cast(interpolation_mode), + static_cast(padding_mode), + align_corners); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } else { + grid_sampler_3d_kernel + <<>>( + count, + getTensorInfo(input), + getTensorInfo(grid), + getTensorInfo(output), + static_cast(interpolation_mode), + static_cast(padding_mode), + align_corners); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + }); + } +} + +// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. +void launch_grid_sampler_2d_backward_kernel( + const TensorBase &grad_input, const TensorBase &grad_grid, + const TensorBase &grad_output, const TensorBase &input, + const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode, + bool align_corners, std::array output_mask) { + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("grid_sampler_2d_backward_cuda"); + auto N = input.size(0); + auto H = grid.size(1); + auto W = grid.size(2); + + // If `input` gradient is not required, we skip computing it -- not needing to create + // the tensor to hold the gradient can markedly increase performance. (`grid` gradient + // is always computed.) + auto input_requires_grad = output_mask[0]; + + Tensor grad_input = ([&]() { + if (input_requires_grad) { + return at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + } else { + return Tensor(); + } + })(); + auto grad_grid = at::empty_like(grid, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + int64_t count = N * H * W; + if (count > 0) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] { + if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && + canUse32BitIndexMath(grad_output)) { + grid_sampler_2d_backward_kernel + <<>>( + static_cast(count), + getTensorInfo(grad_output), + getTensorInfo(input), + getTensorInfo(grid), + input_requires_grad ? getTensorInfo(grad_input) : TensorInfo(), + getTensorInfo(grad_grid), + static_cast(interpolation_mode), + static_cast(padding_mode), + align_corners, + /*grad_input_memory_span =*/input_requires_grad ? static_cast(grad_input.numel()) : 0, + input_requires_grad); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } else { + grid_sampler_2d_backward_kernel + <<>>( + count, + getTensorInfo(grad_output), + getTensorInfo(input), + getTensorInfo(grid), + input_requires_grad ? getTensorInfo(grad_input) : TensorInfo(), + getTensorInfo(grad_grid), + static_cast(interpolation_mode), + static_cast(padding_mode), + align_corners, + /*grad_input_memory_span =*/input_requires_grad ? grad_input.numel() : 0, + input_requires_grad); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + }); + } +} + +// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ]. 
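+
+// A minimal sketch of the output_mask handling used by the backward launchers, with
+// hypothetical names and plain CUDA allocations instead of ATen tensors: when the input
+// gradient is not requested, no buffer is created and the kernel receives a null pointer
+// with a memory span of 0, so every accumulation into grad_input is skipped.
+namespace {
+struct GradInputBuffer {
+  float* data;        // nullptr when the input gradient is not requested
+  int64_t memorySpan; // 0 when the input gradient is not requested
+};
+
+GradInputBuffer makeGradInputBuffer(bool input_requires_grad, int64_t numel) {
+  if (!input_requires_grad) {
+    return {nullptr, 0};
+  }
+  float* p = nullptr;
+  cudaMalloc((void**)&p, numel * sizeof(float));
+  cudaMemset(p, 0, numel * sizeof(float)); // gradients are accumulated, so start from zero
+  return {p, numel};
+}
+} // namespace
+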
+void launch_grid_sampler_3d_backward_kernel( + const TensorBase &grad_input, const TensorBase &grad_grid, + const TensorBase& grad_output, const TensorBase& input, + const TensorBase& grid, int64_t interpolation_mode, int64_t padding_mode, + bool align_corners, std::array output_mask) { + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("grid_sampler_3d_backward_cuda"); + auto N = input.size(0); + auto D = grid.size(1); + auto H = grid.size(2); + auto W = grid.size(3); + int64_t count = N * D * H * W; + auto input_requires_grad = output_mask[0]; + if (count > 0) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] { + if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && + canUse32BitIndexMath(grad_output)) { + grid_sampler_3d_backward_kernel + <<>>( + static_cast(count), + getTensorInfo(grad_output), + getTensorInfo(input), + getTensorInfo(grid), + input_requires_grad ? getTensorInfo(grad_input) : TensorInfo(), + getTensorInfo(grad_grid), + static_cast(interpolation_mode), + static_cast(padding_mode), + align_corners, + /*grad_input_memory_span =*/input_requires_grad ? static_cast(grad_input.numel()) : 0, + input_requires_grad); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } else { + grid_sampler_3d_backward_kernel + <<>>( + count, + getTensorInfo(grad_output), + getTensorInfo(input), + getTensorInfo(grid), + input_requires_grad ? getTensorInfo(grad_input) : TensorInfo(), + getTensorInfo(grad_grid), + static_cast(interpolation_mode), + static_cast(padding_mode), + align_corners, + /*grad_input_memory_span =*/input_requires_grad ? grad_input.numel() : 0, + input_requires_grad); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + }); + } +} + +}} // namespace at::native diff --git a/cuda_code/HelloWorld_4.cu b/cuda_code/HelloWorld_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..0e55e83bb1b774171a6e55ad37fb7e30e5485cf6 --- /dev/null +++ b/cuda_code/HelloWorld_4.cu @@ -0,0 +1,143 @@ +/****************************/ +/* THIS IS OPEN SOURCE CODE */ +/****************************/ + +/** + * @file HelloWorld.c + * CVS: $Id$ + * @author Heike Jagode + * jagode@eecs.utk.edu + * Mods: + * + * test case for Example component + * + * + * @brief + * This file is a very simple HelloWorld C example which serves (together + * with its Makefile) as a guideline on how to add tests to components. + * The papi configure and papi Makefile will take care of the compilation + * of the component tests (if all tests are added to a directory named + * 'tests' in the specific component dir). + * See components/README for more details. + * + * The string "Hello World!" is mangled and then restored. + */ + +#include +#include +#include "papi_test.h" + +#define NUM_EVENTS 1 +#define PAPI + +// Prototypes +__global__ void helloWorld(char*); + + +// Host function +int main(int argc, char** argv) +{ +#ifdef PAPI + int retval, i; + int EventSet = PAPI_NULL; + long long values[NUM_EVENTS]; + /* REPLACE THE EVENT NAME 'PAPI_FP_OPS' WITH A CUDA EVENT + FOR THE CUDA DEVICE YOU ARE RUNNING ON. 
+ RUN papi_native_avail to get a list of CUDA events that are + supported on your machine */ + char *EventName[] = { "PAPI_FP_OPS" }; + int events[NUM_EVENTS]; + + /* PAPI Initialization */ + retval = PAPI_library_init( PAPI_VER_CURRENT ); + if( retval != PAPI_VER_CURRENT ) + fprintf( stderr, "PAPI_library_init failed\n" ); + + printf( "PAPI_VERSION : %4d %6d %7d\n", + PAPI_VERSION_MAJOR( PAPI_VERSION ), + PAPI_VERSION_MINOR( PAPI_VERSION ), + PAPI_VERSION_REVISION( PAPI_VERSION ) ); + + /* convert PAPI native events to PAPI code */ + for( i = 0; i < NUM_EVENTS; i++ ){ + retval = PAPI_event_name_to_code( EventName[i], &events[i] ); + if( retval != PAPI_OK ) + fprintf( stderr, "PAPI_event_name_to_code failed\n" ); + else + printf( "Name %s --- Code: %x\n", EventName[i], events[i] ); + } + + retval = PAPI_create_eventset( &EventSet ); + if( retval != PAPI_OK ) + fprintf( stderr, "PAPI_create_eventset failed\n" ); + + retval = PAPI_add_events( EventSet, events, NUM_EVENTS ); + if( retval != PAPI_OK ) + fprintf( stderr, "PAPI_add_events failed\n" ); + + retval = PAPI_start( EventSet ); + if( retval != PAPI_OK ) + fprintf( stderr, "PAPI_start failed\n" ); +#endif + + + int j; + + // desired output + char str[] = "Hello World!"; + + // mangle contents of output + // the null character is left intact for simplicity + for(j = 0; j < 12; j++) { + str[j] -= j; + //printf("str=%s\n", str); + } + + + // allocate memory on the device + char *d_str; + size_t size = sizeof(str); + cudaMalloc((void**)&d_str, size); + + // copy the string to the device + cudaMemcpy(d_str, str, size, cudaMemcpyHostToDevice); + + // set the grid and block sizes + dim3 dimGrid(2); // one block per word + dim3 dimBlock(6); // one thread per character + + // invoke the kernel + helloWorld<<< dimGrid, dimBlock >>>(d_str); + + // retrieve the results from the device + cudaMemcpy(str, d_str, size, cudaMemcpyDeviceToHost); + + // free up the allocated memory on the device + cudaFree(d_str); + + printf("END: %s\n", str); + + +#ifdef PAPI + retval = PAPI_stop( EventSet, values ); + if( retval != PAPI_OK ) + fprintf( stderr, "PAPI_stop failed\n" ); + + for( i = 0; i < NUM_EVENTS; i++ ) + printf( "%12lld \t\t --> %s \n", values[i], EventName[i] ); +#endif + + return 0; +} + + +// Device kernel +__global__ void +helloWorld(char* str) +{ + // determine where in the thread grid we are + int idx = blockIdx.x * blockDim.x + threadIdx.x; + // unmangle output + str[idx] += idx; +} + diff --git a/cuda_code/IVFAppend_5.cu b/cuda_code/IVFAppend_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..c345faf5268288848e4341c0766efb98976d8381 --- /dev/null +++ b/cuda_code/IVFAppend_5.cu @@ -0,0 +1,641 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace faiss { +namespace gpu { + +// +// IVF list metadata aupdate +// + +// Updates the device-size array of list start pointers for codes and indices +__global__ void runUpdateListPointers( + Tensor listIds, + Tensor newListLength, + Tensor newCodePointers, + Tensor newIndexPointers, + int* listLengths, + void** listCodes, + void** listIndices) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + + if (i < listIds.getSize(0)) { + int listId = listIds[i]; + listLengths[listId] = newListLength[i]; + listCodes[listId] = newCodePointers[i]; + listIndices[listId] = newIndexPointers[i]; + } +} + +void runUpdateListPointers( + Tensor& listIds, + Tensor& newListLength, + Tensor& newCodePointers, + Tensor& newIndexPointers, + thrust::device_vector& listLengths, + thrust::device_vector& listCodes, + thrust::device_vector& listIndices, + cudaStream_t stream) { + int numThreads = std::min(listIds.getSize(0), getMaxThreadsCurrentDevice()); + int numBlocks = utils::divUp(listIds.getSize(0), numThreads); + + dim3 grid(numBlocks); + dim3 block(numThreads); + + runUpdateListPointers<<>>( + listIds, + newListLength, + newCodePointers, + newIndexPointers, + listLengths.data().get(), + listCodes.data().get(), + listIndices.data().get()); + + CUDA_TEST_ERROR(); +} + +// Appends new indices for vectors being added to the IVF indices lists +__global__ void ivfIndicesAppend( + Tensor listIds, + Tensor listOffset, + Tensor indices, + IndicesOptions opt, + void** listIndices) { + int vec = blockIdx.x * blockDim.x + threadIdx.x; + + if (vec >= listIds.getSize(0)) { + return; + } + + int listId = listIds[vec]; + int offset = listOffset[vec]; + + // Add vector could be invalid (contains NaNs etc) + if (listId == -1 || offset == -1) { + return; + } + + auto index = indices[vec]; + + if (opt == INDICES_32_BIT) { + // FIXME: there could be overflow here, but where should we check this? 
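+        // One possible answer to the FIXME above (an assumption, not necessarily how upstream
+        // FAISS resolves it): validate on the host, before INDICES_32_BIT is selected, that
+        // every user id fits in 32 bits, e.g.
+        //   FAISS_ASSERT(maxUserIndex <= (Index::idx_t)std::numeric_limits<int32_t>::max());
+        // so that the narrowing cast below cannot silently wrap.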
+ ((int*)listIndices[listId])[offset] = (int)index; + } else if (opt == INDICES_64_BIT) { + ((Index::idx_t*)listIndices[listId])[offset] = index; + } +} + +void runIVFIndicesAppend( + Tensor& listIds, + Tensor& listOffset, + Tensor& indices, + IndicesOptions opt, + thrust::device_vector& listIndices, + cudaStream_t stream) { + FAISS_ASSERT( + opt == INDICES_CPU || opt == INDICES_IVF || opt == INDICES_32_BIT || + opt == INDICES_64_BIT); + + if (opt != INDICES_CPU && opt != INDICES_IVF) { + int num = listIds.getSize(0); + int threads = std::min(num, getMaxThreadsCurrentDevice()); + int blocks = utils::divUp(num, threads); + + ivfIndicesAppend<<>>( + listIds, listOffset, indices, opt, listIndices.data().get()); + + CUDA_TEST_ERROR(); + } +} + +// +// IVF non-interleaved append +// + +template +__global__ void ivfFlatAppend( + Tensor listIds, + Tensor listOffset, + Tensor vecs, + void** listData, + Codec codec) { + int vec = blockIdx.x; + + int listId = listIds[vec]; + int offset = listOffset[vec]; + + // Add vector could be invalid (contains NaNs etc) + if (listId == -1 || offset == -1) { + return; + } + + // Handle whole encoding (only thread 0 will handle the remainder) + int limit = utils::divDown(vecs.getSize(1), Codec::kDimPerIter); + + int i; + for (i = threadIdx.x; i < limit; i += blockDim.x) { + int realDim = i * Codec::kDimPerIter; + float toEncode[Codec::kDimPerIter]; + +#pragma unroll + for (int j = 0; j < Codec::kDimPerIter; ++j) { + toEncode[j] = vecs[vec][realDim + j]; + } + + codec.encode(listData[listId], offset, i, toEncode); + } + + // Handle remainder with a single thread, if any + if (Codec::kDimPerIter > 1) { + int realDim = limit * Codec::kDimPerIter; + + // Was there any remainder? + if (realDim < vecs.getSize(1)) { + if (threadIdx.x == 0) { + float toEncode[Codec::kDimPerIter]; + + // How many remaining that we need to encode + int remaining = vecs.getSize(1) - realDim; + +#pragma unroll + for (int j = 0; j < Codec::kDimPerIter; ++j) { + int idx = realDim + j; + toEncode[j] = idx < vecs.getSize(1) ? 
vecs[vec][idx] : 0.0f; + } + + codec.encodePartial( + listData[listId], offset, i, remaining, toEncode); + } + } + } +} + +void runIVFFlatAppend( + Tensor& listIds, + Tensor& listOffset, + Tensor& vecs, + GpuScalarQuantizer* scalarQ, + thrust::device_vector& listData, + cudaStream_t stream) { + int dim = vecs.getSize(1); + int maxThreads = getMaxThreadsCurrentDevice(); + + // Each block will handle appending a single vector +#define RUN_APPEND \ + do { \ + dim3 grid(vecs.getSize(0)); \ + dim3 block(std::min(dim / codec.kDimPerIter, maxThreads)); \ + ivfFlatAppend<<>>( \ + listIds, listOffset, vecs, listData.data().get(), codec); \ + } while (0) + + if (!scalarQ) { + CodecFloat codec(dim * sizeof(float)); + RUN_APPEND; + } else { + switch (scalarQ->qtype) { + case ScalarQuantizer::QuantizerType::QT_8bit: { + Codec codec( + scalarQ->code_size, + scalarQ->gpuTrained.data(), + scalarQ->gpuTrained.data() + dim); + RUN_APPEND; + } break; + case ScalarQuantizer::QuantizerType::QT_8bit_uniform: { + Codec codec( + scalarQ->code_size, + scalarQ->trained[0], + scalarQ->trained[1]); + RUN_APPEND; + } break; + case ScalarQuantizer::QuantizerType::QT_fp16: { + Codec codec( + scalarQ->code_size); + RUN_APPEND; + } break; + case ScalarQuantizer::QuantizerType::QT_8bit_direct: { + Codec codec( + scalarQ->code_size); + RUN_APPEND; + } break; + case ScalarQuantizer::QuantizerType::QT_4bit: { + Codec codec( + scalarQ->code_size, + scalarQ->gpuTrained.data(), + scalarQ->gpuTrained.data() + dim); + RUN_APPEND; + } break; + case ScalarQuantizer::QuantizerType::QT_4bit_uniform: { + Codec codec( + scalarQ->code_size, + scalarQ->trained[0], + scalarQ->trained[1]); + RUN_APPEND; + } break; + default: + // unimplemented, should be handled at a higher level + FAISS_ASSERT(false); + } + } + + CUDA_TEST_ERROR(); + +#undef RUN_APPEND +} + +__global__ void ivfpqAppend( + Tensor listIds, + Tensor listOffset, + Tensor encodings, + void** listCodes) { + int encodingToAdd = blockIdx.x * blockDim.x + threadIdx.x; + + if (encodingToAdd >= listIds.getSize(0)) { + return; + } + + int listId = listIds[encodingToAdd]; + int vectorNumInList = listOffset[encodingToAdd]; + + // Add vector could be invalid (contains NaNs etc) + if (listId == -1 || vectorNumInList == -1) { + return; + } + + auto encoding = encodings[encodingToAdd]; + + // Layout with dimensions innermost + uint8_t* codeStart = ((uint8_t*)listCodes[listId]) + + vectorNumInList * encodings.getSize(1); + + // FIXME: stride with threads instead of single thread + for (int i = 0; i < encodings.getSize(1); ++i) { + codeStart[i] = encoding[i]; + } +} + +void runIVFPQAppend( + Tensor& listIds, + Tensor& listOffset, + Tensor& encodings, + thrust::device_vector& listCodes, + cudaStream_t stream) { + int threads = std::min(listIds.getSize(0), getMaxThreadsCurrentDevice()); + int blocks = utils::divUp(listIds.getSize(0), threads); + + ivfpqAppend<<>>( + listIds, listOffset, encodings, listCodes.data().get()); + + CUDA_TEST_ERROR(); +} + +// +// IVF interleaved append +// + +// Scalar encode a vector to Codec::EncodeT word-sized values; previously this +// was fused into a single append kernel but was refactored so that Flat, SQ and +// PQ all use the same arbitrary bitwidth append kernel +template +__global__ void sqEncode( + Tensor vecs, + Tensor encodedVecs, + Codec codec) { + int vec = blockIdx.x; + + for (int d = threadIdx.x; d < vecs.getSize(1); d += blockDim.x) { + encodedVecs[vec][d] = codec.encodeNew(d, vecs[vec][d]); + } +} + +template +void runSQEncode( + Tensor& vecs, + 
Tensor& encodedVecs, + Codec codec, + cudaStream_t stream) { + int threads = std::min(vecs.getSize(1), getMaxThreadsCurrentDevice()); + int blocks = vecs.getSize(0); + + sqEncode<<>>(vecs, encodedVecs, codec); +} + +// Handles appending encoded vectors (one per EncodeT word) packed into +// EncodeBits interleaved by 32 vectors. +// This is used by Flat, SQ and PQ code for the interleaved format. +template +__global__ void ivfInterleavedAppend( + // the IDs (offset in listData) of the unique lists + // being added to + Tensor uniqueLists, + // For each of the list IDs in uniqueLists, the start + // offset in vectorsByUniqueList for the vectors that + // we are adding to that list + Tensor uniqueListVectorStart, + // IDs in vecs of the vectors being added to each + // unique list + // The vectors (offset in vecs) added to + // uniqueLists[i] is: + // {vBUL[uLVS[i]], ..., vBUL[uLVS[i+1] - 1]} + Tensor vectorsByUniqueList, + // For each of the list IDs in uniqueLists, the start + // offset (by vector) within that list where we begin + // appending + Tensor uniqueListStartOffset, + // The EncodeT-sized encoded vectors + Tensor encodedVecs, + // The set of addresses for each of the lists + void** listData) { + // FIXME: some issue with getLaneId() and CUDA 10.1 and P4 GPUs? + int laneId = threadIdx.x % kWarpSize; + int warpId = threadIdx.x / kWarpSize; + int warpsPerBlock = blockDim.x / kWarpSize; + + // Each block is dedicated to a separate list + int listId = uniqueLists[blockIdx.x]; + + // The vecs we add to the list are at indices [vBUL[vecIdStart], + // vBUL[vecIdEnd]) + int vecIdStart = uniqueListVectorStart[blockIdx.x]; + // uLVS is explicitly terminated for us with one more than the number of + // blocks that we have + int vecIdEnd = uniqueListVectorStart[blockIdx.x + 1]; + + // How many vectors we are adding to this list + int numVecsAdding = vecIdEnd - vecIdStart; + + // The first vector we are updating within the list + auto listVecStart = uniqueListStartOffset[blockIdx.x]; + + // These are the actual vec IDs that we are adding (in vecs) + int* listVecIds = vectorsByUniqueList[vecIdStart].data(); + + // All data is written by groups of 32 vectors (to mirror the warp). + // listVecStart could be in the middle of this, or even, for sub-byte + // encodings, mean that the first vector piece of data that we need to + // update is in the high part of a byte. + // + // WarpPackedBits allows writing of arbitrary bit packed data in groups of + // 32, but we ensure that it only operates on the group of 32 vectors. In + // order to do this we need to actually start updating vectors at the next + // lower multiple of 32 from listVecStart. 
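+    // Worked example of that layout, assuming EncodeBits == 4 and EncodeT == uint8_t: a group
+    // of 32 vectors packs each dimension into 32 * 4 / 8 = 16 bytes, so within a list the
+    // dimension d of the group that starts at vector 32 * g begins at byte
+    //     g * (16 * encodedVecs.getSize(1)) + d * 16
+    // and vector v fills the (v % 32)-th 4-bit slot of that 16-byte run, which is why the
+    // writes below are first aligned down to a multiple of 32 vectors.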
+ int alignedListVecStart = utils::roundDown(listVecStart, 32); + + // Each block of 32 vectors fully encodes into this many bytes + constexpr int bytesPerVectorBlockDim = EncodeBits * 32 / 8; + constexpr int wordsPerVectorBlockDim = + bytesPerVectorBlockDim / sizeof(EncodeT); + int wordsPerVectorBlock = wordsPerVectorBlockDim * encodedVecs.getSize(1); + + EncodeT* listStart = ((EncodeT*)listData[listId]); + + // Each warp within the block handles a different chunk of 32 + int warpVec = alignedListVecStart + warpId * 32; + + // The warp data starts here + EncodeT* warpData = listStart + (warpVec / 32) * wordsPerVectorBlock; + + // Each warp encodes a single block + for (; warpVec < listVecStart + numVecsAdding; + // but block stride + warpVec += blockDim.x, + // the new warp data base strides by how many vector blocks we are + // encoding, which is one per warp + warpData += warpsPerBlock * wordsPerVectorBlock) { + // This lane is adding this vec (if it is within bounds) + int laneVec = warpVec + laneId; + + // Which vector does this correspond to in the set of vectors that we + // need to add? If this is < 0, then this particular thread is not + // encoding / appending a new vector + int laneVecAdding = laneVec - listVecStart; + + // We are actually adding a new vector if this is within range + bool valid = laneVecAdding >= 0 && laneVecAdding < numVecsAdding; + + // Now, which actual vector in vecs is this? + int vecId = valid ? listVecIds[laneVecAdding] : 0; + + // Each warp that has some vector data available needs to write out the + // vector components + EncodeT* data = warpData; + + for (int dim = 0; dim < encodedVecs.getSize(1); ++dim) { + EncodeT enc = valid ? encodedVecs[vecId][dim] : (EncodeT)0; + WarpPackedBits::write( + laneId, enc, valid, data); + + data += wordsPerVectorBlockDim; + } + } +} + +void runIVFFlatInterleavedAppend( + Tensor& listIds, + Tensor& listOffset, + Tensor& uniqueLists, + Tensor& vectorsByUniqueList, + Tensor& uniqueListVectorStart, + Tensor& uniqueListStartOffset, + Tensor& vecs, + GpuScalarQuantizer* scalarQ, + thrust::device_vector& listData, + GpuResources* res, + cudaStream_t stream) { + int dim = vecs.getSize(1); + +#define RUN_APPEND(ENCODE_T, ENCODE_BITS, DATA) \ + do { \ + dim3 grid(uniqueLists.getSize(0)); \ + dim3 block(128); \ + ivfInterleavedAppend \ + <<>>( \ + uniqueLists, \ + uniqueListVectorStart, \ + vectorsByUniqueList, \ + uniqueListStartOffset, \ + DATA, \ + listData.data().get()); \ + } while (0) + + if (!scalarQ) { + // No encoding is needed, we just append directly + RUN_APPEND(float, 32, vecs); + return; + } + + // only implemented at the moment + FAISS_ASSERT(scalarQ->bits == 16 || scalarQ->bits <= 8); + + if (scalarQ->bits == 16) { + FAISS_ASSERT(scalarQ->qtype == ScalarQuantizer::QuantizerType::QT_fp16); + + using CodecT = Codec; + CodecT codec(scalarQ->qtype); + + DeviceTensor encodedVecs( + res, + makeTempAlloc(AllocType::Other, stream), + {vecs.getSize(0), vecs.getSize(1)}); + + runSQEncode(vecs, encodedVecs, codec, stream); + RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); + + } else if (scalarQ->bits <= 8) { + DeviceTensor encodedVecs( + res, + makeTempAlloc(AllocType::Other, stream), + {vecs.getSize(0), vecs.getSize(1)}); + + switch (scalarQ->qtype) { + case ScalarQuantizer::QuantizerType::QT_8bit: { + using CodecT = + Codec; + CodecT codec( + scalarQ->code_size, + scalarQ->gpuTrained.data(), + scalarQ->gpuTrained.data() + dim); + + runSQEncode(vecs, encodedVecs, codec, stream); + RUN_APPEND(CodecT::EncodeT, 
CodecT::kEncodeBits, encodedVecs); + } break; + case ScalarQuantizer::QuantizerType::QT_8bit_uniform: { + using CodecT = + Codec; + CodecT codec( + scalarQ->code_size, + scalarQ->trained[0], + scalarQ->trained[1]); + + runSQEncode(vecs, encodedVecs, codec, stream); + RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); + } break; + case ScalarQuantizer::QuantizerType::QT_8bit_direct: { + using CodecT = + Codec; + CodecT codec(scalarQ->code_size); + + runSQEncode(vecs, encodedVecs, codec, stream); + RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); + } break; + case ScalarQuantizer::QuantizerType::QT_6bit: { + using CodecT = + Codec; + CodecT codec( + scalarQ->code_size, + scalarQ->gpuTrained.data(), + scalarQ->gpuTrained.data() + dim); + + runSQEncode(vecs, encodedVecs, codec, stream); + RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); + } break; + case ScalarQuantizer::QuantizerType::QT_4bit: { + using CodecT = + Codec; + CodecT codec( + scalarQ->code_size, + scalarQ->gpuTrained.data(), + scalarQ->gpuTrained.data() + dim); + + runSQEncode(vecs, encodedVecs, codec, stream); + RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); + } break; + case ScalarQuantizer::QuantizerType::QT_4bit_uniform: { + using CodecT = + Codec; + CodecT codec( + scalarQ->code_size, + scalarQ->trained[0], + scalarQ->trained[1]); + + runSQEncode(vecs, encodedVecs, codec, stream); + RUN_APPEND(CodecT::EncodeT, CodecT::kEncodeBits, encodedVecs); + } break; + default: + // unimplemented, should be handled at a higher level + FAISS_ASSERT(false); + } + } + +#undef RUN_APPEND + CUDA_TEST_ERROR(); +} + +void runIVFPQInterleavedAppend( + Tensor& listIds, + Tensor& listOffset, + Tensor& uniqueLists, + Tensor& vectorsByUniqueList, + Tensor& uniqueListVectorStart, + Tensor& uniqueListStartOffset, + int bitsPerCode, + Tensor& encodings, + thrust::device_vector& listCodes, + cudaStream_t stream) { + // limitation for now + FAISS_ASSERT(bitsPerCode <= 8); + +#define RUN_APPEND(ENCODE_T, ENCODE_BITS) \ + do { \ + dim3 grid(uniqueLists.getSize(0)); \ + dim3 block(128); \ + \ + ivfInterleavedAppend \ + <<>>( \ + uniqueLists, \ + uniqueListVectorStart, \ + vectorsByUniqueList, \ + uniqueListStartOffset, \ + encodings, \ + listCodes.data().get()); \ + } while (0) + + switch (bitsPerCode) { + case 4: { + RUN_APPEND(uint8_t, 4); + break; + } + case 5: { + RUN_APPEND(uint8_t, 5); + break; + } + case 6: { + RUN_APPEND(uint8_t, 6); + break; + } + case 8: { + RUN_APPEND(uint8_t, 8); + break; + } + default: + // unhandled + FAISS_ASSERT(false); + break; + } + +#undef RUN_APPEND + CUDA_TEST_ERROR(); +} + +} // namespace gpu +} // namespace faiss diff --git a/cuda_code/IndexedSlices_1.cu b/cuda_code/IndexedSlices_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..fb84969bf6a85903841f27b89a03615e17fb9db4 --- /dev/null +++ b/cuda_code/IndexedSlices_1.cu @@ -0,0 +1,48 @@ +#include "gpu_runtime.h" + +__global__ void indexedslices_oneside_add_kernel(const float *values_data, + const float *indices_data, + float *output_data, + size_t size, size_t length) { + size_t index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= size) + return; + int id = indices_data[index]; + const float *values_ptr = values_data + length * index; + float *output_ptr = output_data + length * id; + for (int i = 0; i < length; i++) + atomicAdd(output_ptr + i, *(values_ptr + i)); +} + +int IndexedSlicesOneSideAdd(const DLArrayHandle indices, + const DLArrayHandle values, DLArrayHandle output, + 
DLStreamHandle stream_handle = NULL) { + size_t size = 1; + size_t length = output->shape[1]; + for (int i = 0; i < indices->ndim; i++) { + size *= indices->shape[i]; + } + + dim3 blocks; + dim3 threads; + const float *values_data = (const float *)values->data; + float *output_data = (float *)output->data; + const float *indices_data = (const float *)indices->data; + + if (size <= 1024) { + threads.x = size; + blocks.x = 1; + } else { + threads.x = 1024; + blocks.x = (size + 1023) / 1024; + } + + if (stream_handle) + indexedslices_oneside_add_kernel<<< + blocks, threads, 0, *(cudaStream_t *)stream_handle->handle>>>( + values_data, indices_data, output_data, size, length); + else + indexedslices_oneside_add_kernel<<>>( + values_data, indices_data, output_data, size, length); + return 0; +} diff --git a/cuda_code/InterpExecution.cu b/cuda_code/InterpExecution.cu new file mode 100644 index 0000000000000000000000000000000000000000..6cbedef9116d3c387ceb6c8f4b6e9c22c4963a72 --- /dev/null +++ b/cuda_code/InterpExecution.cu @@ -0,0 +1,80 @@ +#include "InterpExecution.hpp" +namespace MNN { +namespace CUDA { + +#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) + +template +__global__ void INTERP(const int n, const int ih, const int iw, const int oh, const int ow, + const float scaleh, const float scalew, const float offseth, const float offsetw, const T* in, T* out) { + CUDA_KERNEL_LOOP(index, n) { + int x = index % ow; + int tmp = index / ow; + int y = tmp % oh; + int z = tmp / oh; + int ix = min(max(0, (int)floor((float)x*scalew+offsetw)), iw-1); + int iy = min(max(0, (int)floor((float)y*scaleh+offseth)), ih-1); + out[z*oh*ow + y*oh + x] = in[z*ih*iw + iy*ih + ix]; + } +} + +InterpExecution::InterpExecution(const Interp* interp, Backend *backend) : Execution(backend) { + mWidthOffset = interp->widthOffset(); + mHeightOffset = interp->heightOffset(); + mResizeType = interp->resizeType(); + mScaleWidth = interp->widthScale(); + mScaleHeight = interp->heightScale(); +} +InterpExecution::~InterpExecution() { + //do nothing +} + +ErrorCode InterpExecution::onResize(const std::vector &inputs, const std::vector &outputs) { + //MNN_ASSERT(inputs.size() == 1); + MNN_ASSERT(outputs.size() == 1); + auto input = inputs[0]; + auto output = outputs[0]; + + mChannel = input->channel(); + mBatch = input->batch(); + + mInputHeight = input->height(); + mInputWidth = input->width(); + mOutputHeight = output->height(); + mOutputWidth = output->width(); + + mCount = mBatch*mChannel*mOutputHeight*mOutputWidth; + //printf("%d mInputHeight:%d- mInputWidth:%d- mOutputHeight:%d- mOutputWidth:%d, mScaleHeight:%f- mScaleWidth:%f\n", inputs.size(), mInputHeight,mInputWidth,mOutputHeight, mOutputWidth, mScaleHeight, mScaleWidth); + return NO_ERROR; +} + +ErrorCode InterpExecution::onExecute(const std::vector &inputs, const std::vector &outputs) { + auto runtime = static_cast(backend())->getCUDARuntime(); + + int block_num = runtime->blocks_num(mCount); + int threads_num = runtime->threads_num(); + auto input_addr = (void*)inputs[0]->deviceId(); + auto output_addr = (void*)outputs[0]->deviceId(); + + INTERP<<>>(mCount, mInputHeight, mInputWidth, mOutputHeight, mOutputWidth, + mScaleHeight, mScaleWidth, mHeightOffset, mWidthOffset, (const float *)input_addr, (float *)output_addr); + return NO_ERROR; +} + +class InterpCreator : public CUDABackend::Creator { +public: + virtual Execution* onCreate(const std::vector& inputs, const std::vector& outputs, + const 
MNN::Op* op, Backend* backend) const override { + auto param = op->main_as_Interp(); + if(param->resizeType() != 1) { + MNN_PRINT("CUDA interp resize type:%d not support, back to CPU\n", param->resizeType()); + return nullptr; + } + return new InterpExecution(param, backend); + } +}; + +static CUDACreatorRegister __init(OpType_Interp); + +} +} \ No newline at end of file diff --git a/cuda_code/InvertNeighborsListOpKernel_14.cu b/cuda_code/InvertNeighborsListOpKernel_14.cu new file mode 100644 index 0000000000000000000000000000000000000000..e2c056b6fc54f0023c496d57af06eb62ce1db063 --- /dev/null +++ b/cuda_code/InvertNeighborsListOpKernel_14.cu @@ -0,0 +1,78 @@ +#include +#include "../TorchHelper.h" +#include "open3d/ml/Misc/Detail/InvertNeighborsList.cuh" +#include "torch/script.h" + +template +std::tuple InvertNeighborsListCUDA( + int64_t num_points, + torch::Tensor inp_neighbors_index, + torch::Tensor inp_neighbors_row_splits, + torch::Tensor inp_neighbors_attributes, + int num_attributes) { + auto device = inp_neighbors_index.device().type(); + auto device_idx = inp_neighbors_index.device().index(); + torch::Tensor neighbors_index = torch::empty( + inp_neighbors_index.sizes(), + torch::dtype(ToTorchDtype()).device(device, device_idx)); + torch::Tensor neighbors_row_splits = torch::empty( + {num_points + 1}, + torch::dtype(torch::kInt64).device(device, device_idx)); + torch::Tensor neighbors_attributes = + torch::empty_like(inp_neighbors_attributes); + + auto stream = at::cuda::getCurrentCUDAStream(); + auto cuda_device_props = at::cuda::getCurrentDeviceProperties(); + const int texture_alignment = cuda_device_props->textureAlignment; + + void* temp_ptr = nullptr; + size_t temp_size = 0; + + // determine temp_size + open3d::ml::detail::InvertNeighborsListCUDA( + stream, temp_ptr, temp_size, texture_alignment, + inp_neighbors_index.data_ptr(), + num_attributes ? inp_neighbors_attributes.data_ptr() + : nullptr, + num_attributes, + (int64_t*)inp_neighbors_row_splits.data_ptr(), + inp_neighbors_row_splits.size(0) - 1, + neighbors_index.data_ptr(), + num_attributes ? neighbors_attributes.data_ptr() : nullptr, + neighbors_index.size(0), + (int64_t*)neighbors_row_splits.data_ptr(), + neighbors_row_splits.size(0) - 1); + + torch::Tensor temp_tensor = torch::empty( + {int64_t(temp_size)}, + torch::dtype(ToTorchDtype()).device(device, device_idx)); + temp_ptr = temp_tensor.data_ptr(); + + // actually invert the list + open3d::ml::detail::InvertNeighborsListCUDA( + stream, temp_ptr, temp_size, texture_alignment, + inp_neighbors_index.data_ptr(), + num_attributes ? inp_neighbors_attributes.data_ptr() + : nullptr, + num_attributes, + (int64_t*)inp_neighbors_row_splits.data_ptr(), + inp_neighbors_row_splits.size(0) - 1, + neighbors_index.data_ptr(), + num_attributes ? 
neighbors_attributes.data_ptr() : nullptr, + neighbors_index.size(0), + (int64_t*)neighbors_row_splits.data_ptr(), + neighbors_row_splits.size(0) - 1); + + return std::make_tuple(neighbors_index, neighbors_row_splits, + neighbors_attributes); +} +#define INSTANTIATE(TIndex, TAttr) \ + template std::tuple \ + InvertNeighborsListCUDA(int64_t, torch::Tensor, \ + torch::Tensor, torch::Tensor, \ + int num_attributes); + +INSTANTIATE(int32_t, int32_t) +INSTANTIATE(int32_t, int64_t) +INSTANTIATE(int32_t, float) +INSTANTIATE(int32_t, double) diff --git a/cuda_code/Kernel128_winograd.cu b/cuda_code/Kernel128_winograd.cu new file mode 100644 index 0000000000000000000000000000000000000000..b1ea8bb21be5464f6ccdddc1be883bd431d4e57a --- /dev/null +++ b/cuda_code/Kernel128_winograd.cu @@ -0,0 +1,282 @@ +#include "util.h" + +const char inputName128[] = "data/input_14_1_128.bin"; +const char biasName128[] = "data/bias_128.bin"; +const char weight_winograd_Name128[] = "data/weight_winograd_128_128.bin"; +const char bnBias_winograd_Name128[] = "data/bnBias_winograd_128.bin"; +const char bnScale_winograd_Name128[] = "data/bnScale_winograd_128.bin"; + +#define d(input, i, j, Inz) ( input[Inz + i*768 + (j<<7)] ) + +__global__ void kernel_128_winograd_BtdB( + const float *__restrict__ pInputs, + float *__restrict__ pOutputs) +{ + int Inx = blockIdx.x<<2, Iny0 = blockIdx.y<<2, Iny1 = threadIdx.y, Inz = threadIdx.x; + int Iny = Iny0+Iny1, stride_r = 2048, stride_c = 128; // 2048 = 16*128 + int c_glb_start = Inx*stride_r + Iny*stride_c + Inz, c_input = Iny1*stride_c + Inz; + + extern __shared__ float input[]; + + int tmp[6] = {0, 768, 1536, 2304, 3072, 3840}; // 768 = 6*128 + for (int i = 0; i < 6; i++) { + input[c_input + tmp[i]] = pInputs[c_glb_start + i*stride_r]; + } + __syncthreads(); + + float BTd[6]; + switch(Iny1) { + case 0: + for (int j = 0; j < 6; j++) { + BTd[j] = d(input, 0, j, Inz)*4 - d(input, 2, j, Inz)*5 + d(input, 4, j, Inz); + } + break; + case 1: + for (int j = 0; j < 6; j++) { + BTd[j] = -d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 + d(input, 3, j, Inz) + d(input, 4, j, Inz); + } + break; + case 2: + for (int j = 0; j < 6; j++) { + BTd[j] = d(input, 1, j, Inz)*4 - d(input, 2, j, Inz)*4 - d(input, 3, j, Inz) + d(input, 4, j, Inz); + } + break; + case 3: + for (int j = 0; j < 6; j++) { + BTd[j] = -d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) + d(input, 3, j, Inz)*2 + d(input, 4, j, Inz); + } + break; + case 4: + for (int j = 0; j < 6; j++) { + BTd[j] = d(input, 1, j, Inz)*2 - d(input, 2, j, Inz) - d(input, 3, j, Inz)*2 + d(input, 4, j, Inz); + } + break; + case 5: + for (int j = 0; j < 6; j++) { + BTd[j] = d(input, 1, j, Inz)*4 - d(input, 3, j, Inz)*5 + d(input, 5, j, Inz); + } + break; + } + __syncthreads(); + + int tmp_offset = Iny1*768+Inz; + for (int i = 0; i < 6; i++) { + input[tmp_offset + i*stride_c] = BTd[i]; + } + __syncthreads(); + + float BTdB[6]; + switch(Iny1) { + case 0: + for (int i = 0; i < 6; i++) { + BTdB[i] = 4*d(input, i, 0, Inz) - 5*d(input, i, 2, Inz) + d(input, i, 4, Inz); + } + break; + case 1: + for (int i = 0; i < 6; i++) { + BTdB[i] = -4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) + d(input, i, 3, Inz) + d(input, i, 4, Inz); + } + break; + case 2: + for (int i = 0; i < 6; i++) { + BTdB[i] = 4*d(input, i, 1, Inz) - 4*d(input, i, 2, Inz) - d(input, i, 3, Inz) + d(input, i, 4, Inz); + } + break; + case 3: + for (int i = 0; i < 6; i++) { + BTdB[i] = -2*d(input, i, 1, Inz) - d(input, i, 2, Inz) + 2*d(input, i, 3, Inz) + d(input, i, 4, Inz); + } + break; + case 4: + for (int 
i = 0; i < 6; i++) { + BTdB[i] = 2*d(input, i, 1, Inz) - d(input, i, 2, Inz) - 2*d(input, i, 3, Inz) + d(input, i, 4, Inz); + } + break; + case 5: + for (int i = 0; i < 6; i++) { + BTdB[i] = 4*d(input, i, 1, Inz) - 5*d(input, i, 3, Inz) + d(input, i, 5, Inz); + } + break; + } + __syncthreads(); + + for (int i = 0; i < 6; i++) { + pOutputs[(Iny1 + i*6)*2048 + (blockIdx.x*4+blockIdx.y)*128 + Inz] = BTdB[i]; + } +} + +__global__ void kernel_128_winograd_AtIA( + const float *__restrict__ pInputs, + const float *__restrict__ pBiases, + const float *__restrict__ pScales, + float *__restrict__ pOutputs) +{ + int Tilex = blockIdx.x, Tiley = blockIdx.y, Iny = threadIdx.y, kz = blockIdx.z, Inx = threadIdx.x; + int c_input = Inx*6 + Iny; + + __shared__ float bias, scale; + extern __shared__ float input[]; + + input[c_input] = pInputs[c_input*16*128 + (Tilex*4+Tiley)*128 + kz]; + bias = pBiases[kz]; + scale = pScales[kz]; + __syncthreads(); + + float tmp = 0; + switch(Inx) { + case 0: + tmp = input[Iny] + input[6+Iny] + input[12+Iny] + input[18+Iny] + input[24+Iny]; + break; + case 1: + tmp = input[6+Iny] - input[12+Iny] + 2*input[18+Iny] - 2*input[24+Iny]; + break; + case 2: + tmp = input[6+Iny] + input[12+Iny] + 4*input[18+Iny] + 4*input[24+Iny]; + break; + case 3: + tmp = input[6+Iny] - input[12+Iny] + 8*input[18+Iny] - 8*input[24+Iny] + input[30+Iny]; + break; + } + __syncthreads(); + + input[c_input] = tmp; + __syncthreads(); + + if (Inx > 3 || (Tilex == 3 && Inx > 1)) return; + + int x; + float o; + switch(Iny) { + case 0: + x = Inx*6; + o = scale*(input[x]+input[x+1]+input[x+2]+input[x+3]+input[x+4])+ bias; + pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+1)*128 + kz] = o > 0 ? o : 0; + break; + case 1: + x = Inx*6; + o = scale*(input[x+1] - input[x+2] + 2*input[x+3] - 2*input[x+4]) + bias; + pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+2)*128 + kz] = o > 0 ? o : 0; + break; + case 2: + if (Tiley == 3) break; + x = Inx*6; + o = scale*(input[x+1] + input[x+2] + 4*input[x+3] + 4*input[x+4]) + bias; + pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+3)*128 + kz] = o > 0 ? o : 0; + break; + case 3: + if (Tiley == 3) break; + x = Inx*6; + o = scale*(input[x+1] - input[x+2] + 8*input[x+3] - 8*input[x+4] + input[x+5]) + bias; + pOutputs[(((Tilex<<2)+1+Inx)*16 + (Tiley<<2)+4)*128 + kz] = o > 0 ? 
o : 0; + break; + } +} + +__global__ void kernel_128_OuterProduct_128( + const float *__restrict__ A, + const float *__restrict__ B, + float *__restrict__ C) +{ + int Tile = blockIdx.x, Part = blockIdx.y, tX = threadIdx.x, tY = threadIdx.y; + int c_input = tY*128 + tX, c_kernel = c_input; + int T_offset = (Tile<<11) + (Part<<10) + c_input; + int B_offset = (Tile<<14) + c_kernel; + + extern __shared__ float input[]; + float *kernel = input + 1024, *out = kernel + 8192; + int B_stride[32] = {0, 128, 256, 384, 512, 640, 768, 896, + 1024, 1152, 1280, 1408, 1536, 1664, 1792, 1920, + 2048, 2176, 2304, 2432, 2560, 2688, 2816, 2944, + 3072, 3200, 3328, 3456, 3584, 3712, 3840, 3968}; + out[c_input] = 0.0f; + + input[c_input] = A[T_offset]; + + for (int k = 0; k < 4; k++) { + int B_start = B_offset + (k<<12); // 32*64 + kernel[c_kernel] = B[B_start], kernel[c_kernel+1024] = B[B_start+1024]; + kernel[c_kernel+2048] = B[B_start+2048], kernel[c_kernel+3072] = B[B_start+3072]; + __syncthreads(); + + float sum = 0; + int y_tmp = (tY<<7)+(k<<5); + for (int j = 0; j < 32; j++) { + sum += input[y_tmp + j] * kernel[tX + B_stride[j]]; + } + out[tY*128 + tX] += sum; + __syncthreads(); + } + + C[T_offset] = out[c_input]; +} + +int kernel_128() { + float *input_ = get_parameter(inputName128, 16*16*128); + float *bias = get_parameter(biasName128, 128); + float *input, *output, *l_weights; + uint64_t nT1 = 0, nT2 = 0; + + float *t_input, *ip; + float *kernel = get_parameter(weight_winograd_Name128, 36*128*128); + float *l_bnBias, *l_bnScale, *bnBias, *bnScale; + + int nInput = 16*16*128, nOutput = 16*16*128, nWeights = 36*128*128, nBias = 128, + nTransInput = 16*6*6*128, nInnerProd = 16*6*6*128; + + float result[nOutput]; + + bnBias = get_parameter(bnBias_winograd_Name128, 128); + bnScale = get_parameter(bnScale_winograd_Name128, 128); + + nT1 = getTimeMicroseconds64(); + + hipMalloc((void **) &input, nInput<<2); + hipMalloc((void **) &output, nOutput<<2); + hipMalloc((void **) &l_weights, nWeights<<2); + hipMalloc((void **) &t_input, nTransInput<<2); + hipMalloc((void **) &ip, nInnerProd<<2); + + hipMemset((void *) output, 0, nOutput<<2); + hipMemset((void *) t_input, 0, nTransInput<<2); + hipMemset((void *) ip, 0, nInnerProd<<2); + + hipMemcpy(input, input_, nInput<<2, hipMemcpyHostToDevice); + hipMemcpy(l_weights, kernel, nWeights<<2, hipMemcpyHostToDevice); + + hipMalloc((void **) &l_bnBias, nBias<<2); + hipMalloc((void **) &l_bnScale, nBias<<2); + hipMemcpy(l_bnBias, bnBias, nBias<<2, hipMemcpyHostToDevice); + hipMemcpy(l_bnScale, bnScale, nBias<<2, hipMemcpyHostToDevice); + + kernel_128_winograd_BtdB <<>> (input, t_input); + kernel_128_OuterProduct_128<<>> (t_input, l_weights, ip); + kernel_128_winograd_AtIA <<>> (ip, l_bnBias, l_bnScale, output); + + hipMemcpy(result, output, nOutput<<2, hipMemcpyDeviceToHost); + + nT2 = getTimeMicroseconds64(); + + #ifdef DEBUG + double s = 0; + for (int i = 0; i < nOutput; i++) { + s += result[i]; + } + printf("Check sum: %lf\n", s); + #endif + + hipFree(t_input); + hipFree(output); + hipFree(l_weights); + hipFree(ip); + hipFree(input); + hipFree(l_bnScale); + hipFree(l_bnBias); + + free(kernel); + free(bnScale); + free(bnBias); + free(bias); + free(input_); + + return ((nT2-nT1) << 16); +} diff --git a/cuda_code/L2Select.cu b/cuda_code/L2Select.cu new file mode 100644 index 0000000000000000000000000000000000000000..215f06988c315bb70f0ba451850bacb20fe7b601 --- /dev/null +++ b/cuda_code/L2Select.cu @@ -0,0 +1,244 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. 
+ * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace faiss { namespace gpu { + +// L2 + select kernel for k == 1, implements re-use of ||c||^2 +template +__global__ void l2SelectMin1(Tensor productDistances, + Tensor centroidDistances, + Tensor outDistances, + Tensor outIndices) { + // Each block handles kRowsPerBlock rows of the distances (results) + Pair threadMin[kRowsPerBlock]; + __shared__ Pair blockMin[kRowsPerBlock * (kBlockSize / kWarpSize)]; + + T distance[kRowsPerBlock]; + +#pragma unroll + for (int i = 0; i < kRowsPerBlock; ++i) { + threadMin[i].k = Limits::getMax(); + threadMin[i].v = -1; + } + + // blockIdx.x: which chunk of rows we are responsible for updating + int rowStart = blockIdx.x * kRowsPerBlock; + + // FIXME: if we have exact multiples, don't need this + bool endRow = (blockIdx.x == gridDim.x - 1); + + if (endRow) { + if (productDistances.getSize(0) % kRowsPerBlock == 0) { + endRow = false; + } + } + + if (endRow) { + for (int row = rowStart; row < productDistances.getSize(0); ++row) { + for (int col = threadIdx.x; col < productDistances.getSize(1); + col += blockDim.x) { + distance[0] = Math::add(centroidDistances[col], + productDistances[row][col]); + + if (Math::lt(distance[0], threadMin[0].k)) { + threadMin[0].k = distance[0]; + threadMin[0].v = col; + } + } + + // Reduce within the block + threadMin[0] = + blockReduceAll, Min>, false, false>( + threadMin[0], Min>(), blockMin); + + if (threadIdx.x == 0) { + outDistances[row][0] = threadMin[0].k; + outIndices[row][0] = threadMin[0].v; + } + + // so we can use the shared memory again + __syncthreads(); + + threadMin[0].k = Limits::getMax(); + threadMin[0].v = -1; + } + } else { + for (int col = threadIdx.x; col < productDistances.getSize(1); + col += blockDim.x) { + T centroidDistance = centroidDistances[col]; + +#pragma unroll + for (int row = 0; row < kRowsPerBlock; ++row) { + distance[row] = productDistances[rowStart + row][col]; + } + +#pragma unroll + for (int row = 0; row < kRowsPerBlock; ++row) { + distance[row] = Math::add(distance[row], centroidDistance); + } + +#pragma unroll + for (int row = 0; row < kRowsPerBlock; ++row) { + if (Math::lt(distance[row], threadMin[row].k)) { + threadMin[row].k = distance[row]; + threadMin[row].v = col; + } + } + } + + // Reduce within the block + blockReduceAll, + Min >, + false, + false>(threadMin, + Min >(), + blockMin); + + if (threadIdx.x == 0) { +#pragma unroll + for (int row = 0; row < kRowsPerBlock; ++row) { + outDistances[rowStart + row][0] = threadMin[row].k; + outIndices[rowStart + row][0] = threadMin[row].v; + } + } + } +} + +// L2 + select kernel for k > 1, no re-use of ||c||^2 +template +__global__ void l2SelectMinK(Tensor productDistances, + Tensor centroidDistances, + Tensor outDistances, + Tensor outIndices, + int k, T initK) { + // Each block handles a single row of the distances (results) + constexpr int kNumWarps = ThreadsPerBlock / kWarpSize; + + __shared__ T smemK[kNumWarps * NumWarpQ]; + __shared__ int smemV[kNumWarps * NumWarpQ]; + + BlockSelect, + NumWarpQ, NumThreadQ, ThreadsPerBlock> + heap(initK, -1, smemK, smemV, k); + + int row = blockIdx.x; + + // Whole warps must participate in the selection + int limit = utils::roundDown(productDistances.getSize(1), kWarpSize); + int i = threadIdx.x; + + for (; i < limit; i += blockDim.x) { + T v = 
Math::add(centroidDistances[i], + productDistances[row][i]); + heap.add(v, i); + } + + if (i < productDistances.getSize(1)) { + T v = Math::add(centroidDistances[i], + productDistances[row][i]); + heap.addThreadQ(v, i); + } + + heap.reduce(); + for (int i = threadIdx.x; i < k; i += blockDim.x) { + outDistances[row][i] = smemK[i]; + outIndices[row][i] = smemV[i]; + } +} + +template +void runL2SelectMin(Tensor& productDistances, + Tensor& centroidDistances, + Tensor& outDistances, + Tensor& outIndices, + int k, + cudaStream_t stream) { + FAISS_ASSERT(productDistances.getSize(0) == outDistances.getSize(0)); + FAISS_ASSERT(productDistances.getSize(0) == outIndices.getSize(0)); + FAISS_ASSERT(centroidDistances.getSize(0) == productDistances.getSize(1)); + FAISS_ASSERT(outDistances.getSize(1) == k); + FAISS_ASSERT(outIndices.getSize(1) == k); + FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); + + if (k == 1) { + constexpr int kThreadsPerBlock = 256; + constexpr int kRowsPerBlock = 8; + + auto block = dim3(kThreadsPerBlock); + auto grid = dim3(utils::divUp(outDistances.getSize(0), kRowsPerBlock)); + + l2SelectMin1 + <<>>(productDistances, centroidDistances, + outDistances, outIndices); + } else { + auto grid = dim3(outDistances.getSize(0)); + +#define RUN_L2_SELECT(BLOCK, NUM_WARP_Q, NUM_THREAD_Q) \ + do { \ + l2SelectMinK \ + <<>>(productDistances, centroidDistances, \ + outDistances, outIndices, \ + k, Limits::getMax()); \ + } while (0) + + // block size 128 for everything <= 1024 + if (k <= 32) { + RUN_L2_SELECT(128, 32, 2); + } else if (k <= 64) { + RUN_L2_SELECT(128, 64, 3); + } else if (k <= 128) { + RUN_L2_SELECT(128, 128, 3); + } else if (k <= 256) { + RUN_L2_SELECT(128, 256, 4); + } else if (k <= 512) { + RUN_L2_SELECT(128, 512, 8); + } else if (k <= 1024) { + RUN_L2_SELECT(128, 1024, 8); + +#if GPU_MAX_SELECTION_K >= 2048 + } else if (k <= 2048) { + // smaller block for less shared memory + RUN_L2_SELECT(64, 2048, 8); +#endif + + } else { + FAISS_ASSERT(false); + } + } + + CUDA_TEST_ERROR(); +} + +void runL2SelectMin(Tensor& productDistances, + Tensor& centroidDistances, + Tensor& outDistances, + Tensor& outIndices, + int k, + cudaStream_t stream) { + runL2SelectMin(productDistances, + centroidDistances, + outDistances, + outIndices, + k, + stream); +} + +} } // namespace diff --git a/cuda_code/LBVHBuilderKernels.cu b/cuda_code/LBVHBuilderKernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..09b67658e1147389e75760df24c0d90deb6c9380 --- /dev/null +++ b/cuda_code/LBVHBuilderKernels.cu @@ -0,0 +1,457 @@ +/** + * \file LBVHBuilderKernels.cu + * \author Daniel Meister + * \date 2018/01/22 + * \brief LBVHBuilder kernels soruce file. + */ + +#include "LBVHBuilderKernels.h" +#include "CudaBVHUtil.cuh" + +using namespace FW; + +template +__device__ void construct( + const int n, + const int i, + int * nodeParentIndices, + int * nodeLeftIndices, + int * nodeRightIndices, + int * nodeSizes, + T * mortonCodes +) { + + // Determine direction of the range (+1 or -1). + const int d = sgn(delta(i, i + 1, n, mortonCodes) - delta(i, i - 1, n, mortonCodes)); + + // Compute upper bound for the length of the range. + const int deltaMin = delta(i, i - d, n, mortonCodes); + int lmax = 2; + while (delta(i, i + lmax * d, n, mortonCodes) > deltaMin) lmax <<= 1; + + // Find the other end using binary search. 
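+    // delta(a, b, n, codes) is assumed (it lives in CudaBVHUtil.cuh, which is not part of
+    // this diff) to return the length of the common prefix of the Morton codes at a and b,
+    // with an out-of-range b mapped to -1 and equal codes tie-broken on the index, roughly:
+    //   __device__ int delta(int i, int j, int n, unsigned int* codes) {
+    //     if (j < 0 || j >= n) return -1;
+    //     unsigned int a = codes[i], b = codes[j];
+    //     if (a != b) return __clz(a ^ b);
+    //     return 32 + __clz((unsigned int)i ^ (unsigned int)j); // Karras 2012 tie-break
+    //   }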
+ int l = 0; + for (int t = lmax >> 1; t >= 1; t >>= 1) + if (delta(i, i + (l + t) * d, n, mortonCodes) > deltaMin) + l += t; + const int j = i + l * d; + + // Find the split position using binary search. + const int deltaNode = delta(i, j, n, mortonCodes); + int s = 0; + int k = 2; + int t; + do { + t = divCeil(l, k); + k <<= 1; + if (delta(i, i + (s + t) * d, n, mortonCodes) > deltaNode) + s += t; + } while (t > 1); + const int gamma = i + s * d + min(d, 0); + + // Output child pointers. + int left = gamma; + int right = gamma + 1; + if (min(i, j) == gamma) left += n - 1; + if (max(i, j) == gamma + 1) right += n - 1; + + // Write node etc. + nodeLeftIndices[i] = left; + nodeRightIndices[i] = right; + nodeSizes[i] = l + 1; + + // Parent indices. + nodeParentIndices[left] = i; + nodeParentIndices[right] = i; + +} + +extern "C" __global__ void computeSceneBox( + const int threads, + const int numberOfVertices + ) { + + // Thread index. + const int threadIndex = blockDim.x * blockIdx.x + threadIdx.x; + + // Bounding box within the thread. + AABB box; Vec3f vertex; + for (int vertexIndex = threadIndex; vertexIndex < numberOfVertices; vertexIndex += threads) { + vertexFromTexture(vertexIndex, vertex); + box.grow(vertex); + } + + // Cache. + __shared__ float cache[3 * BLOCK_THREADS]; + Vec3f * bound = (Vec3f*)cache; + + // Min. + bound[threadIdx.x] = box.min(); + bound[threadIdx.x] = min(bound[threadIdx.x], bound[threadIdx.x ^ 1]); + bound[threadIdx.x] = min(bound[threadIdx.x], bound[threadIdx.x ^ 2]); + bound[threadIdx.x] = min(bound[threadIdx.x], bound[threadIdx.x ^ 4]); + bound[threadIdx.x] = min(bound[threadIdx.x], bound[threadIdx.x ^ 8]); + bound[threadIdx.x] = min(bound[threadIdx.x], bound[threadIdx.x ^ 16]); + + __syncthreads(); + if ((threadIdx.x & 32) == 0) bound[threadIdx.x] = min(bound[threadIdx.x], bound[threadIdx.x ^ 32]); + + __syncthreads(); + if ((threadIdx.x & 64) == 0) bound[threadIdx.x] = min(bound[threadIdx.x], bound[threadIdx.x ^ 64]); + + __syncthreads(); + if ((threadIdx.x & 128) == 0) bound[threadIdx.x] = min(bound[threadIdx.x], bound[threadIdx.x ^ 128]); + + // Update global bounding box. + if (threadIdx.x == 0) { + atomicMin(&sceneBox[0], bound[threadIdx.x].x); + atomicMin(&sceneBox[1], bound[threadIdx.x].y); + atomicMin(&sceneBox[2], bound[threadIdx.x].z); + } + + // Max. + bound[threadIdx.x] = box.max(); + bound[threadIdx.x] = max(bound[threadIdx.x], bound[threadIdx.x ^ 1]); + bound[threadIdx.x] = max(bound[threadIdx.x], bound[threadIdx.x ^ 2]); + bound[threadIdx.x] = max(bound[threadIdx.x], bound[threadIdx.x ^ 4]); + bound[threadIdx.x] = max(bound[threadIdx.x], bound[threadIdx.x ^ 8]); + bound[threadIdx.x] = max(bound[threadIdx.x], bound[threadIdx.x ^ 16]); + + __syncthreads(); + if ((threadIdx.x & 32) == 0) bound[threadIdx.x] = max(bound[threadIdx.x], bound[threadIdx.x ^ 32]); + + __syncthreads(); + if ((threadIdx.x & 64) == 0) bound[threadIdx.x] = max(bound[threadIdx.x], bound[threadIdx.x ^ 64]); + + __syncthreads(); + if ((threadIdx.x & 128) == 0) bound[threadIdx.x] = max(bound[threadIdx.x], bound[threadIdx.x ^ 128]); + + // Update global bounding box. + if (threadIdx.x == 0) { + atomicMax(&sceneBox[3], bound[threadIdx.x].x); + atomicMax(&sceneBox[4], bound[threadIdx.x].y); + atomicMax(&sceneBox[5], bound[threadIdx.x].z); + } + +} + +extern "C" __global__ void computeMortonCodes30( + const int threads, + const int numberOfTriangles, + unsigned int * mortonCodes, + int * triangleIndices +) { + + // Thread index. 
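+    // mortonCode() used further down is assumed (defined in CudaBVHUtil.cuh, not shown in this
+    // diff) to be the standard 10-bits-per-axis interleave of the normalized box centroid; a
+    // typical implementation:
+    //   __device__ unsigned int expandBits(unsigned int v) { // spread 10 bits to every 3rd bit
+    //     v = (v * 0x00010001u) & 0xFF0000FFu;
+    //     v = (v * 0x00000101u) & 0x0F00F00Fu;
+    //     v = (v * 0x00000011u) & 0xC30C30C3u;
+    //     v = (v * 0x00000005u) & 0x49249249u;
+    //     return v;
+    //   }
+    //   // mortonCode(p): clamp each coordinate of p to [0, 1), scale by 1024, then return
+    //   // (expandBits(x) << 2) | (expandBits(y) << 1) | expandBits(z).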
+ const int threadIndex = blockDim.x * blockIdx.x + threadIdx.x; + + // Scene box. + AABB _sceneBox = *(AABB*)sceneBoxConst; + Vec3f scale = 1.0f / (_sceneBox.max() - _sceneBox.min()); + + for (int triangleIndex = threadIndex; triangleIndex < numberOfTriangles; triangleIndex += threads) { + + // Triangle. + Vec3f v0, v1, v2; + verticesFromTexture(triangleIndex, v0, v1, v2); + + // Box. + AABB box; + box.grow(v0); + box.grow(v1); + box.grow(v2); + + // Triangle index, node index and Morton code. + triangleIndices[triangleIndex] = triangleIndex; + mortonCodes[triangleIndex] = mortonCode((box.midPoint() - _sceneBox.min()) * scale); + + } + +} + +extern "C" __global__ void computeMortonCodes60( + const int threads, + const int numberOfTriangles, + unsigned long long * mortonCodes, + int * triangleIndices +) { + + // Thread index. + const int threadIndex = blockDim.x * blockIdx.x + threadIdx.x; + + // Scene box. + AABB _sceneBox = *(AABB*)sceneBoxConst; + Vec3f scale = 1.0f / (_sceneBox.max() - _sceneBox.min()); + + for (int triangleIndex = threadIndex; triangleIndex < numberOfTriangles; triangleIndex += threads) { + + // Triangle. + Vec3f v0, v1, v2; + verticesFromTexture(triangleIndex, v0, v1, v2); + + // Box. + AABB box; + box.grow(v0); + box.grow(v1); + box.grow(v2); + + // Triangle index, node index and Morton code. + triangleIndices[triangleIndex] = triangleIndex; + mortonCodes[triangleIndex] = mortonCode64((box.midPoint() - _sceneBox.min()) * scale); + + } + +} + +extern "C" __global__ void setupLeaves( + const int threads, + const int numberOfTriangles, + int * triangleIndices, + int * nodeLeftIndices, + int * nodeRightIndices, + int * nodeSizes, + Vec4f * nodeBoxesMin, + Vec4f * nodeBoxesMax +) { + + // Thread index. + const int threadIndex = blockDim.x * blockIdx.x + threadIdx.x; + + for (int boxIndex = threadIndex; boxIndex < numberOfTriangles; boxIndex += threads) { + + // Triangle index. + const int triangleIndex = triangleIndices[boxIndex]; + + // Triangle. + Vec3f v0, v1, v2; + verticesFromTexture(triangleIndex, v0, v1, v2); + + // Box. + AABB box; + box.grow(v0); + box.grow(v1); + box.grow(v2); + + // Leaf node. + const int nodeIndex = boxIndex + numberOfTriangles - 1; + nodeLeftIndices[nodeIndex] = boxIndex; + nodeRightIndices[nodeIndex] = boxIndex + 1; + nodeSizes[nodeIndex] = 1; + nodeBoxesMin[nodeIndex] = Vec4f(box.min(), 0.0f); + nodeBoxesMax[nodeIndex] = Vec4f(box.max(), 0.0f); + + } + +} + +extern "C" __global__ void construct30( + const int n, + int * nodeParentIndices, + int * nodeLeftIndices, + int * nodeRightIndices, + int * nodeSizes, + unsigned int * mortonCodes + ) { + + // Thread index. + const int i = blockDim.x * blockIdx.x + threadIdx.x; + + if (i < n - 1) { + construct( + n, + i, + nodeParentIndices, + nodeLeftIndices, + nodeRightIndices, + nodeSizes, + mortonCodes + ); + } + + // Root parent index. + if (i == 0) + nodeParentIndices[0] = -1; + + +} + +extern "C" __global__ void construct60( + const int n, + int * nodeParentIndices, + int * nodeLeftIndices, + int * nodeRightIndices, + int * nodeSizes, + unsigned long long * mortonCodes +) { + + // Thread index. + const int i = blockDim.x * blockIdx.x + threadIdx.x; + + if (i < n - 1) { + construct( + n, + i, + nodeParentIndices, + nodeLeftIndices, + nodeRightIndices, + nodeSizes, + mortonCodes + ); + } + + // Root parent index. 
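+    // Node layout assumed by construct(): with n leaves, internal nodes occupy indices
+    // [0, n-2] (the root is node 0) and leaves occupy [n-1, 2n-2], which is why a child index
+    // gamma is shifted by n - 1 above whenever its range collapses to a single primitive.
+    // For example, with n = 4 the internal nodes are 0..2 and the leaves are 3..6.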
+ if (i == 0) + nodeParentIndices[0] = -1; + + +} + +extern "C" __global__ void refit( + const int threads, + const int numberOfNodes, + int * termCounters, + int * nodeParentIndices, + int * nodeLeftIndices, + int * nodeRightIndices, + Vec4f * nodeBoxesMin, + Vec4f * nodeBoxesMax +) { + + // Thread index. + const int threadIndex = blockDim.x * blockIdx.x + threadIdx.x; + + // Number of interior nodes. + const int numberOfInteriorNodes = numberOfNodes >> 1; + + for (int leafIndex = threadIndex + numberOfInteriorNodes; leafIndex < numberOfNodes; leafIndex += threads) { + + // Node index. + int nodeIndex = nodeParentIndices[leafIndex]; + + // Go up to the root. + while (nodeIndex >= 0 && atomicAdd(&termCounters[nodeIndex], 1) > 0) { + + // Node. + int nodeLeftIndex = nodeLeftIndices[nodeIndex]; + int nodeRightIndex = nodeRightIndices[nodeIndex]; + + // Box. + AABB box; + + // Min. + const Vec4f nodeLeftBoxMin = nodeBoxesMin[nodeLeftIndex]; + const Vec4f nodeRightBoxMin = nodeBoxesMin[nodeRightIndex]; + box.grow(Vec3f(nodeLeftBoxMin.x, nodeLeftBoxMin.y, nodeLeftBoxMin.z)); + box.grow(Vec3f(nodeRightBoxMin.x, nodeRightBoxMin.y, nodeRightBoxMin.z)); + nodeBoxesMin[nodeIndex] = Vec4f(box.min(), 0.0f); + + // Max. + const Vec4f nodeLeftBoxMax = nodeBoxesMax[nodeLeftIndex]; + const Vec4f nodeRightBoxMax = nodeBoxesMax[nodeRightIndex]; + box.grow(Vec3f(nodeLeftBoxMax.x, nodeLeftBoxMax.y, nodeLeftBoxMax.z)); + box.grow(Vec3f(nodeRightBoxMax.x, nodeRightBoxMax.y, nodeRightBoxMax.z)); + nodeBoxesMax[nodeIndex] = Vec4f(box.max(), 0.0f); + + // Go to the parent. + nodeIndex = nodeParentIndices[nodeIndex]; + + } + + } + +} + +extern "C" __global__ void woopifyTriangles( + const int threads, + const int numberOfTriangles, + int * triangleIndices, + Vec4f * triWoopsA, + Vec4f * triWoopsB, + Vec4f * triWoopsC + ) { + + // Thread index. + const int threadIndex = blockDim.x * blockIdx.x + threadIdx.x; + + // Woop's matrix. + Mat4f im; + + for (int triangleIndex = threadIndex; triangleIndex < numberOfTriangles; triangleIndex += threads) { + + // Triangle. + Vec3f v0, v1, v2; + verticesFromTexture(triangleIndices[triangleIndex], v0, v1, v2); + + // Woopify triangle. + im.setCol(0, Vec4f(v0 - v2, 0.0f)); + im.setCol(1, Vec4f(v1 - v2, 0.0f)); + im.setCol(2, Vec4f(cross(v0 - v2, v1 - v2), 0.0f)); + im.setCol(3, Vec4f(v2, 1.0f)); + im = invert(im); + + triWoopsA[triangleIndex] = Vec4f(im(2, 0), im(2, 1), im(2, 2), -im(2, 3)); + triWoopsB[triangleIndex] = im.getRow(0); + triWoopsC[triangleIndex] = im.getRow(1); + + } + +} + +extern "C" __global__ void computeCost( + const int threads, + const int numberOfNodes, + const float sceneBoxArea, + const float ct, + const float ci, + CudaBVHNode * nodes + ) { + + // Thread index. + const int threadIndex = blockDim.x * blockIdx.x + threadIdx.x; + + // Cost. + float _cost = 0.0f; + + for (int nodeIndex = threadIndex; nodeIndex < numberOfNodes; nodeIndex += threads) { + + CudaBVHNode node = nodes[nodeIndex]; + float P = node.getSurfaceArea() / sceneBoxArea; + + // Leaf. + if (node.isLeaf()) { + _cost += ci * P * node.getSize(); + } + + // Interior node. + else { + _cost += ct * P; + } + } + + // Cache. + __shared__ volatile float cache[BLOCK_THREADS]; + + // Cost reduction. 
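+    // What is being reduced here: each thread holds a partial sum of the standard SAH cost
+    //     cost = ct * sum over interior nodes of A(node) / A(scene)
+    //          + ci * sum over leaves of A(leaf) / A(scene) * size(leaf),
+    // where A() is surface area. The per-node terms were accumulated in the loop above; the
+    // block-wide sum below is then added to the global `cost` with one atomicAdd per block.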
+ cache[threadIdx.x] = _cost; + cache[threadIdx.x] += cache[threadIdx.x ^ 1]; + cache[threadIdx.x] += cache[threadIdx.x ^ 2]; + cache[threadIdx.x] += cache[threadIdx.x ^ 4]; + cache[threadIdx.x] += cache[threadIdx.x ^ 8]; + cache[threadIdx.x] += cache[threadIdx.x ^ 16]; + + __syncthreads(); + if ((threadIdx.x & 32) == 0) cache[threadIdx.x] += cache[threadIdx.x ^ 32]; + + __syncthreads(); + if ((threadIdx.x & 64) == 0) cache[threadIdx.x] += cache[threadIdx.x ^ 64]; + + __syncthreads(); + if ((threadIdx.x & 128) == 0) cache[threadIdx.x] += cache[threadIdx.x ^ 128]; + + // Update total cost. + if (threadIdx.x == 0) { + atomicAdd(&cost, cache[threadIdx.x]); + } + +} + diff --git a/cuda_code/LM_linear_system.cu b/cuda_code/LM_linear_system.cu new file mode 100644 index 0000000000000000000000000000000000000000..8a8637359d6d74346db5066c7e65ef2a230477d2 --- /dev/null +++ b/cuda_code/LM_linear_system.cu @@ -0,0 +1,24 @@ +/** + * MegBA is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2021 Megvii Inc. All rights reserved. + * + **/ + +#include "linear_system/LM_linear_system.h" + +namespace MegBA { +template +void LMLinearSystem::freeCUDA() { + for (int i = 0; i < this->problemOption.deviceUsed.size(); ++i) { + cudaSetDevice(i); + cudaFree(deltaXPtrBackup[i]); + cudaFree(gBackup[i]); + for (auto p : extractedDiag[i]) { + cudaFree(p); + } + } +} + +SPECIALIZE_CLASS(LMLinearSystem); +} // namespace MegBA diff --git a/cuda_code/LZ4CompressionKernels_3.cu b/cuda_code/LZ4CompressionKernels_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..7a0824e767c89e22d5430fb4236e2231fb2475dc --- /dev/null +++ b/cuda_code/LZ4CompressionKernels_3.cu @@ -0,0 +1,945 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of NVIDIA CORPORATION nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "LZ4CompressionKernels.h" +#include "TempSpaceBroker.h" +#include "common.h" + +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Weffc++" +#pragma GCC diagnostic ignored "-Wunused-parameter" +#endif +#include +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif + +// align all temp allocations by 512B +#define CUDA_MEM_ALIGN(size) (((size) + 0x1FF) & ~0x1FF) + +#include "cuda_runtime.h" + +#include +#include +#include +#include + +using offset_type = uint16_t; +using word_type = uint32_t; +using position_type = size_t; +using double_word_type = uint64_t; + +namespace nvcomp { + +constexpr const int DECOMP_THREADS = 32; +constexpr const int Y_DIM = 2; +constexpr const position_type BUFFER_SIZE + = DECOMP_THREADS * sizeof(double_word_type); +constexpr const position_type PREFETCH_DIST = BUFFER_SIZE / 2; + +constexpr const position_type HASH_TABLE_SIZE = 1U << 14; +constexpr const offset_type NULL_OFFSET = static_cast(-1); +constexpr const position_type MAX_OFFSET = (1U << 16) - 1; + +struct block_stats_st +{ + uint64_t cycles; + int copy_length_min; + int copy_length_max; + int copy_length_sum; + int copy_length_count; + int copy_lsic_count; + int match_length_min; + int match_length_max; + int match_length_sum; + int match_length_count; + int match_lsic_count; + int match_overlaps; + int offset_min; + int offset_max; + int offset_sum; +}; + +/****************************************************************************** + * DEVICE FUNCTIONS AND KERNELS *********************************************** + *****************************************************************************/ + +inline __device__ void syncCTA() +{ + if (DECOMP_THREADS > 32) { + __syncthreads(); + } else { + __syncwarp(); + } +} + +template +inline __device__ void writeWord(uint8_t* const address, const T word) +{ +#pragma unroll + for (size_t i = 0; i < sizeof(T); ++i) { + address[i] = static_cast((word >> (8 * i)) & 0xff); + } +} + +template +inline __device__ T readWord(const uint8_t* const address) +{ + T word = 0; + for (size_t i = 0; i < sizeof(T); ++i) { + word |= address[i] << (8 * i); + } + + return word; +} +inline __device__ void writeLSIC(uint8_t* const out, position_type number) +{ + size_t i = 0; + while (number >= 0xff) { + out[i] = 0xff; + ++i; + number -= 0xff; + } + out[i] = number; +} + +struct token_type +{ + position_type num_literals; + position_type num_matches; + + __device__ bool hasNumLiteralsOverflow() const + { + return num_literals >= 15; + } + + __device__ bool hasNumMatchesOverflow() const + { + return num_matches >= 19; + } + + __device__ position_type numLiteralsOverflow() const + { + if (hasNumLiteralsOverflow()) { + return num_literals - 15; + } else { + return 0; + } + } + + __device__ uint8_t numLiteralsForHeader() const + { + if (hasNumLiteralsOverflow()) { + return 15; + } else { + return num_literals; + } + } + + __device__ position_type numMatchesOverflow() const + { + if (hasNumMatchesOverflow()) { + assert(num_matches >= 19); + return num_matches - 19; + } else { + assert(num_matches < 19); + return 0; + } + } + + __device__ uint8_t numMatchesForHeader() const + { + if (hasNumMatchesOverflow()) { + return 15; + } else { + return num_matches - 4; + } + } + __device__ position_type lengthOfLiteralEncoding() const + { + if (hasNumLiteralsOverflow()) { + position_type length = 1; + position_type num = numLiteralsOverflow(); + while (num >= 0xff) { + num -= 0xff; + length += 1; + } + + return length; + } + return 0; + } + + __device__ 
position_type lengthOfMatchEncoding() const + { + if (hasNumMatchesOverflow()) { + position_type length = 1; + position_type num = numMatchesOverflow(); + while (num >= 0xff) { + num -= 0xff; + length += 1; + } + + return length; + } + return 0; + } +}; + +class BufferControl +{ +public: + + __device__ BufferControl( + uint8_t* const buffer, const uint8_t* const compData, const position_type length) : + m_offset(0), + m_length(length), + m_buffer(buffer), + m_compData(compData) + { + // do nothing + } + + #ifdef WARP_READ_LSIC + // this is currently unused as its slower + inline __device__ position_type queryLSIC(const position_type idx) const + { + if (idx + DECOMP_THREADS <= end()) { + // most likely case + const uint8_t byte = rawAt(idx)[threadIdx.x]; + + uint32_t mask = __ballot_sync(0xffffffff, byte != 0xff); + mask = __brev(mask); + + const position_type fullBytes = __clz(mask); + + if (fullBytes < DECOMP_THREADS) { + return fullBytes * 0xff + rawAt(idx)[fullBytes]; + } else { + return DECOMP_THREADS * 0xff; + } + } else { + uint8_t byte; + if (idx + threadIdx.x < end()) { + byte = rawAt(idx)[threadIdx.x]; + } else { + byte = m_compData[idx + threadIdx.x]; + } + + uint32_t mask = __ballot_sync(0xffffffff, byte != 0xff); + mask = __brev(mask); + + const position_type fullBytes = __clz(mask); + + if (fullBytes < DECOMP_THREADS) { + return fullBytes * 0xff + __shfl_sync(0xffffffff, byte, fullBytes); + } else { + return DECOMP_THREADS * 0xff; + } + } + } + #endif + + inline __device__ position_type readLSIC(position_type& idx) const + { + #ifdef WARP_READ_LSIC + position_type num = 0; + while (true) { + const position_type block = queryLSIC(idx); + num += block; + + if (block < DECOMP_THREADS * 0xff) { + idx += (block / 0xff) + 1; + break; + } else { + idx += DECOMP_THREADS; + } + } + return num; + #else + position_type num = 0; + uint8_t next = 0xff; + // read from the buffer + while (next == 0xff && idx < end()) { + next = rawAt(idx)[0]; + ++idx; + num += next; + } + // read from global memory + while (next == 0xff) { + next = m_compData[idx]; + ++idx; + num += next; + } + return num; + #endif + } + + inline __device__ const uint8_t* raw() const + { + return m_buffer; + } + + inline __device__ const uint8_t* rawAt(const position_type i) const + { + return raw() + (i - begin()); + } + inline __device__ uint8_t operator[](const position_type i) const + { + if (i >= m_offset && i - m_offset < BUFFER_SIZE) { + return m_buffer[i - m_offset]; + } else { + return m_compData[i]; + } + } + + inline __device__ void loadAt(const position_type offset) + { + m_offset = (offset / sizeof(double_word_type)) * sizeof(double_word_type); + + if (m_offset + BUFFER_SIZE <= m_length) { + assert(m_offset % sizeof(double_word_type) == 0); + assert(BUFFER_SIZE == DECOMP_THREADS * sizeof(double_word_type)); + const double_word_type* const word_data + = reinterpret_cast(m_compData + m_offset); + double_word_type* const word_buffer + = reinterpret_cast(m_buffer); + word_buffer[threadIdx.x] = word_data[threadIdx.x]; + } else { + #pragma unroll + for (int i = threadIdx.x; i < BUFFER_SIZE; i += DECOMP_THREADS) { + if (m_offset + i < m_length) { + m_buffer[i] = m_compData[m_offset + i]; + } + } + } + + syncCTA(); + } + + inline __device__ position_type begin() const + { + return m_offset; + } + + + inline __device__ position_type end() const + { + return m_offset + BUFFER_SIZE; + } + +private: + position_type m_offset; + const position_type m_length; + uint8_t* const m_buffer; + const uint8_t* const m_compData; +}; 
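readLSIC() above, together with writeLSIC() further up, implements LZ4's length-extension bytes: once a 4-bit length field in the token saturates at 15, the remainder is stored as a run of 0xFF bytes followed by one terminating byte. A minimal host-side reference of the same encoding, handy for sanity checks (the function names below are made up for illustration and are not part of this file):

#include <cstddef>
#include <cstdint>

// Encode an overflow length the way writeLSIC does: one 0xFF byte per full
// 255 step, then a final byte holding the remainder. Returns bytes written.
static size_t lsicEncode(uint8_t* out, size_t value) {
    size_t i = 0;
    while (value >= 0xff) { out[i++] = 0xff; value -= 0xff; }
    out[i++] = static_cast<uint8_t>(value);
    return i;
}

// Decode it back: keep summing bytes until one is not 0xFF. This mirrors the
// scalar fallback path of readLSIC (the WARP_READ_LSIC path does the same
// thing cooperatively across a warp).
static size_t lsicDecode(const uint8_t* in, size_t* consumed) {
    size_t value = 0, i = 0;
    uint8_t b;
    do { b = in[i++]; value += b; } while (b == 0xff);
    *consumed = i;
    return value;
}

For example, an overflow length of 300 encodes as 0xFF 0x2D (255 + 45), and lsicDecode recovers 300 after consuming two bytes.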
//End BufferControl Class + + +inline __device__ void coopCopyNoOverlap( + uint8_t* const dest, const uint8_t* const source, const size_t length) +{ + for (size_t i = threadIdx.x; i < length; i += blockDim.x) { + dest[i] = source[i]; + } +} + +inline __device__ void coopCopyRepeat( + uint8_t* const dest, + const uint8_t* const source, + const position_type dist, + const position_type length) +{ +// if there is overlap, it means we repeat, so we just +// need to organize our copy around that + for (position_type i = threadIdx.x; i < length; i += blockDim.x) { + dest[i] = source[i % dist]; + } +} + +inline __device__ void coopCopyOverlap( + uint8_t* const dest, + const uint8_t* const source, + const position_type dist, + const position_type length) +{ + if (dist < length) { + coopCopyRepeat(dest, source, dist, length); + } else { + coopCopyNoOverlap(dest, source, length); + } +} + +inline __device__ position_type hash(const word_type key) +{ + // needs to be 12 bits +// return ((key >> 16) + key) & (HASH_TABLE_SIZE - 1); + return (__brev(key) + (key^0xc375)) & (HASH_TABLE_SIZE - 1); +} + +inline __device__ uint8_t encodePair(const uint8_t t1, const uint8_t t2) +{ + return ((t1 & 0x0f) << 4) | (t2 & 0x0f); +} + +inline __device__ token_type decodePair(const uint8_t num) +{ + return token_type{static_cast((num & 0xf0) >> 4), + static_cast(num & 0x0f)}; +} + +inline __device__ void copyLiterals( + uint8_t* const dest, const uint8_t* const source, const size_t length) +{ + for (size_t i = 0; i < length; ++i) { + dest[i] = source[i]; + } +} + +inline __device__ position_type lengthOfMatch( + const uint8_t* const data, + const position_type prev_location, + const position_type next_location, + const position_type length) +{ + assert(prev_location < next_location); + + + position_type i; + for (i = 0; i + next_location + 5 < length; ++i) { + if (data[prev_location + i] != data[next_location + i]) { + break; + } + } + return i; +} + +inline __device__ position_type +convertIdx(const offset_type offset, const position_type pos) +{ + constexpr const position_type OFFSET_SIZE = MAX_OFFSET + 1; + + assert(offset <= pos); + + position_type realPos = (pos / OFFSET_SIZE) * OFFSET_SIZE + offset; + if (realPos >= pos) { + realPos -= OFFSET_SIZE; + } + assert(realPos < pos); + + return realPos; +} + +inline __device__ bool isValidHash( + const uint8_t* const data, + const offset_type* const hashTable, + const position_type key, + const position_type hashPos, + const position_type decomp_idx) +{ + if (hashTable[hashPos] == NULL_OFFSET) { + return false; + } + + const position_type offset = convertIdx(hashTable[hashPos], decomp_idx); + + if (decomp_idx - offset > MAX_OFFSET) { + // the offset can be up to 2^16-1, but the converted idx can be up to 2^16, + // so we need to eliminate this case. 
+ return false; + } + + const word_type hashKey = readWord(data + offset); + + if (hashKey != key) { + return false; + } + + return true; +} + +inline __device__ void writeSequenceData( + uint8_t* const compData, + const uint8_t* const decompData, + const token_type token, + const offset_type offset, + const position_type decomp_idx, + position_type& comp_idx) +{ + assert(token.num_matches == 0 || token.num_matches >= 4); + + // -> add token + compData[comp_idx] + = encodePair(token.numLiteralsForHeader(), token.numMatchesForHeader()); + ++comp_idx; + + // -> add literal length + const position_type literalEncodingLength = token.lengthOfLiteralEncoding(); + if (literalEncodingLength) { + writeLSIC(compData + comp_idx, token.numLiteralsOverflow()); + comp_idx += literalEncodingLength; + } + + // -> add literals + copyLiterals( + compData + comp_idx, decompData + decomp_idx, token.num_literals); + comp_idx += token.num_literals; + + // -> add offset + if (token.num_matches > 0) { + assert(offset > 0); + + writeWord(compData + comp_idx, offset); + comp_idx += sizeof(offset); + + // -> add match length + if (token.hasNumMatchesOverflow()) { + writeLSIC(compData + comp_idx, token.numMatchesOverflow()); + comp_idx += token.lengthOfMatchEncoding(); + } + } +} + +__device__ void compressStream( + uint8_t* compData, + const uint8_t* decompData, + size_t length, + size_t* comp_length) +{ + position_type decomp_idx = 0; + position_type comp_idx = 0; + + __shared__ offset_type hashTable[HASH_TABLE_SIZE]; + + // fill hash-table with null-entries + for (position_type i = threadIdx.x; i < HASH_TABLE_SIZE; i += blockDim.x) { + hashTable[i] = NULL_OFFSET; + } + + while (decomp_idx < length) { + const position_type tokenStart = decomp_idx; + while (true) { + // begin adding tokens to the hash table until we find a match + const word_type next = readWord(decompData + decomp_idx); + const position_type pos = decomp_idx; + position_type hashPos = hash(next); + + if (decomp_idx + 5 + 4 >= length) { + // jump to end + decomp_idx = length; + + // no match -- literals to the end + token_type tok; + tok.num_literals = length - tokenStart; + tok.num_matches = 0; + writeSequenceData(compData, decompData, tok, 0, tokenStart, comp_idx); + break; + } else if (isValidHash(decompData, hashTable, next, hashPos, pos)) { + token_type tok; + const position_type match_location + = convertIdx(hashTable[hashPos], pos); + assert(match_location < decomp_idx); + assert(decomp_idx - match_location <= MAX_OFFSET); + + // we found a match + const offset_type match_offset = decomp_idx - match_location; + assert(match_offset > 0); + assert(match_offset <= decomp_idx); + const position_type num_literals = pos - tokenStart; + + // compute match length + const position_type num_matches + = lengthOfMatch(decompData, match_location, pos, length); + decomp_idx += num_matches; + + // -> write our token and literal length + tok.num_literals = num_literals; + tok.num_matches = num_matches; + writeSequenceData( + compData, decompData, tok, match_offset, tokenStart, comp_idx); + + break; + } else if (decomp_idx + 12 < length) { + // last match cannot be within 12 bytes of the end + + // TODO: we should overwrite matches in our hash table too, as they + // are more recent + + // add it to our literals and dictionary + hashTable[hashPos] = pos & MAX_OFFSET; + } + ++decomp_idx; + } + } + + *comp_length = comp_idx; +} + +inline __device__ void decompressStream( + uint8_t* buffer, + uint8_t* decompData, + const uint8_t* compData, + const position_type 
comp_start, + position_type length, + block_stats_st* stats) +{ +#ifdef LOG_CTA_CYCLES + uint64_t start_clock; + if (threadIdx.x == 0) { + start_clock = clock64(); + } +#endif + + position_type comp_end = length + comp_start; + + BufferControl ctrl(buffer, compData, comp_end); + ctrl.loadAt(comp_start); + + position_type decomp_idx = 0; + position_type comp_idx = comp_start; + while (comp_idx < comp_end) { + if (comp_idx + PREFETCH_DIST > ctrl.end()) { + ctrl.loadAt(comp_idx); + } + + // read header byte + token_type tok = decodePair(*ctrl.rawAt(comp_idx)); + ++comp_idx; + + // read the length of the literals + position_type num_literals = tok.num_literals; + if (tok.num_literals == 15) { + num_literals += ctrl.readLSIC(comp_idx); + } +#ifdef LOG_STATS + if (threadIdx.x == 0) { + atomicMin(&stats->copy_length_min, num_literals); + atomicMax(&stats->copy_length_max, num_literals); + atomicAdd(&stats->copy_length_sum, num_literals); + if (tok.num_literals == 15) { + atomicAdd(&stats->copy_lsic_count, 1); + } + atomicAdd(&stats->copy_length_count, 1); + } +#endif + const position_type literalStart = comp_idx; + + + // copy the literals to the out stream + if (num_literals + comp_idx > ctrl.end()) { + coopCopyNoOverlap( + decompData + decomp_idx, compData + comp_idx, num_literals); + } else { + // our buffer can copy + coopCopyNoOverlap( + decompData + decomp_idx, ctrl.rawAt(comp_idx), num_literals); + } + + comp_idx += num_literals; + decomp_idx += num_literals; + + // Note that the last sequence stops right after literals field. + // There are specific parsing rules to respect to be compatible with the + // reference decoder : 1) The last 5 bytes are always literals 2) The last + // match cannot start within the last 12 bytes Consequently, a file with + // less then 13 bytes can only be represented as literals These rules are in + // place to benefit speed and ensure buffer limits are never crossed. 
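Concretely, every sequence this loop walks has the layout sketched below; the bytes in the example are invented for illustration and do not come from this codebase.

  // Standard LZ4 sequence layout, as consumed by decompressStream:
  //   token          1 byte    high nibble = literal count (15 => LSIC bytes follow)
  //                            low nibble  = match length - 4 (15 => LSIC bytes follow)
  //   [extra lits]   0+ bytes  LSIC extension, only when the high nibble is 15
  //   literals       N bytes   copied verbatim to the output
  //   offset         2 bytes   little-endian distance back into the decompressed data
  //   [extra match]  0+ bytes  LSIC extension, only when the low nibble is 15
  //
  // Hypothetical example: 0x52 'h' 'e' 'l' 'l' 'o' 0x05 0x00
  //   token 0x52 -> 5 literals and a match of 2 + 4 = 6 bytes at offset 0x0005,
  //   so the decoder emits "hello", then copies 6 bytes starting 5 bytes back
  //   in its own output (an overlapping copy, handled by coopCopyOverlap).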
+ if (comp_idx < comp_end) { + + // read the offset + offset_type offset; + if (comp_idx + sizeof(offset_type) > ctrl.end()) { + offset = readWord(compData + comp_idx); + } else { + offset = readWord(ctrl.rawAt(comp_idx)); + } + + comp_idx += sizeof(offset_type); + + // read the match length + position_type match = 4 + tok.num_matches; + if (tok.num_matches == 15) { + match += ctrl.readLSIC(comp_idx); + } + +#ifdef LOG_STATS + if (threadIdx.x == 0) { + atomicMin(&stats->match_length_min, match); + atomicMax(&stats->match_length_max, match); + atomicAdd(&stats->match_length_sum, match); + atomicAdd(&stats->match_length_count, 1); + if (tok.num_matches == 15) { + atomicAdd(&stats->match_lsic_count, 1); + } + if (offset < match) + atomicAdd(&stats->match_overlaps, 1); + atomicMin(&stats->offset_min, offset); + atomicMax(&stats->offset_max, offset); + atomicAdd(&stats->offset_sum, offset); + } +#endif + + // copy match + if (offset <= num_literals + && (ctrl.begin() <= literalStart + && ctrl.end() >= literalStart + num_literals)) { + // we are using literals already present in our buffer + + coopCopyOverlap( + decompData + decomp_idx, + ctrl.rawAt(literalStart + (num_literals - offset)), + offset, + match); + // we need to sync after we copy since we use the buffer + syncCTA(); + } else { + // we need to sync before we copy since we use decomp + syncCTA(); + + coopCopyOverlap( + decompData + decomp_idx, + decompData + decomp_idx - offset, + offset, + match); + } + decomp_idx += match; + } + } +#ifdef LOG_CTA_CYCLES + if (threadIdx.x == 0) + stats->cycles = clock64() - start_clock; +#endif + assert(comp_idx == comp_end); +} + + +__global__ void lz4CompressMultistreamKernel( + uint8_t* compData, + const uint8_t* decompData, + size_t chunk_size, + size_t stride, + size_t last_chunk_size, + size_t* comp_length, + size_t batch_bytes) +{ + const uint8_t* decomp_ptr = &decompData[blockIdx.x*chunk_size]; + uint8_t* comp_ptr = &compData[blockIdx.x*(stride)]; + + size_t decomp_length = chunk_size; + if(blockIdx.x == gridDim.x-1 && last_chunk_size != 0) { + decomp_length = last_chunk_size; + } + + compressStream( + comp_ptr, + decomp_ptr, + decomp_length, + comp_length + blockIdx.x); + +} + + +__global__ void copyToContig( + void* compData, + void* tempData, + int stride, + size_t* prefix_output, + size_t* metadata_ptr) +{ + for(size_t i=threadIdx.x; i<(prefix_output[blockIdx.x+1] - prefix_output[blockIdx.x]); i+=blockDim.x) { + ((uint8_t*)compData)[prefix_output[blockIdx.x] + i] = ((uint8_t*)tempData)[blockIdx.x*stride + i]; + } + +__syncthreads(); +if(threadIdx.x==0) { + metadata_ptr[blockIdx.x] = prefix_output[blockIdx.x]; + metadata_ptr[blockIdx.x+1] = prefix_output[blockIdx.x+1]; +} + +} + +__global__ void lz4DecompressMultistreamKernel( + uint8_t* decompData, + const uint8_t* compData, + size_t* offsets, + size_t decomp_chunk_size, + size_t last_decomp_chunk_size, + int num_chunks, + block_stats_st* stats) +{ + const int bid = blockIdx.x * Y_DIM + threadIdx.y; + + __shared__ uint8_t buffer[BUFFER_SIZE * Y_DIM]; + + if (bid < num_chunks) { + uint8_t* decomp_ptr = &(decompData[bid * decomp_chunk_size]); + size_t chunk_length = offsets[bid + 1] - offsets[bid]; + + if (bid == num_chunks - 1 && last_decomp_chunk_size != 0) + decomp_chunk_size = last_decomp_chunk_size; + + decompressStream( + buffer + threadIdx.y * BUFFER_SIZE, + decomp_ptr, + compData, + offsets[bid], + chunk_length, + stats + bid); + } +} + +/****************************************************************************** + * PUBLIC 
FUNCTIONS *********************************************************** + *****************************************************************************/ + +void lz4CompressBatch( + void* const compData, + void* const tempData, + const size_t temp_bytes, + const uint8_t* decomp_ptr, + uint8_t* metadata_ptr, + size_t batch_bytes, + int chunk_bytes, + int chunks_in_batch, + int blocks, + cudaStream_t stream) +{ + const size_t stride = lz4ComputeMaxSize(chunk_bytes); + + TempSpaceBroker broker(tempData, temp_bytes); + + uint8_t* multiStreamTempSpace; + broker.reserve(&multiStreamTempSpace, chunks_in_batch * stride); + + lz4CompressMultistreamKernel<<>>( + multiStreamTempSpace, + decomp_ptr, + chunk_bytes, + stride, + batch_bytes % (chunk_bytes), + reinterpret_cast(metadata_ptr), + batch_bytes); + + size_t prefix_temp_size=0; + + size_t* prefix_out; + broker.reserve(&prefix_out, chunks_in_batch + 1); + + // Compute exact temp space needed by cub + cudaError_t err = cub::DeviceScan::InclusiveSum( + NULL, + prefix_temp_size, + (((size_t*)metadata_ptr) - 1), + prefix_out, + (size_t)chunks_in_batch + 1, + stream); + if (err != cudaSuccess) { + throw std::runtime_error( + "Failed to get inclusvie some temp space requirements: " + + std::to_string(err)); + } + + uint8_t* prefix_temp_storage; + broker.reserve(&prefix_temp_storage, prefix_temp_size); + + err = cub::DeviceScan::InclusiveSum( + prefix_temp_storage, + prefix_temp_size, + (((size_t*)metadata_ptr) - 1), + prefix_out, + (size_t)chunks_in_batch + 1, + stream); + if (err != cudaSuccess) { + throw std::runtime_error( + "Failed to launch inclusive sum: " + std::to_string(err)); + } + // Copy prefix sums values to metadata header and copy compressed data into + // contiguous space + copyToContig<<>>( + compData, + multiStreamTempSpace, + stride, + prefix_out, + ((size_t*)metadata_ptr) - 1); +} + + +void lz4DecompressBatch( + void* decompData, + const void* compData, + int headerOffset, + int chunk_size, + int last_chunk_size, + int chunks_in_batch, + cudaStream_t stream) +{ + + lz4DecompressMultistreamKernel<<< + ((chunks_in_batch - 1) / Y_DIM)+1, + dim3(DECOMP_THREADS, Y_DIM, 1), + 0, + stream>>> + ((uint8_t*)decompData, + ((uint8_t*)compData), + (size_t*)(((uint8_t*)compData)+headerOffset), + chunk_size, + last_chunk_size, + chunks_in_batch, + NULL); + +} + +size_t lz4ComputeTempSize(const size_t maxChunksInBatch, const size_t chunkSize) +{ + size_t prefix_temp_size; + cudaError_t err = cub::DeviceScan::InclusiveSum( + NULL, + prefix_temp_size, + static_cast(nullptr), + static_cast(nullptr), + maxChunksInBatch + 1); + if (err != cudaSuccess) { + throw std::runtime_error( + "Failed to get space for cub inclusive sub: " + std::to_string(err)); + } + + const size_t strideSize = lz4ComputeMaxSize(chunkSize); + const size_t prefix_out_size = sizeof(size_t) * (maxChunksInBatch + 1); + + return prefix_temp_size + prefix_out_size + strideSize * maxChunksInBatch; +} + +size_t lz4ComputeMaxSize(const size_t size) +{ + const size_t expansion = size + 1 + roundUpDiv(size, 255); + return roundUpTo(expansion, sizeof(size_t)); +} + +} // nvcomp namespace + diff --git a/cuda_code/LambdaTests_4.cu b/cuda_code/LambdaTests_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..b98d7ea939723c84697823be025a4167c434b7ba --- /dev/null +++ b/cuda_code/LambdaTests_4.cu @@ -0,0 +1,221 @@ +/* ****************************************************************************** + * + * + * This program and the accompanying materials are made available under the 
+ * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0. + * + * See the NOTICE file distributed with this work for additional + * information regarding copyright ownership. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + * + * SPDX-License-Identifier: Apache-2.0 + ******************************************************************************/ + +// +// @author raver119@gmail.com +// + +#include "testlayers.h" +#include +#include +#include +#include + +using namespace sd; + +class LambdaTests : public testing::Test { +public: + + LambdaTests() { + printf("\n"); + fflush(stdout); + } +}; + +template +__global__ void runLambda(double *input, double *output, Nd4jLong length, Lambda lambda) { + auto tid = blockIdx.x * blockDim.x + threadIdx.x; + for (Nd4jLong e = tid; e < length; e += gridDim.x * blockDim.x) { + output[e] = lambda(input[e]); + } +} + +void launcher(cudaStream_t *stream, double *input, double *output, Nd4jLong length) { + //auto f = [] __host__ __device__ (double x) -> double { + // return x + 1.; + //}; + auto f = LAMBDA_D(x) { + return x+1.; + }; + + + runLambda<<<128, 128, 128, *stream>>>(input, output, length, f); +} + + +// TEST_F(LambdaTests, test_basic_1) { +// auto x = NDArrayFactory::create('c', {5}); +// auto e = NDArrayFactory::create('c', {5}, {1., 1., 1., 1., 1.}); + + + +// //x.applyLambda(f, nullptr); +// launcher(LaunchContext::defaultContext()->getCudaStream(), (double *)x.specialBuffer(), (double *)x.specialBuffer(), x.lengthOf()); +// auto res = cudaStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream()); +// ASSERT_EQ(0, res); + +// ASSERT_EQ(e, x); +// } + +// void test(NDArray &x) { +// auto f = LAMBDA_D(x) { +// return x+1.; +// }; + +// x.applyLambda(f, x); +// } + +// template +// void test2(NDArray &x) { +// auto f = LAMBDA_T(x) { +// return x+1.; +// }; + +// x.applyLambda(f, x); +// } + +// void testPairwise(NDArray &x, NDArray &y) { +// auto f = LAMBDA_DD(x, y) { +// return x + y +1.; +// }; + +// x.applyPairwiseLambda(y, f, x); +// } + +// void testTriplewise(NDArray &i, NDArray &j, NDArray &k) { +// auto f = LAMBDA_DDD(i, j, k) { +// return i + j + k + 2.; +// }; + +// i.applyTriplewiseLambda(j, k, f, i); +// } + +// void testIndexed(NDArray &x) { +// auto f = ILAMBDA_D(x) { +// return _idx + 1.; +// }; + +// x.applyIndexedLambda(f, x); +// } + +// void testIndexedPairwise(NDArray &x, NDArray &y) { +// auto f = ILAMBDA_DD(x, y) { +// return _idx + x + y +1.; +// }; + +// x.applyIndexedPairwiseLambda(y, f, x); +// } + +// TEST_F(LambdaTests, test_basic_2) { +// auto x = NDArrayFactory::create('c', {5}); +// auto e = NDArrayFactory::create('c', {5}, {1., 1., 1., 1., 1.}); + +// test(x); + +// ASSERT_EQ(e, x); +// } + +// TEST_F(LambdaTests, test_basic_3) { +// auto x = NDArrayFactory::create('c', {5}); +// auto e = NDArrayFactory::create('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f}); + +// test(x); + +// ASSERT_EQ(e, x); +// } + +// TEST_F(LambdaTests, test_basic_4) { +// auto x = NDArrayFactory::create('c', {5}); +// auto e = NDArrayFactory::create('c', {5}, {1.f, 1.f, 1.f, 1.f, 1.f}); + +// test2(x); + +// ASSERT_EQ(e, x); +// } + +// TEST_F(LambdaTests, test_basic_5) { +// auto x = NDArrayFactory::create('c', 
{5}, {1., 1., 1., 1., 1.}); +// auto y = NDArrayFactory::create('c', {5}, {2., 2., 2., 2., 2.}); +// auto e = NDArrayFactory::create('c', {5}, {4., 4., 4., 4., 4.}); + +// testPairwise(x, y); + +// ASSERT_EQ(e, x); +// } + +// TEST_F(LambdaTests, test_basic_6) { +// auto x = NDArrayFactory::create('c', {5}); +// auto e = NDArrayFactory::create('c', {5}, {1., 2., 3., 4., 5.}); + +// testIndexed(x); + +// ASSERT_EQ(e, x); +// } + +// TEST_F(LambdaTests, test_basic_7) { +// auto w = NDArrayFactory::create('c', {5}, {0., 0., 0., 0., 0.}); +// auto x = NDArrayFactory::create('c', {5}, {1., 1., 1., 1., 1.}); +// auto y = NDArrayFactory::create('c', {5}, {2., 2., 2., 2., 2.}); +// auto e = NDArrayFactory::create('c', {5}, {5., 5., 5., 5., 5.}); + +// testTriplewise(w, x, y); + +// ASSERT_EQ(e, w); +// } + +// TEST_F(LambdaTests, test_basic_8) { +// auto x = NDArrayFactory::create('c', {5}, {1., 1., 1., 1., 1.}); +// auto y = NDArrayFactory::create('c', {5}, {2., 2., 2., 2., 2.}); +// auto e = NDArrayFactory::create('c', {5}, {4., 5., 6., 7., 8.}); + +// testIndexedPairwise(x, y); + +// ASSERT_EQ(e, x); +// } + + +// template +// void testPairwiseMy(NDArray &x, NDArray &y, NDArray &z) { + +// auto f = LAMBDA_TT(x, y){ +// return sd::math::nd4j_max(x, (T)0.f) +// - x * y +// + sd::math::nd4j_log((T)1.f +// + sd::math::nd4j_exp(-sd::math::nd4j_abs(x))); +// }; + +// x.applyPairwiseLambda(y, f, z); +// } + +// /////////////////////////////////////////////////////////////////// +// TEST_F(LambdaTests, test_basic_9) { + +// NDArray labels('c', {2,3,4},{0,1,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,0,1,1,0,1,0}); +// NDArray logits('c', {2,3,4}, sd::DataType::DOUBLE); +// NDArray output('c', {2,3,4}, sd::DataType::DOUBLE); +// NDArray expected('c', {2,3,4}, {0.744397, 0.598139, 0.554355, 0.913015, 0.474077, 1.037488, 0.403186, 1.171101, 0.341154, 1.313262, 0.287335, 1.463282, 0.241008, 1.620417, 0.201413, 1.783901, 0.167786, 1.952978, 2.039387, 0.126928, 0.115520, 2.305083, 0.095545, 2.486836}); + +// logits.linspace(0.1, 0.1); + +// NDArray::prepareSpecialUse({&output}, {&logits, &labels}); +// testPairwiseMy(logits, labels, output); +// NDArray::registerSpecialUse({&output}, {&logits, &labels}); + +// // output.printBuffer(nullptr, -1, true); +// ASSERT_TRUE(expected.equalsTo(output)); +// } diff --git a/cuda_code/LinearCalculater.cu b/cuda_code/LinearCalculater.cu new file mode 100644 index 0000000000000000000000000000000000000000..4ca853ac54a1c748ad0697aab778331dd70b8f61 --- /dev/null +++ b/cuda_code/LinearCalculater.cu @@ -0,0 +1,36 @@ +/** + * LinearCalculater.cu + * Created on: May 14, 2013 + * Author: Zeyi Wen + * Copyright @DBGroup University of Melbourne + **/ + +#include "kernelCalculater.h" +#include "kernelCalGPUHelper.h" +#include "../my_assert.h" + +/* + * @brief: compute a certain # of rows of the Hessian Matrix by RBF function + * @param: pfDevSamples: a device pointer to the whole samples. 
These samples indicate which rows are computed in this round + * @param: pfDevTransSamples: a device pointer to the whole samples with transposition + * @param: pfdevHessianRows: a device pointer to a certain # of Hessian Matrix rows to be computed + * @param: nNumofSamples: indicates the length of pfDevTransSamples + * @param: nNumofRows: indicates the length of pfDevSamples + */ +bool CLinearKernel::ComputeHessianRows(real *pfDevSamples, real *pfDevTransSamples, real *pfDevHessianRows, + const int &nNumofSamples, const int &nNumofDim, + const int &nNumofRows, const int &nStartRow) +{ + bool bReturn = true; + + int nBlockSize = 0; + dim3 dimGrid; + GetGPUSpec(dimGrid, nBlockSize, nNumofSamples, nNumofRows); + assert(nBlockSize >= 0); + LinearKernel<<>> + (pfDevSamples, pfDevTransSamples, pfDevHessianRows, nNumofSamples, nNumofDim, nStartRow); + cudaDeviceSynchronize(); + assert(cudaGetLastError() == cudaSuccess); + + return bReturn; +} diff --git a/cuda_code/LinearDiffusion.cu b/cuda_code/LinearDiffusion.cu new file mode 100644 index 0000000000000000000000000000000000000000..a7300cad25a69c7851f7be724f7afb1f78c6f29e --- /dev/null +++ b/cuda_code/LinearDiffusion.cu @@ -0,0 +1,306 @@ +#include "../../common/core/Logger.h" + +#include "LinearDiffusion.h" +#include "LinearDiffusionKernels.h" + + +//////////////////////////////////////////////////////////////////////////////// +LinearDiffusion::LinearDiffusion() : + tau_(0.1f), + eta_(2.0f), + maxDisp_(255), + evenUpdate_(true) +{ + Logger logger("LinearDiffusion::LinearDiffusion"); + + invalidatePointer(); + + // initialize disparity -> diffusivity + size1_ = 256; // supposes 8U grayscale images + bytes1f_ = size1_ * sizeof(float); + + h_diff_ = new float[size1_]; + cudaMalloc(&d_diff_, bytes1f_); CUDA_CHECK; + cudaDeviceSynchronize(); + + memset(h_diff_, 0, bytes1f_); + cudaMemset(d_diff_, 0, bytes1f_); CUDA_CHECK; + cudaDeviceSynchronize(); +} + + +//////////////////////////////////////////////////////////////////////////////// +// also update properties and fix all sizes and buffers +void LinearDiffusion::setImage(const cv::Mat& img) +{ + Logger logger("LinearDiffusion::setImage"); + + const cv::Size prevSize = img_.size(); + + // convert, save and upload image + img.convertTo(img_, CV_32F); + img_ /= 255.0f; + + if (prevSize != img_.size()) + { + updateParameters(); + freeMemory(); + + h_img_ = new float[size2v_]; + h_disp_ = new int[size2_]; + + cudaMalloc(&d_u0_, bytes2fv_); CUDA_CHECK; + cudaMalloc(&d_u1_, bytes2fv_); CUDA_CHECK; + cudaMalloc(&d_disp_, bytes2i_); CUDA_CHECK; + cudaDeviceSynchronize(); + } + + convert_mat_to_layered(h_img_, img_); + cudaMemcpy(d_u0_, h_img_, bytes2fv_, cudaMemcpyHostToDevice); CUDA_CHECK; +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::setDisparityMap(const cv::Mat& disp) +{ + Logger logger("LinearDiffusion::setDisparityMap"); + + if (disp.size() != img_.size()) { + logger << "disparity size does not match image size"; logger.eol(); + logger.pop(false); + } + + // convert, save and upload disparity + disp.convertTo(disp_, CV_32S); + + cudaMemcpy(d_disp_, disp_.ptr(), bytes2i_, cudaMemcpyHostToDevice); + CUDA_CHECK; + + // save maximal disparity + double min, max; + cv::minMaxIdx(disp_, &min, &max); + maxDisp_ = ceil(max); +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::setParameters(int argc, char* argv[]) +{ + Logger logger("LinearDiffusion::setParameters"); + + getParam("tau", 
tau_, argc, argv); + getParam("eta", eta_, argc, argv); + + logger << "tau: " << tau_; logger.eol(); + logger << "eta: " << eta_; logger.eol(); +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::updateParameters() +{ + Logger logger("LinearDiffusion::updateParameters"); + + // image properties + w_ = img_.cols; + h_ = img_.rows; + nc_ = img_.channels(); + + // unit sizes + size2_ = w_ * h_; + size2v_ = w_ * h_ * nc_; + + // memory sizes + bytes2i_ = size2_ * sizeof(int); + bytes2fv_ = size2v_ * sizeof(float); + + // kernel + dim_ = dim3(w_, h_, nc_); + +#ifdef ZORAH + block_ = dim3(16, 8, 1); +#else + block_ = dim3(32, 16, 1); +#endif + + grid_.x = (w_ + block_.x - 1) / block_.x; + grid_.y = (h_ + block_.y - 1) / block_.y; + grid_.z = 1; + + // print parameters + logger << "image size: (" << w_ << " x " << h_ << " x " << nc_ << ")"; + logger << " [" << bytes2fv_ / 1000000.0f << " mb]"; logger.eol(); + + logger << "block: " << block_; logger.eol(); + logger << "grid: " << grid_; logger.eol(); +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::reset() +{ + Logger logger("LinearDiffusion::reset"); + + cudaMemcpy(d_u0_, h_img_, bytes2fv_, cudaMemcpyHostToDevice); CUDA_CHECK; + cudaMemset(d_diff_, 0, bytes1f_); CUDA_CHECK; + + evenUpdate_ = true; // start with u0 +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::update() +{ + Logger logger("LinearDiffusion::update"); + + if (evenUpdate_) { + linear_diffusion<<>>(d_u0_, d_u1_, tau_, dim_); + } else { + linear_diffusion<<>>(d_u1_, d_u0_, tau_, dim_); + } + CUDA_CHECK; + cudaDeviceSynchronize(); + + evenUpdate_ = !evenUpdate_; +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::adaptiveUpdate() +{ + Logger logger("LinearDiffusion::adaptiveUpdate"); + + if (evenUpdate_) { + adaptive_linear_diffusion<<>>( + d_u0_, d_u1_, d_disp_, d_diff_, dim_, tau_ + ); + } else { + adaptive_linear_diffusion<<>>( + d_u1_, d_u0_, d_disp_, d_diff_, dim_, tau_ + ); + } + CUDA_CHECK; + cudaDeviceSynchronize(); + + evenUpdate_ = !evenUpdate_; +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::intensityUpdate() +{ + Logger logger("LinearDiffusion::intensityUpdate"); + + adaptive_intensity<<>>(d_u0_, d_disp_, d_diff_, dim_); + CUDA_CHECK; + cudaDeviceSynchronize(); +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::setFocus(const int depth) +{ + Logger logger("LinearDiffusion::setFocus"); + + for (int i = 0; i <= maxDisp_; i++) { + const float d = abs(depth - i) / (float)maxDisp_; + h_diff_[i] = powf(d, eta_); + } + + // copy to device + cudaMemcpy(d_diff_, h_diff_, bytes1f_, cudaMemcpyHostToDevice); CUDA_CHECK; +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::setFocus(const int x, const int y) { + const int depth = disp_.at(y, x); + setFocus(depth); +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::showInputImages(const string& windowName) +{ + cv::Mat disp; + cv::cvtColor(disp_, disp, CV_GRAY2BGR); + disp *= (255.0f / maxDisp_); + + cv::Mat img12; + cv::hconcat(img_, disp, img12); + cv::imshow(windowName, img12); +} + + 
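The two idioms this class leans on are the ceil-division grid sizing in updateParameters() and the even/odd ping-pong between d_u0_ and d_u1_ in update(). A stripped-down, self-contained sketch of the same pattern follows; diffuse_step and run are illustrative stand-ins, not the project's linear_diffusion kernel:

#include <cuda_runtime.h>

// One explicit Euler step of the heat equation with clamped (Neumann) borders.
__global__ void diffuse_step(const float* in, float* out, float tau, int w, int h) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) return;
    int xm = max(x - 1, 0), xp = min(x + 1, w - 1);
    int ym = max(y - 1, 0), yp = min(y + 1, h - 1);
    float u   = in[y * w + x];
    float lap = in[y * w + xm] + in[y * w + xp]
              + in[ym * w + x] + in[yp * w + x] - 4.0f * u;
    out[y * w + x] = u + tau * lap;
}

void run(float* d_a, float* d_b, int w, int h, float tau, int steps) {
    dim3 block(32, 16);
    dim3 grid((w + block.x - 1) / block.x,   // ceil(w / block.x)
              (h + block.y - 1) / block.y);  // ceil(h / block.y)
    bool even = true;
    for (int i = 0; i < steps; ++i) {
        // Swap the read/write roles each iteration instead of copying buffers.
        if (even) diffuse_step<<<grid, block>>>(d_a, d_b, tau, w, h);
        else      diffuse_step<<<grid, block>>>(d_b, d_a, tau, w, h);
        even = !even;
    }
    cudaDeviceSynchronize();
}

After an even number of steps the freshest data is back in d_a, which is the same reason this class tracks evenUpdate_ before reading results out of d_u0_.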
+//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::showDiffusedImage(const string& windowName) +{ + float* h_u0 = new float[size2v_]; + cudaMemcpy(h_u0, d_u0_, bytes2fv_, cudaMemcpyDeviceToHost); CUDA_CHECK; + + cv::Mat img = cv::Mat(h_, w_, CV_32FC3); + convert_layered_to_mat(img, h_u0); + + cv::imshow(windowName, img); + + delete h_u0; +} + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::getDiffusedImage(cv::Mat& diff) +{ + float* h_u0 = new float[size2v_]; + cudaMemcpy(h_u0, d_u0_, bytes2fv_, cudaMemcpyDeviceToHost); CUDA_CHECK; + + diff = cv::Mat(h_, w_, CV_32FC3); + convert_layered_to_mat(diff, h_u0); + diff *= 255; + diff.convertTo(diff, CV_8U); + + delete h_u0; +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::invalidatePointer() +{ + Logger logger("LinearDiffusion::invalidatePointer"); + + // invalidate pointers + d_u0_ = 0; + d_u1_ = 0; + d_disp_ = 0; + + h_img_ = 0; + h_disp_ = 0; +} + + +//////////////////////////////////////////////////////////////////////////////// +void LinearDiffusion::freeMemory() +{ + Logger logger("LinearDiffusion::freeMemory"); + + // free vram + if (d_u0_) cudaFree(d_u0_); CUDA_CHECK; + if (d_u1_) cudaFree(d_u1_); CUDA_CHECK; + if (d_disp_) cudaFree(d_disp_); CUDA_CHECK; + + // free ram + if (h_img_) delete[] h_img_; + if (h_disp_) delete[] h_disp_; + + invalidatePointer(); +} + + +//////////////////////////////////////////////////////////////////////////////// +LinearDiffusion::~LinearDiffusion() +{ + Logger logger("LinearDiffusion::~LinearDiffusion"); + + if (d_diff_) cudaFree(d_diff_); CUDA_CHECK; + if (h_diff_) delete[] h_diff_; + + freeMemory(); +} diff --git a/cuda_code/LoadBalancerGPU_1.cu b/cuda_code/LoadBalancerGPU_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..7933bd1ad5b018030a9794dee02d5bc434f8a375 --- /dev/null +++ b/cuda_code/LoadBalancerGPU_1.cu @@ -0,0 +1,157 @@ +// Copyright (c) 2009-2016 The Regents of the University of Michigan +// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. + + +// Maintainer: mphoward + +/*! \file LoadBalancerGPU.cu + \brief Implementation the GPU functions for load balancing +*/ + +#ifdef ENABLE_MPI + +#include "LoadBalancerGPU.cuh" +#include "hoomd/extern/cub/cub/cub.cuh" + +//! Mark the particles that are off rank +/*! + * \param d_ranks The current rank of each particle + * \param d_pos Particle positions + * \param d_cart_ranks Map from Cartesian coordinates to rank number + * \param rank_pos Cartesian coordinates of current rank + * \param box Local box + * \param di Domain indexer + * \param N Number of local particles + * + * Using a thread per particle, the current rank of each particle is computed assuming that a particle cannot migrate + * more than a single rank in any direction. The Cartesian rank of the particle is computed, and mapped back to a physical + * rank. 
+ */ +__global__ void gpu_load_balance_mark_rank_kernel(unsigned int *d_ranks, + const Scalar4 *d_pos, + const unsigned int *d_cart_ranks, + const uint3 rank_pos, + const BoxDim box, + const Index3D di, + const unsigned int N) + { + // particle index + const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; + + // one thread per particle + if (idx >= N) + return; + + const Scalar4 postype = d_pos[idx]; + const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); + const Scalar3 f = box.makeFraction(pos); + + int3 grid_pos = make_int3(rank_pos.x, rank_pos.y, rank_pos.z); + + if (f.x >= Scalar(1.0)) ++grid_pos.x; + if (f.x < Scalar(0.0)) --grid_pos.x; + if (f.y >= Scalar(1.0)) ++grid_pos.y; + if (f.y < Scalar(0.0)) --grid_pos.y; + if (f.z >= Scalar(1.0)) ++grid_pos.z; + if (f.z < Scalar(0.0)) --grid_pos.z; + + if (grid_pos.x == (int)di.getW()) + grid_pos.x = 0; + else if (grid_pos.x < 0) + grid_pos.x += di.getW(); + + if (grid_pos.y == (int)di.getH()) + grid_pos.y = 0; + else if (grid_pos.y < 0) + grid_pos.y += di.getH(); + + if (grid_pos.z == (int)di.getD()) + grid_pos.z = 0; + else if (grid_pos.z < 0) + grid_pos.z += di.getD(); + + const unsigned int cur_rank = d_cart_ranks[di(grid_pos.x,grid_pos.y,grid_pos.z)]; + + d_ranks[idx] = cur_rank; + } + +/*! + * \param d_ranks The current rank of each particle + * \param d_pos Particle positions + * \param d_cart_ranks Map from Cartesian coordinates to rank number + * \param rank_pos Cartesian coordinates of current rank + * \param box Local box + * \param di Domain indexer + * \param N Number of local particles + * \param block_size Kernel launch block size + * + * This simply a kernel driver, see gpu_load_balance_mark_rank_kernel for details. + */ +void gpu_load_balance_mark_rank(unsigned int *d_ranks, + const Scalar4 *d_pos, + const unsigned int *d_cart_ranks, + const uint3 rank_pos, + const BoxDim& box, + const Index3D& di, + const unsigned int N, + const unsigned int block_size) + { + static unsigned int max_block_size = UINT_MAX; + if (max_block_size == UINT_MAX) + { + cudaFuncAttributes attr; + cudaFuncGetAttributes(&attr, (const void *)gpu_load_balance_mark_rank_kernel); + max_block_size = attr.maxThreadsPerBlock; + } + unsigned int run_block_size = min(block_size, max_block_size); + unsigned int n_blocks = N/run_block_size + 1; + + gpu_load_balance_mark_rank_kernel<<>>(d_ranks, d_pos, d_cart_ranks, rank_pos, box, di, N); + } + +//! Functor for selecting ranks not equal to the current rank +struct NotEqual + { + unsigned int not_eq_val; //!< Value to test if not equal to + + __host__ __device__ __forceinline__ + NotEqual(unsigned int _not_eq_val) : not_eq_val(_not_eq_val) {} + + __host__ __device__ __forceinline__ + bool operator()(const unsigned int &a) const + { + return (a != not_eq_val); + } + }; + +/*! + * \param d_off_rank (Reduced) list of particles that are off the current rank + * \param d_n_select Number of particles that are off the current rank + * \param d_ranks The current rank of each particle + * \param d_tmp_storage Temporary storage array, or NULL + * \param tmp_storage_bytes Size of temporary storage, or 0 + * \param N Number of local particles + * \param cur_rank Current rank index + * + * This function uses the CUB DeviceSelect::If primitive to select particles that are off rank using the NotEqual + * functor. As is usual, this function must be called twice in order to perform the selection. 
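The two-call protocol described here is the usual CUB pattern: the first call passes a null workspace pointer so that only the required temporary size is reported, and the real call runs once that workspace exists. A minimal host-side sketch of the flow, assuming a plain cudaMalloc for the workspace (the actual code obtains it from HOOMD's CachedAllocator instead):

#include "hoomd/extern/cub/cub/cub.cuh"
#include <cuda_runtime.h>

void select_off_rank_sketch(unsigned int* d_ranks, unsigned int* d_off_rank,
                            unsigned int* d_n_select, unsigned int N,
                            unsigned int cur_rank)
{
    NotEqual select_op(cur_rank);

    // Pass 1: null storage pointer, only fills in temp_bytes.
    void* d_temp = nullptr;
    size_t temp_bytes = 0;
    cub::DeviceSelect::If(d_temp, temp_bytes, d_ranks, d_off_rank, d_n_select, N, select_op);

    // Allocate the requested workspace (illustrative; a cached allocator is preferable).
    cudaMalloc(&d_temp, temp_bytes);

    // Pass 2: the actual selection of off-rank particles.
    cub::DeviceSelect::If(d_temp, temp_bytes, d_ranks, d_off_rank, d_n_select, N, select_op);

    cudaFree(d_temp);
}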
If \a d_tmp_storage + * is NULL, the temporary storage requirement is computed and saved in \a tmp_storage_bytes. This is externally + * allocated from the CachedAllocator. When called the second time, the ranks of the particles not on the current + * rank are saved in \a d_off_rank, and the number of these particles is saved in \a d_n_select. + */ +void gpu_load_balance_select_off_rank(unsigned int *d_off_rank, + unsigned int *d_n_select, + unsigned int *d_ranks, + void *d_tmp_storage, + size_t &tmp_storage_bytes, + const unsigned int N, + const unsigned int cur_rank) + { + // final precaution against calling with an empty array + if (N == 0) return; + + NotEqual select_op(cur_rank); + cub::DeviceSelect::If(d_tmp_storage, tmp_storage_bytes, d_ranks, d_off_rank, d_n_select, N, select_op); + } + +#endif // ENABLE_MPI diff --git a/cuda_code/LookupTable_20.cu b/cuda_code/LookupTable_20.cu new file mode 100644 index 0000000000000000000000000000000000000000..3d8c42363946fedad45e56f11c06128478d0f00b --- /dev/null +++ b/cuda_code/LookupTable_20.cu @@ -0,0 +1,244 @@ +#include "THCUNN.h" +#include +#include +#include + +#ifndef DIVUP +#define DIVUP(x, y) (((x) + (y) - 1) / (y)) +#endif + +const int WARP_SIZE = 32; + +__device__ __forceinline__ bool warpHasCollision(int val) +{ + // Compare our value to the values stored in the next 16 lanes, + // wrapping around at 32. If any pair of values is the same than + // there is a collision in the warp. + bool dup = 0; + const int laneId = threadIdx.x % 32; + +#if __CUDA_ARCH__ >= 300 + + #pragma unroll + for (int i = 1; i <= 16; i++) + { + dup |= (__shfl(val, (laneId + i) % 32) == val); + } + +#else + + volatile __shared__ int values[128]; + values[threadIdx.x] = val; + const int offset = threadIdx.x - laneId; + + #pragma unroll + for (int i = 1; i <= 16; i++) + { + dup |= (values[offset + ((laneId + i) % 32)] == val); + } + +#endif + + return __any(dup) != 0; +} + +__global__ void cunn_LookupTable_accGradParametersKernelByFeature( + float *input, float *gradOutput, float *gradWeight, float scale, long numel, long stride) +{ + + const int featureDim = blockIdx.x * 4 + threadIdx.x / 32; + if (featureDim >= stride) + return; + + // The strategy here is that each warp handles a single feature + // dimension. + // Within that feature dimension, points in the [batch][element] + // dimension can overlap, and we need to determine if threads want + // to add to the gradient in a colliding manner. + // Typically one would use floating-point atomicAdd() to resolve + // these collisions, but that is non-deterministic if there are + // collisions. Non-determinism for this code is really bad, + // especially in RNNs, and is prone to snowballing error. + // In order to get a deterministic order of execution, we handle + // non-colliding updates separately from colliding ones. Colliding + // updates are serialized in their order of execution by using the + // warp-wide collision detector `warpHasCollision`. 
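On Volta (sm_70) and newer parts, the same warp-wide duplicate test can be expressed with a single __match_any_sync instruction instead of the 16-step shuffle loop; a hedged alternative sketch, not something this file defines:

__device__ __forceinline__ bool warpHasCollisionMatch(int val)
{
    // Lanes holding equal values receive the same membership mask; more than
    // one set bit in that mask means at least two lanes share this value.
    unsigned int peers = __match_any_sync(0xffffffff, val);
    bool dup = __popc(peers) > 1;
    // Match the original semantics: the whole warp reports a collision
    // if any pair of lanes collides.
    return __any_sync(0xffffffff, dup);
}

The deterministic serialization below would stay the same; only the detection step changes.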
+ const int laneId = threadIdx.x % 32; + for (int i = laneId; i < numel; i += WARP_SIZE) + { + int weightIndex = (int) (input[i] - 1); + float update = gradOutput[i*stride + featureDim] * scale; + + // Check for collision + if (warpHasCollision(weightIndex)) + { + // Run all lanes sequentially; warp divergence + for (int i = 0; i < WARP_SIZE; ++i) + { + if (laneId == i) + { + gradWeight[weightIndex*stride + featureDim] += update; + } + } + } + else + { + // No collision; warp coherence + gradWeight[weightIndex*stride + featureDim] += update; + } + } +} + +__global__ void cunn_LookupTable_accGradParametersKernel( + float *input, float *indices, float *gradOutput, float *gradWeight, + float *count, float defaultScale, long numel, long stride) +{ + int idx = blockIdx.x * 4 + threadIdx.y; + + // Each warp is responsible for an input into the LookupTable. + // If the preceeding input has the same as this input, then the warp + // exits immediately. The warp also processes subsequent inputs with the + // same value. + // + // Input Warp + // 1 + // 1 ( exits without doing any work) + // 5 + // 8 + + // Number of values proceessed by each thread (grain size) + const int SZ = 4; + + if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) + { + do + { + const int startFeature = threadIdx.x + blockIdx.y * blockDim.x * SZ; + const int weightRow = ((int) input[idx] - 1) * stride; + const int gradOutputRow = ((int) indices[idx] - 1) * stride; + const float scale = count ? defaultScale / count[idx] : defaultScale; + + float gradient[SZ]; + float weight[SZ]; + + #pragma unroll + for (int ii = 0; ii < SZ; ii++) + { + int featureDim = startFeature + ii * WARP_SIZE; + if (featureDim < stride) + { + gradient[ii] = gradOutput[gradOutputRow + featureDim]; + weight[ii] = gradWeight[weightRow + featureDim]; + } + } + + #pragma unroll + for (int ii = 0; ii < SZ; ii++) + { + weight[ii] += gradient[ii] * scale; + } + + #pragma unroll + for (int ii = 0; ii < SZ; ii++) + { + int featureDim = startFeature + ii * WARP_SIZE; + if (featureDim < stride) + { + gradWeight[weightRow + featureDim] = weight[ii]; + } + } + + idx++; + } while (idx < numel && input[idx] == input[idx - 1]); + } +} + +void THNN_CudaLookupTable_accGradParameters(THCState *state, THIndexTensor *input, THCudaTensor *gradOutput, + THCudaTensor *gradWeight, float scale, bool scaleGradByFreq, THIntegerTensor *count, + THCudaTensor *sorted, THCudaTensor *indices) +{ + THAssert(THCudaTensor_checkGPU(state, 5, input, gradOutput, gradWeight, sorted, indices)); + if (!(THCudaTensor_isContiguous(state, input) && + THCudaTensor_isContiguous(state, gradOutput) && + THCudaTensor_isContiguous(state, gradWeight))) + { + THError("Tensors must be contiguous"); + } + + int nDim = THCudaTensor_nDimension(state, input); + if (nDim != 1 && nDim != 2) + THError("input must be a vector or matrix"); + + long numel = THCudaTensor_nElement(state, input); + long stride = gradWeight->stride[0]; + + cudaStream_t stream = THCState_getCurrentStream(state); + + if (numel <= 768 && !scaleGradByFreq) + { + cunn_LookupTable_accGradParametersKernelByFeature<<>>( + THCudaTensor_data(state, input), + THCudaTensor_data(state, gradOutput), + THCudaTensor_data(state, gradWeight), + scale, + numel, + stride + ); + + return; + } + + THCudaTensor_resizeAs(state, sorted, input); + THCudaTensor_resizeAs(state, indices, input); + + // Sort the inputs into sorted with the corresponding indices + THCudaTensor_sort(state, sorted, indices, input, 0, 0); + + float *sorted_data = 
THCudaTensor_data(state, sorted); + float *indices_data = THCudaTensor_data(state, indices); + float *count_data = NULL; + + if (scaleGradByFreq) + { + THIntegerTensor_(resizeAs)(state, count, input); + count_data = THIntegerTensor_(data)(state, count); + + thrust::device_ptr sorted_ptr(sorted_data); + thrust::device_ptr count_ptr(count_data); + + // Compute an increasing sequence per unique item in sorted: + // sorted: 2 5 5 5 7 7 8 9 9 + // count: 1 1 2 3 1 2 1 1 2 + thrust::inclusive_scan_by_key( + sorted_ptr, + sorted_ptr + numel, + thrust::make_constant_iterator(1), + count_ptr + ); + + // Take the maximum of each count per unique key in reverse: + // sorted: 2 5 5 5 7 7 8 9 9 + // count: 1 3 3 3 2 2 1 2 2 + thrust::inclusive_scan_by_key( + thrust::make_reverse_iterator(sorted_ptr + numel), + thrust::make_reverse_iterator(sorted_ptr), + thrust::make_reverse_iterator(count_ptr + numel), + thrust::make_reverse_iterator(count_ptr + numel), + thrust::equal_to(), + thrust::maximum() + ); + } + + dim3 grid(DIVUP(numel,4), DIVUP(stride,128)); + dim3 block(32, 4); + cunn_LookupTable_accGradParametersKernel<<>>( + sorted_data, + indices_data, + THCudaTensor_data(state, gradOutput), + THCudaTensor_data(state, gradWeight), + count_data, + scale, + numel, + stride + ); +} diff --git a/cuda_code/Loss_7.cu b/cuda_code/Loss_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..352a688333ed10de247d120dc090cfa84e7efd5b --- /dev/null +++ b/cuda_code/Loss_7.cu @@ -0,0 +1,609 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +constexpr float EPSILON = 1e-12; + +namespace { + +using namespace at; + +void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) { + at::TensorIterator iter = TensorIteratorConfig() + .add_output(grad_input) + .add_input(grad) + .add_input(input) + .add_input(target) + .build(); + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() { + at::native::gpu_kernel(iter, [] GPU_LAMBDA ( + scalar_t grad_val, + scalar_t input_val, + scalar_t target_val + ) -> scalar_t { + const scalar_t one = 1; + const scalar_t epsilon = EPSILON; + + scalar_t grad_input_denominator = max( + (one - input_val) * input_val, + epsilon + ); + + return grad_val * (input_val - target_val) / grad_input_denominator; + } + ); + }); +} + +} // namespace + +namespace at { namespace native { + +Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) { + auto grad_input = at::empty_like(input); + if (!log_target) { + TensorIterator iter = TensorIteratorConfig() + .add_output(grad_input) + .add_input(target) + .add_input(grad) + .build(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() { + scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0); + gpu_kernel(iter, + [inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) { + return (target_val > 0) ? 
scalar_t(-target_val * grad_val * inv) : scalar_t(0.0); + }); + }); + } + else { + grad_input = -at::exp(target) * grad; + if (reduction == at::Reduction::Mean) { + grad_input /= input.numel(); + } + } + + return grad_input; +} + +Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction) { + // See [Note: hacky wrapper removal for optional tensor] + c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); + const Tensor& weight = *weight_maybe_owned; + + Tensor loss = at::empty_like(input); + return at::native::binary_cross_entropy_out_cuda( + input, target, weight, reduction, loss); +} + +Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction, Tensor& loss) { + // See [Note: hacky wrapper removal for optional tensor] + c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); + const Tensor& weight = *weight_maybe_owned; + + Tensor loss_squeezed = at::squeeze(loss); + + TensorIterator iter = TensorIteratorConfig() + .add_output(loss_squeezed) + .add_owned_input(at::squeeze(input)) + .add_owned_input(at::squeeze(target)) + .build(); + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() { + gpu_kernel(iter, + [] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t { + const scalar_t zero = 0; + const scalar_t one = 1; + const scalar_t neg_100 = -100; + + CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one); + + scalar_t log_input_val = std::log(input_val); + scalar_t log_1_minus_input_val = std::log(one - input_val); + + log_input_val = std::max(log_input_val, neg_100); + log_1_minus_input_val = std::max(log_1_minus_input_val, neg_100); + + return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val); + } + ); + }); + if (weight.defined()) { + loss.mul_(weight); + } + + if (reduction != at::Reduction::None) { + Tensor loss_reduced; + if (reduction == at::Reduction::Mean) { + loss_reduced = loss.mean(); + } else if (reduction == at::Reduction::Sum) { + loss_reduced = loss.sum(); + } + loss.resize_as_(loss_reduced).copy_(loss_reduced); + } + + return loss; +} + +Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction) { + // See [Note: hacky wrapper removal for optional tensor] + c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); + const Tensor& weight = *weight_maybe_owned; + + Tensor grad_input = at::empty_like(input); + return at::native::binary_cross_entropy_backward_out_cuda( + grad, input, target, weight, reduction, grad_input); +} + +Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional& weight_opt, int64_t reduction, Tensor& grad_input) { + // See [Note: hacky wrapper removal for optional tensor] + c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); + const Tensor& weight = *weight_maybe_owned; + + Tensor grad_expand = grad.expand_as(input); + binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target); + + if (weight.defined()) { + grad_input.mul_(weight); + } + if (reduction == at::Reduction::Mean) { + grad_input.div_(input.numel()); + } + return grad_input; +} + +// ----------------------------------- +// 
nll_loss +// ----------------------------------- +namespace { + +const int NLL_LOSS_THREADS = 32; + +#define AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, NAME, ...) \ + [&] { \ + at::ScalarType _it = TYPE; \ + RECORD_KERNEL_FUNCTION_DTYPE(NAME, _it) \ + switch (_it) { \ + AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Byte, uint8_t, index_t, __VA_ARGS__) \ + AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Long, int64_t, index_t, __VA_ARGS__)\ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(_it), "'"); \ + } \ + }() + +template +__global__ void nll_loss_forward_no_reduce_cuda_kernel( + int64_t batch_size, + PackedTensorAccessor64 input, + index_t* target, + scalar_t* output, + scalar_t* weights, + int n_classes, + int ignore_index) { + CUDA_KERNEL_LOOP(index, batch_size) { + int cur_target = target[index]; + if (cur_target == ignore_index) { + output[index] = static_cast(0); + continue; + } + CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes); + auto cur_weight = + weights != nullptr ? weights[cur_target] : static_cast(1); + output[index] = -cur_weight * input[index][cur_target]; + } +} + +template +__global__ void nll_loss_forward_reduce_cuda_kernel_1d( + scalar_t* output, + scalar_t* total_weight, + scalar_t* input, + index_t* target, + scalar_t* weights, + bool size_average, + int n_classes, + int64_t ignore_index) { + CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0); + + int t = static_cast(*target); + if (t != static_cast(ignore_index)) { + CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); + scalar_t cur_weight = + weights != nullptr ? weights[t] : static_cast(1); + *output = -cur_weight * input[t]; + *total_weight = cur_weight; + if (size_average && *total_weight > 0) { + *output /= *total_weight; + } + } +} + +template +__global__ void nll_loss_forward_reduce_cuda_kernel_2d( + scalar_t* output, + scalar_t* total_weight, + scalar_t* input, + index_t* target, + scalar_t* weights, + bool size_average, + int nframe, + int ndim, + int n_classes, + int64_t ignore_index) { + // NOLINTNEXTLINE(cppcoreguidelines-init-variables) + __shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS], + acc_weight[NLL_LOSS_THREADS]; + + sh_inputs[threadIdx.x] = static_cast(0); + acc_weight[threadIdx.x] = static_cast(0); + for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) { + int t = target[i]; + if (t != static_cast(ignore_index)) { + CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); + scalar_t cur_weight = + weights != nullptr ? 
weights[t] : static_cast(1); + sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight; + acc_weight[threadIdx.x] += cur_weight; + } + } + + __syncthreads(); + + if (threadIdx.x == 0) { + accscalar_t output_acc = 0; + accscalar_t total_weight_acc = 0; + for (int i = 0; i < NLL_LOSS_THREADS; ++i) { + output_acc += sh_inputs[i]; + total_weight_acc += acc_weight[i]; + } + *total_weight = static_cast(total_weight_acc); + if (size_average && nframe == 0) { + // Mean reduction on empty tensors produces NaN + *output = std::numeric_limits::quiet_NaN(); + } else if (size_average && total_weight_acc != 0) { + *output = static_cast(output_acc / total_weight_acc); + } else { + *output = static_cast(output_acc); + } + } +} + +void nll_loss_forward_out_cuda_template( + const Tensor& output, + const Tensor& total_weight, + const Tensor& input_, + const Tensor& target_, + const Tensor& weight, + int64_t reduction, + int64_t ignore_index) { + auto input = *input_.expect_contiguous(); + auto target = *target_.expect_contiguous(); + + int64_t n_classes = input.size(-1); + int64_t n_dims = input.dim(); + int64_t batch_size = n_dims == 1 ? 1 : input.size(0); + + auto weight_ = weight.defined() ? weight.contiguous() : weight; + + if (reduction == Reduction::None && n_dims == 2) { + output.resize_({batch_size}); + if (batch_size == 0) { + // This guards from unnecessary operations and launching CUDA kernel with + // 0 blocks. + return; + } + + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + input.scalar_type(), + "nll_loss_forward_no_reduce_cuda_kernel", + [&] { + AT_DISPATCH_NLL_LOSS_INDEX_TYPES( + target.scalar_type(), + "nll_loss_forward_no_reduce_cuda_kernel_index", + [&] { + nll_loss_forward_no_reduce_cuda_kernel + <<>>( + batch_size, + input.packed_accessor64(), + target.data_ptr(), + output.data_ptr(), + weight_.defined() ? weight_.data_ptr() + : nullptr, + n_classes, + ignore_index); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + return; + } + + output.resize_({}); + total_weight.resize_({}); + + if (n_dims == 1) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + input.scalar_type(), + "nll_loss_forward_reduce_cuda_kernel_1d", + [&] { + AT_DISPATCH_NLL_LOSS_INDEX_TYPES( + target.scalar_type(), + "nll_loss_forward_reduce_cuda_kernel_1d_index", + [&] { + nll_loss_forward_reduce_cuda_kernel_1d + <<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + output.data_ptr(), + total_weight.data_ptr(), + input.data_ptr(), + target.data_ptr(), + weight_.defined() ? weight_.data_ptr() + : nullptr, + reduction == at::Reduction::Mean, + n_classes, + ignore_index); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + } else if (n_dims == 2) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + input.scalar_type(), + "nll_loss_forward_reduce_cuda_kernel_2d", + [&] { + AT_DISPATCH_NLL_LOSS_INDEX_TYPES( + target.scalar_type(), + "nll_loss_forward_reduce_cuda_kernel_2d_index", + [&] { + using accscalar_t = at::acc_type; + nll_loss_forward_reduce_cuda_kernel_2d + <<<1, + NLL_LOSS_THREADS, + 0, + at::cuda::getCurrentCUDAStream()>>>( + output.data_ptr(), + total_weight.data_ptr(), + input.data_ptr(), + target.data_ptr(), + weight_.defined() ? 
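+                        // nullptr here means "no per-class weights"; the kernel then
+                        // falls back to a weight of 1 for every class.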
weight_.data_ptr() + : nullptr, + reduction == at::Reduction::Mean, + input.size(0), + input.size(1), + n_classes, + ignore_index); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + } +} + +template +__global__ void nll_loss_backward_no_reduce_cuda_kernel( + int batch_size, + index_t *target, + PackedTensorAccessor64 grad_output, + PackedTensorAccessor64 grad_input, + scalar_t *weights, + int n_classes, + int ignore_index) { + + CUDA_KERNEL_LOOP(index, batch_size) { + int cur_target = target[index]; + if (cur_target == ignore_index) { + continue; + } + CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes); + scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast(1); + grad_input[index][cur_target] = -weight * grad_output[index]; + } +}; + +template +__global__ void nll_loss_backward_reduce_cuda_kernel_1d( + scalar_t *grad_input, + scalar_t *grad_output, + scalar_t *weights, + index_t *target, + scalar_t *total_weight, + bool size_average, + int n_classes, + int64_t ignore_index +) { + if (*total_weight <= 0) { + return; + } + scalar_t norm = size_average ? (static_cast(1) / *total_weight) : static_cast(1); + int t = static_cast(*target); + if (t != static_cast(ignore_index)) { + CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); + grad_input[t] = -(weights != nullptr ? weights[t] : static_cast(1)) * norm * grad_output[0]; + } +}; + +template +__global__ void nll_loss_backward_reduce_cuda_kernel_2d( + scalar_t* grad_input, + scalar_t* grad_output, + index_t* target, + scalar_t* weights, + scalar_t* total_weight, + bool size_average, + int nframe, + int ndim, + int n_classes, + int64_t ignore_index) { + if (*total_weight <= 0) { + return; + } + scalar_t norm = size_average ? (static_cast(1) / *total_weight) : static_cast(1); + + for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) { + int t = target[i]; + if (t != static_cast(ignore_index)) { + CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes); + grad_input[i * ndim + t] = -(weights != nullptr ? weights[t] : static_cast(1)) * norm * grad_output[0]; + } + } +}; + +void nll_loss_backward_out_cuda_template( + const Tensor& grad_input_, + const Tensor& grad_output_, + const Tensor& input_, + const Tensor& target_, + const Tensor& total_weight, + const Tensor& weight, + int64_t reduction, + int64_t ignore_index) { + auto target = *target_.expect_contiguous(); + auto input = *input_.expect_contiguous(); + auto grad_input = *grad_input_.expect_contiguous(); + auto grad_output = *grad_output_.expect_contiguous(); + + int64_t n_dims = input.dim(); + int64_t n_classes = input.size(-1); + int64_t batch_size = n_dims == 1 ? 1 : input.size(0); + + auto weight_ = weight.defined() ? weight.contiguous() : weight; + + if (reduction == at::Reduction::None && n_dims == 2) { + check_dim_size(grad_output, 1, 0, batch_size); + if (batch_size == 0) { + // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. + return; + } + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + input.scalar_type(), + "nll_loss_backward_no_reduce_cuda_kernel", + [&] { + AT_DISPATCH_NLL_LOSS_INDEX_TYPES( + target.scalar_type(), + "nll_loss_backward_no_reduce_cuda_kernel_index", + [&] { + nll_loss_backward_no_reduce_cuda_kernel + <<>>( + batch_size, + target.data_ptr(), + grad_output.packed_accessor64(), + grad_input.packed_accessor64(), + weight.defined() ? 
weight_.data_ptr() + : nullptr, + n_classes, + ignore_index); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + return; + } + + TORCH_CHECK(grad_output.numel() == 1); + + if (n_dims == 1) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + input.scalar_type(), + "nll_loss_backward_reduce_cuda_kernel_1d", + [&] { + AT_DISPATCH_NLL_LOSS_INDEX_TYPES( + target.scalar_type(), + "nll_loss_backward_reduce_cuda_kernel_1d_index", + [&] { + nll_loss_backward_reduce_cuda_kernel_1d + <<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( + grad_input.data_ptr(), + grad_output.data_ptr(), + weight.defined() ? weight_.data_ptr() + : nullptr, + target.data_ptr(), + total_weight.data_ptr(), + reduction == at::Reduction::Mean, + n_classes, + ignore_index); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + } else { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + input.scalar_type(), + "nll_loss_backward_reduce_cuda_kernel_2d", + [&] { + AT_DISPATCH_NLL_LOSS_INDEX_TYPES( + target.scalar_type(), + "nll_loss_backward_reduce_cuda_kernel_2d_index", + [&] { + nll_loss_backward_reduce_cuda_kernel_2d + <<<1, NLL_LOSS_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( + grad_input.data_ptr(), + grad_output.data_ptr(), + target.data_ptr(), + weight.defined() ? weight_.data_ptr() : nullptr, + total_weight.data_ptr(), + reduction == at::Reduction::Mean, + input.size(0), + input.size(1), + n_classes, + ignore_index); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + } +} + +#undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES + +} // namespace + +TORCH_IMPL_FUNC(nll_loss_forward_out_cuda) +(const Tensor& self, + const Tensor& target, + const OptionalTensorRef weight_opt, + int64_t reduction, + int64_t ignore_index, + const Tensor& output, + const Tensor& total_weight) { + const Tensor& weight = weight_opt.getTensorRef(); + nll_loss_forward_out_cuda_template( + output, total_weight, self, target, weight, reduction, ignore_index); +} + +TORCH_IMPL_FUNC(nll_loss_backward_out_cuda) +(const Tensor& grad_output, + const Tensor& self, + const Tensor& target, + OptionalTensorRef weight_opt, + int64_t reduction, + int64_t ignore_index, + const Tensor& total_weight, + const Tensor& grad_input) { + const Tensor& weight = weight_opt.getTensorRef(); + grad_input.zero_(); + nll_loss_backward_out_cuda_template( + grad_input, + grad_output, + self, + target, + total_weight, + weight, + reduction, + ignore_index); +} +}} // namespace at::native diff --git a/cuda_code/MM11.cu b/cuda_code/MM11.cu new file mode 100644 index 0000000000000000000000000000000000000000..9ae5cc4d349af6a612c10d225005e7d664f27a8f --- /dev/null +++ b/cuda_code/MM11.cu @@ -0,0 +1,213 @@ +#include +#include + +#define SIZE 16 + +#ifndef PINNED +#define PINNED 0 +#endif + +// Matriz por Matriz +// C(NxM) <- A(NxP) * B (PxM) + +__global__ void Kernel11(int N, int M, int P, float *A, float *B, float *C) { + + __shared__ float sA[SIZE][SIZE]; + __shared__ float sB[SIZE][SIZE]; + + int bx = blockIdx.x; int by = blockIdx.y; + int tx = threadIdx.x; int ty = threadIdx.y; + int row = by * SIZE + ty; + int col = bx * SIZE + tx; + int m, k, iter; + + + + float tmp = 0.0; + iter = P%SIZE; + if (iter == 0) { + for (m=0; m < P; m=m+SIZE) { + sA[ty][tx] = A[row*P + m + tx]; + sB[ty][tx] = B[col + (m + ty)*M]; + __syncthreads(); + for (k=0; k>>(N, N, N, d_A, d_B, d_C); + + cudaEventRecord(E2, 0); + cudaEventSynchronize(E2); + + // Obtener el resultado desde el host + cudaMemcpy(h_C, d_C, numBytes, cudaMemcpyDeviceToHost); + + // 
Liberar Memoria del device + cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); + + cudaEventRecord(E3, 0); + cudaEventSynchronize(E3); + + cudaEventElapsedTime(&TiempoTotal, E0, E3); + cudaEventElapsedTime(&TiempoKernel, E1, E2); + printf("\nKERNEL 11\n"); + printf("Dimensiones: %dx%d\n", N, N); + printf("nThreads: %dx%d (%d)\n", nThreads, nThreads, nThreads * nThreads); + printf("nBlocks: %dx%d (%d)\n", nBlocks, nBlocks, nBlocks*nBlocks); + if (PINNED) printf("Usando Pinned Memory\n"); else printf("NO usa Pinned Memory\n"); + printf("Tiempo Global: %4.6f milseg\n", TiempoTotal); + printf("Tiempo Kernel: %4.6f milseg\n", TiempoKernel); + printf("Rendimiento Global: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoTotal)); + printf("Rendimiento Kernel: %4.2f GFLOPS\n", (2.0 * (float) N * (float) N * (float) N) / (1000000.0 * TiempoKernel)); + + cudaEventDestroy(E0); cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3); + + if (test == 'N') + printf ("NO TEST\n"); + else if (TestMM(N, N, N, h_A, h_B, h_C)) + printf ("TEST PASS\n"); + else + printf ("TEST FAIL\n"); + + if (PINNED) { cudaFreeHost(h_A); cudaFreeHost(h_B); cudaFreeHost(h_C); } + else { free(h_A); free(h_B); free(h_C); } + +} + + +void InitM(int N, int M, float *Mat) { + int i; + for (i=0; i 0.0001) return 1; + else return 0; + +} + +int TestMM(int N, int M, int P, float *A, float *B, float *C) { + int i, j, k; + float tmp; + // El test completo es muy costoso, por eso sólo probamos algunos casos + // Para comprobar completamente el resultado hay que poner: i++ y j++ + for (i=0; i +#include +#include +#include +#include + +struct mse_functor +{ + mse_functor() {} + + __host__ __device__ float operator()(const float& x, const float& y) const + { + float z = x-y; + return z*z; + } +}; + + +static int cunn_MSECriterion_updateOutput(lua_State *L) +{ + THCState *state = getCutorchState(L); + THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); + THCudaTensor *target = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); + THAssert(THCudaTensor_checkGPU(state, 2, input, target)); + + int sizeAverage = luaT_getfieldcheckboolean(L, 1, "sizeAverage"); + luaL_argcheck(L, THCudaTensor_nElement(state, input) == THCudaTensor_nElement(state, target), 2, + "input and target need to have the same number of elements"); + + float sum; + + long size = THCudaTensor_nElement(state, input); + + input = THCudaTensor_newContiguous(state, input); + target = THCudaTensor_newContiguous(state, target); + + thrust::device_ptr input_data(THCudaTensor_data(state, input)); + thrust::device_ptr target_data(THCudaTensor_data(state, target)); + sum = thrust::inner_product(input_data, input_data+size, target_data, (float) 0, thrust::plus(), mse_functor()); + + if(sizeAverage) + sum /= size; + + THCudaTensor_free(state, input); + THCudaTensor_free(state, target); + + lua_pushnumber(L, sum); + lua_setfield(L, 1, "output"); + + lua_pushnumber(L, sum); + return 1; +} + + +struct mse_updateGradInput_functor +{ + const float norm; + + mse_updateGradInput_functor(float norm_) : norm(norm_) {} + + __host__ __device__ float operator()(const float& x, const float& y) const + { + return norm * (x - y); + } +}; + +static int cunn_MSECriterion_updateGradInput(lua_State *L) +{ + THCState *state = getCutorchState(L); + THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); + THCudaTensor *target = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); + int sizeAverage = 
luaT_getfieldcheckboolean(L, 1, "sizeAverage"); + THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); + luaL_argcheck(L, THCudaTensor_nElement(state, input) == THCudaTensor_nElement(state, target), 2, + "input and target need to have the same number of elements"); + THAssert(THCudaTensor_checkGPU(state, 3, input, target, gradInput)); + + long size = THCudaTensor_nElement(state, input); + float norm = (sizeAverage ? 2./size : 2.); + + input = THCudaTensor_newContiguous(state, input); + target = THCudaTensor_newContiguous(state, target); + + THCudaTensor_resizeAs(state, gradInput, input); + + thrust::device_ptr input_data(THCudaTensor_data(state, input)); + thrust::device_ptr target_data(THCudaTensor_data(state, target)); + thrust::device_ptr gradInput_data(THCudaTensor_data(state, gradInput)); + + thrust::transform(input_data, input_data+size, target_data, gradInput_data, mse_updateGradInput_functor(norm)); + + THCudaTensor_free(state, input); + THCudaTensor_free(state, target); + return 1; +} + +#define MSECRITERION_THREADS 128 + +__global__ void cunn_MSECriterion_updateOutput_kernel(float* output, float *input, float *target, int nframe, int dim, int sizeAverage) +{ + __shared__ float buffer[MSECRITERION_THREADS]; + int k = blockIdx.x; + float *input_k = input + k*dim; + float *target_k = target + k*dim; + + int i_start = threadIdx.x; + int i_end = dim; + int i_step = blockDim.x; + + // mse + buffer[threadIdx.x] = 0; + for (int i=i_start; i>>(output->data, + THCudaTensor_data(state, input), + THCudaTensor_data(state, target), + 1, size, + sizeAverage); + + lua_pushnumber(L, THCudaStorage_get(state, output, 0)); + + cudaError errcode = cudaGetLastError(); + if(errcode != cudaSuccess) + THError(cudaGetErrorString(errcode)); + + THCudaTensor_free(state, input); + THCudaTensor_free(state, target); + THCudaStorage_free(state, output); + + lua_pushstring(L, "output"); + lua_pushvalue(L, -2); + lua_rawset(L, 1); + + return 1; +} + +static int cunn_MSECriterion_updateGradInput2(lua_State *L) +{ + THCState *state = getCutorchState(L); + THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); + THCudaTensor *target = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); + int sizeAverage = luaT_getfieldcheckboolean(L, 1, "sizeAverage"); + THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); + long size = THCudaTensor_nElement(state, input); + float norm = (sizeAverage ? 
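+  // Descriptive note: for the mean squared error, d/dx_j [(1/n) * sum_i (x_i - y_i)^2]
+  // = 2 * (x_j - y_j) / n, so norm is 2/size when sizeAverage is set and plain 2 otherwise.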
2./size : 2.); + + input = THCudaTensor_newContiguous(state, input); + target = THCudaTensor_newContiguous(state, target); + + THCudaTensor_resizeAs(state, gradInput, input); + + dim3 blocks(1); + dim3 threads(MSECRITERION_THREADS); + + cunn_MSECriterion_updateGradInput_kernel<<>>(THCudaTensor_data(state, gradInput), + THCudaTensor_data(state, input), + THCudaTensor_data(state, target), + norm, + 1, size); + + cudaError errcode = cudaGetLastError(); + if(errcode != cudaSuccess) + THError(cudaGetErrorString(errcode)); + + THCudaTensor_free(state, input); + THCudaTensor_free(state, target); + return 1; +} + + +static const struct luaL_Reg cunn_MSECriterion__ [] = { + {"MSECriterion_updateOutput", cunn_MSECriterion_updateOutput}, + {"MSECriterion_updateGradInput", cunn_MSECriterion_updateGradInput}, + {"MSECriterion_updateOutput2", cunn_MSECriterion_updateOutput2}, + {"MSECriterion_updateGradInput2", cunn_MSECriterion_updateGradInput2}, + {NULL, NULL} +}; + +void cunn_MSECriterion_init(lua_State *L) +{ + luaT_pushmetatable(L, "torch.CudaTensor"); + luaT_registeratname(L, cunn_MSECriterion__, "nn"); + lua_pop(L,1); +} diff --git a/cuda_code/Main_17.cu b/cuda_code/Main_17.cu new file mode 100644 index 0000000000000000000000000000000000000000..fae7328853e1362205d901f14c03f7bd1087c53e --- /dev/null +++ b/cuda_code/Main_17.cu @@ -0,0 +1,44 @@ +#include + +#include "CellEnv.cuh" +#include "ParaCellsError.cuh" + +using namespace std; + +void work() +{ + CellEnv cellEnv(50, 2, 1); + + //Add cell attributes + cellEnv.addCellAttribute("ancestor"); + cellEnv.addCellAttribute("value"); + + //Set threshold + cellEnv.addEnvironmentAttribute("threshold", 0.5f); + + //Add cells and initialization + cellEnv.addCells(20, 0); + + //Print all + cellEnv.print(); + + //Cell fate decision + cellEnv.updateAllCells(1); + + //Print cells + cellEnv.printCells(); +} + +int main() +{ + try + { + work(); + } + catch (ParaCellsError e) + { + cout << e.getMessage() << endl; + } + + return 0; +} diff --git a/cuda_code/MapTest.cu b/cuda_code/MapTest.cu new file mode 100644 index 0000000000000000000000000000000000000000..e876ce3003d86d8b3cb61823947ab779d545e6b9 --- /dev/null +++ b/cuda_code/MapTest.cu @@ -0,0 +1,60 @@ +//===------------------ GeantX --------------------------------------------===// +// +// Geant Exascale Pilot +// +// For the licensing terms see LICENSE file. +// For the list of contributors see CREDITS file. +// Copyright (C) 2019, Geant Exascale Pilot team, All rights reserved. 
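+// Descriptive note: the kernels below exercise vecgeom::map on the device.
+// testNew reads each stored key back through operator[] and find(), while
+// rebuildMap is intended to refill the map from the given key/value arrays;
+// the launch* wrappers only forward the block/thread configuration to <<<...>>>.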
+//===----------------------------------------------------------------------===// + +#include "backend/cuda/Interface.h" +#include "base/Map.h" + +__global__ void testNew(vecgeom::map *devMap, double *key, int N) +{ + for (int i = 0; i < N; i++) { + double key1 = (*devMap)[key[i]]; + double key2 = devMap->find(key[i])->second; + // printf("Key %f, Value from op[] = %f and from find %f\n",key[i],key1, key2); + } +} + +__global__ void rebuildMap(vecgeom::map *devMap, double *key, + double *value, int N) +{ + // vecgeom::map *myDevMap = new vecgeom::map; + // for (int i=0;i>::SizeOf(); +template void DevicePtr>::Construct() const; + +} // namespace cxx +} // namespace vecgeom + +void launchTestNew(vecgeom::cxx::DevicePtr> &devMap, + vecgeom::cxx::DevicePtr key, int N, int nBlocks, int nThreads) +{ + int threadsPerBlock = nThreads; + int blocksPerGrid = nBlocks; + testNew<<>>(devMap, key, N); +} + +void launchRebuildMap(vecgeom::cxx::DevicePtr> &devMap, + vecgeom::cxx::DevicePtr key, + vecgeom::cxx::DevicePtr value, int N, int nBlocks, + int nThreads) +{ + int threadsPerBlock = nThreads; + int blocksPerGrid = nBlocks; + rebuildMap<<>>(devMap, key, value, N); +} diff --git a/cuda_code/MatMul_6.cu b/cuda_code/MatMul_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..7e307d4ad48fe4c3e59b225603dd71425e57037a --- /dev/null +++ b/cuda_code/MatMul_6.cu @@ -0,0 +1,203 @@ +//////////////////////////////////////////////////////////////////////////////// +// BSD 3-Clause License +// +// Copyright (c) 2021, NVIDIA Corporation +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +///////////////////////////////////////////////////////////////////////////////// + +#include "assert.h" +#include "matx.h" +#include "matx_matmul.h" +#include "matx_pybind.h" +#include "test_types.h" +#include "utilities.h" +#include "gtest/gtest.h" + +using namespace matx; + +/* NOTE: CUTLASS tests are disabled for now. 
The compile times are too long at + * the moment */ +template class MatMulTest : public ::testing::Test { +protected: + void SetUp() override + { + CheckTestTypeSupport(); + + pb = std::make_unique(); // Half precision needs a bit more + // tolerance when compared to fp32 + if constexpr (is_complex_half_v || is_matx_half_v) { + thresh = 0.5f; + } + } + + void TearDown() { pb.reset(); } + + std::unique_ptr pb; + float thresh = 0.01f; +}; + +template +class MatMulTestFloatTypes : public MatMulTest { +}; + +TYPED_TEST_SUITE(MatMulTestFloatTypes, MatXFloatTypes); + +TYPED_TEST(MatMulTestFloatTypes, SmallRect) +{ + MATX_ENTER_HANDLER(); + constexpr index_t m = 4; + constexpr index_t k = 8; + constexpr index_t n = 16; + tensor_t a{{m, k}}; + tensor_t b{{k, n}}; + tensor_t c{{m, n}}; + + this->pb->template InitAndRunTVGenerator( + "00_transforms", "matmul_operators", "run", {m, k, n}); + + this->pb->NumpyToTensorView(a, "a"); + this->pb->NumpyToTensorView(b, "b"); + + matmul(c, a, b); + MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); + + // matmul(c, a, + // b); + // MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); + + MATX_EXIT_HANDLER(); +} + +TYPED_TEST(MatMulTestFloatTypes, SmallRectUserPointer) +{ + MATX_ENTER_HANDLER(); + constexpr index_t m = 4; + constexpr index_t k = 8; + constexpr index_t n = 16; + TypeParam *ap, *bp, *cp; + cudaMallocManaged(&ap, m*k*sizeof(TypeParam)); + cudaMallocManaged(&bp, k*n*sizeof(TypeParam)); + cudaMallocManaged(&cp, m*n*sizeof(TypeParam)); + + auto a = make_tensor(ap, {m, k}); + auto b = make_tensor(bp, {k, n}); + auto c = make_tensor(cp, {m, n}); + + this->pb->template InitAndRunTVGenerator( + "00_transforms", "matmul_operators", "run", {m, k, n}); + + this->pb->NumpyToTensorView(a, "a"); + this->pb->NumpyToTensorView(b, "b"); + + matmul(c, a, b); + MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); + + cudaFree(ap); + cudaFree(bp); + cudaFree(cp); + + MATX_EXIT_HANDLER(); +} + + +TYPED_TEST(MatMulTestFloatTypes, DISABLED_SmallRectTranspose) +{ + MATX_ENTER_HANDLER(); + constexpr index_t m = 4; + constexpr index_t k = 8; + constexpr index_t n = 16; + tensor_t a{{m, k}}; + tensor_t b{{k, n}}; + tensor_t c{{m, n}}; + + auto at = a.Permute({1,0}); + auto bt = b.Permute({1,0}); + auto ct = c.Permute({1,0}); + + this->pb->template InitAndRunTVGenerator( + "00_transforms", "matmul_operators", "run_transpose", {m, k, n}); + + this->pb->NumpyToTensorView(a, "a"); + this->pb->NumpyToTensorView(b, "b"); + + matmul(ct, bt, at); + + MATX_TEST_ASSERT_COMPARE(this->pb, ct, "c", 0.01); + MATX_EXIT_HANDLER(); +} + +TYPED_TEST(MatMulTestFloatTypes, SmallSquare) +{ + MATX_ENTER_HANDLER(); + constexpr index_t m = 4; + constexpr index_t k = 4; + constexpr index_t n = 4; + tensor_t a{{m, k}}; + tensor_t b{{k, n}}; + tensor_t c{{m, n}}; + + this->pb->template InitAndRunTVGenerator( + "00_transforms", "matmul_operators", "run", {m, k, n}); + + this->pb->NumpyToTensorView(a, "a"); + this->pb->NumpyToTensorView(b, "b"); + + matmul(c, a, b); + MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); + + // matmul(c, a, + // b); + // MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); + MATX_EXIT_HANDLER(); +} + +TYPED_TEST(MatMulTestFloatTypes, MediumRect) +{ + MATX_ENTER_HANDLER(); + constexpr index_t m = 128; + constexpr index_t k = 256; + constexpr index_t n = 512; + tensor_t a{{m, k}}; + tensor_t b{{k, n}}; + tensor_t c{{m, n}}; + + this->pb->template InitAndRunTVGenerator( + "00_transforms", "matmul_operators", "run", {m, k, n}); + + 
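+  // Same pattern as the smaller cases: copy the generated inputs into the
+  // tensors, run the GEMM, and compare against the Python reference within
+  // this->thresh (loosened for half-precision types in SetUp()).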
this->pb->NumpyToTensorView(a, "a"); + this->pb->NumpyToTensorView(b, "b"); + + matmul(c, a, b); + MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); + + // matmul(c, a, + // b); + // MATX_TEST_ASSERT_COMPARE(this->pb, c, "c", this->thresh); + + MATX_EXIT_HANDLER(); +} \ No newline at end of file diff --git a/cuda_code/MsnhPaddingLayerGPU.cu b/cuda_code/MsnhPaddingLayerGPU.cu new file mode 100644 index 0000000000000000000000000000000000000000..eebabc3d062daee2e501eabe7b7d72cbc7f47472 --- /dev/null +++ b/cuda_code/MsnhPaddingLayerGPU.cu @@ -0,0 +1,55 @@ +#include "Msnhnet/layers/cuda/MsnhPaddingLayerGPU.h" +namespace Msnhnet +{ + +__global__ void paddingKernel(const int num, const int outHeight, const int outWidth, const int outChannel, + const int height, const int width, const int channel, + const int top, const int left, + const float paddingVal, + float *const input, float *const output) +{ + + int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; + if(index < num) + { + int n = index % outWidth; + index = index / outWidth; + int m = index % outHeight; + index = index / outHeight; + int j = index % outChannel; + index = index / outChannel; + int i = index; + + float val = 0; + + if(m < top || m>=(height+top)) + { + val = paddingVal; + } + else + { + if(n < left || n >=(width+left)) + { + val = paddingVal; + } + else + { + val = input[i*channel*height*width + j*height*width + (m - top)*width + (n-left)]; + } + } + output[i*outChannel*outHeight*outWidth + j*outHeight*outWidth + m*outWidth + n] = val; + } +} + +void PaddingLayerGPU::forwardNormalGPU(const int &batch, const int &outChannel, const int &outHeight, const int &outWidth, + const int &height, const int &width, const int &channel, + const int &top, const int &left, + const float &paddingVal, + float * const &input, float * const &output) +{ + size_t n = outHeight * outWidth * outChannel * batch; + paddingKernel<<>>(n, outHeight, outWidth, outChannel, height, width, channel, top, left, paddingVal, input, output); + CUDA_CHECK(cudaPeekAtLastError()); +} + +} diff --git a/cuda_code/MultiplicationScalar.cu b/cuda_code/MultiplicationScalar.cu new file mode 100644 index 0000000000000000000000000000000000000000..8f41706191635ee2a11c007aba739fece147d6aa --- /dev/null +++ b/cuda_code/MultiplicationScalar.cu @@ -0,0 +1,63 @@ +// +// Created by root on 23/03/2020. 
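+// Descriptive note: Matrix::multiplyScalar below copies the matrix to the
+// device, launches matrixMultiplyScalar with 32x32-thread blocks, and copies
+// the scaled result back. The grid is sized as (Rows, Columns) blocks rather
+// than being derived from the block size, so it typically launches far more
+// threads than there are elements and relies on the in-kernel bounds check.
+// A tighter launch would look like this (hypothetical sketch, not what this file does):
+//   dim3 dimBlock(32, 32);
+//   dim3 dimGrid((Columns + 31) / 32, (Rows + 31) / 32);  // one thread per element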
+// + +#include "../Matrix.cuh" +#include + +__global__ void matrixMultiplyScalar(double *a, double b, double *c, int cr, int cc){ + + int x = blockIdx.x * blockDim.x + threadIdx.x; // col + int y = blockIdx.y * blockDim.y + threadIdx.y; // row + + + if(x < cc && y < cr){ + + c[y * cc + x] = a[y * cc + x]*b; + } + +} + + + +Matrix Matrix::multiplyScalar(double m){ + + static double* c; + c = (double*) calloc(this->Rows*this->Columns,sizeof(double)); + + //Define os endereçoes da memória de vídeo + double *d_a, *d_c; + + //Define o tamanho de cada matriz e escalar na memória + long aSize = this->Rows*this->Columns*sizeof(double); + long cSize = this->Rows*this->Columns*sizeof(double); + + //Aloca espaço na memória de vídeo + + cudaMalloc((void**)&d_a, aSize); + cudaMalloc((void**)&d_c, cSize); + + //Move a matriz e o escalar para a memória de vídeo alocada + + cudaMemcpy(d_a, this->Value, aSize, cudaMemcpyHostToDevice); + + //Define as dimensões + dim3 dimBlock(32,32); // 32x32 -> 1024 Threads + dim3 dimGrid(this->Rows,this->Columns); + + //Efetua a multiplicação + matrixMultiplyScalar<<>>(d_a, m, d_c, this->Rows, this->Columns); + //Copia o resultado de volta + cudaMemcpy(c, d_c, cSize, cudaMemcpyDeviceToHost); + + //Limpa a memória de vídeo + cudaFree(d_a); + cudaFree(d_c); + + //Salva + + return {this->Columns, this->Rows, c}; + +} + + diff --git a/cuda_code/MyKernel.cu b/cuda_code/MyKernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..b2ae224abca5887e8794fa54272d8e83fddc6e89 --- /dev/null +++ b/cuda_code/MyKernel.cu @@ -0,0 +1,1777 @@ +#include "cuda.h" +#include "cuda_runtime.h" +#include "device_launch_parameters.h" +#include "cuda_device_runtime_api.h" + +#include +#include + +#include "device_functions.h" //atomicAdd - required for mutex + + +#include +#include +#include"problem.h" + +//liczba bokow tylko potegi 2, (blocks 12 dziala jak blocks 8) + +#define MAX_BLOCKS 16 //GeForce 770GTX nie ma sensu wiecej niz 16, wiecej tylko wolniej +#define MAX_THREADS 1024 //if greater then GPU does not calculate at all, if more than 1024 - alg on GPU does not run +#define MAX_PROBLEM_SIZE 1024 +#define MAX_MACHINE_SIZE 128 + +#define MAX_TABU_LIST 128 +#define BIG_NUMBER 1000000 + +using namespace std; + +__device__ int g_mutex; + +__device__ int g_blocksCompleted[2]; //completed calculations +__device__ int g_blocksReady[1]; //copied global to shared and ready for next iteration ([MAX_BLOCKS], ale 1 zeby miesjca nie zajmowalo podczas testow +__device__ int g_blocksActive[1]; //can go futher +__device__ float g_value[2]; +__device__ int g_best_j[2]; +__device__ int g_best_v[2]; + +__device__ float g_value_overall; +__device__ int g_best_j_overall; +__device__ int g_best_v_overall; + +__device__ int l_mutex; +__device__ inline void my_lock(void) { + while (atomicCAS(&l_mutex, 0, 1) != 0); +} +__device__ inline void my_unlock(void) { + atomicExch(&l_mutex, 0); +} + +void HostSwapTable(int *perm, int a, int b) +{ + int tmp; + tmp = perm[a]; + perm[a] = perm[b]; + perm[b] = tmp; +} + +void HostInsertTable(int *perm, int a, int b) +{ + int i; + + if (a == b) return; + + if (a < b) + { + for (i = a; i < b; i++) + { + HostSwapTable(perm, i, i + 1); + } + } + else + { + for (i = a; i > b; i--) + { + HostSwapTable(perm, i - 1, i); + } + } +} + + +__device__ inline void SwapTable(int *perm, int a, int b) +{ + int tmp; + tmp = perm[a]; + perm[a] = perm[b]; + perm[b] = tmp; +} + +__device__ inline void InsertTable(int *perm, int a, int b) +{ + int i; + + if (a == b) return; + + 
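+    // Move the element at position a to position b with a chain of adjacent
+    // swaps (|a - b| swaps in total), mirroring HostInsertTable above.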
if (a < b) + { + for (i = a; i < b; i++) + { + SwapTable(perm, i, i + 1); + } + } + else + { + for (i = a; i > b; i--) + { + SwapTable(perm, i - 1, i); + } + } +} + + + +__device__ inline float Criterion(int *n_s, int *m_s, int *pi, float *d_aj_s, float *d_alphaj_s, float *CP_shared) +{ + //float CP[10]; + int i, j; + float v; + float C; + int lindex = threadIdx.x; + + + //for (i = 0; i= (*(n)) - (*(m)) + 1) + { + i++; + v = 0; + } + else + { + CP[i] += d_aj_s[pi[j]] * pow(float(v + 1.0), -d_alphaj_s[pi[j]]); + v += d_aj_s[pi[j]]; + } + } + + C = CP[0]; + + for (i = 1; i<*m; i++) + { + if (C < CP[i]) C = CP[i]; + } + + return(C); + +} +//--------- global variable for block to synchronize blocks -----------// + + + +//------------ Kernel TS BaT -------------// +// blocks and threads +// +// teoretycznie powinno dzialac, ale nie dziala, gdyz nie zawsze bloki moga byc zsynchronizowane +// moze byc tak, ze scheduler czeka na zakonczenie bloku, zanim kolejny zacznie sie wykonywac +// +// insert by swap - multithread = number of jobs +// shared tabu list +// shared pi_neigh_best - wolniejsze niz w TS2 bez shared tylko kazdy watek ma swoj pi_neigh_best (1024 - 51s vs 53s) +// shared pi_best +// +// parallel moze dawac inny wynik niz jednowatkowy, +// gdyz to samo kryterium moze byc dla roznych j,v, wowczas inne j,v trafiaja na liste tabu (w innej kolejnosci) +//// +// mozna zrobic, zeby tylko lindex == 0 sprawdzal najlepsze wstawienie i przebudowal shared_pi_neigh_best +// trzeba wtedy dodac shared_best_j, shared_best_v +// +// ??? czy n wlicza m?, czy musze robic (n+m)*(n+m), ale chyba n wlicza m +// niebezpieczna sytuacja do sprawdzanie 111|100, wtedy block drugi jak watki wyladaja?, albo 111|000 czy wtedy block 2 powstanie? +//??? a jak wszystkie ruchy danego watku na liscie tabu? czy cos bedzie mial lepszego niz wczesniej?, czy zamazywac duza wartoscia 10000000 +// +// +__global__ void KernelTS_FU_BaT(int *d_blocksN, int *d_threads_per_block, int *d_n, int *d_m, int *d_pi, float *d_aj, float *d_alphaj, int *d_parametersTS, int *d_listTabu, float *d_best_value_neigh, int *d_best_job_j, int *d_best_job_v, unsigned int seed, float *d_err_note) +{ + int i, l, r; + int best_job_j, best_job_v, j_best, v_best; + int iterN; + int listN, isInTabu; + int index_best; + int index_block_best; + + int firstNeigh; + + //int listTabu[2 * MAX_TABU_LIST]; //[1,1; 2,2; ...] + + float CP[MAX_MACHINE_SIZE]; //for criterion value + + float value_C, value_neigh_best; + int pi[MAX_PROBLEM_SIZE]; // temporary pi for analysing neighbourhood + + __shared__ int listTabu[2 * MAX_TABU_LIST]; //[1,1; 2,2; ...] + + __shared__ int shared_pi_neigh_best[MAX_PROBLEM_SIZE]; //the best for the neighourhood + + __shared__ float shared_value[MAX_THREADS]; //the best solution value for each thread + //__shared__ int shared_best_j[MAX_THREADS]; + //__shared__ int shared_best_v[MAX_THREADS]; + + __shared__ float aj_shared[MAX_PROBLEM_SIZE]; //much faster than local float aj_shared[MAX_PROBLEM_SIZE]; + __shared__ float alphaj_shared[MAX_PROBLEM_SIZE]; + + //float aj_shared[MAX_PROBLEM_SIZE]; + //float alphaj_shared[MAX_PROBLEM_SIZE]; + + int threads_per_blockN; + int blocksN; + int threadsN; + int n; + int m; + + int j, v, k; + int N; //number of moves per neighbourhood + int N_k; // number of moves per this thread + int rem_N; //reminder + int div_N; //division + int index_start_move_k; //the index of a start move of this thread, e.g., (0,0) is 0, (0,1) is 1, etc. 
(0,n-1) is n-1, (1,0) is n + int job_j; + int job_v; + + int gindex; + int lindex; //local index for each block + int block_index; + + threads_per_blockN = *d_threads_per_block; + blocksN = *d_blocksN; + threadsN = (*d_threads_per_block)*(*d_blocksN); + + n = *d_n; + m = *d_m; + + lindex = threadIdx.x; //local index for each block + block_index = blockIdx.x; + gindex = threadIdx.x + blockIdx.x*threads_per_blockN; + + //gindex = blockIdx.x * blockDim.x + threadIdx.x; + + + listN = d_parametersTS[0]; + iterN = d_parametersTS[1]; + + //initialize shared parameters + + if (lindex == 0) + { + //problem parameters + for (j = 0; j= rem_N) + gindex*(gindex < rem_N); //gindex >= rem_N->rem_n otherwise gindex + + + // okreslone tylko liczy dany watek + //for (iter = 0; iter < iterN; iter++) + { + best_job_j = 0; //means that nothing has changed in reference to the previouse solution (po iteracji 0,0 oznacza, ze bez zmian) + best_job_v = 0; + + firstNeigh = 0; //moze nie byc potrzebny + + value_neigh_best = BIG_NUMBER; + + //pi = pi_neigh_best + //for (l = 0; l < n; l++) + //{ + // pi[l] = shared_pi_neigh_best[l]; + //} + + + //-------- search neighbourhood ----------// + j = (int)(index_start_move_k / n); + v = index_start_move_k % n; + job_j = -1; + + for (k = 0; k < N_k; k++) + { + //------- obtain move --------// + if (job_j == j) + { + SwapTable(pi, v - 1, v); //here v is > 0 - always + job_j = j; + job_v = v; + } + else + { + //at first time always copied for each thread + //pi = pi_neigh_best + for (l = 0; l < n; l++) + { + pi[l] = shared_pi_neigh_best[l]; + } + + InsertTable(pi, j, v); + job_j = j; + job_v = v; + } + //----------------------------// + + if (j != v) + { + value_C = Criterion2(&n, &m, pi, aj_shared, alphaj_shared, CP); + + //---------- check Tabu List -------------// + if ((job_j != job_v) && (firstNeigh == 1 || value_C < value_neigh_best)) + { + isInTabu = 0; + for (l = 0; l < listN; l++) + { + if ((job_j == listTabu[0 * listN + l]) && (job_v == listTabu[1 * listN + l])) //[0][l], [1][l] + { + isInTabu = 1; + break; + } + } + + if (!isInTabu) + { + value_neigh_best = value_C; + firstNeigh = 0; + best_job_j = job_j; + best_job_v = job_v; + } + } + //----------------------------------------// + } + + + //---- update j and v ---// + v++; + if (v >= n) + { + v = 0; + j++; + } + //-----------------------// + } + + /* + shared_value[lindex] = value_neigh_best; + shared_best_j[lindex] = best_job_j; + shared_best_v[lindex] = best_job_v; + + // wait for all threads to reach the barrier + __syncthreads(); + //<-------------------- barrier -------// + + + if (lindex == 0) + { + index_best = 0; + value_neigh_best = shared_value[index_best]; + + for (i = 1; i < threads_per_blockN; i++) //to the number of threads, check if the currecn thread is the best + { + if (shared_value[i] < value_neigh_best) + { + value_neigh_best = shared_value[i]; + index_best = i; + } + } + + d_best_value_neigh[block_index] = shared_value[index_best]; + d_best_job_j[block_index] = shared_best_j[index_best]; + d_best_job_v[block_index] = shared_best_v[index_best]; + } + + */ + + shared_value[lindex] = value_neigh_best; + + // wait for all threads to reach the barrier + __syncthreads(); + //<-------------------- barrier -------// + + + //using index best - requires less memory per block than using lindex ==0 (i.e., job_j, job_v for each thred is not necessary, which give 2x4KB less memory + + index_best = lindex; //if value_i = value_k, then solution i is chosen instead of solution k (in; + m = p->m; + + 
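+	// Descriptive note: the requested launch configuration is clamped below:
+	// at most MAX_THREADS threads per block, at most MAX_BLOCKS blocks, and
+	// never more total threads than the n*n insertion-move neighbourhood.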
//--------------------// + + //threadsN = MAX_THREADS; + if (threadsN <= 0) + { + threadsN = MAX_THREADS; + } + threadsN = min(threadsN, MAX_THREADS); + threadsN = min(threadsN, n*n); // cannot be greater than neighourhood size, N= n*n + + //--------------------// + + //threadsN = MAX_THREADS; + if (blocksN <= 0) + { + blocksN = MAX_BLOCKS; + } + blocksN = min(blocksN, MAX_BLOCKS); + blocksN = min(blocksN, (int)((n*n) / threadsN)); // blocksN*threadsN cannot be greater than neighourhood size, N= n*n + blocksN = max(blocksN, 1); + //--------------------// + + + //------------- local variables ------// + int *listTabu; + listTabu = new int[2 * listN]; + for (i = 0; i < 2 * listN; i++) + { + listTabu[i] = 0; + } + int listIdx; + listIdx = 0; + + float *best_value_neigh; + int *best_job_j; + int *best_job_v; + best_value_neigh = new float[blocksN]; + best_job_j = new int[blocksN]; + best_job_v = new int[blocksN]; + for (i = 0; i < blocksN; i++) + { + best_value_neigh[i] = 0; + best_job_j[i] = 0; + best_job_v[i] = 0; + } + + float *aj, *alphaj; + aj = new float[n]; + alphaj = new float[n]; + for (j = 0; j < n; j++) + { + aj[j] = p->jobs[0][j].aj; + alphaj[j] = p->jobs[0][j].alphaj; + } + + int *parametersTS; + parametersTS = new int[2]; + parametersTS[0] = listN; + parametersTS[1] = iterN; + + int *pi; + int *pi_best; //only locally important + pi = new int[n]; + pi_best = new int[n]; + for (j = 0; j < n; j++) + { + pi[j] = result.pi[j]; + pi_best[j] = result.pi[j]; + } + float value_best; + value_best = result.value; + + cout << "===== TS GPU FU BaT =====" << endl; + cout << "blocksN " << blocksN << endl; + cout << "threadsN " << threadsN << endl; + cout << "listN " << parametersTS[0] << endl; + cout << "iterN " << parametersTS[1] << endl; + cout << "value " << value_best << endl; + + + t1 = clock(); + + cudaMalloc(&d_blocksN, sizeof(int)); + cudaMalloc(&d_threadsN, sizeof(int)); + cudaMalloc(&d_pi, n * sizeof(int)); + cudaMalloc(&d_aj, n * sizeof(float)); + cudaMalloc(&d_alphaj, n * sizeof(float)); + cudaMalloc(&d_n, sizeof(int)); + cudaMalloc(&d_m, sizeof(int)); + cudaMalloc(&d_parametersTS, 2 * sizeof(int)); + cudaMalloc(&d_err_note, sizeof(float)); + + cudaMalloc(&d_listTabu, 2*listN * sizeof(int)); + cudaMalloc(&d_best_value_neigh, blocksN * sizeof(float)); + cudaMalloc(&d_best_job_j, blocksN * sizeof(int)); + cudaMalloc(&d_best_job_v, blocksN * sizeof(int)); + + + t2 = clock(); + + cout << "cuda alloc " << t2 - t1 << endl; + + t1 = clock(); + + float err_note = -1; + + + cudaMemcpy(d_blocksN, &blocksN, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_threadsN, &threadsN, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_pi, pi, n * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_aj, aj, n * sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_alphaj, alphaj, n * sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_m, &m, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_parametersTS, parametersTS, 2 * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_err_note, &err_note, sizeof(float), cudaMemcpyHostToDevice); + + cudaMemcpy(d_listTabu, listTabu, 2*listN * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_best_value_neigh, best_value_neigh, blocksN * sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_best_job_j, best_job_j, blocksN * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_best_job_v, best_job_v, blocksN * sizeof(int), cudaMemcpyHostToDevice); + + t2 = clock(); + + cout << "cuda mem copy 
" << t2 - t1 << endl; + + //system("PAUSE"); + + t1 = clock(); + + + // !!!??? zobaczyc jak jest w innych programach ustalane threads/blocks, etc. + + + + //(int *d_blocksN, int *d_threads_per_block, int *d_n, int *d_m, int *d_pi, float *d_aj, float *d_alphaj, int *d_parametersTS, int *d_listTabu, float *d_best_value_neigh, int *d_best_job_j, int *d_best_job_v, unsigned int seed, int *d_err_note) + + int iter; + int job_j; + int job_v; + float value_neigh; + int best_block_index; + //permutation tmp_pi(n); + + for (iter = 0; iter < iterN; iter++) + { + //for (j = 0; j < n; j++) + //{ + // tmp_pi.Set(j, pi[j]); + //} + //cout << " pi in " << tmp_pi << endl; + + //cudaDeviceSynchronize(); + + KernelTS_FU_BaT <<< blocksN, threadsN >> > (d_blocksN, d_threadsN, d_n, d_m, d_pi, d_aj, d_alphaj, d_parametersTS, d_listTabu, d_best_value_neigh, d_best_job_j, d_best_job_v, time(NULL), d_err_note); + + cudaDeviceSynchronize(); + + //Device -> Host + cudaMemcpy(best_value_neigh, d_best_value_neigh, blocksN * sizeof(float), cudaMemcpyDeviceToHost); + cudaMemcpy(best_job_j, d_best_job_j, blocksN * sizeof(int), cudaMemcpyDeviceToHost); + cudaMemcpy(best_job_v, d_best_job_v, blocksN * sizeof(int), cudaMemcpyDeviceToHost); + + //cout << endl; + + best_block_index = 0; + value_neigh = best_value_neigh[best_block_index]; + //cout << "C : " << best_value_neigh[0] << " (" << best_job_j[0] << " , " << best_job_v[0] << ") | "; + for (i = 1; i < blocksN; i++) + { + //cout << "C : "<< best_value_neigh[i] << " ("< value_neigh) + { + value_best = value_neigh; + for (i = 0; i < n; i++) + { + pi_best[i] = pi[i]; + } + } + //------------------------------------// + + //------ add to tabu list -------// + listTabu[0 * listN + listIdx] = job_j; + listTabu[1 * listN + listIdx] = job_v; + listIdx = (listIdx + 1) % listN; + + + //for (i = 0; i < listN; i++) + //{ + // cout <<"("<Criterion(result.pi); + + t1 = clock(); + + cudaFree(d_blocksN); + cudaFree(d_threadsN); + cudaFree(d_pi); + cudaFree(d_aj); + cudaFree(d_alphaj); + cudaFree(d_n); + cudaFree(d_m); + cudaFree(d_parametersTS); + cudaFree(d_err_note); + + cudaFree(d_listTabu); + cudaFree(d_best_value_neigh); + cudaFree(d_best_job_j); + cudaFree(d_best_job_v); + + t2 = clock(); + + cout << "cuda free " << t2 - t1 << endl << endl; + + + delete[]parametersTS; + delete[]aj; + delete[]alphaj; + delete[]pi; + + delete[]listTabu; + delete[]best_value_neigh; + delete[]best_job_j; + delete[]best_job_v; + + delete[]pi_best; +} + + + + +//------------ Kernel TS BaT -------------// +// Full Utilization of Blocks and Threads +// Revised notation and names of variables +// +// teoretycznie powinno dzialac, ale nie dziala, gdyz nie zawsze bloki moga byc zsynchronizowane +// moze byc tak, ze scheduler czeka na zakonczenie bloku, zanim kolejny zacznie sie wykonywac +// +// insert by swap - multithread = number of jobs +// shared tabu list +// shared pi_neigh_best - wolniejsze niz w TS2 bez shared tylko kazdy watek ma swoj pi_neigh_best (1024 - 51s vs 53s) +// shared pi_best +// +// parallel moze dawac inny wynik niz jednowatkowy, +// gdyz to samo kryterium moze byc dla roznych j,v, wowczas inne j,v trafiaja na liste tabu (w innej kolejnosci) +//// +// mozna zrobic, zeby tylko lindex == 0 sprawdzal najlepsze wstawienie i przebudowal shared_pi_neigh_best +// trzeba wtedy dodac shared_best_j, shared_best_v +// +// ??? 
czy n wlicza m?, czy musze robic (n+m)*(n+m), ale chyba n wlicza m +// niebezpieczna sytuacja do sprawdzanie 111|100, wtedy block drugi jak watki wyladaja?, albo 111|000 czy wtedy block 2 powstanie? +//??? a jak wszystkie ruchy danego watku na liscie tabu? czy cos bedzie mial lepszego niz wczesniej?, czy zamazywac duza wartoscia 10000000 +// +// +__global__ void KernelTS_FFU_BaT(int *d_blocksN, int *d_threads_per_block, int *d_n, int *d_m, int *d_pi, float *d_aj, float *d_alphaj, int *d_parametersTS, int *d_listTabu, float *d_best_value_neigh, int *d_best_job_j, int *d_best_job_v, unsigned int seed, float *d_err_note) +{ + float CP[MAX_MACHINE_SIZE]; //for criterion value + + float value_C; //the temporary crterion value + float value_neigh_best; //the best found value for the given thread + int best_job_j, best_job_v; + + int pi[MAX_PROBLEM_SIZE]; // temporary pi for analysing neighbourhood + + __shared__ float shared_aj[MAX_PROBLEM_SIZE]; //much faster than local float aj_shared[MAX_PROBLEM_SIZE]; + __shared__ float shared_alphaj[MAX_PROBLEM_SIZE]; + + int isInTabu; + int tabuListSize; + __shared__ int shared_tabuList[2 * MAX_TABU_LIST]; //[1,1; 2,2; ...] + + __shared__ int shared_pi_neigh_best[MAX_PROBLEM_SIZE]; //pi_neigh the best for the block + __shared__ float shared_value_neigh_best[MAX_THREADS]; //the best solution value for each thread + + + int threads_per_block; + int blocks_per_grid; + int number_all_threads; + int n; + int m; + + int j, v, k, i; + int neighborhoodSizeN; //number of moves per neighbourhood + int N_k; // number of moves per this thread + int rem_N; //reminder + int div_N; //division + int index_start_move_k; //the index of a start move of this thread, e.g., (0,0) is 0, (0,1) is 1, etc. (0,n-1) is n-1, (1,0) is n + int job_j; + int job_v; + + int gindex; + int lindex; //local index for each block + int block_index; + int index_thread_best; + + n = *d_n; + m = *d_m; + + threads_per_block = *d_threads_per_block; + blocks_per_grid = *d_blocksN; + number_all_threads = (*d_threads_per_block)*(*d_blocksN); + + lindex = threadIdx.x; //local index for each block + block_index = blockIdx.x; + gindex = threadIdx.x + blockIdx.x*threads_per_block; + + //gindex = blockIdx.x * blockDim.x + threadIdx.x; + + + tabuListSize = d_parametersTS[0]; + + //initialize shared parameters + if (lindex == 0) + { + //problem parameters + for (j = 0; j= rem_N) + gindex*(gindex < rem_N); //gindex >= rem_N->rem_n otherwise gindex + + + value_neigh_best = BIG_NUMBER; + + //-------- search neighbourhood ----------// + j = (int)(index_start_move_k / n); + v = index_start_move_k % n; + job_j = -1; + + for (k = 0; k < N_k; k++) + { + //------- obtain move --------// + if (job_j == j) + { + SwapTable(pi, v - 1, v); //here v is > 0 - always + job_j = j; + job_v = v; + } + else + { + //at first time always copied for each thread + //pi = pi_neigh_best + for (i = 0; i < n; i++) + { + pi[i] = shared_pi_neigh_best[i]; + } + + InsertTable(pi, j, v); + job_j = j; + job_v = v; + } + //----------------------------// + + if (j != v) + { + value_C = Criterion2(&n, &m, pi, shared_aj, shared_alphaj, CP); + + //---------- check Tabu List -------------// + if ((job_j != job_v) && (value_C < value_neigh_best)) + { + isInTabu = 0; + for (i = 0; i < tabuListSize; i++) + { + if ((job_j == shared_tabuList[0 * tabuListSize + i]) && (job_v == shared_tabuList[1 * tabuListSize + i])) //[0][l], [1][l] + { + isInTabu = 1; + break; + } + } + + if (!isInTabu) + { + value_neigh_best = value_C; + best_job_j = job_j; + best_job_v 
= job_v; + } + } + //----------------------------------------// + } + + //---- update j and v ---// + v++; + if (v >= n) + { + v = 0; + j++; + } + //-----------------------// + } + + //d_err_note[0] = 8 + 10*best_job_j + 100 * best_job_v + 8*1000; + + shared_value_neigh_best[lindex] = value_neigh_best; + + // wait for all threads to reach the barrier + __syncthreads(); + //<-------------------- barrier -------// + + //using index best - requires less memory per block than using lindex ==0 (i.e., job_j, job_v for each thred is not necessary, which give 2x4KB less memory + + index_thread_best = lindex; //if value_i = value_k, then solution i is chosen instead of solution k (in; + m = p->m; + + threads_per_block = _threads_per_block; + blocks_per_grid = _blocks_per_grid; + + //--------------------// + if (threads_per_block <= 0) + { + threads_per_block = MAX_THREADS; + } + threads_per_block = min(threads_per_block, MAX_THREADS); + threads_per_block = min(threads_per_block, n*n); // cannot be greater than neighourhood size, N= n*n + + + if (blocks_per_grid <= 0) + { + blocks_per_grid = MAX_BLOCKS; + } + blocks_per_grid = min(blocks_per_grid, MAX_BLOCKS); + blocks_per_grid = min(blocks_per_grid, (int)((n*n) / blocks_per_grid)); // blocksN*threadsN cannot be greater than neighourhood size, N= n*n + blocks_per_grid = max(blocks_per_grid, 1); + //--------------------// + + + //------------- local variables ------// + int tabuListSize; + int *tabuList; + tabuListSize = _tabuListSize; + tabuList = new int[2 * tabuListSize]; + for (i = 0; i < 2 * tabuListSize; i++) + { + tabuList[i] = 0; + } + int tabuListIdx; + tabuListIdx = 0; + + float *best_value_neigh; + int *best_job_j; + int *best_job_v; + best_value_neigh = new float[blocks_per_grid]; + best_job_j = new int[blocks_per_grid]; + best_job_v = new int[blocks_per_grid]; + for (i = 0; i < blocks_per_grid; i++) + { + best_value_neigh[i] = 0; + best_job_j[i] = 0; + best_job_v[i] = 0; + } + + float *aj, *alphaj; + aj = new float[n]; + alphaj = new float[n]; + for (i = 0; i < n; i++) + { + aj[i] = p->jobs[0][i].aj; + alphaj[i] = p->jobs[0][i].alphaj; + } + + int *parametersTS; + parametersTS = new int[2]; + parametersTS[0] = tabuListSize; + parametersTS[1] = iterN; + + int *pi; + int *pi_best; //only locally important + pi = new int[n]; + pi_best = new int[n]; + for (i = 0; i < n; i++) + { + pi[i] = result.pi[i]; + pi_best[i] = result.pi[i]; + } + float value_best; + value_best = result.value; + + float err_note = -1; + + cout << "===== TS GPU FFU BaT =====" << endl; + cout << "blocksN " << blocks_per_grid << endl; + cout << "threadsN " << threads_per_block << endl; + cout << "listN " << parametersTS[0] << endl; + cout << "iterN " << parametersTS[1] << endl; + cout << "value " << value_best << endl; + + + cudaMalloc(&d_blocks_per_grid, sizeof(int)); + cudaMalloc(&d_threads_per_block, sizeof(int)); + cudaMalloc(&d_pi, n * sizeof(int)); + cudaMalloc(&d_aj, n * sizeof(float)); + cudaMalloc(&d_alphaj, n * sizeof(float)); + cudaMalloc(&d_n, sizeof(int)); + cudaMalloc(&d_m, sizeof(int)); + cudaMalloc(&d_parametersTS, 2 * sizeof(int)); + cudaMalloc(&d_err_note, sizeof(float)); + cudaMalloc(&d_tabuList, 2 * tabuListSize * sizeof(int)); + cudaMalloc(&d_best_value_neigh, blocks_per_grid * sizeof(float)); + cudaMalloc(&d_best_job_j, blocks_per_grid * sizeof(int)); + cudaMalloc(&d_best_job_v, blocks_per_grid * sizeof(int)); + + + cudaMemcpy(d_blocks_per_grid, &blocks_per_grid, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_threads_per_block, 
&threads_per_block, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_pi, pi, n * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_aj, aj, n * sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_alphaj, alphaj, n * sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_m, &m, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_parametersTS, parametersTS, 2 * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_err_note, &err_note, sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_tabuList, tabuList, 2 * tabuListSize * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_best_value_neigh, best_value_neigh, blocks_per_grid * sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_best_job_j, best_job_j, blocks_per_grid * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_best_job_v, best_job_v, blocks_per_grid * sizeof(int), cudaMemcpyHostToDevice); + + for (iter = 0; iter < iterN; iter++) + { + + //input data <<>>, i.e., sufficient that xy coveres N + + KernelTS_FFU_BaT <<< blocks_per_grid, threads_per_block >>> (d_blocks_per_grid, d_threads_per_block, d_n, d_m, d_pi, d_aj, d_alphaj, d_parametersTS, d_tabuList, d_best_value_neigh, d_best_job_j, d_best_job_v, time(NULL), d_err_note); + + //synchronize blocks + cudaDeviceSynchronize(); + + //Device -> Host + cudaMemcpy(best_value_neigh, d_best_value_neigh, blocks_per_grid * sizeof(float), cudaMemcpyDeviceToHost); + cudaMemcpy(best_job_j, d_best_job_j, blocks_per_grid * sizeof(int), cudaMemcpyDeviceToHost); + cudaMemcpy(best_job_v, d_best_job_v, blocks_per_grid * sizeof(int), cudaMemcpyDeviceToHost); + + index_block_best = 0; + value_neigh = best_value_neigh[index_block_best]; + + for (i = 1; i < blocks_per_grid; i++) + { + if (best_value_neigh[i] < value_neigh) + { + index_block_best= i; + value_neigh = best_value_neigh[i]; + } + } + + HostInsertTable(pi, best_job_j[index_block_best], best_job_v[index_block_best]); + + //---------- update the best ---------// + if (value_best > value_neigh) + { + value_best = value_neigh; + for (i = 0; i < n; i++) + { + pi_best[i] = pi[i]; + } + } + //------------------------------------// + + //------ add to tabu list -------// + tabuList[0 * tabuListSize + tabuListIdx] = best_job_j[index_block_best]; + tabuList[1 * tabuListSize + tabuListIdx] = best_job_v[index_block_best]; + tabuListIdx = (tabuListIdx + 1) % tabuListSize; + //-------------------------------// + + cudaMemcpy(d_pi, pi, n * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_tabuList, tabuList, 2 * tabuListSize * sizeof(int), cudaMemcpyHostToDevice); + } + + //for tests only + cudaMemcpy(&err_note, d_err_note, sizeof(float), cudaMemcpyDeviceToHost); + + for (i = 0; i < n; i++) + { + result.pi.Set(i, pi_best[i]); + } + + result.value = p->Criterion(result.pi); + + cudaFree(d_blocks_per_grid); + cudaFree(d_threads_per_block); + cudaFree(d_pi); + cudaFree(d_aj); + cudaFree(d_alphaj); + cudaFree(d_n); + cudaFree(d_m); + cudaFree(d_parametersTS); + cudaFree(d_err_note); + + cudaFree(d_tabuList); + cudaFree(d_best_value_neigh); + cudaFree(d_best_job_j); + cudaFree(d_best_job_v); + + delete[]parametersTS; + delete[]aj; + delete[]alphaj; + delete[]pi; + + delete[]tabuList; + delete[]best_value_neigh; + delete[]best_job_j; + delete[]best_job_v; + + delete[]pi_best; +} + + + +//------------ Kernel TS BaT -------------// +// Full Utilization of Blocks and Threads +// Revised notation and names of variables +// +// blocks are synchronized by global function, - host does not 
calculate any values, blocks communicate by global memory + // gindex == 0 manages d_tabuList, d_pi_best + // + // in theory this should work, but it does not, because the blocks cannot always be synchronized + // it may happen that the scheduler waits for one block to finish before the next one starts executing + // + // insert by swap - multithread = number of jobs + // shared tabu list + // shared pi_neigh_best - slower than in TS2 without shared memory, where each thread keeps its own pi_neigh_best (1024 - 51s vs 53s) + // shared pi_best + // + // the parallel version may give a different result than the single-threaded one, + // since the same criterion value can occur for different j,v; different j,v then end up on the tabu list (in a different order) + //// + // it could be arranged so that only lindex == 0 checks the best insertion and rebuilds shared_pi_neigh_best + // shared_best_j, shared_best_v would then have to be added + // + // ??? does n include m? do I have to use (n+m)*(n+m)? n most likely includes m + // a risky case to check: 111|100 - what happens to the second block when its threads run out? or 111|000 - is block 2 created at all then? + //??? and what if all moves of a given thread are on the tabu list? will it have anything better than before? or should it be overwritten with a large value 10000000 + // + // + __global__ void KernelTS_FFU_BaT_block_sync(int *d_blocksN, int *d_threads_per_block, int *d_n, int *d_m, int *d_pi, int *d_pi_best, float *d_value_best, float *d_aj, float *d_alphaj, int *d_parametersTS, int *d_tabuList, int *d_tabuListIdx, float *d_best_value_neigh, int *d_best_job_j, int *d_best_job_v, unsigned int seed, float *d_err_note) +{ + float CP[MAX_MACHINE_SIZE]; //for criterion value //cannot be shared, since all threads will use the same space, otherwise MAX_MACHINE_SIZE * MAX_THREADS + + float value_C; //the temporary criterion value + float value_neigh_best; //the best found value for the given thread + int best_job_j, best_job_v; + + int pi[MAX_PROBLEM_SIZE]; // temporary pi for analysing the neighbourhood + + __shared__ float shared_aj[MAX_PROBLEM_SIZE]; //much faster than local float aj_shared[MAX_PROBLEM_SIZE]; + __shared__ float shared_alphaj[MAX_PROBLEM_SIZE]; + + int isInTabu; + int tabuListSize; + int tabuListIdx; //only for faster update by gindex == 0 + __shared__ int shared_tabuList[2 * MAX_TABU_LIST]; //[1,1; 2,2; ...] + + __shared__ int shared_pi_neigh_best[MAX_PROBLEM_SIZE]; //the best pi_neigh for the block + __shared__ float shared_value_neigh_best[MAX_THREADS]; //the best solution value for each thread + + int index_block_best; + + + int threads_per_block; + int blocks_per_grid; + int number_all_threads; + int n; + int m; + + int j, v, k, i; + int neighborhoodSizeN; //number of moves per neighbourhood + int N_k; // number of moves per this thread + int rem_N; //remainder + int div_N; //division + int index_start_move_k; //the index of the start move of this thread, e.g., (0,0) is 0, (0,1) is 1, etc. 
(0,n-1) is n-1, (1,0) is n + int job_j; + int job_v; + + int gindex; + int lindex; //local index for each block + int block_index; + int index_thread_best; + + n = *d_n; + m = *d_m; + + threads_per_block = *d_threads_per_block; + blocks_per_grid = *d_blocksN; + number_all_threads = (*d_threads_per_block)*(*d_blocksN); + + lindex = threadIdx.x; //local index for each block + block_index = blockIdx.x; + gindex = threadIdx.x + blockIdx.x*threads_per_block; + + //gindex = blockIdx.x * blockDim.x + threadIdx.x; + + + tabuListSize = d_parametersTS[0]; + + //initialize shared parameters + if (lindex == 0) + { + //problem parameters + for (j = 0; j= rem_N) + gindex*(gindex < rem_N); //gindex >= rem_N->rem_n otherwise gindex + + + value_neigh_best = BIG_NUMBER; + + //-------- search neighbourhood ----------// + j = (int)(index_start_move_k / n); + v = index_start_move_k % n; + job_j = -1; + + for (k = 0; k < N_k; k++) + { + //------- obtain move --------// + if (job_j == j) + { + SwapTable(pi, v - 1, v); //here v is > 0 - always + job_j = j; + job_v = v; + } + else + { + //at first time always copied for each thread + //pi = pi_neigh_best + for (i = 0; i < n; i++) + { + pi[i] = shared_pi_neigh_best[i]; + } + + InsertTable(pi, j, v); + job_j = j; + job_v = v; + } + //----------------------------// + + if (j != v) + { + value_C = Criterion2(&n, &m, pi, shared_aj, shared_alphaj, CP); + + //---------- check Tabu List -------------// + if ((job_j != job_v) && (value_C < value_neigh_best)) + { + isInTabu = 0; + for (i = 0; i < tabuListSize; i++) + { + if ((job_j == shared_tabuList[0 * tabuListSize + i]) && (job_v == shared_tabuList[1 * tabuListSize + i])) //[0][l], [1][l] + { + isInTabu = 1; + break; + } + } + + if (!isInTabu) + { + value_neigh_best = value_C; + best_job_j = job_j; + best_job_v = job_v; + } + } + //----------------------------------------// + } + if ((job_j == 2) && (job_v == 3)) + { + // d_err_note[0] = value_C; + // d_err_note[0] = isInTabu; + } + + //---- update j and v ---// + v++; + if (v >= n) + { + v = 0; + j++; + } + //-----------------------// + } + + //d_err_note[0] = 8 + 10*best_job_j + 100 * best_job_v + 8*1000; + //d_err_note[0] = value_neigh_best; + + d_err_note[0] = 8 + 10 * best_job_j + 100 * best_job_v + 8 * 1000; + + shared_value_neigh_best[lindex] = value_neigh_best; + + // wait for all threads to reach the barrier + __syncthreads(); + //<-------------------- barrier -------// + + //using index best - requires less memory per block than using lindex ==0 (i.e., job_j, job_v for each thred is not necessary, which give 2x4KB less memory + + index_thread_best = lindex; //if value_i = value_k, then solution i is chosen instead of solution k (in; + m = p->m; + + threads_per_block = _threads_per_block; + blocks_per_grid = _blocks_per_grid; + + //--------------------// + if (threads_per_block <= 0) + { + threads_per_block = MAX_THREADS; + } + threads_per_block = min(threads_per_block, MAX_THREADS); + threads_per_block = min(threads_per_block, n*n); // cannot be greater than neighourhood size, N= n*n + + + if (blocks_per_grid <= 0) + { + blocks_per_grid = MAX_BLOCKS; + } + blocks_per_grid = min(blocks_per_grid, MAX_BLOCKS); + blocks_per_grid = min(blocks_per_grid, (int)((n*n) / blocks_per_grid)); // blocksN*threadsN cannot be greater than neighourhood size, N= n*n + blocks_per_grid = max(blocks_per_grid, 1); + //--------------------// + + + //------------- local variables ------// + int tabuListSize; + int *tabuList; + tabuListSize = _tabuListSize; + tabuList = new int[2 * 
tabuListSize]; + for (i = 0; i < 2 * tabuListSize; i++) + { + tabuList[i] = 0; + } + int tabuListIdx; + tabuListIdx = 0; + + float *best_value_neigh; + int *best_job_j; + int *best_job_v; + best_value_neigh = new float[blocks_per_grid]; + best_job_j = new int[blocks_per_grid]; + best_job_v = new int[blocks_per_grid]; + for (i = 0; i < blocks_per_grid; i++) + { + best_value_neigh[i] = BIG_NUMBER; + best_job_j[i] = 0; + best_job_v[i] = 0; + } + + float *aj, *alphaj; + aj = new float[n]; + alphaj = new float[n]; + for (i = 0; i < n; i++) + { + aj[i] = p->jobs[0][i].aj; + alphaj[i] = p->jobs[0][i].alphaj; + } + + int *parametersTS; + parametersTS = new int[2]; + parametersTS[0] = tabuListSize; + parametersTS[1] = iterN; + + int *pi; + int *pi_best; //only locally important + pi = new int[n]; + pi_best = new int[n]; + for (i = 0; i < n; i++) + { + pi[i] = result.pi[i]; + pi_best[i] = result.pi[i]; + } + float value_best; + value_best = result.value; + + float err_note = -1; + + cout << "===== TS GPU FFU BaT block sync =====" << endl; + cout << "blocksN " << blocks_per_grid << endl; + cout << "threadsN " << threads_per_block << endl; + cout << "listN " << parametersTS[0] << endl; + cout << "iterN " << parametersTS[1] << endl; + cout << "value " << value_best << endl; + + + cudaMalloc(&d_blocks_per_grid, sizeof(int)); + cudaMalloc(&d_threads_per_block, sizeof(int)); + cudaMalloc(&d_n, sizeof(int)); + cudaMalloc(&d_m, sizeof(int)); + cudaMalloc(&d_pi, n * sizeof(int)); + cudaMalloc(&d_pi_best, n * sizeof(int)); + cudaMalloc(&d_value_best, sizeof(float)); + cudaMalloc(&d_aj, n * sizeof(float)); + cudaMalloc(&d_alphaj, n * sizeof(float)); + cudaMalloc(&d_parametersTS, 2 * sizeof(int)); + cudaMalloc(&d_tabuList, 2 * tabuListSize * sizeof(int)); + cudaMalloc(&d_tabuListIdx, sizeof(int)); + cudaMalloc(&d_best_value_neigh, blocks_per_grid * sizeof(float)); + cudaMalloc(&d_best_job_j, blocks_per_grid * sizeof(int)); + cudaMalloc(&d_best_job_v, blocks_per_grid * sizeof(int)); + cudaMalloc(&d_err_note, sizeof(float)); + + cudaMemcpy(d_blocks_per_grid, &blocks_per_grid, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_threads_per_block, &threads_per_block, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_m, &m, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_pi, pi, n * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_pi_best, pi, n * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_value_best, &value_best, sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_aj, aj, n * sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_alphaj, alphaj, n * sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_parametersTS, parametersTS, 2 * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_tabuList, tabuList, 2 * tabuListSize * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_tabuListIdx, &tabuListIdx, sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_best_value_neigh, best_value_neigh, blocks_per_grid * sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_best_job_j, best_job_j, blocks_per_grid * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_best_job_v, best_job_v, blocks_per_grid * sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_err_note, &err_note, sizeof(float), cudaMemcpyHostToDevice); + + + for (iter = 0; iter < iterN; iter++) + { + //input data <<>>, i.e., sufficient that xy coveres N //przyklad przeliczania + KernelTS_FFU_BaT_block_sync <<< blocks_per_grid, threads_per_block >>> (d_blocks_per_grid, 
d_threads_per_block, d_n, d_m, d_pi, d_pi_best, d_value_best, d_aj, d_alphaj, d_parametersTS, d_tabuList, d_tabuListIdx, d_best_value_neigh, d_best_job_j, d_best_job_v, time(NULL), d_err_note); + + //synchronize blocks + cudaDeviceSynchronize(); + } + //final update (from the last iteration // + //Device -> Host + cudaMemcpy(pi, d_pi, n * sizeof(int), cudaMemcpyDeviceToHost); + cudaMemcpy(pi_best, d_pi_best, n * sizeof(int), cudaMemcpyDeviceToHost); + cudaMemcpy(&value_best, d_value_best, sizeof(float), cudaMemcpyDeviceToHost); + cudaMemcpy(best_value_neigh, d_best_value_neigh, blocks_per_grid * sizeof(float), cudaMemcpyDeviceToHost); + cudaMemcpy(best_job_j, d_best_job_j, blocks_per_grid * sizeof(int), cudaMemcpyDeviceToHost); + cudaMemcpy(best_job_v, d_best_job_v, blocks_per_grid * sizeof(int), cudaMemcpyDeviceToHost); + + index_block_best = 0; + value_neigh = best_value_neigh[index_block_best]; + for (i = 1; i < blocks_per_grid; i++) + { + if (best_value_neigh[i] < value_neigh) + { + index_block_best = i; + value_neigh = best_value_neigh[i]; + } + } + + //---------- update the best ---------// + if (value_best > value_neigh) + { + HostInsertTable(pi, best_job_j[index_block_best], best_job_v[index_block_best]); //do not copy if the last neigh is not the best + value_best = value_neigh; + for (i = 0; i < n; i++) + { + pi_best[i] = pi[i]; + } + } + //------------------------------------// + + + //for tests only + cudaMemcpy(&err_note, d_err_note, sizeof(float), cudaMemcpyDeviceToHost); + + for (i = 0; i < n; i++) + { + result.pi.Set(i, pi_best[i]); + } + + result.value = p->Criterion(result.pi); + + cudaFree(d_blocks_per_grid); + cudaFree(d_threads_per_block); + cudaFree(d_n); + cudaFree(d_m); + cudaFree(d_pi); + cudaFree(d_pi_best); + cudaFree(d_value_best); + cudaFree(d_aj); + cudaFree(d_alphaj); + cudaFree(d_tabuList); + cudaFree(d_tabuListIdx); + cudaFree(d_best_value_neigh); + cudaFree(d_best_job_j); + cudaFree(d_best_job_v); + cudaFree(d_parametersTS); + cudaFree(d_err_note); + + + + delete[]pi; + delete[]pi_best; + delete[]tabuList; + delete[]aj; + delete[]alphaj; + delete[]parametersTS; + delete[]best_value_neigh; + delete[]best_job_j; + delete[]best_job_v; + + +} + diff --git a/cuda_code/NDArrayCudaBasicsTests_9.cu b/cuda_code/NDArrayCudaBasicsTests_9.cu new file mode 100644 index 0000000000000000000000000000000000000000..46f962dda4811de9e461d52ccd5202011a01ce69 --- /dev/null +++ b/cuda_code/NDArrayCudaBasicsTests_9.cu @@ -0,0 +1,2198 @@ +/******************************************************************************* + * Copyright (c) 2015-2018 Skymind, Inc. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + ******************************************************************************/ + + // + // @author raver119@gmail.com + // + +#include "testlayers.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace nd4j; +using namespace nd4j::graph; + +class NDArrayCudaBasicsTests : public testing::Test { +public: + +}; + +////////////////////////////////////////////////////////////////////////// +static cudaError_t allocateDeviceMem(LaunchContext& lc, std::vector& devicePtrs, const std::vector>& hostData) { + + if(devicePtrs.size() != hostData.size()) + throw std::invalid_argument("prepareDataForCuda: two input sts::vectors should same sizes !"); + + cudaError_t cudaResult; + + void* reductionPointer; + cudaResult = cudaMalloc(reinterpret_cast(&reductionPointer), 1024*1024); if(cudaResult != 0) return cudaResult; + int* allocationPointer; + cudaResult = cudaMalloc(reinterpret_cast(&allocationPointer), 1024*1024); if(cudaResult != 0) return cudaResult; + + lc.setReductionPointer(reductionPointer); + lc.setAllocationPointer(allocationPointer); + cudaStream_t stream = *lc.getCudaStream(); + + for(int i = 0; i < devicePtrs.size(); ++i) { + + cudaResult = cudaMalloc(reinterpret_cast(&devicePtrs[i]), hostData[i].second); if(cudaResult != 0) return cudaResult; + cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, stream); + } + return cudaResult; +} + +TEST_F(NDArrayCudaBasicsTests, Test_Registration_1) { + auto x = NDArrayFactory::create('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', {5}, {5, 4, 3, 2, 1}); + + ASSERT_TRUE(x.isActualOnDeviceSide()); + ASSERT_FALSE(x.isActualOnHostSide()); +} + +TEST_F(NDArrayCudaBasicsTests, Test_Registration_2) { + auto x = NDArrayFactory::create('c', {5}); + auto y = NDArrayFactory::create('c', {5}); + + ASSERT_TRUE(x.isActualOnDeviceSide()); + ASSERT_FALSE(x.isActualOnHostSide()); +} + +TEST_F(NDArrayCudaBasicsTests, Test_Registration_3) { + auto x = NDArrayFactory::create('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', {5}, {5, 4, 3, 2, 1}); + + ASSERT_TRUE(x.isActualOnDeviceSide()); + ASSERT_FALSE(x.isActualOnHostSide()); + + NDArray::registerSpecialUse({&x}, {&y}); + + ASSERT_TRUE(x.isActualOnDeviceSide()); + ASSERT_FALSE(x.isActualOnHostSide()); + + ASSERT_TRUE(y.isActualOnDeviceSide()); + ASSERT_FALSE(y.isActualOnHostSide()); +} + +TEST_F(NDArrayCudaBasicsTests, Test_Registration_01) { + auto x = NDArrayFactory::create_('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create_('c', {5}, {5, 4, 3, 2, 1}); + + ASSERT_TRUE(x->isActualOnDeviceSide()); + ASSERT_FALSE(x->isActualOnHostSide()); + delete x; + delete y; +} + +TEST_F(NDArrayCudaBasicsTests, Test_Registration_02) { + auto x = NDArrayFactory::create_('c', {5}); + auto y = NDArrayFactory::create_('c', {5}); + + ASSERT_TRUE(x->isActualOnDeviceSide()); + ASSERT_FALSE(x->isActualOnHostSide()); + delete x; + delete y; +} + +TEST_F(NDArrayCudaBasicsTests, Test_Registration_03) { + auto x = NDArrayFactory::create_('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create_('c', {5}, {5, 4, 3, 2, 1}); + + ASSERT_TRUE(x->isActualOnDeviceSide()); + ASSERT_FALSE(x->isActualOnHostSide()); + + NDArray::registerSpecialUse({y}, {x}); + x->applyTransform(transform::Neg, *y); + //ASSERT_TRUE(x->isActualOnDeviceSide()); + //ASSERT_FALSE(x->isActualOnHostSide()); + + //ASSERT_TRUE(y->isActualOnDeviceSide()); + 
//ASSERT_TRUE(y->isActualOnHostSide()); + //y->syncToHost(); + // y->printBuffer("Negatives"); + delete x; + delete y; +} + +TEST_F(NDArrayCudaBasicsTests, Test_Cosine_1) { + auto x = NDArrayFactory::create_('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create_('c', {5}, {5, 4, 3, 2, 1}); + + ASSERT_TRUE(x->isActualOnDeviceSide()); + ASSERT_FALSE(x->isActualOnHostSide()); + + NDArray::registerSpecialUse({y}, {x}); + x->applyTransform(transform::Cosine, *y); + //ASSERT_TRUE(x->isActualOnDeviceSide()); + //ASSERT_FALSE(x->isActualOnHostSide()); + + //ASSERT_TRUE(y->isActualOnDeviceSide()); + //ASSERT_TRUE(y->isActualOnHostSide()); + //y->syncToHost(); + delete x; + delete y; +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestAdd_1) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto z = NDArrayFactory::create('c', { 5 }, {10, 10, 10, 10, 10}); + + auto exp = NDArrayFactory::create('c', { 5 }, { 2, 4, 6, 8, 10 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + + Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t)); + CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); + cudaError_t dZ = cudaStreamCreate(reinterpret_cast(&nativeStream)); + auto stream = reinterpret_cast(&nativeStream); + + //cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream); + //cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice, *stream); + + LaunchContext lc(stream, nullptr, nullptr); + NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); + z.tickWriteDevice(); + auto res = cudaStreamSynchronize(*stream); + ASSERT_EQ(0, res); + + for (int e = 0; e < z.lengthOf(); e++) + ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestAdd_2) { + // allocating host-side arrays + NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}); + NDArray y('c', { 5 }, { 1, 2, 3, 4, 5}); + NDArray z('c', { 5 }, nd4j::DataType::DOUBLE); + + NDArray exp('c', { 5 }, { 2, 4, 6, 8, 10 }); + + Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t)); + CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); + cudaError_t dZ = cudaStreamCreate(reinterpret_cast(&nativeStream)); + auto stream = reinterpret_cast(&nativeStream); + + LaunchContext lc(stream, *stream, nullptr, nullptr); + NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, nullptr, x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); + auto res 
= cudaStreamSynchronize(*stream); + ASSERT_EQ(0, res); + + for (int e = 0; e < z.lengthOf(); e++) + ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestAdd_3) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto z = NDArrayFactory::create('c', { 5 }, {10, 10, 10, 10, 10}); + + auto exp = NDArrayFactory::create('c', { 5 }, { 2, 4, 6, 8, 10 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + + Nd4jPointer nativeStream = (Nd4jPointer)malloc(sizeof(cudaStream_t)); + CHECK_ALLOC(nativeStream, "Failed to allocate memory for new CUDA stream", sizeof(cudaStream_t)); + cudaError_t dZ = cudaStreamCreate(reinterpret_cast(&nativeStream)); + auto stream = reinterpret_cast(&nativeStream); + + //cudaMemcpyAsync(devBufferPtrX, x.buffer(), x.lengthOf() * x.sizeOfT(), cudaMemcpyHostToDevice, *stream); + //cudaMemcpyAsync(devShapePtrX, x.shapeInfo(), shape::shapeInfoByteLength(x.shapeInfo()), cudaMemcpyHostToDevice, *stream); + + LaunchContext lc(stream, *stream, nullptr, nullptr); + NativeOpExecutioner::execPairwiseTransform(&lc, pairwise::Add, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), z.buffer(), z.shapeInfo(), z.specialBuffer(), z.specialShapeInfo(), nullptr); + z.tickWriteDevice(); + auto res = cudaStreamSynchronize(*stream); + ASSERT_EQ(0, res); + //double* localBuffer = ; + z.syncToHost(); + cudaMemcpy(z.buffer(), z.specialBuffer(), z.lengthOf() * z.sizeOfT(), cudaMemcpyDeviceToHost); + res = cudaStreamSynchronize(*stream); + z.tickWriteHost(); + ASSERT_EQ(0, res); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + for (int e = 0; e < z.lengthOf(); e++) { + ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); + } +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestAdd_4) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto z = NDArrayFactory::create('c', { 5 }); + + auto exp = NDArrayFactory::create('c', { 5 }, { 2, 4, 6, 8, 10 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + x.applyPairwiseTransform(pairwise::Add, y, z); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + for (int e = 0; e < z.lengthOf(); e++) { + ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); + } +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, 
TestAdd_5) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + //auto z = NDArrayFactory::create('c', { 5 }); + + auto exp = NDArrayFactory::create('c', { 5 }, { 2, 4, 6, 8, 10 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + x += y; + //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); + x.syncToHost(); + //y.printBuffer("3Y = "); + //z.printBuffer("3Result out"); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + for (int e = 0; e < x.lengthOf(); e++) { + ASSERT_NEAR(exp.e(e), x.e(e), 1e-5); + } +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestAdd_6) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); + //auto z = NDArrayFactory::create('c', { 5 }); + + auto exp = NDArrayFactory::create('c', { 5 }, { 3, 4, 5, 6, 7 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + x += y; + //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); + x.syncToHost(); + + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + for (int e = 0; e < x.lengthOf(); e++) { + ASSERT_NEAR(exp.e(e), x.e(e), 1e-5); + } +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestAdd_7) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + //auto y = NDArrayFactory::create(2); //.'c', { 5 }, { 1, 2, 3, 4, 5}); + //auto z = NDArrayFactory::create('c', { 5 }); + + auto exp = NDArrayFactory::create('c', { 5 }, { 3, 4, 5, 6, 7 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + x += 2.; + //x.applyPairwiseTransform(pairwise::Add, &y, &z, nullptr); + x.syncToHost(); + + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + for (int e = 0; e < x.lengthOf(); e++) { + ASSERT_NEAR(exp.e(e), x.e(e), 1e-5); + } +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestMultiply_1) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto y = 
NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto z = NDArrayFactory::create('c', { 5 }); + + auto exp = NDArrayFactory::create('c', { 5 }, { 1, 4, 9, 16, 25 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + x.applyPairwiseTransform(pairwise::Multiply, y, z); + // x.printBuffer("3X = "); + // y.printBuffer("3Y = "); + // z.printBuffer("3Result out"); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + for (int e = 0; e < z.lengthOf(); e++) { + ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); + } +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestMultiply_2) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + NDArray z('c', { 5 }, nd4j::DataType::DOUBLE); + + auto exp = NDArrayFactory::create('c', { 5 }, { 1, 4, 9, 16, 25 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + x.applyPairwiseTransform(pairwise::Multiply, y, z); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + for (int e = 0; e < z.lengthOf(); e++) { + ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); + } +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestMultiply_3) { + // allocating host-side arrays + NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}, nd4j::DataType::DOUBLE); + NDArray y('c', { 5 }, { 1., 2., 3., 4., 5.}, nd4j::DataType::DOUBLE); + auto z = NDArrayFactory::create('c', { 5 }); + + auto exp = NDArrayFactory::create('c', { 5 }, { 1, 4, 9, 16, 25 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + x.applyPairwiseTransform(pairwise::Multiply, y, z); + //x.printBuffer("23X = "); + //y.printBuffer("23Y = "); + // z.printBuffer("23Result out"); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + for (int e = 0; e < z.lengthOf(); e++) { + ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); + } +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestMultiply_4) { + // allocating host-side arrays + NDArray x('c', { 5 }, { 1, 2, 3, 4, 5}, nd4j::DataType::DOUBLE); + NDArray y('c', { 5 }, { 1., 2., 3., 4., 5.}, nd4j::DataType::DOUBLE); + //auto z = NDArrayFactory::create('c', { 5 
}); + + auto exp = NDArrayFactory::create('c', { 5 }, { 1, 4, 9, 16, 25 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); + //x.printBuffer("23X = "); + //y.printBuffer("23Y = "); + x *= y; + //x.tickWriteDevice(); + // x.printBuffer("33Result out"); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + for (int e = 0; e < x.lengthOf(); e++) { + ASSERT_NEAR(exp.e(e), x.e(e), 1e-5); + } +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestPrimitiveNeg_01) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto exp = NDArrayFactory::create('c', { 5 }, { -1, -2, -3, -4, -5 }); + + auto stream = x.getContext()->getCudaStream();//reinterpret_cast(&nativeStream); + + NativeOpExecutioner::execTransformSame(x.getContext(), transform::Neg, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo(), nullptr, nullptr, nullptr); + auto res = cudaStreamSynchronize(*stream); + ASSERT_EQ(0, res); + y.tickWriteDevice(); + + // x.printBuffer("X = "); + // y.printBuffer("Y = "); + + for (int e = 0; e < y.lengthOf(); e++) { + ASSERT_NEAR(exp.e(e), y.e(e), 1e-5); + } +} + +TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveNeg_2) { + auto x = NDArrayFactory::create('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', {5}); + + ASSERT_TRUE(x.isActualOnDeviceSide()); + ASSERT_FALSE(x.isActualOnHostSide()); + + x.applyTransform(transform::Neg, y); + //ASSERT_TRUE(x->isActualOnDeviceSide()); + //ASSERT_FALSE(x->isActualOnHostSide()); + + //ASSERT_TRUE(y->isActualOnDeviceSide()); + //ASSERT_TRUE(y->isActualOnHostSide()); + //auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); + //ASSERT_EQ(0, res); + // y.printBuffer("Negatives2"); + //delete x; + //delete y; +} + +TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveSqrt_1) { // strict + auto x = NDArrayFactory::create('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', {5}); + auto exp = NDArrayFactory::create({1.000000, 1.414214, 1.732051, 2.000000, 2.236068}); + ASSERT_TRUE(x.isActualOnDeviceSide()); + ASSERT_FALSE(x.isActualOnHostSide()); + + x.applyTransform(transform::Sqrt, y); + //ASSERT_TRUE(x->isActualOnDeviceSide()); + //ASSERT_FALSE(x->isActualOnHostSide()); + + //ASSERT_TRUE(y->isActualOnDeviceSide()); + //ASSERT_TRUE(y->isActualOnHostSide()); + //auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); + //ASSERT_EQ(0, res); + ASSERT_TRUE(y.equalsTo(exp)); + //y.printBuffer("SQRT output"); + //delete x; + //delete y; +} + +TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveAssign_1) { // strict + auto x = NDArrayFactory::create('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', {5}); + //auto exp = NDArrayFactory::create({1.000000, 1.414214, 1.732051, 2.000000, 2.236068}); + //ASSERT_TRUE(x.isActualOnDeviceSide()); + //ASSERT_TRUE(x.isActualOnHostSide()); + + 
x.applyTransform(transform::Assign, y); + //ASSERT_TRUE(x->isActualOnDeviceSide()); + //ASSERT_FALSE(x->isActualOnHostSide()); + + //ASSERT_TRUE(y->isActualOnDeviceSide()); + //ASSERT_TRUE(y->isActualOnHostSide()); + //auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); + //ASSERT_EQ(0, res); + + // printf("Assigned to another array\n"); + // y.printBuffer("OUput"); + ASSERT_TRUE(y.equalsTo(x)); + //y.syncToHost(); + //y.printBuffer("IsMax output"); + //delete x; + //delete y; +} + +TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_1) { // strict + auto x = NDArrayFactory::create('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', {5}); + auto exp = NDArrayFactory::create('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); + + ASSERT_TRUE(x.isActualOnDeviceSide()); + ASSERT_FALSE(x.isActualOnHostSide()); + + x.applyTransform(transform::Cosine, y); + //ASSERT_TRUE(x->isActualOnDeviceSide()); + //ASSERT_FALSE(x->isActualOnHostSide()); + + //ASSERT_TRUE(y->isActualOnDeviceSide()); + //ASSERT_TRUE(y->isActualOnHostSide()); + //auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); + //ASSERT_EQ(0, res); + ASSERT_TRUE(exp.isSameShape(y)); + ASSERT_TRUE(exp.dataType() == y.dataType()); + //y.printBuffer("Cosine2"); + //delete x; + //delete y; +} + +TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_2) { + auto x = NDArrayFactory::create('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', {5}); + auto exp = NDArrayFactory::create('c', {5}, {0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); + + ASSERT_TRUE(x.isActualOnDeviceSide()); + ASSERT_FALSE(x.isActualOnHostSide()); + x.applyTransform(transform::Cosine, y); + //ASSERT_TRUE(x->isActualOnDeviceSide()); + //ASSERT_FALSE(x->isActualOnHostSide()); + + //ASSERT_TRUE(y->isActualOnDeviceSide()); + //ASSERT_TRUE(y->isActualOnHostSide()); + //auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); + //ASSERT_EQ(0, res); + //exp.syncToHost(); + //y.printBuffer("PrimitiveCosine2"); + //exp.printBuffer("Primitive Cosine exp"); + ASSERT_TRUE(exp.isSameShape(y)); + ASSERT_TRUE(exp.dataType() == y.dataType()); + //for (int e = 0; e < y.lengthOf(); e++) { + // ASSERT_NEAR(exp.e(e), y.e(e), 1e-5); + //} + + ASSERT_TRUE(exp.equalsTo(y)); + //delete x; + //delete y; +} + +TEST_F(NDArrayCudaBasicsTests, Test_PrimitiveCosine_3) { + auto x = NDArrayFactory::create('c', {5}, {1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create('c', {5}); + auto exp = NDArrayFactory::create({0.540302, -0.416147, -0.989992, -0.653644, 0.283662}); + + ASSERT_TRUE(x.isActualOnDeviceSide()); + ASSERT_FALSE(x.isActualOnHostSide()); + x.applyTransform(transform::Cosine, y); + //ASSERT_TRUE(x->isActualOnDeviceSide()); + //ASSERT_FALSE(x->isActualOnHostSide()); + + //ASSERT_TRUE(y->isActualOnDeviceSide()); + //ASSERT_TRUE(y->isActualOnHostSide()); + //auto res = cudaStreamSynchronize(*y.getContext()->getCudaStream()); + //ASSERT_EQ(0, res); + //exp.syncToHost(); +// y.printBuffer("PrimitiveCosine3"); +// exp.printBuffer("Primitive Cosine3 exp"); +// y.printShapeInfo("Y shape"); +// exp.printShapeInfo("Exp Shape"); + ASSERT_TRUE(exp.isSameShape(y)); +// +// for (int e = 0; e < y.lengthOf(); e++) { +// printf("%lf == %lf\n", exp.e(e), y.e(e)); +//// ASSERT_NEAR(exp.e(e), y.e(e), 1e-5); +// } + + ASSERT_TRUE(exp.equalsTo(y)); + //delete x; + //delete y; +} + +TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_2) { + + //if (!Environment::getInstance()->isExperimentalBuild()) + // return; + + NDArray x = 
NDArrayFactory::create('c', {2,3,4}); + NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::DOUBLE); + NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE); +// NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, nd4j::DataType::DOUBLE); + NDArray exp('c', {2,3,4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, nd4j::DataType::DOUBLE); + x.linspace(1); x.syncToDevice(); + + std::vector dimensions = {0,2}; + + // evaluate xTad data + shape::TAD xTad; + xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size()); + xTad.createTadOnlyShapeInfo(); + xTad.createOffsets(); + + // prepare input arrays for prepareDataForCuda function + std::vector> hostData; + hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions + hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo + hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets + std::vector devicePtrs(hostData.size(), nullptr); + + // create cuda stream and LaunchContext + cudaError_t cudaResult; + cudaStream_t stream; + cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); + LaunchContext lc(&stream); + + // allocate required amount of global device memory and copy host data to it + cudaResult = allocateDeviceMem(lc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); + + // call cuda kernel which calculates result + NativeOpExecutioner::execBroadcast(&lc, nd4j::broadcast::Multiply, + nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), + nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), + nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(), + (int*)devicePtrs[0], dimensions.size(), + (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], + nullptr, nullptr); + + cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); + z.tickWriteDevice(); + + // verify results + for (int e = 0; e < z.lengthOf(); e++) + ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); + + // free allocated global device memory + for(int i = 0; i < devicePtrs.size(); ++i) + cudaFree(devicePtrs[i]); + + // delete cuda stream + cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); +} + +TEST_F(NDArrayCudaBasicsTests, TestRawBroadcast_3) { + + //if (!Environment::getInstance()->isExperimentalBuild()) + // return; + + NDArray x('c', {2,3,4}, nd4j::DataType::DOUBLE); + NDArray y('c', {2,4}, {10,20,30,40,50,60,70,80}, nd4j::DataType::DOUBLE); + NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::DOUBLE); +// NDArray exp('c', {2,3,4}, {10., 21., 32., 43., 14., 25., 36., 47., 18., 29., 40., 51., 62., 73., 84., 95., 66., 77., 88., 99., 70., 81., 92., 103}, nd4j::DataType::DOUBLE); + NDArray exp('c', {2,3,4}, {10., 40., 90., 160., 50., 120., 210., 320., 90., 200., 330., 480., 650., 840., 1050., 1280., 850., 1080., 1330., 1600., 1050., 1320., 1610., 1920.}, nd4j::DataType::DOUBLE); + x.linspace(1); x.syncToDevice(); + + std::vector dimensions = {0,2}; + + // evaluate xTad data + shape::TAD xTad; + xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size()); + xTad.createTadOnlyShapeInfo(); 
+ xTad.createOffsets(); + + // prepare input arrays for prepareDataForCuda function + std::vector> hostData; + hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(int)); // 0 -- dimensions + hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo + hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets + std::vector devicePtrs(hostData.size(), nullptr); + + // create cuda stream and LaunchContext + cudaError_t cudaResult; + //cudaStream_t stream; + //cudaResult = cudaStreamCreate(&stream); ASSERT_EQ(0, cudaResult); + LaunchContext* pLc = x.getContext();//(&stream); + cudaStream_t* stream = pLc->getCudaStream(); + // allocate required amount of global device memory and copy host data to it +// cudaResult = allocateDeviceMem(*pLc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); + for(int i = 0; i < devicePtrs.size(); ++i) { + + cudaResult = cudaMalloc(reinterpret_cast(&devicePtrs[i]), hostData[i].second); ASSERT_EQ(0, cudaResult); + cudaMemcpyAsync(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice, *stream); + } + + NDArray::registerSpecialUse({&z}, {&x, &y}); + // call cuda kernel which calculates result + NativeOpExecutioner::execBroadcast(pLc, nd4j::broadcast::Multiply, + nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), + nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), + nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(), + (int*)devicePtrs[0], dimensions.size(), + (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], + nullptr, nullptr); + + //cudaResult = cudaStreamSynchronize(stream); ASSERT_EQ(0, cudaResult); + //z.syncToHost(); + // verify results + for (int e = 0; e < z.lengthOf(); e++) + ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); + + // free allocated global device memory + for(int i = 0; i < devicePtrs.size(); ++i) + cudaFree(devicePtrs[i]); + ASSERT_TRUE(exp.equalsTo(z)); + // delete cuda stream + //cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); +} + + +TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_1) { + // allocating host-side arrays + NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); + NDArray y = NDArrayFactory::create(3.); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); + //auto z = NDArrayFactory::create('c', { 5 }); + + auto exp = NDArrayFactory::create('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); + x *= y; + //x.syncToHost(); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + ASSERT_TRUE(exp.equalsTo(x)); +// for (int e = 0; e < x.lengthOf(); e++) { +// ASSERT_NEAR(exp.e(e), x.e(e), 1e-5); +// } +} + +TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_01) { + // allocating host-side arrays + NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); + NDArray y = NDArrayFactory::create(3.); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); + auto z = NDArrayFactory::create('c', { 2, 
3 }); + + auto exp = NDArrayFactory::create('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); + //x.printBuffer("23X = "); + //y.printBuffer("23Y = "); + x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, z);// *= y; + // z.printBuffer("53Result out"); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + ASSERT_TRUE(exp.equalsTo(z)); + +// for (int e = 0; e < x.lengthOf(); e++) { +// ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); +// } +} + +TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_02) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}); //, nd4j::DataType::DOUBLE); + auto y = NDArrayFactory::create('c', {2,3}, {3, 3, 3, 3, 3, 3}); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); + auto z = NDArrayFactory::create('c', { 2, 3 }); + + auto exp = NDArrayFactory::create('c', { 2, 3 }, { 3, 6, 9, 12, 15, 18 }); + //if (x.isActualOnHostSide() && !x.isActualOnDeviceSide()) + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); + //x.printBuffer("23X = "); + //y.printBuffer("23Y = "); + x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, z);// *= y; + + // z.printBuffer("52Result out"); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + ASSERT_TRUE(exp.equalsTo(z)); + +// for (int e = 0; e < x.lengthOf(); e++) { +// ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); +// } +} + +TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_002) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}); //, nd4j::DataType::DOUBLE); + auto y = NDArrayFactory::create('c', {2, 3}, {2., 3., 3., 3., 3., 3.}); //'c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); + auto z = NDArrayFactory::create('c', { 2, 3 }); + + auto exp = NDArrayFactory::create('c', { 2, 3 }, { 2, 6, 9, 12, 15, 18 }); + //if (x.isActualOnHostSide() && !x.isActualOnDeviceSide()) + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); + //x.printBuffer("23X = "); + //y.printBuffer("23Y = "); + x.applyPairwiseTransform(pairwise::Multiply, y, z);// *= y; + + // z.printBuffer("51Result out"); + + // + // 
cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + ASSERT_TRUE(exp.equalsTo(z)); + +// for (int e = 0; e < x.lengthOf(); e++) { +// ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); +// } +} + +//////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestBroadcastRaw_1) { + + //if (!Environment::getInstance()->isExperimentalBuild()) + // return; + + NDArray x('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32); + NDArray y('c', {3}, {10, 20, 30}, nd4j::DataType::INT64); + NDArray z('c', {2,3,4}, {100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100}, nd4j::DataType::INT32); + NDArray exp('c', {2,3,4}, {10, 11, 12, 13,24, 25, 26, 27,38, 39, 40, 41,22, 23, 24, 25,36, 37, 38, 39,50, 51, 52, 53}, nd4j::DataType::INT32); + //real output [10, 11, 12, 13, 4, 5, 6, 7, 28, 29, 30, 31, 22, 23, 24, 25, 16, 17, 18, 19, 40, 41, 42, 43] + x.linspace(0); x.syncToDevice(); + + std::vector dimensions = {1}; + + // evaluate xTad data + shape::TAD xTad; + xTad.init(x.getShapeInfo(), dimensions.data(), dimensions.size()); + xTad.createTadOnlyShapeInfo(); + xTad.createOffsets(); + + // prepare input arrays for prepareDataForCuda function + std::vector> hostData; + hostData.emplace_back(dimensions.data(), dimensions.size() * sizeof(Nd4jLong)); // 0 -- dimensions + hostData.emplace_back(xTad.tadOnlyShapeInfo, shape::shapeInfoByteLength(xTad.tadOnlyShapeInfo)); // 1 -- xTadShapeInfo + hostData.emplace_back(xTad.tadOffsets, xTad.numTads * sizeof(Nd4jLong)); // 2 -- xTadOffsets + std::vector devicePtrs(hostData.size(), nullptr); + + // create cuda stream and LaunchContext + cudaError_t cudaResult; + cudaStream_t* stream = x.getContext()->getCudaStream(); + LaunchContext* pLc = x.getContext(); + + // allocate required amount of global device memory and copy host data to it + //cudaResult = allocateDeviceMem(*pLc, devicePtrs, hostData); ASSERT_EQ(0, cudaResult); + for(size_t i = 0; i < devicePtrs.size(); ++i) { + cudaResult = cudaMalloc(&devicePtrs[i], hostData[i].second); //if(cudaResult != 0) return cudaResult; + ASSERT_EQ(cudaResult, 0); + cudaMemcpy(devicePtrs[i], hostData[i].first, hostData[i].second, cudaMemcpyHostToDevice); + } + + // call cuda kernel which calculates result + NativeOpExecutioner::execBroadcast(pLc, nd4j::broadcast::Add, + nullptr, x.getShapeInfo(), x.specialBuffer(), x.specialShapeInfo(), + nullptr, y.getShapeInfo(), y.specialBuffer(), y.specialShapeInfo(), + nullptr, z.getShapeInfo(), z.specialBuffer(), z.specialShapeInfo(), + (int*)devicePtrs[0], dimensions.size(), + (Nd4jLong*)devicePtrs[1], (Nd4jLong*)devicePtrs[2], + nullptr, nullptr); + + cudaResult = cudaStreamSynchronize(*stream); ASSERT_EQ(0, cudaResult); + + // x.printIndexedBuffer(" X"); + // y.printIndexedBuffer("+Y"); + // z.printBuffer("ADD broadcasted output"); + // verify results + // for (int e = 0; e < z.lengthOf(); e++) + // ASSERT_NEAR(exp.e(e), z.e(e), 1e-5); + + // free allocated global device memory + for(int i = 0; i < devicePtrs.size(); ++i) + cudaFree(devicePtrs[i]); + + // delete cuda stream + //cudaResult = cudaStreamDestroy(stream); ASSERT_EQ(0, cudaResult); +} + +TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply) { + // allocating host-side arrays + NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); + NDArray y('c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); + //auto z = 
NDArrayFactory::create('c', { 5 }); + + auto exp = NDArrayFactory::create('c', { 2, 3 }, { 2, 6, 12, 8, 15, 24 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); + //x.printBuffer("23X = "); + //y.printBuffer("23Y = "); + x *= y; + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + //for (int e = 0; e < x.lengthOf(); e++) { + // ASSERT_NEAR(exp.e(e), x.e(e), 1e-5); + //} +} + + +TEST_F(NDArrayCudaBasicsTests, TestBroadcastMultiply_2) { + // allocating host-side arrays + NDArray x('c', { 2, 3 }, { 1, 2, 3, 4, 5, 6}, nd4j::DataType::DOUBLE); + NDArray y('c', { 3 }, { 2., 3., 4.}, nd4j::DataType::DOUBLE); + //auto z = NDArrayFactory::create('c', { 5 }); + + auto exp = NDArrayFactory::create('c', { 2, 3 }, { 11,12, 13,14, 15, 16 }); + auto expZ = NDArrayFactory::create('c', { 2, 3 }, { 2, 6, 12, 8, 15, 24 }); + + // making raw buffers + //Nd4jPointer devBufferPtrX, devBufferPtrZ, devShapePtrX; + //cudaError_t res = cudaMalloc(reinterpret_cast(&devBufferPtrX), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devBufferPtrZ), x.lengthOf() * x.sizeOfT()); + //ASSERT_EQ(0, res); + //res = cudaMalloc(reinterpret_cast(&devShapePtrX), shape::shapeInfoByteLength(x.shapeInfo())); + //ASSERT_EQ(0, res); + //x.applyPairwiseTransform(pairwise::Multiply, &y, &z, nullptr); + //x.printBuffer("23X = "); + //y.printBuffer("23Y = "); + //void NDArray::applyTrueBroadcast(nd4j::BroadcastOpsTuple op, const NDArray* other, NDArray* target, const bool checkTargetShape, ExtraArguments *extraArgs) + x.applyTrueBroadcast(BroadcastOpsTuple::Multiply(), y, exp); + + // + // cudaFree(devBufferPtrX); + //cudaFree(devBufferPtrZ); + //cudaFree(devShapePtrX); + + //for (int e = 0; e < x.lengthOf(); e++) { + // ASSERT_NEAR(exp.e(e), x.e(e), 1e-5); + //} + ASSERT_TRUE(exp.equalsTo(expZ)); + +} + + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestReduceSum_1) { + // allocating host-side arrays + auto x = NDArrayFactory::create('c', { 5 }, { 1, 2, 3, 4, 5}); + auto y = NDArrayFactory::create(15); + auto exp = NDArrayFactory::create(15); + + auto stream = x.getContext()->getCudaStream();//reinterpret_cast(&nativeStream); + + NativeOpExecutioner::execReduceSameScalar(x.getContext(), reduce::Sum, x.buffer(), x.shapeInfo(), x.specialBuffer(), x.specialShapeInfo(), nullptr, y.buffer(), y.shapeInfo(), y.specialBuffer(), y.specialShapeInfo()); + auto res = cudaStreamSynchronize(*stream); + ASSERT_EQ(0, res); + y.syncToHost(); + + ASSERT_NEAR(y.e(0), 15, 1e-5); +} + +////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestDup1) { + + NDArray array('c', {2,3}, {1,2,3,4,5,6}); + auto arrC = array.dup('c'); + auto arrF = array.dup('f'); + // arrC->printBuffer("arrC"); + + // arrF->printBuffer("arrF"); + //arrC->printShapeInfo("C shape"); + //arrF->printShapeInfo("F shape"); + + ASSERT_TRUE(array.equalsTo(arrF)); + ASSERT_TRUE(array.equalsTo(arrC)); + + ASSERT_TRUE(arrF.equalsTo(arrC)); +} + 
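The dup('c') / dup('f') assertions in TestDup1 above hold because duplication only changes the memory ordering, not the logical contents. The following standalone sketch (plain C++; it does not use the nd4j API, and the 2x3 values are illustrative only) shows how the same logical element maps to different linear offsets under row-major ('c') and column-major ('f') layouts, which is why both copies still compare equal element by element.

// Standalone illustration, not part of the patched files and not using the nd4j API.
#include <cstdio>

int main() {
    const int rows = 2, cols = 3;
    const double values[rows][cols] = {{1, 2, 3}, {4, 5, 6}};

    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            int cOffset = i * cols + j;   // 'c' order: the row index varies slowest
            int fOffset = j * rows + i;   // 'f' order: the column index varies slowest
            std::printf("a[%d][%d]=%g  c-offset=%d  f-offset=%d\n",
                        i, j, values[i][j], cOffset, fOffset);
        }
    }
    return 0;
}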
+////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, equalsTo_1) { + + NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); + NDArray y('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); + + ASSERT_TRUE(x.equalsTo(y)); + + x.permutei({1,0}); + y.permutei({1,0}); + + ASSERT_TRUE(x.equalsTo(y)); +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, equalsTo_2) { + + NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,10,10}, nd4j::DataType::DOUBLE); + NDArray y('c', {2,5}, {1,2,5,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); + + ASSERT_FALSE(x.equalsTo(y)); + + x.permutei({1,0}); + y.permutei({1,0}); + + ASSERT_FALSE(x.equalsTo(y)); +} + +////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, equalsTo_3) { + + NDArray x('c', {2,5}, {1,2,3,4,5,6,7,8,9,10}, nd4j::DataType::DOUBLE); + NDArray y('c', {2,5}, {1.f,2.f,3.f,4.f,5.f,6.f,7.f,8.f,9.f,10.f}, nd4j::DataType::FLOAT32); + + ASSERT_FALSE(x.equalsTo(y)); + + x.permutei({1,0}); + y.permutei({1,0}); + + ASSERT_FALSE(x.equalsTo(y)); +} + +//////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, applyReduce3_1) { + + NDArray x('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::INT32); + NDArray x2('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::INT32); + NDArray y('c', {2,3,4}, {-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2,3,-4,5}, nd4j::DataType::INT32); + NDArray k('c', {2,3}, {-2,3,-4,5,-2,3}, nd4j::DataType::INT32); + NDArray k2('c', {3,2}, {-2,3,-4,5,-2,3}, nd4j::DataType::INT32); + + NDArray exp1('c', {3}, {4.f, 20.f, 36.f}, nd4j::DataType::FLOAT32); + NDArray exp2('c', {2,3}, {-10.f, -2.f, 6.f,14.f, 22.f, 30.f}, nd4j::DataType::FLOAT32); + NDArray exp3('c', {4}, {38.f, 41.f, 44.f, 47.f}, nd4j::DataType::FLOAT32); + NDArray exp4('c', {4}, {114.f, 117.f, 120.f, 123.f}, nd4j::DataType::FLOAT32); + + + NDArray z = x.applyReduce3(nd4j::reduce3::Dot, y, {0,2}); + ASSERT_TRUE(z.equalsTo(&exp1)); + + z = x.applyReduce3(nd4j::reduce3::Dot, k, {0,1}); + ASSERT_TRUE(z.equalsTo(&exp3)); + + x.permutei({0,2,1}); + y.permutei({0,2,1}); + + z = y.applyReduce3(nd4j::reduce3::Dot, x, {1}); + ASSERT_TRUE(z.equalsTo(&exp2)); + + x2.permutei({1,0,2}); + + z = x2.applyReduce3(nd4j::reduce3::Dot, k2, {0,1}); + ASSERT_TRUE(z.equalsTo(&exp4)); +} + +//////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, applyReduce3_2) { + + NDArray x('c', {2,3,4}, {-10,-9,-8.5,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::DOUBLE); + NDArray x2('c', {2,3,4}, {-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0.5,1,2,3,4,5,6,7,8,9,10,11,12,13}, nd4j::DataType::DOUBLE); + NDArray y('c', {2,3,4}, {-2,3,-4,5,-2,3,-4,5,-2,3,-4,5,-2.5,3,-4,5,-2,3,-4,5,-2,3,-4,5}, nd4j::DataType::DOUBLE); + NDArray k('c', {2,3}, {-2,3,-4,5.5,-2,3}, nd4j::DataType::DOUBLE); + NDArray k2('c', {3,2}, {-2,3,-4,5,-2,3.5}, nd4j::DataType::DOUBLE); + + NDArray exp1('c', {3}, {5., 20., 36.}, nd4j::DataType::DOUBLE); + NDArray exp2('c', {2,3}, {-8., -2., 6., 13., 22., 30.}, nd4j::DataType::DOUBLE); + NDArray exp3('c', {4}, {39., 42.5, 47., 49.5}, nd4j::DataType::DOUBLE); + NDArray exp4('c', {4}, {119., 122.5, 125., 129.5}, nd4j::DataType::DOUBLE); + + NDArray z = x.applyReduce3(nd4j::reduce3::Dot, y, {0,2}); + 
ASSERT_TRUE(z.equalsTo(&exp1)); + + z = x.applyReduce3(nd4j::reduce3::Dot, k, {0,1}); + ASSERT_TRUE(z.equalsTo(&exp3)); + + x.permutei({0,2,1}); + y.permutei({0,2,1}); + + z = y.applyReduce3(nd4j::reduce3::Dot, x, {1}); + ASSERT_TRUE(z.equalsTo(&exp2)); + + x2.permutei({1,0,2}); + + z = x2.applyReduce3(nd4j::reduce3::Dot, k2, {0,1}); + ASSERT_TRUE(z.equalsTo(&exp4)); +} + +//////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, applyReduce3_3) { + + NDArray x1('c', {2,2,2}, {1,2,3,4,5,6,7,8}, nd4j::DataType::INT32); + NDArray x2('c', {2,2,2}, {-1,-2,-3,-4,-5,-6,-7,-8}, nd4j::DataType::INT32); + NDArray x3('c', {3,2}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE); + NDArray x4('c', {3,2}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE); + + NDArray exp1('c', {}, std::vector{-204}, nd4j::DataType::FLOAT32); + NDArray exp2('c', {}, std::vector{31.5}, nd4j::DataType::DOUBLE); + + + auto z = x1.applyReduce3(reduce3::Dot, x2); + ASSERT_TRUE(z.equalsTo(&exp1)); + + z = x3.applyReduce3(reduce3::Dot, x4); + ASSERT_TRUE(z.equalsTo(&exp2)); + + x1.permutei({2,1,0}); + x2.permutei({2,1,0}); + x3.permutei({1,0}); + x4.permutei({1,0}); + + z = x1.applyReduce3(reduce3::Dot, x2); + ASSERT_TRUE(z.equalsTo(&exp1)); + + z = x3.applyReduce3(reduce3::Dot, x4); + ASSERT_TRUE(z.equalsTo(&exp2)); +} + +//////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, applyAllReduce3_1) { + + NDArray x1('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::INT32); + NDArray x2('c', {2,2,2}, {-1,-2,-3,-4,-5,-6,-7,-8}, nd4j::DataType::INT32); + NDArray x3('c', {3,2}, {1.5,1.5,1.5,1.5,1.5,1.5}, nd4j::DataType::DOUBLE); + NDArray x4('c', {3,2}, {1,2,3,4,5,6}, nd4j::DataType::DOUBLE); + + NDArray exp1('c', {3,2}, {-88.f, -124.f, 6.f, -2.f, 22.f, 14.f}, nd4j::DataType::FLOAT32); + NDArray exp2('c', {6,4}, {-36.f, -44.f, -52.f, -60.f,-42.f, -52.f, -62.f, -72.f, 2.f, 0.f, -2.f, + -4.f, 6.f, 4.f, 2.f, 0.f, 10.f, 8.f, 6.f, 4.f, 14.f, 12.f, 10.f, 8.f}, + nd4j::DataType::FLOAT32); + NDArray exp3('c', {1,1}, std::vector{31.5}, nd4j::DataType::DOUBLE); + NDArray exp4('c', {3,3}, {4.5, 10.5, 16.5,4.5, 10.5, 16.5,4.5, 10.5, 16.5}, nd4j::DataType::DOUBLE); + + auto z = x1.applyAllReduce3(reduce3::Dot, x2, {0,2}); + ASSERT_TRUE(z.equalsTo(&exp1)); + + z = x1.applyAllReduce3(reduce3::Dot, x2, {0}); + ASSERT_TRUE(z.equalsTo(&exp2)); + + z = x3.applyAllReduce3(reduce3::Dot, x4, {0,1}); + ASSERT_TRUE(z.equalsTo(&exp3)); + + z = x3.applyAllReduce3(reduce3::Dot, x4, {1}); + ASSERT_TRUE(z.equalsTo(&exp4)); + + x1.permutei({2,1,0}); + x2.permutei({2,1,0}); + x3.permutei({1,0}); + x4.permutei({1,0}); + + z = x1.applyAllReduce3(reduce3::Dot, x2, {0,2}); + ASSERT_TRUE(z.equalsTo(&exp1)); + + z = x3.applyAllReduce3(reduce3::Dot, x4, {0}); + ASSERT_TRUE(z.equalsTo(&exp4)); +} + +////////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test1) { + + NDArray x('c', {2,3}, {0, 10, 1, 2, 2.5,-4}, nd4j::DataType::DOUBLE); + + NDArray scalar('c', {}, std::vector{100}, nd4j::DataType::INT64); + NDArray vec1('c', {2}, {100,100}, nd4j::DataType::INT64); + NDArray vec2('c', {3}, {100,100,100}, nd4j::DataType::INT64); + + NDArray exp1('c', {}, std::vector{1}, nd4j::DataType::INT64); + NDArray exp2('c', {2}, {1,1}, nd4j::DataType::INT64); + NDArray exp3('c', {3}, {1,0,0}, nd4j::DataType::INT64); + + NDArray exp4('c', {}, std::vector{2}, nd4j::DataType::INT64); + NDArray exp5('c', {2}, 
{1,1}, nd4j::DataType::INT64); + NDArray exp6('c', {3}, {1,0,0}, nd4j::DataType::INT64); + + x.applyIndexReduce(nd4j::indexreduce::IndexMax, scalar, {0,1}); + ASSERT_TRUE(scalar.equalsTo(&exp1)); + + x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec1, {1}); + ASSERT_TRUE(vec1.equalsTo(&exp2)); + + x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec2, {0}); + ASSERT_TRUE(vec2.equalsTo(&exp3)); + + x.permutei({1,0}); + + x.applyIndexReduce(nd4j::indexreduce::IndexMax, scalar, {0,1}); + ASSERT_TRUE(scalar.equalsTo(&exp4)); + + x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec1, {0}); + ASSERT_TRUE(vec1.equalsTo(&exp5)); + + x.applyIndexReduce(nd4j::indexreduce::IndexMax, vec2, {1}); + ASSERT_TRUE(vec2.equalsTo(&exp6)); +} + + +////////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, applyIndexReduce_test2) { + + NDArray x('c', {2,3}, {0, 10, 1, 2, 2.5,-4}, nd4j::DataType::DOUBLE); + + NDArray exp1('c', {}, std::vector{1}, nd4j::DataType::INT64); + NDArray exp2('c', {2}, {1,1}, nd4j::DataType::INT64); + NDArray exp3('c', {3}, {1,0,0}, nd4j::DataType::INT64); + + NDArray exp4('c', {}, std::vector{2}, nd4j::DataType::INT64); + NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); + NDArray exp6('c', {3}, {1,0,0}, nd4j::DataType::INT64); + + auto z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0,1}); + ASSERT_TRUE(z.equalsTo(&exp1)); + + z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {1}); + ASSERT_TRUE(z.equalsTo(&exp2)); + + z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0}); + ASSERT_TRUE(z.equalsTo(&exp3)); + + x.permutei({1,0}); + + z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0,1}); + ASSERT_TRUE(z.equalsTo(&exp4)); + + z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {0}); + ASSERT_TRUE(z.equalsTo(&exp5)); + + z = x.applyIndexReduce(nd4j::indexreduce::IndexMax, {1}); + ASSERT_TRUE(z.equalsTo(&exp6)); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test1) { + + NDArray x('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::INT32); + + NDArray z1('c', {}, std::vector{100}, nd4j::DataType::DOUBLE); + NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32); + NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::DOUBLE); + NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32); + NDArray z5('c', {2}, {100,100}, nd4j::DataType::FLOAT32); + + NDArray exp1('c', {}, std::vector{2.166667}, nd4j::DataType::DOUBLE); + NDArray exp2('c', {2,2}, {3.f,4.f,1.f,0.666667f}, nd4j::DataType::FLOAT32); + NDArray exp3('c', {3}, {4.5,1,1}, nd4j::DataType::DOUBLE); + NDArray exp4('c', {3,2}, {4,5,1,1,1,1}, nd4j::DataType::FLOAT32); + NDArray exp5('c', {2}, {3.5f,0.833333f}, nd4j::DataType::FLOAT32); + + x.reduceAlongDimension(nd4j::reduce::Mean, z1, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + x.reduceAlongDimension(nd4j::reduce::Mean, z2, {1}); + ASSERT_TRUE(z2.equalsTo(&exp2)); + + x.reduceAlongDimension(nd4j::reduce::Mean, z3, {0,2}); + ASSERT_TRUE(z3.equalsTo(&exp3)); + + x.permutei({1,0,2}); // 3x2x2 + + x.reduceAlongDimension(nd4j::reduce::Mean, z1, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + x.reduceAlongDimension(nd4j::reduce::Mean, z4, {1}); + ASSERT_TRUE(z4.equalsTo(&exp4)); + + x.reduceAlongDimension(nd4j::reduce::Mean, z5, {0,2}); + ASSERT_TRUE(z5.equalsTo(&exp5)); +} + +//////////////////////////////////////////////////////////////////////////////// 
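The mean reductions above collapse the listed dimensions while keeping the others. As a sanity check, a small host-side C++ sketch, independent of the NDArray API and with sizes hard-coded to match reduceAlongDimension_float_test1, reproduces exp2 for the {1} reduction of the 2x3x2 input; all names here are illustrative.

#include <cstdio>

// Mean over axis 1 of a C-ordered [2][3][2] tensor; prints {3, 4, 1, 0.666667}.
int main() {
    const int D0 = 2, D1 = 3, D2 = 2;
    float x[D0 * D1 * D2] = {1, 2, 3, 4, 5, 6, 7, 8, -1, -2, -3, -4};
    float out[D0 * D2] = {};
    for (int i = 0; i < D0; i++)
        for (int k = 0; k < D2; k++) {
            float sum = 0.f;
            for (int j = 0; j < D1; j++)
                sum += x[(i * D1 + j) * D2 + k];   // strided walk over the reduced axis
            out[i * D2 + k] = sum / D1;
        }
    for (int i = 0; i < D0 * D2; i++) printf("%f ", out[i]);
    printf("\n");
    return 0;
}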
+TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_float_test2) { + + NDArray x('c', {2,3,2}, {1,2,3,4,5,6,7,8,-1,-2,-3,-4,}, nd4j::DataType::DOUBLE); + + NDArray exp1('c', {}, std::vector{2.166667}, nd4j::DataType::DOUBLE); + NDArray exp2('c', {2,2}, {3,4,1,0.666667}, nd4j::DataType::DOUBLE); + NDArray exp3('c', {3}, {4.5,1,1}, nd4j::DataType::DOUBLE); + NDArray exp4('c', {3,2}, {4,5,1,1,1,1}, nd4j::DataType::DOUBLE); + NDArray exp5('c', {2}, {3.5,0.833333}, nd4j::DataType::DOUBLE); + + NDArray z1 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + NDArray z2 = x.reduceAlongDimension(nd4j::reduce::Mean, {1}); + ASSERT_TRUE(z2.equalsTo(&exp2)); + + NDArray z3 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,2}); + ASSERT_TRUE(z3.equalsTo(&exp3)); + + x.permutei({1,0,2}); // 3x2x2 + + NDArray z4 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,1,2}); + ASSERT_TRUE(z4.equalsTo(&exp1)); + + NDArray z5 = x.reduceAlongDimension(nd4j::reduce::Mean, {1}); + ASSERT_TRUE(z5.equalsTo(&exp4)); + + NDArray z6 = x.reduceAlongDimension(nd4j::reduce::Mean, {0,2}); + ASSERT_TRUE(z6.equalsTo(&exp5)); +} + +////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, EqualityTest1) { + auto arrayA = NDArrayFactory::create_('f', {3, 5}); + auto arrayB = NDArrayFactory::create_('f', {3, 5}); + auto arrayC = NDArrayFactory::create_('f', {3, 5}); + + auto arrayD = NDArrayFactory::create_('f', {2, 4}); + auto arrayE = NDArrayFactory::create_('f', {1, 15}); + + for (int i = 0; i < arrayA->rows(); i++) { + for (int k = 0; k < arrayA->columns(); k++) { + arrayA->p(i, k, (float) i); + } + } + + for (int i = 0; i < arrayB->rows(); i++) { + for (int k = 0; k < arrayB->columns(); k++) { + arrayB->p(i, k, (float) i); + } + } + + for (int i = 0; i < arrayC->rows(); i++) { + for (int k = 0; k < arrayC->columns(); k++) { + arrayC->p(i, k, (float) i+1); + } + } + + ASSERT_TRUE(arrayA->equalsTo(arrayB, 1e-5)); + + ASSERT_FALSE(arrayC->equalsTo(arrayB, 1e-5)); + + ASSERT_FALSE(arrayD->equalsTo(arrayB, 1e-5)); + + ASSERT_FALSE(arrayE->equalsTo(arrayB, 1e-5)); + + delete arrayA; + delete arrayB; + delete arrayC; + delete arrayD; + delete arrayE; +} + +//////////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test1) { + + NDArray x('c', {2,3,2}, {1.5f,2.f,3.f,4.f,5.f,6.f,7.5f,8.f,-1.f,-2.f,-3.5f,-4.f}, nd4j::DataType::FLOAT32); + + NDArray z1('c', {}, std::vector{100}, nd4j::DataType::FLOAT32); + NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::FLOAT32); + NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::FLOAT32); + NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::FLOAT32); + NDArray z5('c', {2}, {100,100}, nd4j::DataType::FLOAT32); + + NDArray exp1('c', {}, std::vector{26.5f}, nd4j::DataType::FLOAT32); + NDArray exp2('c', {2,2}, {9.5f,12.f,3.f,2.f}, nd4j::DataType::FLOAT32); + NDArray exp3('c', {3}, {19.f,4.f,3.5f}, nd4j::DataType::FLOAT32); + NDArray exp4('c', {3,2}, {9.f,10.f,2.f,2.f,1.5f,2.f}, nd4j::DataType::FLOAT32); + NDArray exp5('c', {2}, {21.5f,5.f}, nd4j::DataType::FLOAT32); + + x.reduceAlongDimension(nd4j::reduce::Sum, z1, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + x.reduceAlongDimension(nd4j::reduce::Sum, z2, {1}); + ASSERT_TRUE(z2.equalsTo(&exp2)); + + x.reduceAlongDimension(nd4j::reduce::Sum, z3, {0,2}); + ASSERT_TRUE(z3.equalsTo(&exp3)); + + x.permutei({1,0,2}); // 3x2x2 + + x.reduceAlongDimension(nd4j::reduce::Sum, 
z1, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + x.reduceAlongDimension(nd4j::reduce::Sum, z4, {1}); + ASSERT_TRUE(z4.equalsTo(&exp4)); + + x.reduceAlongDimension(nd4j::reduce::Sum, z5, {0,2}); + ASSERT_TRUE(z5.equalsTo(&exp5)); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_same_test2) { + + NDArray x('c', {2,3,2}, {1.5,2,3,4,5,6,7.5,8,-1,-2,-3.5,-4,}, nd4j::DataType::INT64); + + NDArray exp1('c', {}, std::vector{26}, nd4j::DataType::INT64); + NDArray exp2('c', {2,2}, {9,12,3,2}, nd4j::DataType::INT64); + NDArray exp3('c', {3}, {18,4,4}, nd4j::DataType::INT64); + NDArray exp4('c', {3,2}, {8,10,2,2,2,2}, nd4j::DataType::INT64); + NDArray exp5('c', {2}, {21,5}, nd4j::DataType::INT64); + + NDArray z1 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + NDArray z2 = x.reduceAlongDimension(nd4j::reduce::Sum, {1}); + ASSERT_TRUE(z2.equalsTo(&exp2)); + + NDArray z3 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,2}); + ASSERT_TRUE(z3.equalsTo(&exp3)); + + x.permutei({1,0,2}); // 3x2x2 + + NDArray z4 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,1,2}); + ASSERT_TRUE(z4.equalsTo(&exp1)); + + NDArray z5 = x.reduceAlongDimension(nd4j::reduce::Sum, {1}); + ASSERT_TRUE(z5.equalsTo(&exp4)); + + NDArray z6 = x.reduceAlongDimension(nd4j::reduce::Sum, {0,2}); + ASSERT_TRUE(z6.equalsTo(&exp5)); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test1) { + + NDArray x('c', {2,3,2}, {0.5,2,3,-4,5,6,-7.5,8,-1,-0.5,-3.5,4}, nd4j::DataType::DOUBLE); + + NDArray z1('c', {}, std::vector{true}, nd4j::DataType::BOOL); + NDArray z2('c', {2,2}, {true,true,true,true}, nd4j::DataType::BOOL); + NDArray z3('c', {3}, {true,true,true}, nd4j::DataType::BOOL); + NDArray z4('c', {3,2}, {true,true,true,true,true,true}, nd4j::DataType::BOOL); + NDArray z5('c', {2}, {true,true}, nd4j::DataType::BOOL); + + NDArray exp1('c', {}, std::vector{true}, nd4j::DataType::BOOL); + NDArray exp2('c', {2,2}, {true,true,false,true}, nd4j::DataType::BOOL); + NDArray exp3('c', {3}, {true,true,true}, nd4j::DataType::BOOL); + NDArray exp4('c', {3,2}, {true,true,true,false,true,true}, nd4j::DataType::BOOL); + NDArray exp5('c', {2}, {true,true}, nd4j::DataType::BOOL); + + x.reduceAlongDimension(nd4j::reduce::IsPositive, z1, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + x.reduceAlongDimension(nd4j::reduce::IsPositive, z2, {1}); + ASSERT_TRUE(z2.equalsTo(&exp2)); + + x.reduceAlongDimension(nd4j::reduce::IsPositive, z3, {0,2}); + ASSERT_TRUE(z3.equalsTo(&exp3)); + + x.permutei({1,0,2}); // 3x2x2 + + x.reduceAlongDimension(nd4j::reduce::IsPositive, z1, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + x.reduceAlongDimension(nd4j::reduce::IsPositive, z4, {1}); + ASSERT_TRUE(z4.equalsTo(&exp4)); + + x.reduceAlongDimension(nd4j::reduce::IsPositive, z5, {0,2}); + ASSERT_TRUE(z5.equalsTo(&exp5)); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_bool_test2) { + + NDArray x('c', {2,3,2}, {0.5,2,3,-4,5,6,-7.5,8,-1,-0.5,-3.5,4}, nd4j::DataType::INT32); + + NDArray exp1('c', {}, std::vector{1}, nd4j::DataType::BOOL); + NDArray exp2('c', {2,2}, {1,1,0,1}, nd4j::DataType::BOOL); + NDArray exp3('c', {3}, {1,1,1}, nd4j::DataType::BOOL); + NDArray exp4('c', {3,2}, {0,1,1,0,1,1}, nd4j::DataType::BOOL); + NDArray exp5('c', {2}, {1,1}, 
nd4j::DataType::BOOL); + + NDArray z1 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + NDArray z2 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {1}); + ASSERT_TRUE(z2.equalsTo(&exp2)); + + NDArray z3 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,2}); + ASSERT_TRUE(z3.equalsTo(&exp3)); + + x.permutei({1,0,2}); // 3x2x2 + + NDArray z4 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,1,2}); + ASSERT_TRUE(z4.equalsTo(&exp1)); + + NDArray z5 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {1}); + ASSERT_TRUE(z5.equalsTo(&exp4)); + + NDArray z6 = x.reduceAlongDimension(nd4j::reduce::IsPositive, {0,2}); + ASSERT_TRUE(z6.equalsTo(&exp5)); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test1) { + + NDArray x('c', {2,3,2}, {0.5f,2.f,3.f,-0.f,5.f,6.f,-7.5f,0.f,-1.f,-0.5f,-3.5f,4.f}, nd4j::DataType::FLOAT32); + + NDArray z1('c', {}, std::vector{100}, nd4j::DataType::INT64); + NDArray z2('c', {2,2}, {100,100,100,100}, nd4j::DataType::INT64); + NDArray z3('c', {3}, {100,100,100}, nd4j::DataType::INT64); + NDArray z4('c', {3,2}, {100,100,100,100,100,100}, nd4j::DataType::INT64); + NDArray z5('c', {2}, {100,100}, nd4j::DataType::INT64); + + NDArray exp1('c', {}, std::vector{2}, nd4j::DataType::INT64); + NDArray exp2('c', {2,2}, {0,1,0,1}, nd4j::DataType::INT64); + NDArray exp3('c', {3}, {1,1,0}, nd4j::DataType::INT64); + NDArray exp4('c', {3,2}, {0,1,0,1,0,0}, nd4j::DataType::INT64); + NDArray exp5('c', {2}, {1,1}, nd4j::DataType::INT64); + + x.reduceAlongDimension(nd4j::reduce::CountZero, z1, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + x.reduceAlongDimension(nd4j::reduce::CountZero, z2, {1}); + ASSERT_TRUE(z2.equalsTo(&exp2)); + + x.reduceAlongDimension(nd4j::reduce::CountZero, z3, {0,2}); + ASSERT_TRUE(z3.equalsTo(&exp3)); + + x.permutei({1,0,2}); // 3x2x2 + + x.reduceAlongDimension(nd4j::reduce::CountZero, z1, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + x.reduceAlongDimension(nd4j::reduce::CountZero, z4, {1}); + ASSERT_TRUE(z4.equalsTo(&exp4)); + + x.reduceAlongDimension(nd4j::reduce::CountZero, z5, {0,2}); + ASSERT_TRUE(z5.equalsTo(&exp5)); +} + +//////////////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, reduceAlongDimension_long_test2) { + + NDArray x('c', {2,3,2}, {0.5,2,3,-0,5,6,-7.5,0,-1,-0.5,-3.5,4}, nd4j::DataType::INT32); + + NDArray exp1('c', {}, std::vector{4}, nd4j::DataType::INT64); + NDArray exp2('c', {2,2}, {1,1,0,2}, nd4j::DataType::INT64); + NDArray exp3('c', {3}, {2,2,0}, nd4j::DataType::INT64); + NDArray exp4('c', {3,2}, {1,1,0,2,0,0}, nd4j::DataType::INT64); + NDArray exp5('c', {2}, {2,2}, nd4j::DataType::INT64); + + NDArray z1 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,1,2}); + ASSERT_TRUE(z1.equalsTo(&exp1)); + + NDArray z2 = x.reduceAlongDimension(nd4j::reduce::CountZero, {1}); + ASSERT_TRUE(z2.equalsTo(&exp2)); + + NDArray z3 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,2}); + ASSERT_TRUE(z3.equalsTo(&exp3)); + + x.permutei({1,0,2}); // 3x2x2 + + NDArray z4 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,1,2}); + ASSERT_TRUE(z4.equalsTo(&exp1)); + + NDArray z5 = x.reduceAlongDimension(nd4j::reduce::CountZero, {1}); + ASSERT_TRUE(z5.equalsTo(&exp4)); + + NDArray z6 = x.reduceAlongDimension(nd4j::reduce::CountZero, {0,2}); + ASSERT_TRUE(z6.equalsTo(&exp5)); +} + +TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest1) { + + auto x = 
NDArrayFactory::create('c', {5, 5}); + auto z = NDArrayFactory::create('c', {5, 5}); + auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); + NDArray expRow('c', {1, 5,}, {1,2,3,4,5}, nd4j::DataType::FLOAT32); + NDArray exp('c', {5,5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, nd4j::DataType::FLOAT32); + + ASSERT_TRUE(row->equalsTo(&expRow)); + + x.applyBroadcast(broadcast::Add, {1}, *row, z); + x += *row; + + ASSERT_TRUE(x.equalsTo(z)); + //ASSERT_TRUE(z.equalsTo(&exp)); + + delete row; +} + +TEST_F(NDArrayCudaBasicsTests, BroadcastOpsTest2) { + + auto x = NDArrayFactory::create('c', {5, 5}); + //auto z = NDArrayFactory::create('c', {5, 5}); + auto row = NDArrayFactory::linspace(1.0f, 5.0f, 5); + NDArray expRow('c', {1, 5,}, {1,2,3,4,5}, nd4j::DataType::FLOAT32); + NDArray exp('c', {5,5}, {1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5}, nd4j::DataType::FLOAT32); + + ASSERT_TRUE(row->equalsTo(&expRow)); + x.applyBroadcast(broadcast::Add, {1}, *row, x); + ASSERT_TRUE(x.equalsTo(&exp)); +} + +////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, TestBroadcast_1) { + + NDArray exp('c', {2, 3, 2, 2}, {1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3., 1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3.}, nd4j::DataType::DOUBLE); + + auto input = NDArrayFactory::create('c',{ 2, 3, 2, 2}); + auto bias = NDArrayFactory::create('c', {1, 3}); + + bias.linspace(1); + input.applyBroadcast(broadcast::Add, {1}, bias, input); + ASSERT_TRUE(exp.equalsTo(&input)); +} + +TEST_F(NDArrayCudaBasicsTests, TestFloat16_1) { + auto x = NDArrayFactory::create({1,2,3,4,5,7,8,9}); + auto y = NDArrayFactory::create({1,2,3,4,5,7,8,9}); + ASSERT_TRUE(x.equalsTo(&y)); +} + +TEST_F(NDArrayCudaBasicsTests, TestFloat16_2) { + auto x = NDArrayFactory::create('c', {9}, {1,2,3,4,5,6,7,8,9}); + auto y = NDArrayFactory::create('c', {9}, {1,2,3,4,5,6,7,8,9}); + ASSERT_TRUE(x.equalsTo(y)); + //for (int e = 0; e < x.lengthOf(); e++) + // ASSERT_NEAR(x.e(e), y.e(e), 1.e-5f); +} + +TEST_F(NDArrayCudaBasicsTests, TestFloat16_3) { + auto x = NDArrayFactory::create({1,2,3,4,5,7,8,9}); + auto y = NDArrayFactory::create({1,2,3,4,5,7,8,9}); + ASSERT_TRUE(x.equalsTo(&y)); +} + +TEST_F(NDArrayCudaBasicsTests, TestFloat_4) { + auto x = NDArrayFactory::create({1,2,3,4,5,7,8,9}); + auto y = NDArrayFactory::create({2,4,5,5,6,7,8,9}); + ASSERT_FALSE(x.equalsTo(&y)); +} + +TEST_F(NDArrayCudaBasicsTests, TestFloat_5) { + auto x = NDArrayFactory::create('c', {3,3}, {1,2,3,4,5,6,7,8,9}); + auto y = NDArrayFactory::create('c', {3,3}, {2,4,5,5,6,7,8,9, 10}); + ASSERT_FALSE(x.equalsTo(&y)); +} + +TEST_F(NDArrayCudaBasicsTests, TestFloat_6) { + auto x = NDArrayFactory::create('f', {3,3}, {1,2,3,4,5,6,7,8,9}); + auto y = NDArrayFactory::create('f', {3,3}, {2,4,5,5,6,7,8,9,10}); + ASSERT_FALSE(x.equalsTo(&y)); +} + +////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_05) +{ + auto x = NDArrayFactory::create('c', {8, 8, 8}); + auto y = NDArrayFactory::create('c', {1, 8, 8}); + auto expected = NDArrayFactory::create('c', {8, 8, 8}); + NDArray res2 = NDArrayFactory::create(expected.ordering(), expected.getShapeAsVector()); + x = 1.; + y = 2.; + expected = 3.; + res2 = 0.f; + + x.applyTrueBroadcast(BroadcastOpsTuple::Add(), y, res2);// *= y; + + ASSERT_TRUE(expected.isSameShape(&res2)); + ASSERT_TRUE(expected.equalsTo(&res2)); +} + 
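Operator_Plus_Test_05 relies on true broadcasting: the size-1 leading axis of y is reused across every slice of x along that dimension. A minimal host-side sketch of that indexing rule, assuming plain arrays rather than applyTrueBroadcast, looks like this; the array names and the probed index are illustrative only.

#include <cstdio>

// Broadcast add of a [1][8][8] tensor onto an [8][8][8] tensor: index 0 is used
// on the size-1 axis of y for every i, matching x = 1, y = 2, expected = 3.
int main() {
    const int N = 8;
    static float x[N][N][N], y[1][N][N], z[N][N][N];
    for (int j = 0; j < N; j++)
        for (int k = 0; k < N; k++) y[0][j][k] = 2.f;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            for (int k = 0; k < N; k++) {
                x[i][j][k] = 1.f;
                z[i][j][k] = x[i][j][k] + y[0][j][k];   // 0 on the broadcast axis
            }
    printf("z[7][3][5] = %g\n", z[7][3][5]);   // prints 3
    return 0;
}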
+////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_5) +{ + auto x = NDArrayFactory::create('c', {8, 8, 8}); + auto y = NDArrayFactory::create('c', {8, 1, 8}); + auto expected = NDArrayFactory::create('c', {8, 8, 8}); + NDArray res2(expected); + x = 1.; + y = 2.; + expected = 3.; + //x.printBuffer("X="); + //y.printBuffer("Y="); + //expected.printBuffer("EXPECTED"); + auto result = x + y; + //result.printBuffer("1 + 2 ="); + //res2.assign(x + y); + + //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); + //res2.printBuffer("Z="); + //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; +// x += y; + //x.printBuffer("OutputX"); + //res2.syncToHost(); + //res2.printBuffer("OUputZ"); + //x.printIndexedBuffer("OUtputX"); + ASSERT_TRUE(expected.isSameShape(&result)); + ASSERT_TRUE(expected.equalsTo(&result)); +} + +////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_51) +{ + auto x = NDArrayFactory::create('c', {8, 8, 8}); + auto y = NDArrayFactory::create('c', {8, 8}); + auto expected = NDArrayFactory::create('c', {8, 8, 8}); + NDArray res2(expected); + x = 1.; + y = 2.; + expected = 3.; + //x.printBuffer("X="); + //y.printBuffer("Y="); + //expected.printBuffer("EXPECTED"); + auto result = x + y; + //result.printBuffer("1 + 2 ="); + //res2.assign(x + y); + + //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2); + //res2.printBuffer("Z="); + //x.applyTrueBroadcast(BroadcastOpsTuple::Add(), &y, &res2);// *= y; +// x += y; + //x.printBuffer("OutputX"); + //res2.syncToHost(); + //res2.printBuffer("OUputZ"); + //x.printIndexedBuffer("OUtputX"); + ASSERT_TRUE(expected.isSameShape(&result)); + ASSERT_TRUE(expected.equalsTo(&result)); +} + +TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_1) +{ + auto x = NDArrayFactory::create('c', {2, 1, 2}); + x = 10.; + auto y = x.tile({1,2,1}); + auto exp = NDArrayFactory::create('c', {2, 2, 2}); + exp = 10.; + + // y.printShapeInfo("Output SHAPE"); + // y.printBuffer("Output TILE"); + // exp.printBuffer("Expect TILE"); + ASSERT_TRUE(exp.equalsTo(y)); +} + +TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_2) +{ + auto x = NDArrayFactory::create('f', {2, 1, 2}); + x = 10.; + auto y = x.tile({1,2,1}); + auto exp = NDArrayFactory::create('f', {2, 2, 2}); + exp = 10.; + ASSERT_TRUE(exp.equalsTo(y)); +} + +TEST_F(NDArrayCudaBasicsTests, Tile_Test_2_3) +{ + auto x = NDArrayFactory::create('f', {2, 1, 2}); + x = 10.; + x.p(1,0,1, 20); + x.syncToDevice(); + auto y = x.tile({1,2,1}); + auto exp = NDArrayFactory::create('f', {2, 2, 2}); + exp = 10.; + exp.p(1,0,1, 20.); + exp.p(1, 1, 1, 20.); + exp.syncToDevice(); + ASSERT_TRUE(exp.equalsTo(y)); +} + +////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, Operator_Plus_Test_2) +{ + double expBuff[] = {2., 3, 3., 4., 4., 5, 5., 6., 6., 7, 7., 8.}; + NDArray a('c', {4,4}, {1,2,3,4,5,6,7,8,9,2,3,2,1,0,4,7}, nd4j::DataType::FLOAT32); + auto x = NDArrayFactory::create('c', {3, 2, 1}); + auto y = NDArrayFactory::create('c', {1, 2}); + auto expected = NDArrayFactory::create(expBuff, 'c', {3, 2, 2}); + + x.linspace(1); + y.linspace(1); + auto result = x + y; + + ASSERT_TRUE(expected.isSameShape(&result)); + ASSERT_TRUE(expected.equalsTo(&result)); +} + +////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, assign_2) +{ + NDArray x('c', {4}, {1.5f,2.5f,3.5f,4.5f}, 
nd4j::DataType::FLOAT32); + NDArray y('c', {4}, nd4j::DataType::INT32); + NDArray expected('c', {4}, {1,2,3,4}, nd4j::DataType::INT32); + + y.assign(x); + // y.printBuffer("ASSIGN VECTOR"); + + ASSERT_TRUE(expected.equalsTo(&y)); +} + +////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, subarray_1) +{ + NDArray x('c', {2,3,4}, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, nd4j::DataType::FLOAT32); + NDArray y('f', {2,3,4}, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, nd4j::DataType::FLOAT32); + + Nd4jLong shapeExpX0[] = {1, 2, 12, 8192, 1, 99}; + float buffExpX0[] = {1.f, 13.f}; + Nd4jLong shapeExpX1[] = {1, 2, 12, 8192, 1, 99}; + float buffExpX1[] = {2.f, 14.f}; + Nd4jLong shapeExpX2[] = {3, 2, 1, 1, 12, 4, 1, 8192, 1, 99}; + float buffExpX2[] = {1.f, 13.f}; + Nd4jLong shapeExpX3[] = {2, 2, 4, 12, 1, 8192, 1, 99}; + float buffExpX3[] = {9.f, 10.f, 11.f, 12.f, 21.f, 22.f, 23.f, 24.f}; + Nd4jLong shapeExpX4[] = {3, 2, 1, 4, 12, 4, 1, 8192, 1, 99}; + float buffExpX4[] = {9.f, 10.f, 11.f, 12.f, 21.f, 22.f, 23.f, 24.f}; + Nd4jLong shapeExpX5[] = {2, 2, 3, 12, 4, 8192, 1, 99}; + float buffExpX5[] = {4.f, 8.f, 12.f, 16.f, 20.f, 24.f}; + + Nd4jLong shapeExpY0[] = {1, 2, 1, 8192, 1, 99}; + float buffExpY0[] = {1.f, 2.f}; + Nd4jLong shapeExpY1[] = {1, 2, 1, 8192, 1, 99}; + float buffExpY1[] = {7.f, 8.f}; + Nd4jLong shapeExpY2[] = {3, 2, 1, 1, 1, 2, 6, 8192, 1, 102}; + float buffExpY2[] = {1.f, 2.f}; + Nd4jLong shapeExpY3[] = {2, 2, 4, 1, 6, 8192, 1, 99}; + float buffExpY3[] = {5.f, 11.f, 17.f, 23.f, 6.f, 12.f, 18.f, 24.f}; + Nd4jLong shapeExpY4[] = {3, 2, 1, 4, 1, 2, 6, 8192, 1, 102}; + float buffExpY4[] = {5.f, 11.f, 17.f, 23.f, 6.f, 12.f, 18.f, 24.f}; + Nd4jLong shapeExpY5[] = {2, 2, 3, 1, 2, 8192, 1, 99}; + float buffExpY5[] = {19.f, 21.f, 23.f, 20.f, 22.f, 24.f}; + + + NDArray x0 = x(0, {1,2}); + NDArray xExp(buffExpX0, shapeExpX0); + + ASSERT_TRUE(xExp.isSameShape(x0)); + ASSERT_TRUE(xExp.equalsTo(x0)); +// for(int i = 0; i < shape::shapeInfoLength(x0.rankOf()); ++i) +// ASSERT_TRUE(x0.getShapeInfo()[i] == shapeExpX0[i]); +// for(int i = 0; i < x0.lengthOf(); ++i) +// ASSERT_TRUE(x0.e(i) == buffExpX0[i]); + + NDArray x1 = x(1, {1,2}); + NDArray x1Exp(buffExpX1, shapeExpX1); + ASSERT_TRUE(x1Exp.isSameShape(x1)); + ASSERT_TRUE(x1Exp.equalsTo(x1)); + +// for(int i = 0; i < shape::shapeInfoLength(x1.rankOf()); ++i) +// ASSERT_TRUE(x1.getShapeInfo()[i] == shapeExpX1[i]); +// for(int i = 0; i < x1.lengthOf(); ++i) +// ASSERT_TRUE(x1.e(i) == buffExpX1[i]); + + NDArray x2 = x(0, {1,2}, true); + NDArray x2Exp(buffExpX2, shapeExpX2); + ASSERT_TRUE(x2Exp.isSameShape(x2)); +// x2.printBuffer("X2"); +// x2Exp.printBuffer("X2 EXPECT"); + ASSERT_TRUE(x2Exp.equalsTo(x2)); +// for(int i = 0; i < shape::shapeInfoLength(x2.rankOf()); ++i) +// ASSERT_TRUE(x2.getShapeInfo()[i] == shapeExpX2[i]); +// for(int i = 0; i < x2.lengthOf(); ++i) +// ASSERT_TRUE(x2.e(i) == buffExpX2[i]); + + NDArray x3 = x(2, {1}); + NDArray x3Exp(buffExpX3, shapeExpX3); + ASSERT_TRUE(x3Exp.isSameShape(x3)); + ASSERT_TRUE(x3Exp.equalsTo(x3)); +// for(int i = 0; i < shape::shapeInfoLength(x3.rankOf()); ++i) +// ASSERT_TRUE(x3.getShapeInfo()[i] == shapeExpX3[i]); +// for(int i = 0; i < x3.lengthOf(); ++i) +// ASSERT_TRUE(x3.e(i) == buffExpX3[i]); + + NDArray x4 = x(2, {1}, true); + NDArray x4Exp(buffExpX4, shapeExpX4); + ASSERT_TRUE(x4Exp.isSameShape(x4)); + ASSERT_TRUE(x4Exp.equalsTo(x4)); +// for(int i = 0; i < shape::shapeInfoLength(x4.rankOf()); 
++i) +// ASSERT_TRUE(x4.getShapeInfo()[i] == shapeExpX4[i]); +// for(int i = 0; i < x4.lengthOf(); ++i) +// ASSERT_TRUE(x4.e(i) == buffExpX4[i]); + + NDArray x5 = x(3, {2}); + NDArray x5Exp(buffExpX5, shapeExpX5); + ASSERT_TRUE(x5Exp.isSameShape(x5)); + ASSERT_TRUE(x5Exp.equalsTo(x5)); + +// for(int i = 0; i < shape::shapeInfoLength(x5.rankOf()); ++i) +// ASSERT_TRUE(x5.getShapeInfo()[i] == shapeExpX5[i]); +// for(int i = 0; i < x5.lengthOf(); ++i) +// ASSERT_TRUE(x5.e(i) == buffExpX5[i]); + + // ******************* // + NDArray y0 = y(0, {1,2}); + NDArray y0Exp(buffExpY0, shapeExpY0); + ASSERT_TRUE(y0Exp.isSameShape(y0)); + ASSERT_TRUE(y0Exp.equalsTo(y0)); +// for(int i = 0; i < shape::shapeInfoLength(y0.rankOf()); ++i) +// ASSERT_TRUE(y0.getShapeInfo()[i] == shapeExpY0[i]); +// for(int i = 0; i < y0.lengthOf(); ++i) +// ASSERT_TRUE(y0.e(i) == buffExpY0[i]); + + NDArray y1 = y(1, {1,2}); + NDArray y1Exp(buffExpY1, shapeExpY1); + ASSERT_TRUE(y1Exp.isSameShape(y1)); + ASSERT_TRUE(y1Exp.equalsTo(y1)); +// for(int i = 0; i < shape::shapeInfoLength(y1.rankOf()); ++i) +// ASSERT_TRUE(y1.getShapeInfo()[i] == shapeExpY1[i]); +// for(int i = 0; i < y1.lengthOf(); ++i) +// ASSERT_TRUE(y1.e(i) == buffExpY1[i]); + + NDArray y2 = y(0, {1,2}, true); + NDArray y2Exp(buffExpY2, shapeExpY2); + ASSERT_TRUE(y2Exp.isSameShape(y2)); + ASSERT_TRUE(y2Exp.equalsTo(y2)); +// for(int i = 0; i < shape::shapeInfoLength(y2.rankOf()); ++i) +// ASSERT_TRUE(y2.getShapeInfo()[i] == shapeExpY2[i]); +// for(int i = 0; i < y2.lengthOf(); ++i) +// ASSERT_TRUE(y2.e(i) == buffExpY2[i]); + + NDArray y3 = y(2, {1}); + NDArray y3Exp(buffExpY3, shapeExpY3); + ASSERT_TRUE(y3Exp.isSameShape(y3)); + ASSERT_TRUE(y3Exp.equalsTo(y3)); +// for(int i = 0; i < shape::shapeInfoLength(y3.rankOf()); ++i) +// ASSERT_TRUE(y3.getShapeInfo()[i] == shapeExpY3[i]); +// for(int i = 0; i < y3.lengthOf(); ++i) +// ASSERT_TRUE(y3.e(i) == buffExpY3[i]); + + NDArray y4 = y(2, {1}, true); + NDArray y4Exp = NDArrayFactory::create('f', {2,1,4}, {5, 6, 11, 12, 17, 18, 23, 24}); + ASSERT_TRUE(y4Exp.isSameShape(y4)); + ASSERT_TRUE(y4Exp.equalsTo(y4)); +// for(int i = 0; i < shape::shapeInfoLength(y4.rankOf()); ++i) +// ASSERT_TRUE(y4.getShapeInfo()[i] == shapeExpY4[i]); +// for(int i = 0; i < y4.lengthOf(); ++i) +// ASSERT_TRUE(y4.e(i) == buffExpY4[i]); + + NDArray y5 = y(3, {2}); + NDArray y5Exp(buffExpY5, shapeExpY5); + ASSERT_TRUE(y5Exp.isSameShape(y5)); + ASSERT_TRUE(y5Exp.equalsTo(y5)); +// for(int i = 0; i < shape::shapeInfoLength(y5.rankOf()); ++i) +// ASSERT_TRUE(y5.getShapeInfo()[i] == shapeExpY5[i]); +// for(int i = 0; i < y5.lengthOf(); ++i) +// ASSERT_TRUE(y5.e(i) == buffExpY5[i]); + +} +////////////////////////////////////////////////////////////////////// +TEST_F(NDArrayCudaBasicsTests, Test_diagonal_1) { + + auto x = NDArrayFactory::create('c', {2, 3}, {1, 2, 3, 4, 5, 6}); + auto exp = NDArrayFactory::create('c', {2, 1}, {1, 5}); + + auto diag = x.diagonal('c'); + //diag.syncToDevice(); + for (Nd4jLong e = 0; e < exp.lengthOf(); ++e) { + printf("VAL[%ld] = %f\n", e, diag.e(e)); //, exp.e(e), 1.e-5); + } + + for (Nd4jLong e = 0; e < exp.lengthOf(); ++e) { + ASSERT_NEAR(diag.e(e), exp.e(e), 1.e-5); + } + double eps(1.e-5); + NDArray tmp(nd4j::DataType::FLOAT32, x.getContext()); // scalar = 0 + + ExtraArguments extras({eps}); + NativeOpExecutioner::execReduce3Scalar(diag.getContext(), reduce3::EqualsWithEps, diag.getBuffer(), + diag.getShapeInfo(), diag.getSpecialBuffer(), diag.getSpecialShapeInfo(), extras.argumentsAsT(nd4j::DataType::FLOAT32), + 
exp.getBuffer(), exp.getShapeInfo(), exp.getSpecialBuffer(), exp.getSpecialShapeInfo(), + tmp.buffer(), tmp.shapeInfo(), tmp.specialBuffer(), tmp.specialShapeInfo()); + cudaStream_t* stream = x.getContext()->getCudaStream(); + auto res = cudaStreamSynchronize(*stream); + // tmp.printBuffer("Compare result is (expected 0)"); + ASSERT_TRUE(exp.isSameShape(diag)); + ASSERT_TRUE(exp.equalsTo(diag)); +} + +TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_02) { + auto x = NDArrayFactory::linspace(1.f, 60.f, 60); //('c', {1, 60}); + //x.linspace(1); + auto exp = NDArrayFactory::create('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); + x->reshapei('c', {3, 4, 5}); + + x->permutei({0, 1, 2}); + x->streamline(); + +// x.printShapeInfo("{0, 1, 2} shape"); +// x.printBuffer("{0, 1, 2} data"); + + ASSERT_TRUE(exp.isSameShape(x)); + ASSERT_TRUE(exp.equalsTo(x)); + delete x; +} + +TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_0) { + auto x = NDArrayFactory::create('c', {1, 60}); + x.linspace(1); + auto exp = NDArrayFactory::create('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); + x.reshapei('c', {3, 4, 5}); + + x.permutei({0, 1, 2}); + x.streamline(); + +// x.printShapeInfo("{0, 1, 2} shape"); +// x.printBuffer("{0, 1, 2} data"); + + ASSERT_TRUE(exp.isSameShape(&x)); + ASSERT_TRUE(exp.equalsTo(&x)); +} +TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_1) { + auto x = NDArrayFactory::create('c', {1, 60}); + x.linspace(1); + auto exp = NDArrayFactory::create('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); + x.reshapei('c', {3, 4, 5}); + + x.permutei({0, 1, 2}); + x.streamline(); + +// x.printShapeInfo("{0, 1, 2} shape"); +// x.printBuffer("{0, 1, 2} data"); + + ASSERT_TRUE(exp.isSameShape(&x)); + ASSERT_TRUE(exp.equalsTo(&x)); +} +TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_2) { + //auto x = NDArrayFactory::create('c', {1, 60}); + auto xx = NDArrayFactory::linspace(1.f, 60.f, 60); //('c', {1, 60}); +// auto x = *xx; + //x.linspace(1); +// auto exp = NDArrayFactory::create('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 
57.0f, 58.0f, 59.0f, 60.0}); +// x.reshapei('c', {3, 4, 5}); + +// x.permutei({0, 1, 2}); +// x.streamline(); + +// x.printShapeInfo("{0, 1, 2} shape"); +// x.printBuffer("{0, 1, 2} data"); + +// ASSERT_TRUE(exp.isSameShape(&x)); +// ASSERT_TRUE(exp.equalsTo(&x)); + delete xx; +} +TEST_F(NDArrayCudaBasicsTests, Test_PermuteEquality_3) { + auto x = NDArrayFactory::create('c', {1, 60}); + //x.linspace(1); + for (int l = 0; l < x.lengthOf(); l++) + x.p(l, float(l + 1.f)); + auto exp = NDArrayFactory::create('c', {3, 4, 5}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f, 42.0f, 43.0f, 44.0f, 45.0f, 46.0f, 47.0f, 48.0f, 49.0f, 50.0f, 51.0f, 52.0f, 53.0f, 54.0f, 55.0f, 56.0f, 57.0f, 58.0f, 59.0f, 60.0}); + x.reshapei('c', {3, 4, 5}); + + x.permutei({0, 1, 2}); + x.streamline(); + +// x.printShapeInfo("{0, 1, 2} shape"); +// x.printBuffer("{0, 1, 2} data"); + + ASSERT_TRUE(exp.isSameShape(&x)); + ASSERT_TRUE(exp.equalsTo(&x)); +} + +TEST_F(NDArrayCudaBasicsTests, Test_Empty_1) { + auto x = NDArrayFactory::empty(); + ASSERT_TRUE(x.isActualOnHostSide()); + ASSERT_TRUE(x.isEmpty()); +} + +TEST_F(NDArrayCudaBasicsTests, Test_Empty_2) { + auto x = NDArrayFactory::empty_(); + + ASSERT_TRUE(x->isEmpty()); + delete x; +} + +TEST_F(NDArrayCudaBasicsTests, Test_Empty_3) { + auto x = NDArrayFactory::empty(nd4j::DataType::FLOAT32); + + ASSERT_TRUE(x.isEmpty()); +} + +TEST_F(NDArrayCudaBasicsTests, Test_Empty_4) { + auto x = NDArrayFactory::empty_(nd4j::DataType::FLOAT32); + + ASSERT_TRUE(x->isEmpty()); + delete x; +} \ No newline at end of file diff --git a/cuda_code/NVEncFilterNnedi_4.cu b/cuda_code/NVEncFilterNnedi_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..3becc291eee60f76461b6d57570488f3d90ceaef --- /dev/null +++ b/cuda_code/NVEncFilterNnedi_4.cu @@ -0,0 +1,1881 @@ +// ----------------------------------------------------------------------------------------- +// NVEnc by rigaya +// ----------------------------------------------------------------------------------------- +// +// The MIT License +// +// Copyright (c) 2014-2016 rigaya +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+// +// ------------------------------------------------------------------------------------------ + +#include +#include +#include +#include +#include +#include +#define _USE_MATH_DEFINES +#include +#include "convert_csp.h" +#include "NVEncFilterNnedi.h" +#include "NVEncParam.h" +#pragma warning (push) +#pragma warning (disable: 4819) +#include "cuda_runtime.h" +#if __CUDACC_VER_MAJOR__ >= 10 +#include "cuda_fp16.h" +#include "cuda_fp16.hpp" +#endif +#include "device_launch_parameters.h" +#pragma warning (pop) +#include "rgy_cuda_util_kernel.h" +#include "rgy_filesystem.h" +#include "rgy_resource.h" + +static const int NNEDI_BLOCK_X = 32; +static const int NNEDI_BLOCK_Y = 8; + +static const int weight0size = 49 * 4 + 5 * 4 + 9 * 4; +static const int weight0sizenew = 4 * 65 + 4 * 5; + +__device__ __inline__ +static float exp_(float val) { + return __expf(clamp(val, -80.0f, 80.0f)); +} + +#define ENABLE_CUDA_FP16_DEVICE (__CUDACC_VER_MAJOR__ >= 10 && __CUDA_ARCH__ >= 530) +#define ENABLE_CUDA_FP16_HOST (__CUDACC_VER_MAJOR__ >= 10) + +//dot_product1で重み(nns)方向のループアンロールを行う +//これにより、一度sharedメモリからレジスタにのせたpixel情報を使いまわすことができる +#define ENABLE_DP1_WEIGHT_LOOP_UNROLL 1 + +//ENABLE_DP1_WEIGHT_LOOP_UNROLLに対応して通常の重みの並び [nns*2][nnxy]を変更する +//並びは[nns/WEIGHT_LOOP][nnxy][WEIGHT_LOOP][2] +#define ENABLE_DP1_WEIGHT_ARRAY_OPT (1 && ENABLE_DP1_WEIGHT_LOOP_UNROLL) + +//shuffle命令を使ったweight係数の分配により高速化する +#define ENABLE_DP1_SHUFFLE_OPT 1 + +#define SSRC(x,y) ((y)*(ssrc_dim)+(x)) +#define SPIX(x,y) ((y)*(spix_dim)+(x)) +#define SWHT_IDX(i,thIdWeight) ((thIdWeight)*sweight_dim+(i)) + +__device__ __inline__ +float elliott(float val) { + return val * __frcp_rn(1.0f + fabs(val)); +} +#if ENABLE_CUDA_FP16_HOST +__device__ __inline__ +__half2 __half2_abs(__half2 val) { + __half2 h; + RGY_HALF2_TO_UI(h) = RGY_HALF2_TO_UI(val) & 0x7fff7fffu; + return h; +} + +__device__ __inline__ +__half2 elliott(__half2 val) { +#if ENABLE_CUDA_FP16_DEVICE + return val * h2rcp(__float2half2_rn(1.0f) + __half2_abs(val)); +#else + return val; //dummy +#endif +} +#endif + +template +__device__ __inline__ +void load_texSrc(float *const ptr_src, const int ssrc_dim, TypePixel *const ptr_pix, const int spix_dim, cudaTextureObject_t texSrc, const int nnx, const int nny, const int nnx_2_m1, const int nny_2, const int thIdX, const int thIdY) { + for (int y = 0; y + thIdY < NNEDI_BLOCK_Y * thread_y_loop + nny; y += NNEDI_BLOCK_Y) { + for (int x = 0; x + thIdX < ssrc_dim; x += NNEDI_BLOCK_X) { + const float px = blockIdx.x * NNEDI_BLOCK_X /*blockDim.x*/ * pix_x_per_thread + thIdX + x - nnx_2_m1 + 0.5f; + const float py = blockIdx.y * NNEDI_BLOCK_Y /*blockDim.y*/ * thread_y_loop + thIdY + y - nny_2 + 0.5f; + const float value = (float)tex2D(texSrc, px, py); + ptr_src[SSRC(x + thIdX, y + thIdY)] = value * 256.0f; //floatのときはここで256倍して8bit相当に戻す + if (load_for_interp && 0 <= thIdX + x - nnx_2_m1 && thIdX + x - nnx_2_m1 < spix_dim) { + ptr_pix[SPIX(x + thIdX - nnx_2_m1, y + thIdY)] = (TypePixel)(value * (float)(1< +__device__ __inline__ +void load_texSrc(__half2 *const ptr_src, const int ssrc_dim, TypePixel *const ptr_pix, const int spix_dim, cudaTextureObject_t texSrc, const int nnx, const int nny, const int nnx_2_m1, const int nny_2, const int thIdX, const int thIdY) { +#if ENABLE_CUDA_FP16_DEVICE + static_assert(pix_x_per_thread == 1 || pix_x_per_thread == 4, "pix_x_per_thread == 1 or 4"); + if (pix_x_per_thread == 1) { + //sharedメモリ上に、以下のように重複配置する + // | 0, 1 | 1, 2 | 2, 3 | 3, 4 | 4, 5 | ... 
+ for (int y = 0; y + thIdY < NNEDI_BLOCK_Y * thread_y_loop + nny; y += NNEDI_BLOCK_Y) { + for (int x = 0; x + thIdX < ssrc_dim; x += NNEDI_BLOCK_X) { + const float px = blockIdx.x * NNEDI_BLOCK_X /*blockDim.x*/ + thIdX + x - nnx_2_m1 + 0.5f; + const float py = blockIdx.y * NNEDI_BLOCK_Y /*blockDim.y*/ * thread_y_loop + thIdY + y - nny_2 + 0.5f; + const float v0 = tex2D(texSrc, px, py); + const float v1 = tex2D(texSrc, px+1.0f, py); + ptr_src[SSRC(x + thIdX, y + thIdY)] = __floats2half2_rn(v0, v1); //half2のときはここでは256倍せず、0~1の範囲を使用する + if (load_for_interp && 0 <= thIdX + x - nnx_2_m1 && thIdX + x - nnx_2_m1 < spix_dim) { + ptr_pix[SPIX(x + thIdX - nnx_2_m1, y + thIdY)] = (TypePixel)(v0 * (float)(1<(texSrc, px, py); + const float v1 = tex2D(texSrc, px+1.0f, py); + ptr_src[SSRC(x + thIdX, y + thIdY)] = __floats2half2_rn(v0, v1); //half2のときはここでは256倍せず、0~1の範囲を使用する + if (load_for_interp && 0 <= load_x && load_x < spix_dim) { + struct __align__(sizeof(TypePixel) * 2) TypePixel2 { + TypePixel x, y; + } p; + p.x = (TypePixel)(v0 * (float)(1< +__device__ __inline__ +TypePixel prescreen_flag() { + return (1< __device__ __inline__ T setval(float val); +template<> __device__ __inline__ float setval(float val) { return val; }; +template __device__ __inline__ constexpr int kernel_comute_network1_calc_scale_step(); +template<> __device__ __inline__ constexpr int kernel_comute_network1_calc_scale_step() { return 1; }; +template +__device__ __inline__ bool compute_kernel0_get_flag_original(const float ret[thread_y_loop][nns], int ithy) { + return (fmaxf(ret[ithy][2], ret[ithy][3]) <= fmaxf(ret[ithy][0], ret[ithy][1])); +} +template +__device__ __inline__ void compute_kernel0_get_flags_new(bool flags[4], const float ret[thread_y_loop][nns], int ithy) { + flags[0] = ret[ithy][0] > 0.0f; + flags[1] = ret[ithy][1] > 0.0f; + flags[2] = ret[ithy][2] > 0.0f; + flags[3] = ret[ithy][3] > 0.0f; +} +#if ENABLE_CUDA_FP16_HOST +template<> __device__ __inline__ constexpr int kernel_comute_network1_calc_scale_step<__half2>() { return 2; }; +template<> __device__ __inline__ __half setval(float val) { return __half(val); }; +template<> __device__ __inline__ __half2 setval(float val) { return __float2half2_rn(val); } +__device__ __inline__ __half half_max(const __half& a, const __half& b) { +#if ENABLE_CUDA_FP16_DEVICE + return a < b ? 
b : a; +#else + return a; //dummy +#endif +} +template +__device__ __inline__ bool compute_kernel0_get_flag_original(const __half2 ret[thread_y_loop][nns], int ithy) { +#if ENABLE_CUDA_FP16_DEVICE + //__hlaf2には重み方向に2つの値が入っている + //やっていることはfloat版と同じ + return (half_max(ret[ithy][1].x, ret[ithy][1].y) <= half_max(ret[ithy][0].x, ret[ithy][0].y)); +#else + return true; //dummy +#endif +} +template +__device__ __inline__ void compute_kernel0_get_flags_new(bool flags[4], const __half2 ret[thread_y_loop][nns], int ithy) { +#if ENABLE_CUDA_FP16_DEVICE + flags[0] = ret[ithy][0].x > __half(0.0f); + flags[1] = ret[ithy][0].y > __half(0.0f); + flags[2] = ret[ithy][1].x > __half(0.0f); + flags[3] = ret[ithy][1].y > __half(0.0f); +#endif //#if ENABLE_CUDA_FP16_DEVICE +} +#endif //#if ENABLE_CUDA_FP16_HOST + +template +__device__ __inline__ +void dot_product0( + float sum[thread_y_loop][weight_loop], + const float *const ptr_src, const int ssrc_dim, + const float *const ptr_weight, const int sweight_dim, + const float *__restrict__ weight_offset, + const int nnx, const int nny, const int thIdX, const int thIdY, + const int pix_x_per_thread, + const float mstd[thread_y_loop][4] +) { + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int i = 0; i < weight_loop; i++) { + sum[ithy][i] = 0.0f; + } + } + const auto *ptr_w = ptr_weight; + for (int y = 0; y < nny; y++) { + const int src_index = (src_is_frame) + //srcがフレームのキャッシュを指しているとき + //通常、pix_x_per_thread=1なので、thIdXによって各スレッドが担当するpixelをロードする + //pre_screen=newの時には各スレッドが4pixel担当するので、pix_x_per_threadが4になり、とびとびの値をロードする + ? SSRC(thIdX * pix_x_per_thread, thIdY * thread_y_loop + y) + //kernel_comute_network0で、srcがptr_tmpの計算結果の場合 + //担当pixelはstmp_dim(ssrc_dim)ごとに並んでいるので、x=0、y=担当行でロードする + : SSRC(0, thIdY * thread_y_loop * NNEDI_BLOCK_X + thIdX); + const auto *ptr_s = &ptr_src[src_index]; + + for (int x = 0; x < nnx; x++, ptr_s++, ptr_w++) { + float s0[thread_y_loop]; + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + s0[ithy] = ptr_s[(src_is_frame) ? (SSRC(0, ithy)) : (SSRC(0, ithy * NNEDI_BLOCK_X))]; + } + #pragma unroll + for (int i = 0; i < weight_loop; i++) { + auto w0 = ptr_w[SWHT_IDX(0, i)]; + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + sum[ithy][i] += s0[ithy] * w0; + } + } + } + } + + #pragma unroll + for (int i = 0; i < weight_loop; i++, weight_offset++) { + const auto wo = weight_offset[0]; + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + const auto scale = setval((scale_dummy) ? 1.0f : mstd[ithy][2]); + sum[ithy][i] = sum[ithy][i] * scale + wo; + } + } +} + +#if ENABLE_CUDA_FP16_HOST +template +__device__ __inline__ +void dot_product0( + __half2 sum[thread_y_loop][weight_loop], + const __half2 *const ptr_src, const int ssrc_dim, + const __half2 *const ptr_weight, const int sweight_dim, + const __half2 *__restrict__ weight_offset, + const int nnx, const int nny, const int thIdX, const int thIdY, + const int pix_x_per_thread, + const float mstd[thread_y_loop][4] +) { +#if ENABLE_CUDA_FP16_DEVICE + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int i = 0; i < weight_loop; i++) { + sum[ithy][i] = setval<__half2>(0.0f); + } + } + const int pix_x_per_thread_for_half2 = (prescreen_new) ? 
2 : 1;
+    const int wstep = kernel_comute_network1_calc_scale_step<__half2>();
+    const auto *ptr_w = ptr_weight;
+    for (int y = 0; y < nny; y++) {
+        const int src_index = (src_is_frame)
+            //when src points to the cached frame data:
+            //normally pix_x_per_thread=1, so each thread loads the pixel it is responsible for, selected by thIdX
+            //with pre_screen=original the pixels are stored with overlap, so each thread can simply load one __half2
+            // th=0   th=1   th=2   th=3   th=4
+            // | 0, 1 | 1, 2 | 2, 3 | 3, 4 | 4, 5 | ...
+            //with pre_screen=new each thread handles 4 pixels, so it loads non-contiguous values.
+            //in that case one half2 holds 2 pixels and there is no overlapped layout as with pre_screen=original,
+            //so thIdX is multiplied by pix_x_per_thread_for_half2=2
+            // th=0   th=1   th=2
+            // | 0, 1 | 2, 3 | 3, 4 | 5, 6 | 7, 8 |
+            ? SSRC(thIdX * pix_x_per_thread_for_half2, thIdY * thread_y_loop + y)
+            //in kernel_comute_network0, when src points to the intermediate results in ptr_tmp,
+            //the pixels handled by this thread are laid out every stmp_dim (ssrc_dim) elements, so load at x=0 and y=the row assigned to this thread
+            : SSRC(0, thIdY * thread_y_loop * NNEDI_BLOCK_X + thIdX);
+        const auto *ptr_s = &ptr_src[src_index];
+
+        //when src_is_frame = true:
+        //with pre_screen=original the layout is overlapped, so each thread has to read every other element
+        //   first   next    after that
+        //   ↓       ↓       ↓
+        // | 0, 1 | 1, 2 | 2, 3 | 3, 4 | 4, 5 | ...
+        //
+        //with pre_screen=new there is no overlap, so each thread simply reads the adjacent element
+        //   first   next    after that
+        //   ↓       ↓       ↓
+        // | 0, 1 | 2, 3 | 3, 4 | 5, 6 | 7, 8 |
+        const int sstep = ((src_is_frame && !prescreen_new) ? wstep : 1);
+
+        for (int x = 0; x < nnx; x += wstep, ptr_s += sstep) {
+            __half2 s0[thread_y_loop];
+            #pragma unroll
+            for (int ithy = 0; ithy < thread_y_loop; ithy++) {
+                s0[ithy] = ptr_s[(src_is_frame) ? (SSRC(0, ithy)) : (SSRC(0, ithy * NNEDI_BLOCK_X))];
+            }
+
+            //in kernel_comute_network0 the half2 case uses nns = 4 / 2,
+            //so things would go wrong if weight_loop were larger than 2
+            static_assert(weight_loop <= 2, "weight_loop <= 2");
+
+            //read weight_loop*2 consecutive values into w and broadcast them to all threads via shuffle
+            //the weights are laid out along the nns direction first
+            //basically they are loaded like this:
+            // <------ nns ------>
+            // <-- half2-->
+            //  w0    w1    w2    w3
+            // |0----|---->|1----|---->|  <<< weights applied to x0
+            // |2----|---->|3----|---->|  <<< weights applied to x1
+            //  w4    w5    w6    w7
+            __half2 w;
+            if (thIdX < weight_loop*2) w = ptr_w[thIdX];
+            ptr_w += weight_loop*2;
+            #pragma unroll
+            for (int i = 0; i < weight_loop; i++) {
+                auto w0 = __shfl(w, i+0);           //weight applied to x0
+                auto w1 = __shfl(w, i+weight_loop); //weight applied to x1
+                #pragma unroll
+                for (int ithy = 0; ithy < thread_y_loop; ithy++) {
+                    //conceptually, the computation along the nns direction is carried out in parallel inside each half2
+                    sum[ithy][i] += __low2half2(s0[ithy]) * w0;  //x0 * (w0, w1)
+                    sum[ithy][i] += __high2half2(s0[ithy]) * w1; //x1 * (w4, w5)
+                }
+            }
+        }
+    }
+
+    #pragma unroll
+    for (int i = 0; i < weight_loop; i++, weight_offset++) {
+        const auto wo = weight_offset[0];
+        #pragma unroll
+        for (int ithy = 0; ithy < thread_y_loop; ithy++) {
+            //when src points to the cached frame data,
+            //the half2 path did not scale by 256 at load time, so apply the 256x scaling here
+            //it is not needed in kernel_comute_network0 when src points to the intermediate results in ptr_tmp
+            //note that without the 256x scaling here, the elliott stage that follows would not return the proper values
+            const auto scale = setval<__half2>((src_is_frame) ? 256.0f : 1.0f);
+            sum[ithy][i] = sum[ithy][i] * scale + wo;
+        }
+    }
+#endif //#if ENABLE_CUDA_FP16_DEVICE
+}
+#endif //#if ENABLE_CUDA_FP16_HOST
+
+template
+__device__ __inline__
+static TypePixel interp_ret(const TypeCalc *const ptr_src, const int ssrc_dim,
+    const bool flag, const int thIdX, const int thIdY, int ithy, const int nnx_2_m1, const int nny_2) {
+    TypePixel val = prescreen_flag();
+    if (flag) {
+        float tmp =
+            (19.0f / 32.0f) * ((float)ptr_src[SSRC(thIdX + nnx_2_m1, thIdY * thread_y_loop + ithy + 1)] + (float)ptr_src[SSRC(thIdX + nnx_2_m1, thIdY * thread_y_loop + ithy + 2)])
+            - (3.0f / 32.0f) * ((float)ptr_src[SSRC(thIdX + nnx_2_m1, thIdY * thread_y_loop + ithy + 0)] + (float)ptr_src[SSRC(thIdX + nnx_2_m1, thIdY * thread_y_loop + ithy + 3)]);
+        val = (TypePixel)clamp(tmp + 0.5f, 0.0f, (1<
+__global__ void kernel_compute_network0(
+    uint8_t *__restrict__ pDst, //top field / bottom field is assumed to be already taken into account
+    const int dstPitch, //twice the usual pitch, since only every other line is written
+    const int dstWidth,
+    const int dstHeight,
+    cudaTextureObject_t texSrc, //texture of the valid field only (half the vertical resolution)
+    const TypeCalc *__restrict__ weight,
+    const NnediTargetField targetField
+    ) {
+    const int wstep = kernel_comute_network1_calc_scale_step(); //2 for half2, 1 for float
+    const int pix_x_per_thread = prescreen_new ? 4/*process 4 pixels at a time*/ : 1;
+    const int nnx = (prescreen_new) ? 16 : 12;
+    const int nny = 4;
+    const int nnxy = nnx * nny;
+    const int nns = 4 / wstep; //with half2, two values along the nns direction fit in one element
+    const int thIdX = threadIdx.x; //(size: NNEDI_BLOCK_X)
+    const int thIdY = threadIdx.y; //(size: NNEDI_BLOCK_Y)
+    const int gIdX =(blockIdx.x * NNEDI_BLOCK_X /*blockDim.x*/ + thIdX) * pix_x_per_thread;
+    const int gIdY =(blockIdx.y * NNEDI_BLOCK_Y /*blockDim.y*/ + thIdY) * thread_y_loop; //in field units
+    const int stmp_dim = ((prescreen_new) ? 4 : 8) / wstep; //with half2, two values fit in one element
+    const int ssrc_dim = (prescreen_new && wstep == 2 /*using __half2*/)
+        ? (NNEDI_BLOCK_X * pix_x_per_thread + nnx) / 2 //with prescreen=new and __half2, no overlapped layout is used
+        : NNEDI_BLOCK_X * pix_x_per_thread + nnx; //size for float; __half2 with prescreen=original uses the overlapped layout, so the size is the same as for float
+
+    //size and usage of the shared memory
+    __shared__ char shared[
+        ssrc_dim * (NNEDI_BLOCK_Y * thread_y_loop + nny) * sizeof(TypeCalc) + //src, used for the computation
+        NNEDI_BLOCK_X * NNEDI_BLOCK_Y * thread_y_loop * stmp_dim * sizeof(TypeCalc) + //tmp (temporary storage for intermediate results)
+        (NNEDI_BLOCK_X * pix_x_per_thread) * (NNEDI_BLOCK_Y * thread_y_loop + nny) * sizeof(decltype(TypePixel4::x)) //used for the interpolation in interp_ret
+    ];
+    TypeCalc *const ptr_src = (TypeCalc *)shared;
+
+    TypeCalc *const ptr_temp = (TypeCalc *)((char *)ptr_src
+        + (ssrc_dim * (NNEDI_BLOCK_Y * thread_y_loop + nny) * sizeof(ptr_src[0])));
+#define STMP_IDX(i,x,y) ( ((y)*(NNEDI_BLOCK_X)+(x)) * stmp_dim + (i))
+
+    //data used when interpolating in interp_ret()
+    //with 16-bit (integer) precision, fp16 could lose accuracy, so this is kept separately from ptr_src
+    //interp_ret() only interpolates vertically, so there is no need to read the extra nnx margin the way ptr_src does
+    //integers are used here instead of float to save shared memory
+    decltype(TypePixel4::x) *const ptr_pix = (decltype(TypePixel4::x) *)((char *)ptr_temp
+        + NNEDI_BLOCK_X * NNEDI_BLOCK_Y * thread_y_loop * stmp_dim * sizeof(TypeCalc));
+    const int spix_dim = NNEDI_BLOCK_X * pix_x_per_thread;
+
+    //input(texture) -> shared, spix
+    //load the pixel data from the texture into shared memory
+    //out-of-range wrap-around etc. is handled by the texture, so it is ignored here
+    const int nnx_2_m1 = (prescreen_new) ? 6 : 5;
+    const int nny_2 = nny / 2 - (targetField == NNEDI_GEN_FIELD_BOTTOM ?
1 : 0); + load_texSrc(ptr_src, ssrc_dim, ptr_pix, spix_dim, texSrc, nnx, nny, nnx_2_m1, nny_2, thIdX, thIdY); + __syncthreads(); + + float dummy[thread_y_loop][4]; + const int sweight_dim = (wstep == 1) ? nnxy : nnxy * weight_loop; + if (!prescreen_new) { + #pragma unroll + for (int iw = 0; iw < nns; iw += weight_loop) { + TypeCalc sum[thread_y_loop][weight_loop]; //レジスタにのることを期待する + dot_product0(sum, ptr_src, ssrc_dim, weight+iw*sweight_dim, /*sweight_dim=*/nnxy, weight+48*nns+iw, nnx, nny, thIdX, thIdY, pix_x_per_thread, dummy); + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int ithw = 0; ithw < weight_loop; ithw++) { + ptr_temp[STMP_IDX(iw+ithw, thIdX, thIdY * thread_y_loop + ithy)] = elliott(sum[ithy][ithw]); + } + } + } + __syncthreads(); + + #pragma unroll + for (int iw = 0; iw < nns; iw += weight_loop) { + TypeCalc sum[thread_y_loop][weight_loop]; //レジスタにのることを期待する + dot_product0(sum, ptr_temp, stmp_dim, weight+49*nns+iw*nns, /*sweight_dim=nnxy=*/4, weight+49*nns + 4*nns+iw, /*nnx=*/4, /*nny=*/1, thIdX, thIdY, pix_x_per_thread, dummy); + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int ithw = 0; ithw < weight_loop; ithw++) { + //half2なら、値を2つ格納できることに注意して、4/wstepとする + ptr_temp[STMP_IDX(4/wstep+iw+ithw, thIdX, thIdY * thread_y_loop + ithy)] = elliott(sum[ithy][ithw]); + } + } + } + __syncthreads(); + + TypeCalc ret[thread_y_loop][nns]; //レジスタにのることを期待する + #pragma unroll + for (int iw = 0; iw < nns; iw += weight_loop) { + TypeCalc sum[thread_y_loop][weight_loop]; //レジスタにのることを期待する + dot_product0(sum, ptr_temp, stmp_dim, weight + nns*49 + nns*5+stmp_dim*iw, /*sweight_dim=nnxy=*/8, weight + nns*49 + nns*5 + nns*8+iw, /*nnx=*/8, /*nny=*/1, thIdX, thIdY, pix_x_per_thread, dummy); + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int ithw = 0; ithw < weight_loop; ithw++) { + ret[ithy][ithw+iw] = sum[ithy][ithw]; + } + } + } + + if (gIdX < dstWidth) { + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + if ((gIdY + ithy) * 2 < dstHeight) { //縦方向は1行おきの処理となるので "*2" + const bool flag = compute_kernel0_get_flag_original(ret, ithy); + decltype(TypePixel4::x) *const ptr_dst = (decltype(TypePixel4::x) *)((uint8_t *)pDst + (gIdY + ithy) * dstPitch + gIdX * sizeof(TypePixel4::x)); + //ptr_dst[0] = interp_ret(ptr_src, ssrc_dim, flag, thIdX, thIdY, ithy, nnx_2_m1, nny_2); + ptr_dst[0] = interp_ret(ptr_pix, spix_dim, flag, thIdX, thIdY, ithy, 0, nny_2); + } + } + } + } else { + #pragma unroll + for (int iw = 0; iw < nns; iw += weight_loop) { + TypeCalc sum[thread_y_loop][weight_loop]; //レジスタにのることを期待する + dot_product0(sum, ptr_src, ssrc_dim, weight+iw*sweight_dim, /*sweight_dim=*/nnxy, weight+64*nns+iw, nnx, nny, thIdX, thIdY, pix_x_per_thread, dummy); + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int ithw = 0; ithw < weight_loop; ithw++) { + ptr_temp[STMP_IDX(iw+ithw, thIdX, thIdY * thread_y_loop + ithy)] = elliott(sum[ithy][ithw]); + } + } + } + __syncthreads(); + + TypeCalc ret[thread_y_loop][nns]; //レジスタにのることを期待する + #pragma unroll + for (int iw = 0; iw < nns; iw += weight_loop) { + TypeCalc sum[thread_y_loop][weight_loop]; //レジスタにのることを期待する + dot_product0(sum, ptr_temp, stmp_dim, weight+65*nns+iw*nns, /*sweight_dim=nnxy=*/4, weight+65*nns + 4*nns + iw, /*nnx=*/4, /*nny=*/1, thIdX, thIdY, pix_x_per_thread, dummy); + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for 
(int ithw = 0; ithw < weight_loop; ithw++) { + ret[ithy][ithw+iw] = sum[ithy][ithw]; + } + } + } + + if (gIdX < dstWidth) { + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + if ((gIdY + ithy) * 2 < dstHeight) { //縦方向は1行おきの処理となるので "*2" + TypePixel4 *const ptr_dst = (TypePixel4 *)((uint8_t *)pDst + (gIdY + ithy) * dstPitch + gIdX * sizeof(decltype(TypePixel4::x))); + //1スレッドで4pixel分出力する + bool flags[4]; + compute_kernel0_get_flags_new(flags, ret, ithy); + TypePixel4 out; + //out.x = interp_ret(ptr_src+0, ssrc_dim, flags[0], thIdX * pix_x_per_thread, thIdY, ithy, nnx_2_m1, nny_2); + //out.y = interp_ret(ptr_src+1, ssrc_dim, flags[1], thIdX * pix_x_per_thread, thIdY, ithy, nnx_2_m1, nny_2); + //out.z = interp_ret(ptr_src+2, ssrc_dim, flags[2], thIdX * pix_x_per_thread, thIdY, ithy, nnx_2_m1, nny_2); + //out.w = interp_ret(ptr_src+3, ssrc_dim, flags[3], thIdX * pix_x_per_thread, thIdY, ithy, nnx_2_m1, nny_2); + out.x = interp_ret(ptr_pix+0, spix_dim, flags[0], thIdX * pix_x_per_thread, thIdY, ithy, 0, nny_2); + out.y = interp_ret(ptr_pix+1, spix_dim, flags[1], thIdX * pix_x_per_thread, thIdY, ithy, 0, nny_2); + out.z = interp_ret(ptr_pix+2, spix_dim, flags[2], thIdX * pix_x_per_thread, thIdY, ithy, 0, nny_2); + out.w = interp_ret(ptr_pix+3, spix_dim, flags[3], thIdX * pix_x_per_thread, thIdY, ithy, 0, nny_2); + ptr_dst[0] = out; + } + } + } + } +} + +template __device__ + void kernel_comute_network1_calc_scale_get_sum_sumsq(float& sum, float& sumsq, TypeCalc tsum, TypeCalc tsumsq); +template<> __device__ __inline__ + void kernel_comute_network1_calc_scale_get_sum_sumsq(float& sum, float& sumsq, float tsum, float tsumsq) { + sum = tsum, sumsq = tsumsq; +} +#if ENABLE_CUDA_FP16_HOST +template<> __device__ __inline__ + void kernel_comute_network1_calc_scale_get_sum_sumsq<__half2>(float& sum, float& sumsq, __half2 tsum, __half2 tsumsq) { + //half2では、textureからのロード時に256倍していない + //ここで、256倍して、本来の値に戻す(ここで256倍しないと、後段のelliottが本来の値を返さない) + //なお、textureからのロード時に256倍してしまうとtsumsqの計算がオーバーフローしてしまう + sum = ((float)tsum.x + (float)tsum.y) * 256.0f; + sumsq = ((float)tsumsq.x + (float)tsumsq.y) * 256.0f * 256.0f; +} +#endif //#if ENABLE_CUDA_FP16_HOST + +template +__device__ __inline__ +void kernel_comute_network1_calc_scale( + float mstd[][4], + TypeCalc *__restrict__ const ptr_temp, + const TypeCalc *__restrict__ const ptr_src, const int ssrc_dim, + const int nnx, const int nny, const int nnxy, + const int thIdX, const int thIdY, + const int thread_y_loop) { + const int step = kernel_comute_network1_calc_scale_step(); +#define TMP_IDX(x,y,i) ((((i)*(nny + NNEDI_BLOCK_Y * thread_y_loop)+(y))*NNEDI_BLOCK_X)+(x)) + for (int y = 0; y + thIdY < nny + NNEDI_BLOCK_Y * thread_y_loop; y += NNEDI_BLOCK_Y) { + TypeCalc sum = setval(0.0f), sumsq = setval(0.0f); + //まず各ピクセルごとに、x方向の総和をとる + #pragma unroll (4) + for (int x = 0; x < nnx; x += step) { + const auto value = ptr_src[SSRC(x + thIdX, y + thIdY)]; + sum += value; + sumsq += value * value; + } + //一度sharedメモリに格納 + ptr_temp[TMP_IDX(thIdX, thIdY+y, 0)] = sum; + ptr_temp[TMP_IDX(thIdX, thIdY+y, 1)] = sumsq; + } + __syncthreads(); + + const float inv_nnxy = __frcp_rn(nnxy); + + //次にy方向の総和をとる + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + TypeCalc tsum = setval(0.0f), tsumsq = setval(0.0f); + #pragma unroll + for (int y = 0; y < nny; y++) { + tsum += ptr_temp[TMP_IDX(thIdX, thIdY*thread_y_loop+ithy+y, 0)]; + tsumsq += ptr_temp[TMP_IDX(thIdX, thIdY*thread_y_loop+ithy+y, 1)]; + } + + //half2使用時に並列で計算したものを集約するとともに、256倍の補正を適用する + 
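kernel_comute_network1_calc_scale() accumulates each patch's sum and sum of squares (first along x into shared memory, then along y per output pixel) and, just below, folds them into the local mean, standard deviation and reciprocal standard deviation stored in mstd[0..2]. Here is a minimal host-side sketch of that final step for a plain float patch; the epsilon literal stands in for RGY_FLT_EPS and all names are illustrative.

#include <cmath>
#include <cstdio>

// Fold a patch's sum and sum of squares into mean / stddev / 1-over-stddev,
// the way mstd[0..2] is filled in kernel_comute_network1_calc_scale().
static void calc_scale(const float *patch, int nnxy, float mstd[3]) {
    float sum = 0.0f, sumsq = 0.0f;
    for (int i = 0; i < nnxy; i++) {
        sum   += patch[i];
        sumsq += patch[i] * patch[i];
    }
    const float inv_nnxy = 1.0f / (float)nnxy;
    mstd[0] = sum * inv_nnxy;                          // mean
    float var = sumsq * inv_nnxy - mstd[0] * mstd[0];  // E[x^2] - E[x]^2
    if (var <= 1.192092896e-07f) {                     // epsilon guard (stand-in for RGY_FLT_EPS)
        mstd[1] = 0.0f;
        mstd[2] = 0.0f;
    } else {
        mstd[1] = std::sqrt(var);  // stddev
        mstd[2] = 1.0f / mstd[1];  // reciprocal, used to scale the dot products
    }
}

int main() {
    const float patch[4] = { 10.0f, 20.0f, 30.0f, 40.0f };
    float mstd[3];
    calc_scale(patch, 4, mstd);
    printf("mean=%.3f stddev=%.3f invstd=%.5f\n", mstd[0], mstd[1], mstd[2]);
    return 0;
}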
float sum, sumsq; + kernel_comute_network1_calc_scale_get_sum_sumsq(sum, sumsq, tsum, tsumsq); + + mstd[ithy][3] = 0.0f; + mstd[ithy][0] = sum * inv_nnxy; + float tmp = sumsq * inv_nnxy - mstd[ithy][0] * mstd[ithy][0]; + if (tmp <= RGY_FLT_EPS) { + mstd[ithy][1] = 0.0f; + mstd[ithy][2] = 0.0f; + } else { + mstd[ithy][1] = __fsqrt_rn(tmp); + mstd[ithy][2] = __frcp_rn(mstd[ithy][1]); + } + } +#undef TMP_IDX +} +#if ENABLE_CUDA_FP16_HOST && (!ENABLE_CUDA_FP16_DEVICE) +template<> +__device__ __inline__ +void kernel_comute_network1_calc_scale( + float mstd[][4], + __half2 *__restrict__ const ptr_temp, + const __half2 *__restrict__ const ptr_src, const int ssrc_dim, + const int nnx, const int nny, const int nnxy, + const int thIdX, const int thIdY, + const int thread_y_loop) { + //ダミー + assert(false); +} +#endif //#if ENABLE_CUDA_FP16_HOST && (!ENABLE_CUDA_FP16_DEVICE) + +template +__device__ __inline__ +void dot_product_frame1_fp32( + float sum0[thread_y_loop][weight_loop], //レジスタにのることを期待する + float sum1[thread_y_loop][weight_loop], //レジスタにのることを期待する + TypeCalc *__restrict__ const ptr_src, const int ssrc_dim, + const TypeCalc *__restrict__ const ptr_weight, const int sweight_dim, + const TypeCalc *__restrict__ weight_offset, + const int nnx, const int nny, const int nns, const int thIdX, const int thIdY, + const float mstd[thread_y_loop][4] +) { + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int i = 0; i < weight_loop; i++) { + sum0[ithy][i] = sum1[ithy][i] = 0.0f; + } + } + const TypeCalc *ptr_w = ptr_weight; + for (int y = 0; y < nny; y++) { + const TypeCalc *ptr_s = &ptr_src[SSRC(thIdX, thIdY * thread_y_loop + y)]; +#if ENABLE_DP1_WEIGHT_ARRAY_OPT + //#pragma unroll (4) + for (int x = 0; x < nnx; x++, ptr_s++) { + //このsharedメモリからロードしたpixelデータをレジスタ上で使いまわすのが重要 + TypeCalc s0[thread_y_loop]; + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + s0[ithy] = ptr_s[SSRC(0, ithy)]; + } +#if ENABLE_DP1_SHUFFLE_OPT + //[nns/weight_loop][nnxy][weight_loop][2] + //最後の2つには、nns方向の[i]と[i+nns]のものを配置している + // <--------------- nns --------------------> + // <--- weight_loop ---> (weight_loop = 2の場合) + // [0] [nns] [1] [1+nns] + // |0----|1--->|2----|3--->| + //まず、各スレッドでweight_loop*2分だけ重みをwにロードし、 + //これをshuffleで全スレッドにbroadcastして使用するようにする + TypeCalc w; + if (thIdX < weight_loop*2) w = ptr_w[thIdX]; + ptr_w += weight_loop*2; + #pragma unroll + for (int i = 0; i < weight_loop; i++) { + const auto w0 = __shfl(w, i*2+0); //[i]の重み + const auto w1 = __shfl(w, i*2+1); //[i+nns]の重み + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + sum0[ithy][i] += s0[ithy] * w0; + sum1[ithy][i] += s0[ithy] * w1; + } + } +#else + #pragma unroll + for (int i = 0; i < weight_loop; i++, ptr_w += 2) { + const auto w0 = ptr_w[0]; + const auto w1 = ptr_w[1]; + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + sum0[i][ithy] += s0[ithy] * w0; + sum1[i][ithy] += s0[ithy] * w1; + } + } +#endif + } + } +#else + #pragma unroll (4) + for (int x = 0; x < nnx; x++, ptr_w++, ptr_s++) { + //このsharedメモリからロードしたpixelデータをレジスタ上で使いまわすのが重要 + TypePixel s0[thread_y_loop]; + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + s0[ithy] = ptr_s[SSRC(0, ithy*NNEDI_BLOCK_Y)]; + } + #pragma unroll + for (int i = 0; i < weight_loop; i++) { + TypeCalc w0 = ptr_w[SWHT_IDX(0, i)]; + TypeCalc w1 = ptr_w[SWHT_IDX(0, i+nns)]; + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + sum0[i][ithy] += s0[ithy] * w0; + sum1[i][ithy] += s0[ithy] 
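The ENABLE_DP1_SHUFFLE_OPT path above has only the first weight_loop*2 lanes read the weights from memory, then broadcasts them to the whole warp with __shfl so every thread reuses the same register values instead of re-reading shared memory. Below is a compact sketch of that broadcast pattern, written with the current __shfl_sync intrinsic in place of the older __shfl used in the source; the kernel name, sizes and dummy accumulation are made up for illustration.

#include <cstdio>
#include <cuda_runtime.h>

// Only the first nweights lanes load a weight; __shfl_sync then broadcasts lane
// i's register to every lane of the warp, so all 32 threads can use the same
// weights without extra memory traffic.
__global__ void broadcast_weights(const float *weights, float *out, int nweights) {
    const int lane = threadIdx.x & 31;
    float w = 0.0f;
    if (lane < nweights) w = weights[lane];
    float acc = 0.0f;
    for (int i = 0; i < nweights; i++) {
        const float wi = __shfl_sync(0xffffffffu, w, i); // broadcast weight i
        acc += wi * (float)(lane + 1);                   // dummy per-lane use
    }
    out[threadIdx.x] = acc;
}

int main() {
    const int nweights = 8;
    float h_w[nweights];
    for (int i = 0; i < nweights; i++) h_w[i] = (float)i;
    float *d_w = nullptr, *d_out = nullptr;
    cudaMalloc(&d_w, sizeof(h_w));
    cudaMalloc(&d_out, 32 * sizeof(float));
    cudaMemcpy(d_w, h_w, sizeof(h_w), cudaMemcpyHostToDevice);
    broadcast_weights<<<1, 32>>>(d_w, d_out, nweights);
    float h_out[32];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("lane0=%.0f lane31=%.0f\n", h_out[0], h_out[31]); // 28 and 896
    cudaFree(d_w);
    cudaFree(d_out);
    return 0;
}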
* w1; + } + } + } +#endif +#if ENABLE_DP1_WEIGHT_ARRAY_OPT + #pragma unroll + for (int i = 0; i < weight_loop; i++, weight_offset += 2) { + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + //weight offsetもw([i], [i+nns])の並びになっている + sum0[ithy][i] = sum0[ithy][i] * mstd[ithy][2] + weight_offset[0]; //w[i]用のweight_offset + sum1[ithy][i] = sum1[ithy][i] * mstd[ithy][2] + weight_offset[1]; //w[i+nns]用のweight_offset + } + } +#else + #pragma unroll + for (int i = 0; i < weight_loop; i++, weight_offset++) { + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + sum0[ithy][i] = sum0[ithy][i] * mstd[ithy][2] + weight_offset[0]; + sum1[ithy][i] = sum1[ithy][i] * mstd[ithy][2] + weight_offset[nns]; + } + } +#endif +} + +#if ENABLE_CUDA_FP16_HOST +template +__device__ __inline__ +void dot_product_frame1_fp16( + __half2 sum[thread_y_loop][weight_loop], + __half2 *__restrict__ const ptr_src, const int ssrc_dim, + const __half2 *__restrict__ const ptr_weight, const int sweight_dim, + const __half2 *__restrict__ weight_offset, + const int nnx, const int nny, const int nns, const int thIdX, const int thIdY, + const __half2 weight_scale[thread_y_loop] +) { +#if ENABLE_CUDA_FP16_DEVICE + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int i = 0; i < weight_loop; i++) { + sum[ithy][i] = setval<__half2>(0.0f); + } + } + const __half2 *ptr_w = ptr_weight; + for (int y = 0; y < nny; y++) { + const __half2 *ptr_s = &ptr_src[SSRC(thIdX, thIdY * thread_y_loop + y)]; + + //ptr_srcでは、重複配置をしているので、各スレッドは、2つおきに読む必要がある + // 最初 次 その次 + // ↓ ↓ ↓ + // | 0, 1 | 1, 2 | 2, 3 | 3, 4 | 4, 5 | ... + for (int x = 0; x < nnx; x += 2, ptr_s += 2) { + //このsharedメモリからロードしたpixelデータをレジスタ上で使いまわすのが重要 + __half2 s0[thread_y_loop]; + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + s0[ithy] = ptr_s[SSRC(0, ithy)]; + } + //[nns/weight_loop][nnxy][weight_loop][2] + //最後の2つには、nns方向の[i]と[i+nns]のものを配置しているので、これがセットでhalf2に乗る + // <--------------- nns --------------------> + // <--- weight_loop ---> (weight_loop = 2の場合) + // <-- half2--> + // [0] [nns] [1] [1+nns] + // |0----|---->|1----|---->| <<< x0にかかる重み + // |2----|---->|3----|---->| <<< x1にかかる重み + //まず、各スレッドでweight_loop*2分だけ重みをwにロードし、 + //これをshuffleで全スレッドにbroadcastして使用するようにする + __half2 w; + if (thIdX < weight_loop*2) w = ptr_w[thIdX]; + ptr_w += weight_loop*2; + #pragma unroll + for (int i = 0; i < weight_loop; i++) { + __half2 w0 = __shfl(w, +i); //x0にかかる重み + __half2 w1 = __shfl(w, weight_loop+i); //x1にかかる重み + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + sum[ithy][i] += __low2half2(s0[ithy]) * w0; //x0 * w([i], [i+nns]) + sum[ithy][i] += __high2half2(s0[ithy]) * w1; //x1 * w([i], [i+nns]) + } + } + } + } + #pragma unroll + for (int i = 0; i < weight_loop; i++, weight_offset++) { + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + //weight offsetもw([i], [i+nns])の並びになっている + sum[ithy][i] = sum[ithy][i] * weight_scale[ithy] + weight_offset[0]; + } + } +#endif //#if ENABLE_CUDA_FP16_DEVICE +} +#endif //#if ENABLE_CUDA_FP16_HOST + +template +__device__ __inline__ +void kernel_comute_network1_dot_product( + float wsum[thread_y_loop], + float vsum[thread_y_loop], + float *const ptr_src, const int ssrc_dim, + const float *const weight, + float mstd[thread_y_loop][4], + const int nnx, const int nny, const int nnxy, const int nns, + const int thIdX, const int thIdY) { + const int sweight_dim = (ENABLE_DP1_WEIGHT_ARRAY_OPT) ? 
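dot_product_frame1_fp16() packs the weights for neuron [i] and neuron [i+nns] into the two halves of one __half2, so a single fused multiply-add advances both dot products at once while the pixel value is splatted into both lanes. The sketch below shows that dual accumulation in isolation; it assumes a GPU with half-precision arithmetic (compile with e.g. -arch=sm_60), and all names and sizes are illustrative.

#include <cstdio>
#include <cuda_fp16.h>
#include <cuda_runtime.h>

// Two dot products in one accumulator: the low half tracks weight set A, the
// high half weight set B, mirroring how w[i] and w[i+nns] share one __half2.
__global__ void dual_dot(const float *x, const float *wA, const float *wB, int n, float *out) {
    __half2 sum = __float2half2_rn(0.0f);
    for (int i = 0; i < n; i++) {
        const __half2 w  = __halves2half2(__float2half(wA[i]), __float2half(wB[i]));
        const __half2 xi = __float2half2_rn(x[i]);  // splat x[i] into both halves
        sum = __hfma2(xi, w, sum);                  // low += x*wA, high += x*wB
    }
    out[0] = __low2float(sum);
    out[1] = __high2float(sum);
}

int main() {
    const int n = 4;
    const float hx[n]  = { 1.0f, 2.0f, 3.0f, 4.0f };
    const float hwA[n] = { 1.0f, 1.0f, 1.0f, 1.0f };
    const float hwB[n] = { 2.0f, 2.0f, 2.0f, 2.0f };
    float *dx, *dwA, *dwB, *dout;
    cudaMalloc(&dx,  sizeof(hx));
    cudaMalloc(&dwA, sizeof(hwA));
    cudaMalloc(&dwB, sizeof(hwB));
    cudaMalloc(&dout, 2 * sizeof(float));
    cudaMemcpy(dx,  hx,  sizeof(hx),  cudaMemcpyHostToDevice);
    cudaMemcpy(dwA, hwA, sizeof(hwA), cudaMemcpyHostToDevice);
    cudaMemcpy(dwB, hwB, sizeof(hwB), cudaMemcpyHostToDevice);
    dual_dot<<<1, 1>>>(dx, dwA, dwB, n, dout);
    float hout[2];
    cudaMemcpy(hout, dout, sizeof(hout), cudaMemcpyDeviceToHost);
    printf("dotA=%.1f dotB=%.1f\n", hout[0], hout[1]); // 10.0 and 20.0
    cudaFree(dx); cudaFree(dwA); cudaFree(dwB); cudaFree(dout);
    return 0;
}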
2 * nnxy : nnxy; + for (int iw = 0; iw < nns; iw += weight_loop) { + float sum0[thread_y_loop][weight_loop]; //レジスタにのることを期待する + dot_product0(sum0, ptr_src, ssrc_dim, weight+ (iw)*nnxy, sweight_dim, weight + (nns*2)*nnxy + iw, nnx, nny, thIdX, thIdY, 1, mstd); + + float sum1[thread_y_loop][weight_loop]; //レジスタにのることを期待する + dot_product0(sum1, ptr_src, ssrc_dim, weight+ (nns+iw)*nnxy, sweight_dim, weight + (nns*2)*nnxy+nns + iw, nnx, nny, thIdX, thIdY, 1, mstd); + + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int ithw = 0; ithw < weight_loop; ithw++) { + float ret0 = exp_(sum0[ithy][ithw]); + float ret1 = sum1[ithy][ithw]; + wsum[ithy] += ret0; + vsum[ithy] += ret0 * (ret1 * __frcp_rn(1.0f + fabs(ret1))); + } + } + } +} + +#if ENABLE_CUDA_FP16_HOST +template +__device__ __inline__ +void kernel_comute_network1_dot_product( + float wsum[thread_y_loop], + float vsum[thread_y_loop], + __half2 *const ptr_src, const int ssrc_dim, + const __half2 *const weight, + float mstd[thread_y_loop][4], + const int nnx, const int nny, const int nnxy, const int nns, + const int thIdX, const int thIdY) { + //未実装 + assert(false); +} +#endif //#if ENABLE_CUDA_FP16_HOST + +template +__device__ __inline__ +void kernel_comute_network1_dot_product_opt( + float wsum[thread_y_loop], + float vsum[thread_y_loop], + float *const ptr_src, const int ssrc_dim, + const float *const weight, + float mstd[thread_y_loop][4], + const int nnx, const int nny, const int nnxy, const int nns, + const int thIdX, const int thIdY) { + //ENABLE_DP1_WEIGHT_ARRAY_OPTが有効の場合、 + //[iw]と[iw+nns]の重みが隣り合って並んでいるので、sweight_dimは2倍 + const int sweight_dim = (ENABLE_DP1_WEIGHT_ARRAY_OPT) ? 2 * nnxy : nnxy; + for (int iw = 0; iw < nns; iw += weight_loop) { + float sum0[thread_y_loop][weight_loop]; //レジスタにのることを期待する + float sum1[thread_y_loop][weight_loop]; //レジスタにのることを期待する + // 重み(nns)方向に、weight_loop分のdotproduct + // sum0[i] <- iw - iw+weight_loop + // sum1[i] <- iw+nns - iw+weight_loop+nns + dot_product_frame1_fp32( + sum0, sum1, ptr_src, ssrc_dim, weight+iw*sweight_dim, sweight_dim, weight + (nns*2)*nnxy + iw*2, nnx, nny, nns, thIdX, thIdY, mstd); + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int ithw = 0; ithw < weight_loop; ithw++) { + float ret0 = exp_(sum0[ithy][ithw]); // iw - iw+weight_loop の計算結果 + float ret1 = sum1[ithy][ithw]; // iw+nns - iw+weight_loop+nns の計算結果 + wsum[ithy] += ret0; + vsum[ithy] += ret0 * (ret1 * __frcp_rn(1.0f + fabs(ret1))); + } + } + } +} + +#if ENABLE_CUDA_FP16_HOST +template +__device__ __inline__ +void kernel_comute_network1_dot_product_opt( + float wsum[thread_y_loop], + float vsum[thread_y_loop], + __half2 *const ptr_src, const int ssrc_dim, + const __half2 *const weight, + float mstd[thread_y_loop][4], + const int nnx, const int nny, const int nnxy, const int nns, + const int thIdX, const int thIdY) { +#if ENABLE_CUDA_FP16_DEVICE + //[iw]と[iw+nns]の重みが隣り合って_half2に入るので、half2としてはnnxyのまま + const int sweight_dim = nnxy; + for (int iw = 0; iw < nns; iw += weight_loop) { + __half2 sum[thread_y_loop][weight_loop]; //レジスタにのることを期待する + // 重み(nns)方向に、weight_loop分のdotproduct + //ひとつの__half2に[iw, iw+nns]の両方の内積の結果が入っている + // sum0[i](iw, iw+nns) + __half2 weight_scale[thread_y_loop]; + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + weight_scale[ithy] = __float2half2_rn(mstd[ithy][2]); + } + dot_product_frame1_fp16( + sum, ptr_src, ssrc_dim, weight+iw*sweight_dim, sweight_dim, weight + nns*nnxy + iw, nnx, nny, nns, 
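kernel_comute_network1_dot_product() turns each neuron pair into a weight exp_(sum0) and a value squashed by x/(1+|x|), accumulating them into wsum/vsum; a little further down the kernel the pixel becomes mean + 5*vsum/wsum*stddev whenever wsum exceeds the 1e-10 minimum. Here is a host-side sketch of that aggregation, using std::exp in place of the kernel's exp_() approximation; predict() and the sample inputs are illustrative.

#include <cmath>
#include <cstdio>

// elliott() matches the squashing used in the kernel: x / (1 + |x|).
static float elliott(float x) { return x / (1.0f + std::fabs(x)); }

// Combine the two dot products per neuron into the final prediction:
// exp(s0) acts as a softmax-style weight, elliott(s1) as the value, and the
// weighted average is rescaled by the local stddev and shifted by the mean.
static float predict(const float *s0, const float *s1, int nns, float mean, float stddev) {
    float wsum = 0.0f, vsum = 0.0f;
    for (int i = 0; i < nns; i++) {
        const float w = std::exp(s0[i]);  // the kernel uses its exp_() approximation here
        wsum += w;
        vsum += w * elliott(s1[i]);
    }
    float out = mean;
    if (wsum > 1e-10f)                    // min_weight_sum guard
        out += (5.0f * vsum / wsum) * stddev;
    return out;
}

int main() {
    const float s0[2] = { 0.0f, 1.0f };
    const float s1[2] = { 0.5f, -0.25f };
    printf("%.3f\n", predict(s0, s1, 2, 128.0f, 10.0f));
    return 0;
}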
thIdX, thIdY, weight_scale); + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + #pragma unroll + for (int ithw = 0; ithw < weight_loop; ithw++) { + //half2使用時には、オーバーフローを避けるため、textureからのロード時に256倍していないので、ここでfloatにしてから補正する + float ret0 = exp_(__low2float(sum[ithy][ithw]) * 256.0f); + float ret1 = __high2float(sum[ithy][ithw]) * 256.0f; + wsum[ithy] += ret0; + vsum[ithy] += ret0 * (ret1 * __frcp_rn(1.0f + fabs(ret1))); + } + } + } +#endif //#if ENABLE_CUDA_FP16_DEVICE +} +#endif //#if ENABLE_CUDA_FP16_HOST + + +template +__global__ void kernel_comute_network1( + uint8_t *__restrict__ pDst, //top field / bottom field は考慮済みとする + const int dstPitch, //1行おきなので通常の2倍の値が入っている + const int dstWidth, + const int dstHeight, + cudaTextureObject_t texSrc, //有効フィールドのみのテクスチャ(縦解像度は半分) + const TypeCalc *__restrict__ weight10, + const TypeCalc *__restrict__ weight11, + const int nns, // len = nns*2 + const int quals, + const NnediTargetField targetField, + const VppNnediPreScreen prescreen +) { + const int thIdX = threadIdx.x; //(サイズ: NNEDI_BLOCK_X) + const int thIdY = threadIdx.y; //(サイズ: NNEDI_BLOCK_Y) + const int gIdX = blockIdx.x * NNEDI_BLOCK_X /*blockDim.x*/ + thIdX; + const int gIdY =(blockIdx.y * NNEDI_BLOCK_Y /*blockDim.y*/ + thIdY) * thread_y_loop; //フィールド単位 + const int nnxy = nnx * nny; + + //sharedメモリのサイズと使途 + //1.src: (NNEDI_BLOCK_X + nnx) * (NNEDI_BLOCK_Y * thread_y_loop + nny) * sizeof(ptr_src[0]) + //2.tmp: (nny + NNEDI_BLOCK_Y * thread_y_loop) * NNEDI_BLOCK_X * 2 * sizeof(ptr_temp[0]) + alignas(128) extern __shared__ char shared[]; + TypeCalc *const ptr_src = (TypeCalc *)shared; + const int ssrc_dim = NNEDI_BLOCK_X + nnx; + + //input(texture) -> shared + //textureからpixel情報をsharedメモリにロードする + //範囲外の折り返し等はtextureでやってくれるのでここでは無視 + const int nnx_2_m1 = nnx / 2 - 1; + const int nny_2 = nny / 2 - (targetField == NNEDI_GEN_FIELD_BOTTOM ? 1 : 0); + load_texSrc<1, thread_y_loop, false, float/*実際には使わないのでなんでもいい*/, bit_depth>( + ptr_src, ssrc_dim, nullptr, 0, texSrc, nnx, nny, nnx_2_m1, nny_2, thIdX, thIdY); + __syncthreads(); + + TypeCalc *const ptr_temp = (TypeCalc *)((char *)shared + + (NNEDI_BLOCK_X + nnx) * (NNEDI_BLOCK_Y * thread_y_loop + nny) * sizeof(ptr_src[0])); + + float mstd[thread_y_loop][4]; + kernel_comute_network1_calc_scale(mstd, ptr_temp, ptr_src, ssrc_dim, nnx, nny, nnxy, thIdX, thIdY, thread_y_loop); + + uint8_t *const ptr_dst_base = (uint8_t *)pDst + gIdY * dstPitch + gIdX * sizeof(TypePixel); + uint32_t flag_sum = 0xffffffff; //処理するかどうかのフラグ + if (((uint32_t)prescreen & (uint32_t)VPP_NNEDI_PRE_SCREEN_MODE) != 0) { //prescreenをやっていれば確認する + flag_sum = 0x00; + uint8_t *ptr_dst = ptr_dst_base; + //自分のスレッドの担当するpixelについて調査する + //処理対象となっていたらビットを立てる + //thread_y_loopについて、下のビットから使っていく + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++, ptr_dst += dstPitch) { + uint32_t flag = 0x00; + if ((gIdY + ithy) * 2 < dstHeight) { //縦方向は1行おきの処理となるので "*2" + flag = (((TypePixel *)ptr_dst)[0] == prescreen_flag()) ? 
0x01 << ithy : 0x00; + } + flag_sum |= flag; + //ビットを使い切らないようにチェック + static_assert(thread_y_loop <= sizeof(flag_sum) * 8, "thread_y_loop <= sizeof(flag_sum) * 8"); + } + } + +/* + |<-------- nns*2 --------->| + WEIGHT_LOOP + |<-->| ---> 繰り返し処理 + --- |--------------------------| + | | + | | + | | + nnxy | | + | | + | | + | | + --- |--------------------------| + + |<---- nnxy --->| + --- |------------------| |----| +NNEDI_BLOCK_X | | | | <-- 各スレッドはこの出力の1pixel分(縦方向)をそれぞれ担当 +*NNEDI_BLOCK_Y | | | | 横: WEIGHT_LOOP + --- | | |----| 縦: NNEDI_BLOCK_X * NNEDI_BLOCK_Y + | | + | | + pixels | | + | | | + | | | +  ↓ | | + +*/ + //weightの先頭のポインタ + if (__any(flag_sum)) { //どのpixelも処理する必要がなければ、スキップする + for (int iquality = 0; iquality < quals; iquality++) { + const TypeCalc *const weight = (iquality) ? weight11 : weight10; + float wsum[thread_y_loop], vsum[thread_y_loop]; + #pragma unroll + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + wsum[ithy] = vsum[ithy] = 0.0f; + } + if (ENABLE_DP1_WEIGHT_LOOP_UNROLL) { + kernel_comute_network1_dot_product_opt( + wsum, vsum, ptr_src, ssrc_dim, weight, mstd, nnx, nny, nnxy, nns, thIdX, thIdY); + } else { + kernel_comute_network1_dot_product( + wsum, vsum, ptr_src, ssrc_dim, weight, mstd, nnx, nny, nnxy, nns, thIdX, thIdY); + } + + const float min_weight_sum = 1e-10f; + for (int ithy = 0; ithy < thread_y_loop; ithy++) { + if (wsum[ithy] > min_weight_sum) { + mstd[ithy][3] += ((5.0f * vsum[ithy]) * __frcp_rn(wsum[ithy])) * mstd[ithy][1]; + } + mstd[ithy][3] += mstd[ithy][0]; + } + } + + if (gIdX < dstWidth) { + const float scale = (1< 1) ? 0.5f : 1.0f); + uint8_t *ptr_dst = (uint8_t *)ptr_dst_base; + for (int ithy = 0; ithy < thread_y_loop; ithy++, ptr_dst += dstPitch) { + if ((((uint32_t)prescreen & (uint32_t)VPP_NNEDI_PRE_SCREEN_BLOCK) || (flag_sum & (1< +cudaError_t setTexFieldNnedi(cudaTextureObject_t& texSrc, const RGYFrameInfo *pFrame, const NnediTargetField targetField) { + texSrc = 0; + + cudaResourceDesc resDescSrc; + memset(&resDescSrc, 0, sizeof(resDescSrc)); + resDescSrc.resType = cudaResourceTypePitch2D; + resDescSrc.res.pitch2D.desc = cudaCreateChannelDesc(); + resDescSrc.res.pitch2D.pitchInBytes = pFrame->pitch * 2; //1行おきなので通常の2倍 + resDescSrc.res.pitch2D.width = pFrame->width; + resDescSrc.res.pitch2D.height = pFrame->height / 2; //フィールドなので半分 + resDescSrc.res.pitch2D.devPtr = (uint8_t *)pFrame->ptr + + (pFrame->pitch * ((targetField == NNEDI_GEN_FIELD_TOP) ? 
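The flag_sum bookkeeping above lets a warp skip the whole expensive network when none of its pixels were left unmarked by the prescreener: each lane contributes one bit per output row and __any() votes across the warp. Below is a small sketch of that warp-vote early-out, written with the current __any_sync in place of the pre-Volta __any; needs_work, the grid shape and the heavy-path stand-in are made up.

#include <cstdio>
#include <cuda_runtime.h>

// Warp-vote early-out: every lane reports whether it still has work, and the
// heavy path only runs if at least one lane in the warp voted yes.
__global__ void vote_skip(const int *needs_work, float *out, int n) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int flag = (idx < n) ? needs_work[idx] : 0;
    if (__any_sync(0xffffffffu, flag)) {
        if (idx < n && flag) {
            out[idx] = (float)idx * 0.5f;  // stand-in for the expensive network
        }
    }
    // if no lane had work, the whole warp skips the block above
}

int main() {
    const int n = 64;
    int h_flags[n];
    for (int i = 0; i < n; i++) h_flags[i] = (i % 7 == 0) ? 1 : 0;
    int *d_flags = nullptr;
    float *d_out = nullptr;
    cudaMalloc(&d_flags, n * sizeof(int));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemset(d_out, 0, n * sizeof(float));
    cudaMemcpy(d_flags, h_flags, n * sizeof(int), cudaMemcpyHostToDevice);
    vote_skip<<<2, 32>>>(d_flags, d_out, n);  // one warp per block
    float h_out[n];
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("out[7]=%.1f out[8]=%.1f\n", h_out[7], h_out[8]); // 3.5 and 0.0
    cudaFree(d_flags);
    cudaFree(d_out);
    return 0;
}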
1 : 0)); //有効なほうのフィールドを選択 + + cudaTextureDesc texDescSrc; + memset(&texDescSrc, 0, sizeof(texDescSrc)); + texDescSrc.addressMode[0] = cudaAddressModeWrap; + texDescSrc.addressMode[1] = cudaAddressModeWrap; + texDescSrc.filterMode = cudaFilterModePoint; + texDescSrc.readMode = cudaReadModeNormalizedFloat; + texDescSrc.normalizedCoords = 0; + + return cudaCreateTextureObject(&texSrc, &resDescSrc, &texDescSrc, nullptr); +} + +template +cudaError_t nnedi_compute_network_0(RGYFrameInfo *pOutputPlane, + cudaTextureObject_t texSrc, + const TypeCalc *weight0, + const VppNnediPreScreen pre_screen, + const NnediTargetField targetField, + cudaStream_t stream +) { + dim3 blockSize(NNEDI_BLOCK_X, NNEDI_BLOCK_Y); + + auto cudaerr = cudaSuccess; + if ((pre_screen & VPP_NNEDI_PRE_SCREEN_MODE) == VPP_NNEDI_PRE_SCREEN_ORIGINAL) { + const int thread_y_loop_org = 2; + dim3 gridSize( + divCeil(pOutputPlane->width, blockSize.x), + divCeil(pOutputPlane->height / 2, blockSize.y * thread_y_loop_org)); + kernel_compute_network0<<>>( + (uint8_t *)pOutputPlane->ptr + pOutputPlane->pitch * (targetField == NNEDI_GEN_FIELD_TOP ? 0 : 1), //生成するほうのフィールドを選択 + pOutputPlane->pitch * 2, //1行おきなので通常の2倍 + pOutputPlane->width, + pOutputPlane->height, + texSrc, //有効フィールドのみのテクスチャ(縦解像度は半分) + weight0, targetField); + cudaerr = cudaGetLastError(); + } else if ((pre_screen & VPP_NNEDI_PRE_SCREEN_MODE) >= VPP_NNEDI_PRE_SCREEN_NEW) { + const int thread_y_loop_new = 2; + dim3 gridSize( + divCeil(pOutputPlane->width, blockSize.x * 4 /*4ピクセル分一度に処理する*/), + divCeil(pOutputPlane->height / 2, blockSize.y * thread_y_loop_new)); + kernel_compute_network0<<>>( + (uint8_t *)pOutputPlane->ptr + pOutputPlane->pitch * (targetField == NNEDI_GEN_FIELD_TOP ? 0 : 1), //生成するほうのフィールドを選択 + pOutputPlane->pitch * 2, //1行おきなので通常の2倍 + pOutputPlane->width, + pOutputPlane->height, + texSrc, //有効フィールドのみのテクスチャ(縦解像度は半分) + weight0, targetField); + cudaerr = cudaGetLastError(); + } else { + cudaerr = setPlaneFieldAsync(pOutputPlane, -1, targetField == NNEDI_GEN_FIELD_TOP /* 生成するほうのフィールドを選択 */, stream); + } + if (cudaerr != cudaSuccess) { + return cudaerr; + } + return cudaerr; +} + +template +cudaError_t nnedi_compute_network_1( + RGYFrameInfo *pOutputFrame, + cudaTextureObject_t texSrc, + const TypeCalc *weight10, + const TypeCalc *weight11, + const NnediTargetField targetField, + const VppNnediNSize nsize, + const int nns, + const VppNnediQuality quality, + const VppNnediPreScreen pre_screen, + cudaStream_t stream +) { + //スレッド内で複数の出力を同時に計算する + static const int THREAD_Y_LOOP = 4; + //重み(nns)方向のループアンロール数 + //やりすぎると使用レジスタ数が増え、かえって遅くなる + static_assert(WEIGHT_LOOP_1 <= WARP_SIZE, "WEIGHT_LOOP < WARP_SIZE"); + + dim3 blockSize(NNEDI_BLOCK_X, NNEDI_BLOCK_Y); + dim3 gridSize( + divCeil(pOutputFrame->width, blockSize.x), + divCeil(pOutputFrame->height / 2, blockSize.y * THREAD_Y_LOOP)); + + const int nnx = NVEncFilterNnedi::sizeNX[nsize]; + const int nny = NVEncFilterNnedi::sizeNY[nsize]; + const int shared_mem_size = + (NNEDI_BLOCK_X + nnx) * (NNEDI_BLOCK_Y * THREAD_Y_LOOP + nny) * sizeof(TypeCalc) + //src + (NNEDI_BLOCK_Y * THREAD_Y_LOOP + nny) * NNEDI_BLOCK_X * 2 * sizeof(TypeCalc); //temp + + switch (nsize) { + case VPP_NNEDI_NSIZE_8x6: + kernel_comute_network1<<>>( + (uint8_t *)pOutputFrame->ptr + pOutputFrame->pitch * ((targetField == NNEDI_GEN_FIELD_TOP) ? 
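nnedi_compute_network_1() sizes its dynamic shared memory as the source tile plus the temporary buffer and hands the byte count to the launch as the third <<<>>> parameter, with the stream as the fourth. The sketch below demonstrates only that launch pattern, with made-up block and tile dimensions; uses_dynamic_smem and its contents are illustrative, not the NNEDI kernel.

#include <cstdio>
#include <cuda_runtime.h>

// Kernel that consumes dynamically sized shared memory declared at launch time.
__global__ void uses_dynamic_smem(int elems, float *out) {
    extern __shared__ char smem[];
    float *buf = (float *)smem;
    for (int i = threadIdx.x; i < elems; i += blockDim.x) {
        buf[i] = (float)i;
    }
    __syncthreads();
    if (threadIdx.x == 0) out[blockIdx.x] = buf[elems - 1];
}

int main() {
    // made-up tile dimensions, laid out like "src" + "temp" in the real launcher
    const int BLOCK_X = 32, BLOCK_Y = 8, Y_LOOP = 4, nnx = 16, nny = 6;
    const int src_elems  = (BLOCK_X + nnx) * (BLOCK_Y * Y_LOOP + nny);
    const int temp_elems = (BLOCK_Y * Y_LOOP + nny) * BLOCK_X * 2;
    const size_t smem_bytes = (size_t)(src_elems + temp_elems) * sizeof(float);

    float *d_out = nullptr;
    cudaMalloc(&d_out, sizeof(float));
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    uses_dynamic_smem<<<1, BLOCK_X * BLOCK_Y, smem_bytes, stream>>>(src_elems + temp_elems, d_out);
    cudaStreamSynchronize(stream);
    float h_out = 0.0f;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("last element = %.0f, dynamic smem = %zu bytes\n", h_out, smem_bytes);
    cudaStreamDestroy(stream);
    cudaFree(d_out);
    return 0;
}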
0 : 1), //生成するほうのフィールドを選択 + pOutputFrame->pitch * 2, //1行おきなので通常の2倍 + pOutputFrame->width, + pOutputFrame->height, + texSrc, + weight10, weight11, + nns, (int)quality, targetField, pre_screen); + break; + case VPP_NNEDI_NSIZE_16x6: + kernel_comute_network1<<>>( + (uint8_t *)pOutputFrame->ptr + pOutputFrame->pitch * ((targetField == NNEDI_GEN_FIELD_TOP) ? 0 : 1), //生成するほうのフィールドを選択 + pOutputFrame->pitch * 2, //1行おきなので通常の2倍 + pOutputFrame->width, + pOutputFrame->height, + texSrc, + weight10, weight11, + nns, (int)quality, targetField, pre_screen); + break; + case VPP_NNEDI_NSIZE_32x6: + kernel_comute_network1<<>>( + (uint8_t *)pOutputFrame->ptr + pOutputFrame->pitch * ((targetField == NNEDI_GEN_FIELD_TOP) ? 0 : 1), //生成するほうのフィールドを選択 + pOutputFrame->pitch * 2, //1行おきなので通常の2倍 + pOutputFrame->width, + pOutputFrame->height, + texSrc, + weight10, weight11, + nns, (int)quality, targetField, pre_screen); + break; + case VPP_NNEDI_NSIZE_48x6: + kernel_comute_network1<<>>( + (uint8_t *)pOutputFrame->ptr + pOutputFrame->pitch * ((targetField == NNEDI_GEN_FIELD_TOP) ? 0 : 1), //生成するほうのフィールドを選択 + pOutputFrame->pitch * 2, //1行おきなので通常の2倍 + pOutputFrame->width, + pOutputFrame->height, + texSrc, + weight10, weight11, + nns, (int)quality, targetField, pre_screen); + break; + case VPP_NNEDI_NSIZE_8x4: + kernel_comute_network1<<>>( + (uint8_t *)pOutputFrame->ptr + pOutputFrame->pitch * ((targetField == NNEDI_GEN_FIELD_TOP) ? 0 : 1), //生成するほうのフィールドを選択 + pOutputFrame->pitch * 2, //1行おきなので通常の2倍 + pOutputFrame->width, + pOutputFrame->height, + texSrc, + weight10, weight11, + nns, (int)quality, targetField, pre_screen); + break; + case VPP_NNEDI_NSIZE_16x4: + kernel_comute_network1<<>>( + (uint8_t *)pOutputFrame->ptr + pOutputFrame->pitch * ((targetField == NNEDI_GEN_FIELD_TOP) ? 0 : 1), //生成するほうのフィールドを選択 + pOutputFrame->pitch * 2, //1行おきなので通常の2倍 + pOutputFrame->width, + pOutputFrame->height, + texSrc, + weight10, weight11, + nns, (int)quality, targetField, pre_screen); + break; + case VPP_NNEDI_NSIZE_32x4: + kernel_comute_network1<<>>( + (uint8_t *)pOutputFrame->ptr + pOutputFrame->pitch * ((targetField == NNEDI_GEN_FIELD_TOP) ? 
0 : 1), //生成するほうのフィールドを選択 + pOutputFrame->pitch * 2, //1行おきなので通常の2倍 + pOutputFrame->width, + pOutputFrame->height, + texSrc, + weight10, weight11, + nns, (int)quality, targetField, pre_screen); + break; + default: + return cudaErrorAssert; + } + auto cudaerr = cudaGetLastError(); + if (cudaerr != cudaSuccess) { + return cudaerr; + } + return cudaerr; +} + +template +cudaError_t proc_plane( + RGYFrameInfo *pOutputPlane, + const RGYFrameInfo *pInputPlane, + const std::shared_ptr pNnediParam, + const NnediTargetField targetField, + const TypeCalc *weight0, + const TypeCalc *weight10, + const TypeCalc *weight11, + cudaStream_t stream +) { + // 有効なほうのフィールドをコピー + auto cudaerr = copyPlaneFieldAsync(pOutputPlane, pInputPlane, targetField != NNEDI_GEN_FIELD_TOP, targetField != NNEDI_GEN_FIELD_TOP, stream); + if (cudaerr != cudaSuccess) { + return cudaerr; + } + + cudaTextureObject_t texSrc = 0; + cudaerr = setTexFieldNnedi(texSrc, pInputPlane, targetField); + if (cudaerr != cudaSuccess) { + return cudaerr; + } + cudaerr = nnedi_compute_network_0(pOutputPlane, + texSrc, + weight0, + (pNnediParam->nnedi.pre_screen & VPP_NNEDI_PRE_SCREEN_MODE), + targetField, + stream); + if (cudaerr != cudaSuccess) { + return cudaerr; + } + if (!(pNnediParam->nnedi.pre_screen & VPP_NNEDI_PRE_SCREEN_ONLY)) { + cudaerr = nnedi_compute_network_1( + pOutputPlane, + texSrc, + weight10, + weight11, + targetField, + pNnediParam->nnedi.nsize, + pNnediParam->nnedi.nns, + pNnediParam->nnedi.quality, + (pNnediParam->nnedi.pre_screen & (VPP_NNEDI_PRE_SCREEN_MODE | VPP_NNEDI_PRE_SCREEN_BLOCK)), + stream); + if (cudaerr != cudaSuccess) { + return cudaerr; + } + } + cudaerr = cudaDestroyTextureObject(texSrc); + if (cudaerr != cudaSuccess) { + return cudaerr; + } + return cudaerr; +} + +template +cudaError_t proc_frame(RGYFrameInfo *pOutputFrame, + const RGYFrameInfo *pInputFrame, + const std::shared_ptr pNnediParam, + const NnediTargetField targetField, + const void *weight0, + const void *weight10, + const void *weight11, + cudaStream_t stream +) { + static_assert(sizeof(TypePixel4) == sizeof(TypePixel) * 4, "sizeof(TypePixel4) == sizeof(TypePixel) * 4"); + cudaError_t cudaerr = cudaSuccess; + const auto planeInputY = getPlane(pInputFrame, RGY_PLANE_Y); + const auto planeInputU = getPlane(pInputFrame, RGY_PLANE_U); + const auto planeInputV = getPlane(pInputFrame, RGY_PLANE_V); + auto planeOutputY = getPlane(pOutputFrame, RGY_PLANE_Y); + auto planeOutputU = getPlane(pOutputFrame, RGY_PLANE_U); + auto planeOutputV = getPlane(pOutputFrame, RGY_PLANE_V); + + cudaerr = proc_plane(&planeOutputY, &planeInputY, pNnediParam, targetField, (const TypeCalc *)weight0, (const TypeCalc *)weight10, (const TypeCalc *)weight11, stream); + if (cudaerr != cudaSuccess) { + return cudaerr; + } + cudaerr = proc_plane(&planeOutputU, &planeInputU, pNnediParam, targetField, (const TypeCalc *)weight0, (const TypeCalc *)weight10, (const TypeCalc *)weight11, stream); + if (cudaerr != cudaSuccess) { + return cudaerr; + } + cudaerr = proc_plane(&planeOutputV, &planeInputV, pNnediParam, targetField, (const TypeCalc *)weight0, (const TypeCalc *)weight10, (const TypeCalc *)weight11, stream); + if (cudaerr != cudaSuccess) { + return cudaerr; + } + return cudaerr; +} + +const int NVEncFilterNnedi::weight_loop_1 = 4; +const int NVEncFilterNnedi::sizeNX[] = { 8, 16, 32, 48, 8, 16, 32 }; +const int NVEncFilterNnedi::sizeNY[] = { 6, 6, 6, 6, 4, 4, 4 }; +const int NVEncFilterNnedi::sizeNN[] = { 16, 32, 64, 128, 256 }; + +NVEncFilterNnedi::NVEncFilterNnedi() : 
m_weight0(), m_weight1() { + m_sFilterName = _T("nnedi"); +} + +NVEncFilterNnedi::~NVEncFilterNnedi() { + close(); +} + +RGY_ERR NVEncFilterNnedi::checkParam(const std::shared_ptr pNnediParam) { + if (pNnediParam->frameOut.height <= 0 || pNnediParam->frameOut.width <= 0) { + AddMessage(RGY_LOG_ERROR, _T("Invalid frame size.\n")); + return RGY_ERR_INVALID_PARAM; + } + if (pNnediParam->nnedi.field <= VPP_NNEDI_FIELD_UNKNOWN || VPP_NNEDI_FIELD_MAX <= pNnediParam->nnedi.field) { + AddMessage(RGY_LOG_ERROR, _T("invalid value for param \"field\": %d\n"), pNnediParam->nnedi.field); + return RGY_ERR_INVALID_PARAM; + } + if (pNnediParam->nnedi.nns < 16 || 256 < pNnediParam->nnedi.nns) { + pNnediParam->nnedi.nns = clamp(pNnediParam->nnedi.nns, 16, 256); + AddMessage(RGY_LOG_WARN, _T("nns should be in range of %d - %d.\n"), 16, 256); + } + if (pNnediParam->nnedi.nsize <= VPP_NNEDI_NSIZE_UNKNOWN || VPP_NNEDI_NSIZE_MAX <= pNnediParam->nnedi.nsize) { + AddMessage(RGY_LOG_ERROR, _T("invalid value for param \"nsize\": %d\n"), pNnediParam->nnedi.nsize); + return RGY_ERR_INVALID_PARAM; + } + if (pNnediParam->nnedi.quality <= VPP_NNEDI_QUALITY_UNKNOWN || VPP_NNEDI_QUALITY_MAX <= pNnediParam->nnedi.quality) { + AddMessage(RGY_LOG_ERROR, _T("invalid value for param \"quality\": %d\n"), pNnediParam->nnedi.quality); + return RGY_ERR_INVALID_PARAM; + } + if (VPP_NNEDI_PRE_SCREEN_MAX <= pNnediParam->nnedi.pre_screen) { + AddMessage(RGY_LOG_ERROR, _T("invalid value for param \"pre_screen\": %d\n"), pNnediParam->nnedi.pre_screen); + return RGY_ERR_INVALID_PARAM; + } + if (pNnediParam->nnedi.precision < VPP_FP_PRECISION_UNKNOWN || VPP_FP_PRECISION_MAX <= pNnediParam->nnedi.precision) { + AddMessage(RGY_LOG_ERROR, _T("invalid value for param \"prec\": %d\n"), pNnediParam->nnedi.precision); + return RGY_ERR_INVALID_PARAM; + } +#if !ENABLE_CUDA_FP16_HOST + if (pNnediParam->nnedi.precision == VPP_FP_PRECISION_FP16) { + AddMessage(RGY_LOG_WARN, _T("prec=fp16 not compiled in this build, switching to fp32.\n")); + pNnediParam->nnedi.precision = VPP_FP_PRECISION_FP32; + } +#endif + return RGY_ERR_NONE; +} + +shared_ptr NVEncFilterNnedi::readWeights(const tstring& weightFile, HMODULE hModule) { + shared_ptr weights; + const uint32_t expectedFileSize = 13574928u; + uint64_t weightFileSize = 0; + if (weightFile.length() == 0) { + //埋め込みデータを使用する +#if defined(_WIN32) || defined(_WIN64) + if (hModule == NULL) { + hModule = GetModuleHandle(NULL); + } +#endif + void *pDataPtr = NULL; + weightFileSize = getEmbeddedResource(&pDataPtr, _T("NNEDI_WEIGHTBIN"), _T("EXE_DATA"), hModule); + if (pDataPtr == nullptr || weightFileSize == 0) { + AddMessage(RGY_LOG_ERROR, _T("Failed to load resource \"NNEDI_WEIGHTBIN\".\n")); + } else if (expectedFileSize != weightFileSize) { + AddMessage(RGY_LOG_ERROR, _T("Weights data has unexpected size %lld [expected: %u].\n"), + (long long int)weightFileSize, expectedFileSize); + } else { + weights = shared_ptr((const float *)pDataPtr, [](const float *x) { UNREFERENCED_PARAMETER(x); return; /*何もしない*/ }); + } + } else { + if (!rgy_file_exists(weightFile)) { + AddMessage(RGY_LOG_ERROR, _T("weight file \"%s\" does not exist.\n"), weightFile.c_str()); + } else if (!rgy_get_filesize(weightFile.c_str(), &weightFileSize)) { + AddMessage(RGY_LOG_ERROR, _T("Failed to get filesize of weight file \"%s\".\n"), weightFile.c_str()); + } else if (weightFileSize != expectedFileSize) { + AddMessage(RGY_LOG_ERROR, _T("Weights file \"%s\" has unexpected file size %lld [expected: %u].\n"), + weightFile.c_str(), (long long 
int)weightFileSize, expectedFileSize); + } else { + std::ifstream fin(weightFile, std::ios::in | std::ios::binary); + if (!fin.good()) { + AddMessage(RGY_LOG_ERROR, _T("Failed to open weights file \"%s\".\n"), weightFile.c_str()); + } else { + float *buffer = new float[weightFileSize / sizeof(float)]; + if (!buffer) { + AddMessage(RGY_LOG_ERROR, _T("Failed to allocate buffer memory for \"%s\".\n"), weightFile.c_str()); + } else { + weights = shared_ptr(buffer, std::default_delete()); + if (fin.read((char *)weights.get(), weightFileSize).gcount() != (int64_t)weightFileSize) { + AddMessage(RGY_LOG_ERROR, _T("Failed to read weights file \"%s\".\n"), weightFile.c_str()); + weights.reset(); + } + } + fin.close(); + } + } + } + return weights; +} + +RGY_ERR NVEncFilterNnedi::initParams(const std::shared_ptr pNnediParam) { + auto weights = readWeights(pNnediParam->nnedi.weightfile, pNnediParam->hModule); + if (!weights) { + return RGY_ERR_INVALID_PARAM; + } + if (pNnediParam->nnedi.precision == VPP_FP_PRECISION_AUTO) { + pNnediParam->nnedi.precision = +#if ENABLE_CUDA_FP16_HOST + ((pNnediParam->compute_capability.first == 6 && pNnediParam->compute_capability.second == 0) + || pNnediParam->compute_capability.first >= 7) + ? VPP_FP_PRECISION_FP16 : VPP_FP_PRECISION_FP32; +#else + VPP_FP_PRECISION_FP32; +#endif + } + + const int weight1size = pNnediParam->nnedi.nns * 2 * (sizeNX[pNnediParam->nnedi.nsize] * sizeNY[pNnediParam->nnedi.nsize] + 1); + const int sizeofweight = (pNnediParam->nnedi.precision == VPP_FP_PRECISION_FP32) ? 4 : 2; + int weight1size_tsize = 0; + int weight1size_offset = 0; + for (int j = 0; j < (int)_countof(sizeNN); j++) { + for (int i = 0; i < (int)_countof(sizeNX); i++) { + if (i == pNnediParam->nnedi.nsize + && j == get_cx_index(list_vpp_nnedi_nns, pNnediParam->nnedi.nns)) { + weight1size_offset = weight1size_tsize; + } + weight1size_tsize += sizeNN[j] * (sizeNX[i] * sizeNY[i] + 1) * 4; + } + } + + std::vector weight0f; + weight0f.resize((((pNnediParam->nnedi.pre_screen & VPP_NNEDI_PRE_SCREEN_MODE) >= VPP_NNEDI_PRE_SCREEN_NEW) ? 
weight0sizenew : weight0size) * sizeofweight); + if (pNnediParam->nnedi.precision == VPP_FP_PRECISION_FP32) { + setWeight0((float *)weight0f.data(), weights.get(), pNnediParam); + } else { +#if ENABLE_CUDA_FP16_HOST + setWeight0<__half>((__half *)weight0f.data(), weights.get(), pNnediParam); +#endif //#if ENABLE_CUDA_FP16_HOST + } + + std::array, 2> weight1; + for (int i = 0; i < 2; i++) { + weight1[i].resize(weight1size * sizeofweight, 0); + const float *ptrW = weights.get() + weight0size + weight0sizenew * 3 + weight1size_tsize * pNnediParam->nnedi.errortype + weight1size_offset + i * weight1size; + if (pNnediParam->nnedi.precision == VPP_FP_PRECISION_FP32) { + setWeight1((float *)weight1[i].data(), ptrW, pNnediParam); + } else { +#if ENABLE_CUDA_FP16_HOST + setWeight1<__half>((__half *)weight1[i].data(), ptrW, pNnediParam); +#endif //#if ENABLE_CUDA_FP16_HOST + } + } + m_weight0 = CUMemBuf(weight0f.size()); + m_weight0.alloc(); + cudaMemcpy(m_weight0.ptr, weight0f.data(), m_weight0.nSize, cudaMemcpyHostToDevice); + for (size_t i = 0; i < weight1.size(); i++) { + m_weight1[i] = CUMemBuf(weight1[i].size()); + m_weight1[i].alloc(); + cudaMemcpy(m_weight1[i].ptr, weight1[i].data(), m_weight1[i].nSize, cudaMemcpyHostToDevice); + } + return RGY_ERR_NONE; +} + +template TypeCalc toWeight(float f); +template<> float toWeight(float f) { return f; } +#if ENABLE_CUDA_FP16_HOST +template<> __half toWeight<__half>(float f) { return __float2half_rn(f); } +#endif + +template +void NVEncFilterNnedi::setWeight0(TypeCalc *ptrDst, const float *ptrW, const std::shared_ptr pNnediParam) { + if ((pNnediParam->nnedi.pre_screen & VPP_NNEDI_PRE_SCREEN_MODE) >= VPP_NNEDI_PRE_SCREEN_NEW) { + auto index = [](int j, int k) { + return ((k >> 3) << 5) + ((j & 3) << 3) + (k & 7); + }; + + const auto ptr_w = ptrW + weight0size + weight0sizenew * ((pNnediParam->nnedi.pre_screen & VPP_NNEDI_PRE_SCREEN_MODE) - VPP_NNEDI_PRE_SCREEN_NEW); + double avg[4] = { 0.0, 0.0, 0.0, 0.0 }; + for (int j = 0; j < 4; j++) { + double sum = 0.0; + for (int k = 0; k < 64; k++) { + sum += ptr_w[index(j, k)]; + } + avg[j] = sum * (1.0 / 64.0); + } + const double halfinv = 1.0 / (((1 << 8) - 1) * 0.5); + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 64; k++) { + //ptrDst[index(j, k)] = (float)((ptr_w[index(j, k)] - avg[j]) * halfinv); + ptrDst[j*64+k] = toWeight((float)((ptr_w[index(j, k)] - avg[j]) * halfinv)); + } + } + for (int i = 0; i < 4; i++) { + ptrDst[4*64+i] = toWeight(ptr_w[4*64+i]); + } + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 4; k++) { + ptrDst[4*65+j*4+k] = toWeight(ptr_w[4*65+ j + k*4]); //転置 + } + } + for (int i = 0; i < 4; i++) { + ptrDst[4*65+4*4+i] = toWeight(ptr_w[4*65+4*4+i]); + } + //<<<<<< ここまでで通常(CPU版)の並びのデータが作成できた + + if (pNnediParam->nnedi.precision == VPP_FP_PRECISION_FP16) { + //並べ替え + std::vector tmp(ptrDst, ptrDst + weight0sizenew); + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 64; k++) { + int j2 = j / 4; + int j3 = j % 4; + ptrDst[(j2 * 64 + k) * 4 + j3] = tmp[j * 64 + k]; + } + } + for (int j = 0; j < 4; j++) { + ptrDst[64*4 + j] = tmp[64*4 + j]; + } + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 4; k++) { + int j2 = j / 4; + int j3 = j % 4; + ptrDst[65*4 + (j2 * 4 + k) * 4 + j3] = tmp[65*4 + j * 4 + k]; + } + } + for (int j = 0; j < 4; j++) { + ptrDst[65*4+4*4 + j] = tmp[65*4+4*4 + j]; + } + } + } else { + const auto ptr_w = ptrW; + double avg[4] = { 0.0, 0.0, 0.0, 0.0 }; + for (int j = 0; j < 4; j++) { + double sum = 0.0; + for (int k = 0; k < 48; k++) { + sum += ptr_w[j * 48 
+ k]; + } + avg[j] = sum * (1.0 / 48.0); + } + const double halfinv = 1.0 / (((1 << 8) - 1) * 0.5); + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 48; k++) { + ptrDst[j * 48 + k] = toWeight((float)((ptr_w[j * 48 + k] - avg[j]) * halfinv)); + } + } + for (int i = 4 * 48; i < 4*49; i++) { + ptrDst[i] = toWeight(ptr_w[i]); + } + for (int i = 4 * 49; i < 4*49 + 4*4; i++) { + ptrDst[i] = toWeight(ptr_w[i]); + } + for (int i = 4 * 49 + 4*4; i < 4*49 + 4*5; i++) { + ptrDst[i] = toWeight(ptr_w[i]); + } + for (int i = 4*49 + 4*5; i < 4*49 + 4*5+ 4*8; i++) { + ptrDst[i] = toWeight(ptr_w[i]); + } + for (int i = 4*49 + 4*5+ 4*8; i < 4*49 + 4*5+ 4*9; i++) { + ptrDst[i] = toWeight(ptr_w[i]); + } + //<<<<<< ここまでで通常(CPU版)の並びのデータが作成できた + + if (pNnediParam->nnedi.precision == VPP_FP_PRECISION_FP16) { + //並べ替え + std::vector tmp(ptrDst, ptrDst + weight0size); + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 48; k++) { + int j2 = j / 4; + int j3 = j % 4; + ptrDst[(j2 * 48 + k) * 4 + j3] = tmp[j * 48 + k]; + } + } + for (int j = 0; j < 4; j++) { + ptrDst[48*4 + j] = tmp[48*4 + j]; + } + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 4; k++) { + int j2 = j / 4; + int j3 = j % 4; + ptrDst[49*4+(j2 * 4 + k) * 4 + j3] = tmp[49*4+j * 4 + k]; + } + } + for (int j = 0; j < 4; j++) { + ptrDst[49*4+4*4 + j] = tmp[49*4+4*4 + j]; + } + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 8; k++) { + int j2 = j / 4; + int j3 = j % 4; + ptrDst[49*4+5*4 + (j2 * 8 + k) * 4 + j3] = tmp[49*4+5*4 + j * 8 + k]; + } + } + for (int j = 0; j < 4; j++) { + ptrDst[49*4+5*4+8*4 + j] = tmp[49*4+5*4+8*4 + j]; + } + } + } +} + +template +void NVEncFilterNnedi::setWeight1(TypeCalc *ptrDst, const float *ptrW, const std::shared_ptr pNnediParam) { + const int sizeNXY = sizeNX[pNnediParam->nnedi.nsize] * sizeNY[pNnediParam->nnedi.nsize]; + + std::vector mean0(pNnediParam->nnedi.nns * 2, 0.0); + for (int j = 0; j < pNnediParam->nnedi.nns * 2; j++) { + const float *ptr = ptrW + j * sizeNXY; + mean0[j] = std::accumulate(ptr, ptr + sizeNXY, 0.0) / (double)sizeNXY; + } + + const double inv_nns = 1.0 / (double)pNnediParam->nnedi.nns; + std::vector mean1(sizeNXY, 0.0); + for (int j = 0; j < pNnediParam->nnedi.nns; j++) { + for (int k = 0; k < sizeNXY; k++) { + mean1[k] += (ptrW[j * sizeNXY + k] - mean0[j]) * inv_nns; + } + } + + const float *ptr = ptrW + pNnediParam->nnedi.nns * 2 * sizeNXY; + const double mean2 = std::accumulate(ptr, ptr + pNnediParam->nnedi.nns, 0.0) * inv_nns; + + vector buf(pNnediParam->nnedi.nns * 2 * sizeNXY); + float max0 = 0.0f, max1 = 0.0f; + for (int j = 0; j < pNnediParam->nnedi.nns * 2; j++) { + for (int k = 0; k < sizeNXY; k++) { + buf[j * sizeNXY + k] = (float)(ptrW[j * sizeNXY + k] - mean0[j] - (j < pNnediParam->nnedi.nns ? mean1[k] : 0.0)); + if (j < pNnediParam->nnedi.nns) { + max0 = std::max(max0, buf[j * sizeNXY + k]); + } else { + max1 = std::max(max1, buf[j * sizeNXY + k]); + } + } + //fp16の場合、オーバーフローを避けるため途中まで0~1の範囲で計算するので、offsetの部分には1/256が必要 + float scale = (pNnediParam->nnedi.precision == VPP_FP_PRECISION_FP16) ? 1.0f / 256.0f : 1.0f; + ptrDst[pNnediParam->nnedi.nns * 2 * sizeNXY + j] = toWeight((ptrW[pNnediParam->nnedi.nns * 2 * sizeNXY + j] - (float)(j < pNnediParam->nnedi.nns ? 
mean2 : 0.0)) * scale); + } + for (int j = 0; j < pNnediParam->nnedi.nns * 2; j++) { + for (int k = 0; k < sizeNXY; k++) { + ptrDst[j * sizeNXY + k] = toWeight(buf[j * sizeNXY + k]); + } + } + //<<<<<< ここまでで通常(CPU版)の並びのデータが作成できた + +#if ENABLE_DP1_WEIGHT_ARRAY_OPT + //最適化のため、本来の並びを変更する + //[2][nns][nnxy] -> [nns/weight_loop_1][nnxy][weight_loop_1][2] + vector tmp(pNnediParam->nnedi.nns * 2 * (sizeNXY + 1)); + memcpy(tmp.data(), ptrDst, sizeof(tmp[0]) * tmp.size()); + for (int j = 0; j < pNnediParam->nnedi.nns * 2; j++) { + for (int k = 0; k < sizeNXY; k++) { + const int j1 = j / pNnediParam->nnedi.nns; + const int j2 = j % pNnediParam->nnedi.nns; + const int j3 = j2 / weight_loop_1; + const int w = j2 % weight_loop_1; + ptrDst[((j3 * sizeNXY + k) * weight_loop_1 + w) * 2 + j1] = tmp[j * sizeNXY + k]; + } + } + ptrDst += pNnediParam->nnedi.nns * 2 * sizeNXY; + auto tmp2 = tmp.data() + pNnediParam->nnedi.nns * 2 * sizeNXY; + for (int j = 0; j < pNnediParam->nnedi.nns; j++) { + ptrDst[j * 2 + 0] = tmp2[j]; + ptrDst[j * 2 + 1] = tmp2[pNnediParam->nnedi.nns + j]; + } +#endif +} + +RGY_ERR NVEncFilterNnedi::init(shared_ptr pParam, shared_ptr pPrintMes) { + RGY_ERR sts = RGY_ERR_NONE; + m_pPrintMes = pPrintMes; + auto pNnediParam = std::dynamic_pointer_cast(pParam); + if (!pNnediParam) { + AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n")); + return RGY_ERR_INVALID_PARAM; + } + //パラメータチェック + if ((sts = checkParam(pNnediParam)) != RGY_ERR_NONE) { + return sts; + } + + auto cudaerr = AllocFrameBuf(pNnediParam->frameOut, pNnediParam->nnedi.isbob() ? 2 : 1); + if (cudaerr != cudaSuccess) { + AddMessage(RGY_LOG_ERROR, _T("failed to allocate memory: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str()); + return RGY_ERR_MEMORY_ALLOC; + } + pNnediParam->frameOut.pitch = m_pFrameBuf[0]->frame.pitch; + + auto pNnediParamPrev = std::dynamic_pointer_cast(m_pParam); + if (!pNnediParamPrev + || pNnediParamPrev->nnedi != pNnediParam->nnedi) { + if ((sts = initParams(pNnediParam)) != RGY_ERR_NONE) { + return sts; + } + } + if (pNnediParam->nnedi.isbob()) { + pParam->baseFps *= 2; + m_nPathThrough &= (~(FILTER_PATHTHROUGH_TIMESTAMP)); + } + + setFilterInfo(pParam->print()); + m_pParam = pNnediParam; + return sts; +} + +tstring NVEncFilterParamNnedi::print() const { + return nnedi.print(); +} + +RGY_ERR NVEncFilterNnedi::run_filter(const RGYFrameInfo *pInputFrame, RGYFrameInfo **ppOutputFrames, int *pOutputFrameNum, cudaStream_t stream) { + RGY_ERR sts = RGY_ERR_NONE; + if (pInputFrame->ptr == nullptr) { + return sts; + } + auto pNnediParam = std::dynamic_pointer_cast(m_pParam); + if (!pNnediParam) { + AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n")); + return RGY_ERR_INVALID_PARAM; + } + + *pOutputFrameNum = 1; + if (ppOutputFrames[0] == nullptr) { + auto pOutFrame = m_pFrameBuf[m_nFrameIdx].get(); + ppOutputFrames[0] = &pOutFrame->frame; + ppOutputFrames[0]->picstruct = pInputFrame->picstruct; + m_nFrameIdx = (m_nFrameIdx + 1) % m_pFrameBuf.size(); + if (pNnediParam->nnedi.isbob()) { + pOutFrame = m_pFrameBuf[m_nFrameIdx].get(); + ppOutputFrames[1] = &pOutFrame->frame; + ppOutputFrames[1]->picstruct = pInputFrame->picstruct; + m_nFrameIdx = (m_nFrameIdx + 1) % m_pFrameBuf.size(); + *pOutputFrameNum = 2; + } + } + + const auto memcpyKind = getCudaMemcpyKind(pInputFrame->deivce_mem, ppOutputFrames[0]->deivce_mem); + if (memcpyKind != cudaMemcpyDeviceToDevice) { + AddMessage(RGY_LOG_ERROR, _T("only supported on device memory.\n")); + return RGY_ERR_UNSUPPORTED; + } + if 
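With ENABLE_DP1_WEIGHT_ARRAY_OPT, the setWeight1() loop above re-interleaves the weights from [2][nns][nnxy] to [nns/weight_loop_1][nnxy][weight_loop_1][2] so that w[i] and w[i+nns] for a group of neurons sit next to each other in memory. Here is a host-side sketch of that index transform with made-up nns/nnxy values and weight_loop = 4; the spot check verifies the adjacency the kernel relies on.

#include <cassert>
#include <cstdio>
#include <vector>

int main() {
    // illustrative sizes; the real code uses nns from the parameters,
    // nnxy = sizeNX * sizeNY and weight_loop_1 = 4
    const int nns = 8, nnxy = 6, weight_loop = 4;
    std::vector<float> src(2 * nns * nnxy), dst(2 * nns * nnxy);
    for (size_t i = 0; i < src.size(); i++) src[i] = (float)i;

    // [2][nns][nnxy] -> [nns/weight_loop][nnxy][weight_loop][2]
    for (int j = 0; j < nns * 2; j++) {
        for (int k = 0; k < nnxy; k++) {
            const int j1 = j / nns;           // 0: w[i], 1: w[i+nns]
            const int j2 = j % nns;           // neuron index
            const int j3 = j2 / weight_loop;  // group of weight_loop neurons
            const int w  = j2 % weight_loop;  // position inside the group
            dst[((j3 * nnxy + k) * weight_loop + w) * 2 + j1] = src[j * nnxy + k];
        }
    }

    // spot check: for neuron i and coefficient k, w[i] and w[i+nns] are adjacent
    const int i = 5, k = 3;
    const int base = (((i / weight_loop) * nnxy + k) * weight_loop + (i % weight_loop)) * 2;
    assert(dst[base + 0] == src[i * nnxy + k]);
    assert(dst[base + 1] == src[(i + nns) * nnxy + k]);
    printf("w[%d] and w[%d + nns] land at offsets %d and %d\n", i, i, base, base + 1);
    return 0;
}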
(m_pParam->frameOut.csp != m_pParam->frameIn.csp) { + AddMessage(RGY_LOG_ERROR, _T("csp does not match.\n")); + return RGY_ERR_UNSUPPORTED; + } + + NnediTargetField targetField = NNEDI_GEN_FIELD_UNKNOWN; + if ( pNnediParam->nnedi.field == VPP_NNEDI_FIELD_USE_AUTO + || pNnediParam->nnedi.field == VPP_NNEDI_FIELD_BOB_AUTO) { + if ((pInputFrame->picstruct & RGY_PICSTRUCT_INTERLACED) == 0) { + copyFrameAsync(ppOutputFrames[0], pInputFrame, stream); + return RGY_ERR_NONE; + } else if ((pInputFrame->picstruct & RGY_PICSTRUCT_FRAME_TFF) == RGY_PICSTRUCT_FRAME_TFF) { + targetField = NNEDI_GEN_FIELD_BOTTOM; + } else if ((pInputFrame->picstruct & RGY_PICSTRUCT_FRAME_BFF) == RGY_PICSTRUCT_FRAME_BFF) { + targetField = NNEDI_GEN_FIELD_TOP; + } + } else if (pNnediParam->nnedi.field == VPP_NNEDI_FIELD_USE_TOP + || pNnediParam->nnedi.field == VPP_NNEDI_FIELD_BOB_TOP_BOTTOM) { + targetField = NNEDI_GEN_FIELD_BOTTOM; + } else if (pNnediParam->nnedi.field == VPP_NNEDI_FIELD_USE_BOTTOM + || pNnediParam->nnedi.field == VPP_NNEDI_FIELD_BOB_BOTTOM_TOP) { + targetField = NNEDI_GEN_FIELD_TOP; + } else { + AddMessage(RGY_LOG_ERROR, _T("Not implemented yet.\n")); + return RGY_ERR_INVALID_PARAM; + } + + static const std::map)*> func_list_fp32 ={ + { RGY_CSP_YV12, proc_frame }, + { RGY_CSP_YV12_16, proc_frame }, + { RGY_CSP_YUV444, proc_frame }, + { RGY_CSP_YUV444_16, proc_frame } + }; +#if ENABLE_CUDA_FP16_HOST + static const std::map)*> func_list_fp16 ={ + { RGY_CSP_YV12, proc_frame }, + { RGY_CSP_YV12_16, proc_frame }, + { RGY_CSP_YUV444, proc_frame }, + { RGY_CSP_YUV444_16, proc_frame } + }; + const auto& func_list = (pNnediParam->nnedi.precision == VPP_FP_PRECISION_FP32) ? func_list_fp32 : func_list_fp16; +#else + const auto& func_list = func_list_fp32; +#endif + if (func_list.count(pInputFrame->csp) == 0) { + AddMessage(RGY_LOG_ERROR, _T("unsupported csp %s.\n"), RGY_CSP_NAMES[pInputFrame->csp]); + return RGY_ERR_UNSUPPORTED; + } + func_list.at(pInputFrame->csp)(ppOutputFrames[0], pInputFrame, + pNnediParam, targetField, + m_weight0.ptr, + m_weight1[0].ptr, + m_weight1[1].ptr, + stream + ); + auto cudaerr = cudaGetLastError(); + if (cudaerr != cudaSuccess) { + AddMessage(RGY_LOG_ERROR, _T("error at nnedi(%s): %s.\n"), + RGY_CSP_NAMES[pInputFrame->csp], + char_to_tstring(cudaGetErrorString(cudaerr)).c_str()); + return RGY_ERR_CUDA; + } + ppOutputFrames[0]->picstruct = RGY_PICSTRUCT_FRAME; + + if (pNnediParam->nnedi.isbob()) { + targetField = (targetField == NNEDI_GEN_FIELD_BOTTOM) ? 
NNEDI_GEN_FIELD_TOP : NNEDI_GEN_FIELD_BOTTOM; + func_list.at(pInputFrame->csp)(ppOutputFrames[1], pInputFrame, + pNnediParam, targetField, + m_weight0.ptr, + m_weight1[0].ptr, + m_weight1[1].ptr, + stream + ); + cudaerr = cudaGetLastError(); + if (cudaerr != cudaSuccess) { + AddMessage(RGY_LOG_ERROR, _T("error at nnedi(%s): %s.\n"), + RGY_CSP_NAMES[pInputFrame->csp], + char_to_tstring(cudaGetErrorString(cudaerr)).c_str()); + return RGY_ERR_CUDA; + } + ppOutputFrames[1]->picstruct = RGY_PICSTRUCT_FRAME; + ppOutputFrames[0]->timestamp = pInputFrame->timestamp; + ppOutputFrames[0]->duration = (pInputFrame->duration + 1) / 2; + ppOutputFrames[1]->timestamp = ppOutputFrames[0]->timestamp + ppOutputFrames[0]->duration; + ppOutputFrames[1]->duration = pInputFrame->duration - ppOutputFrames[0]->duration; + ppOutputFrames[1]->inputFrameId = pInputFrame->inputFrameId; + } + return sts; +} + +void NVEncFilterNnedi::close() { + m_pFrameBuf.clear(); +} diff --git a/cuda_code/NVStringsImpl_2.cu b/cuda_code/NVStringsImpl_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..d32a391c79ba33285b28fe31dba72f2bab3e14ce --- /dev/null +++ b/cuda_code/NVStringsImpl_2.cu @@ -0,0 +1,578 @@ +/* +* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "NVStrings.h" +#include "NVStringsImpl.h" +#include "custring_view.cuh" +#include "custring.cuh" +#include "unicode/unicode_flags.h" +#include "unicode/charcases.h" +#include "Timing.h" + + +struct timing_record +{ + double compute_size_times; + double operation_times; + timing_record() : compute_size_times(0.0), operation_times(0.0) {} + void add_time(double st, double ot) { compute_size_times += st; operation_times += ot; } +}; + +// +void printCudaError( cudaError_t err, const char* prefix ) +{ + if( err != cudaSuccess ) + fprintf(stderr,"%s: %s(%d):%s\n",prefix,cudaGetErrorName(err),(int)err,cudaGetErrorString(err)); +} + +// +char32_t* to_char32( const char* ca ) +{ + unsigned int size = (unsigned int)strlen(ca); + unsigned int count = custring_view::chars_in_string(ca,size); + char32_t* rtn = new char32_t[count+1]; + char32_t* optr = rtn; + const char* iptr = ca; + for( int i=0; i < size; ++i ) + { + Char oc = 0; + unsigned int cw = custring_view::char_to_Char(iptr,oc); + iptr += cw; + i += cw - 1; + *optr++ = oc; + } + rtn[count] = 0; + return rtn; +} + +// +static unsigned char* d_unicode_flags = 0; +unsigned char* get_unicode_flags() +{ + if( !d_unicode_flags ) + { + // leave this out of RMM since it is never freed + cudaMalloc(&d_unicode_flags,65536); + cudaMemcpy(d_unicode_flags,unicode_flags,65536,cudaMemcpyHostToDevice); + } + return d_unicode_flags; +} + +static unsigned short* d_charcases = 0; +unsigned short* get_charcases() +{ + if( !d_charcases ) + { + // leave this out of RMM since it is never freed + cudaMalloc(&d_charcases,65536*sizeof(unsigned short)); + cudaMemcpy(d_charcases,charcases,65536*sizeof(unsigned short),cudaMemcpyHostToDevice); + } + return d_charcases; +} + +// +NVStringsImpl::NVStringsImpl(unsigned int count) + : bufferSize(0), memoryBuffer(0), bIpcHandle(false), stream_id(0) +{ + pList = new rmm::device_vector(count,nullptr); +} + +NVStringsImpl::~NVStringsImpl() +{ + if( memoryBuffer && !bIpcHandle ) + RMM_FREE(memoryBuffer,0); + memoryBuffer = 0; + delete pList; + pList = 0; + bufferSize = 0; +} + +char* NVStringsImpl::createMemoryFor( size_t* d_lengths ) +{ + unsigned int count = (unsigned int)pList->size(); + auto execpol = rmm::exec_policy(stream_id); + size_t outsize = thrust::reduce(execpol->on(stream_id), d_lengths, d_lengths+count); + if( outsize==0 ) + return 0; // all sizes are zero + RMM_ALLOC(&memoryBuffer,outsize,0); + bufferSize = outsize; + return memoryBuffer; +} + +void NVStringsImpl::addOpTimes( const char* op, double sizeTime, double opTime ) +{ + std::string name = op; + if( mapTimes.find(name)==mapTimes.end() ) + mapTimes[name] = timing_record(); + mapTimes[name].add_time(sizeTime,opTime); +} + +void NVStringsImpl::printTimingRecords() +{ + size_t count = pList->size(); + if( !count ) + return; + for( auto itr = mapTimes.begin(); itr != mapTimes.end(); itr++ ) + { + std::string opname = itr->first; + timing_record tr = itr->second; + double otavg = (tr.operation_times / (double)count) * 1000.0; + printf("%s: ",opname.c_str()); + if( tr.compute_size_times ) + { + double ctavg = (tr.compute_size_times / (double)count) * 1000.0; + printf("avg compute size time = %g; ",ctavg); + } + printf("avg operation time = %g\n",otavg); + } +} + +// +int NVStrings_init_from_strings(NVStringsImpl* pImpl, const char** strs, unsigned int count ) +{ + cudaError_t err = cudaSuccess; + auto execpol = 
rmm::exec_policy(0); + setlocale(LC_NUMERIC, ""); + // first compute the size of each string + size_t nbytes = 0; + thrust::host_vector hoffsets(count+1,0); + //hoffsets[0] = 0; --already set by this ----^ + thrust::host_vector hlengths(count,0); + for( unsigned int idx=0; idx < count; ++idx ) + { + const char* str = strs[idx]; + size_t len = ( str ? (strlen(str)+1) : 0 ); + size_t nsz = len; // include null-terminator + if( len > 0 ) // len=0 is null, len=1 is empty string + { + hlengths[idx] = len; // just the string length + int nchars = custring_view::chars_in_string(str,(int)len-1); + nsz = custring_view::alloc_size((int)len-1,nchars); + } + nsz = ALIGN_SIZE(nsz); + nbytes += nsz; + hoffsets[idx+1] = nbytes; + } + // check if they are all null + if( nbytes==0 ) + return 0; + + // Host serialization + unsigned int cheat = 0;//sizeof(custring_view); + char* h_flatstrs = (char*)malloc(nbytes); + for( unsigned int idx = 0; idx < count; ++idx ) + memcpy(h_flatstrs + hoffsets[idx] + cheat, strs[idx], hlengths[idx]); + + // copy to device memory + char* d_flatstrs = 0; + rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0); + if( rerr == RMM_SUCCESS ) + err = cudaMemcpy(d_flatstrs, h_flatstrs, nbytes, cudaMemcpyHostToDevice); + free(h_flatstrs); // no longer needed + if( err != cudaSuccess ) + { + fprintf(stderr,"nvs-sts: alloc/copy %'lu bytes\n",nbytes); + printCudaError(err); + return (int)err; + } + + // copy offsets and lengths to device memory + rmm::device_vector offsets(hoffsets); + rmm::device_vector lengths(hlengths); + size_t* d_offsets = offsets.data().get(); + size_t* d_lengths = lengths.data().get(); + + // initialize custring objects in device memory + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){ + size_t len = d_lengths[idx]; + if( len < 1 ) + return; // null string + size_t offset = d_offsets[idx]; + char* ptr = d_flatstrs + offset; + char* str = ptr + cheat; + d_strings[idx] = custring_view::create_from(ptr,str,(int)len-1); + }); + // + err = cudaDeviceSynchronize(); + if( err!=cudaSuccess ) + { + fprintf(stderr,"nvs-sts: sync=%d copy %'u strings\n",(int)err,count); + printCudaError(err); + } + + pImpl->setMemoryBuffer(d_flatstrs,nbytes); + +#if STR_STATS + if( err==cudaSuccess ) + { + size_t memSize = nbytes + (count * sizeof(custring_view*)); + // lengths are +1 the size of the string so readjust + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_lengths] __device__ (unsigned int idx) { + size_t val = d_lengths[idx]; + val = (val ? 
val-1 : 0); + d_lengths[idx] = val; + }); + //size_t max = thrust::transform_reduce(execpol->on(0),d_dstLengths,d_dstLengths+count,thrust::identity(),0,thrust::maximum()); + size_t max = *thrust::max_element(execpol->on(0), lengths.begin(), lengths.end()); + size_t sum = thrust::reduce(execpol->on(0), lengths.begin(), lengths.end()); + size_t avg = 0; + if( count > 0 ) + avg =sum / count; + printf("nvs-sts: created %'u strings in device memory(%p) = %'lu bytes\n",count,d_flatstrs,memSize); + printf("nvs-sts: largest string is %lu bytes, average string length is %lu bytes\n",max,avg); + } +#endif + + return (int)err; +} + +// build strings from array of device pointers and sizes +int NVStrings_init_from_indexes( NVStringsImpl* pImpl, std::pair* indexes, unsigned int count, bool bdevmem, NVStrings::sorttype stype ) +{ + setlocale(LC_NUMERIC, ""); + cudaError_t err = cudaSuccess; + auto execpol = rmm::exec_policy(0); + thrust::pair* d_indexes = (thrust::pair*)indexes; + if( !bdevmem ) + { + RMM_ALLOC(&d_indexes,sizeof(std::pair)*count,0); + cudaMemcpy(d_indexes,indexes,sizeof(std::pair)*count,cudaMemcpyHostToDevice); + } + + // sort the list - helps reduce divergence + if( stype ) + { + thrust::sort(execpol->on(0), d_indexes, d_indexes + count, + [stype] __device__( thrust::pair& lhs, thrust::pair& rhs ) { + if( lhs.first==0 || rhs.first==0 ) + return rhs.first!=0; // null < non-null + int diff = 0; + if( stype & NVStrings::length ) + diff = (unsigned int)(lhs.second - rhs.second); + if( diff==0 && (stype & NVStrings::name) ) + diff = custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second); + return (diff < 0); + }); + err = cudaDeviceSynchronize(); + if( err != cudaSuccess ) + { + printCudaError(err,"nvs-idx: sorting"); + if( !bdevmem ) + RMM_FREE(d_indexes,0); + return (int)err; + } + } + + // first get the size we need to store these strings + rmm::device_vector sizes(count,0); + size_t* d_sizes = sizes.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_indexes, d_sizes] __device__ (unsigned int idx) { + const char* str = d_indexes[idx].first; + size_t bytes = d_indexes[idx].second; + if( str ) + d_sizes[idx] = ALIGN_SIZE(custring_view::alloc_size((char*)str,(int)bytes)); + }); + err = cudaDeviceSynchronize(); + if( err != cudaSuccess ) + { + printCudaError(err,"nvs-idx: computing sizes"); + if( !bdevmem ) + RMM_FREE(d_indexes,0); + return (int)err; + } + + // allocate device memory + size_t nbytes = thrust::reduce(execpol->on(0),sizes.begin(),sizes.end()); + //printf("nvs-idx: %'lu bytes\n",nbytes); + if( nbytes==0 ) + return 0; // done, all the strings were null + char* d_flatdstrs = 0; + rmmError_t rerr = RMM_ALLOC(&d_flatdstrs,nbytes,0); + if( rerr != RMM_SUCCESS ) + { + fprintf(stderr,"nvs-idx: RMM_ALLOC(%p,%lu)=%d\n", d_flatdstrs,nbytes,(int)rerr); + //printCudaError(err); + if( !bdevmem ) + RMM_FREE(d_indexes,0); + return (int)err; + } + + // build offsets array + rmm::device_vector offsets(count,0); + thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); + + // now build the strings vector + custring_view_array d_strings = pImpl->getStringsPtr(); + size_t* d_offsets = offsets.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_indexes, d_flatdstrs, d_offsets, d_sizes, d_strings] __device__(unsigned int idx){ + // add string to internal vector array + const char* str = d_indexes[idx].first; + size_t bytes = d_indexes[idx].second; + 
size_t offset = d_offsets[idx]; + char* ptr = d_flatdstrs + offset; + custring_view* dstr = 0; + if( str ) + dstr = custring_view::create_from(ptr,(char*)str,(int)bytes); + d_strings[idx] = dstr; + d_sizes[idx] = bytes; + }); + // + err = cudaDeviceSynchronize(); + if( err != cudaSuccess ) + { + fprintf(stderr,"nvs-idx: sync=%d copying %'u strings\n",(int)err,count); + printCudaError(err); + } + + pImpl->setMemoryBuffer(d_flatdstrs,nbytes); + +#ifdef STR_STATS + if( err == cudaSuccess ) + { + size_t memSize = nbytes + (count * sizeof(custring_view*)); // flat memory plus device_vector + //size_t max = thrust::transform_reduce(execpol->on(0),d_sizes,d_sizes+count,thrust::identity(),0,thrust::maximum()); + size_t max = *thrust::max_element(execpol->on(0), sizes.begin(), sizes.end()); + size_t sum = thrust::reduce(execpol->on(0), sizes.begin(), sizes.end()); + size_t avg = 0; + if( count > 0 ) + avg =sum / count; + // + printf("nvs-idx: created %'u strings in device memory(%p) = %'lu bytes\n",count,d_flatdstrs,memSize); + printf("nvs-idx: largest string is %lu bytes, average string length is %lu bytes\n",max,avg); + } +#endif + //printf("nvs-idx: processed %'u strings\n",count); + + if( !bdevmem ) + RMM_FREE(d_indexes,0); + return (int)err; +} + +// build strings from array of device pointers and sizes +int NVStrings_init_from_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls ) +{ + if( count==nulls ) + return 0; // if all are nulls then we are done + setlocale(LC_NUMERIC, ""); + cudaError_t err = cudaSuccess; + auto execpol = rmm::exec_policy(0); + + // first compute the size of each string + size_t nbytes = 0; + thrust::host_vector hoffsets(count+1,0); + thrust::host_vector hlengths(count,0); + for( unsigned int idx=0; idx < count; ++idx ) + { + int offset = offsets[idx]; + int len = offsets[idx+1] - offset; + const char* str = strs + offset; + int nchars = custring_view::chars_in_string(str,len); + int bytes = custring_view::alloc_size(len,nchars); + if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec + bytes = 0; + hlengths[idx] = len; + nbytes += ALIGN_SIZE(bytes); + hoffsets[idx+1] = nbytes; + } + if( nbytes==0 ) + return 0; // should not happen + + // serialize host memory into a new buffer + unsigned int cheat = 0;//sizeof(custring_view); + char* h_flatstrs = (char*)malloc(nbytes); + for( unsigned int idx = 0; idx < count; ++idx ) + memcpy(h_flatstrs + hoffsets[idx] + cheat, strs + offsets[idx], hlengths[idx]); + + // copy whole thing to device memory + char* d_flatstrs = 0; + rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0); + if( rerr == RMM_SUCCESS ) + err = cudaMemcpy(d_flatstrs, h_flatstrs, nbytes, cudaMemcpyHostToDevice); + free(h_flatstrs); // no longer needed + if( err != cudaSuccess ) + { + fprintf(stderr,"nvs-ofs: alloc/copy %'lu bytes\n",nbytes); + printCudaError(err); + return (int)err; + } + + // copy offsets and lengths to device memory + rmm::device_vector doffsets(hoffsets); + rmm::device_vector dlengths(hlengths); + size_t* d_offsets = doffsets.data().get(); + size_t* d_lengths = dlengths.data().get(); + + // initialize custring objects in device memory + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){ + size_t len = d_lengths[idx]; + size_t offset = d_offsets[idx]; + size_t size = d_offsets[idx+1] - 
offset; + if( size < 1 ) + return; // null string + char* ptr = d_flatstrs + offset; + char* str = ptr + cheat; + d_strings[idx] = custring_view::create_from(ptr,str,len); + }); + // + err = cudaDeviceSynchronize(); + if( err!=cudaSuccess ) + { + fprintf(stderr,"nvs-ofs: sync=%d copy %'u strings\n",(int)err,count); + printCudaError(err); + } + + pImpl->setMemoryBuffer(d_flatstrs,nbytes); + +#if STR_STATS + if( err==cudaSuccess ) + { + size_t memSize = nbytes + (count * sizeof(custring_view*)); + // lengths are +1 the size of the string so readjust + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_lengths] __device__ (unsigned int idx) { + size_t val = d_lengths[idx]; + val = (val ? val-1 : 0); + d_lengths[idx] = val; + }); + //size_t max = thrust::transform_reduce(execpol->on(0),d_dstLengths,d_dstLengths+count,thrust::identity(),0,thrust::maximum()); + size_t max = *thrust::max_element(execpol->on(0), lengths.begin(), lengths.end()); + size_t sum = thrust::reduce(execpol->on(0), lengths.begin(), lengths.end()); + size_t avg = 0; + if( count > 0 ) + avg =sum / count; + printf("nvs-ofs: created %'u strings in device memory(%p) = %'lu bytes\n",count,d_flatstrs,memSize); + printf("nvs-ofs: largest string is %lu bytes, average string length is %lu bytes\n",max,avg); + } +#endif + + return (int)err;; +} + +int NVStrings_copy_strings( NVStringsImpl* pImpl, std::vector& strslist ) +{ + auto execpol = rmm::exec_policy(0); + auto pList = pImpl->pList; + unsigned int count = (unsigned int)pList->size(); + size_t nbytes = 0; + for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ ) + nbytes += (*itr)->memsize(); + + custring_view_array d_results = pList->data().get(); + char* d_buffer = 0; + RMM_ALLOC(&d_buffer,nbytes,0); + size_t offset = 0; + size_t memoffset = 0; + + for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ ) + { + NVStrings* strs = *itr; + unsigned int size = strs->size(); + size_t memsize = strs->memsize(); + if( size==0 ) + continue; + rmm::device_vector strings(size,nullptr); + custring_view** d_strings = strings.data().get(); + strs->create_custring_index(d_strings); + if( memsize ) + { + // checking pointer values to find the first non-null one + custring_view** first = thrust::min_element(execpol->on(0),d_strings,d_strings+size, + [] __device__ (custring_view* lhs, custring_view* rhs) { + return (lhs && rhs) ? 
(lhs < rhs) : rhs==0; + }); + char* baseaddr = 0; + cudaError_t err = cudaMemcpy(&baseaddr,first,sizeof(custring_view*),cudaMemcpyDeviceToHost); + if( err!=cudaSuccess ) + fprintf(stderr, "copy-strings: cudaMemcpy(%p,%p,%d)=%d\n",&baseaddr,first,(int)sizeof(custring_view*),(int)err); + // copy string memory + char* buffer = d_buffer + memoffset; + err = cudaMemcpy((void*)buffer,(void*)baseaddr,memsize,cudaMemcpyDeviceToDevice); + if( err!=cudaSuccess ) + fprintf(stderr, "copy-strings: cudaMemcpy(%p,%p,%ld)=%d\n",buffer,baseaddr,memsize,(int)err); + // adjust pointers + custring_view_array results = d_results + offset; + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), size, + [buffer, baseaddr, d_strings, results] __device__(unsigned int idx){ + char* dstr = (char*)d_strings[idx]; + if( !dstr ) + return; + size_t diff = dstr - baseaddr; + char* newaddr = buffer + diff; + results[idx] = (custring_view*)newaddr; + }); + } + offset += size; + memoffset += memsize; + } + cudaError_t err = cudaDeviceSynchronize(); + if( err!=cudaSuccess ) + printCudaError(err,"nvs-cs"); + pImpl->setMemoryBuffer(d_buffer,nbytes); + return count; +} + +int NVStrings_fixup_pointers( NVStringsImpl* pImpl, char* baseaddr ) +{ + auto execpol = rmm::exec_policy(0); + auto pList = pImpl->pList; + unsigned int count = (unsigned int)pList->size(); + + custring_view_array d_strings = pImpl->getStringsPtr(); + //---- the following can be used to find the base-address of the original memory ---- + //---- instead of passing it across the ipc boundary; leaving it here for now ---- + //custring_view** first = thrust::min_element(execpol->on(0),d_strings,d_strings+count, + // [] __device__ (custring_view* lhs, custring_view* rhs) { + // return (lhs && rhs) ? (lhs < rhs) : rhs==0; + // }); + //cudaError_t err = cudaMemcpy(&baseaddr,first,sizeof(custring_view*),cudaMemcpyDeviceToHost); + //if( err!=cudaSuccess ) + // fprintf(stderr, "fixup: cudaMemcpy(%p,%p,%d)=%d\n",&baseaddr,first,(int)sizeof(custring_view*),(int)err); + // + char* buffer = pImpl->getMemoryPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [buffer, baseaddr, d_strings] __device__(unsigned int idx){ + char* dstr = (char*)d_strings[idx]; + if( !dstr ) + return; + size_t diff = dstr - baseaddr; + char* newaddr = buffer + diff; + d_strings[idx] = (custring_view*)newaddr; + }); + cudaError_t err = cudaDeviceSynchronize(); + if( err!=cudaSuccess ) + printCudaError(err,"nvs-fixup"); + return count; +} \ No newline at end of file diff --git a/cuda_code/NVStrings_split.cu b/cuda_code/NVStrings_split.cu new file mode 100644 index 0000000000000000000000000000000000000000..4adc9f01b1c91a3fbe49126e52d54e5971d2d527 --- /dev/null +++ b/cuda_code/NVStrings_split.cu @@ -0,0 +1,1374 @@ +/* +* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "NVStrings.h" +#include "NVStringsImpl.h" +#include "custring_view.cuh" +#include "Timing.h" + +// common token counter for all split methods +struct token_counter +{ + custring_view_array d_strings; + char* d_delimiter; + unsigned int dellen; + int tokens; + int* d_counts; + // + token_counter(custring_view_array dstrs, char* delim, unsigned int dlen, int t, int* counts) + : d_strings(dstrs), d_delimiter(delim), dellen(dlen), tokens(t), d_counts(counts) {} + __device__ void operator()(unsigned int idx) + { + custring_view* dstr = d_strings[idx]; + if( dstr ) + d_counts[idx] = dstr->split_size(d_delimiter,dellen,0,tokens); + } +}; + +// special-case token counter for whitespace delimiter +// leading and trailing and duplicate delimiters are ignored +struct whitespace_token_counter +{ + custring_view_array d_strings; + int tokens; + int* d_counts; + + // count the 'words' only between non-whitespace characters + whitespace_token_counter(custring_view_array dstrs, int t, int* counts) + : d_strings(dstrs), tokens(t), d_counts(counts) {} + __device__ void operator()(unsigned int idx) + { + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + int dcount = 0; + bool spaces = true; + custring_view::iterator itr = dstr->begin(); + while( itr != dstr->end() ) + { + Char ch = *itr; + if( spaces == (ch <= ' ') ) + itr++; + else + { + dcount += (int)spaces; + spaces = !spaces; + } + } + if( tokens && (dcount > tokens) ) + dcount = tokens; + if( dcount==0 ) + dcount = 1; // always allow empty string + d_counts[idx] = dcount; + //printf("dcount=%d\n",dcount); + } +}; + +// +// Coded form Pandas split algorithm as documented here: +// https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.split.html#pandas.Series.str.split +// +// Example: +// +// import pandas as pd +// pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__']) +// print(pd_series.str.split(pat='_', expand=False)) +// 0 [] +// 1 None +// 2 [a, b] +// 3 [, a, b, ] +// 4 [, , aa, , bb, , ] +// 5 [, a, , bbb, , , c] +// 6 [, aa, b, , ccc, , ] +// +// print(pd_series.str.split(pat='_', n=1, expand=False)) +// 0 [] +// 1 None +// 2 [a, b] +// 3 [, a_b_] +// 4 [, _aa__bb__] +// 5 [, a__bbb___c] +// 6 [, aa_b__ccc__] +// +// print(pd_series.str.split(pat='_', n=2, expand=False)) +// 0 [] +// 1 None +// 2 [a, b] +// 3 [, a, b_] +// 4 [, , aa__bb__] +// 5 [, a, _bbb___c] +// 6 [, aa, b__ccc__] +// +// +int NVStrings::split_record( const char* delimiter, int maxsplit, std::vector& results) +{ + if( delimiter==0 ) + return split_record(maxsplit,results); + + auto execpol = rmm::exec_policy(0); + char* d_delimiter = 0; + unsigned int dellen = (unsigned int)strlen(delimiter); + RMM_ALLOC(&d_delimiter,dellen+1,0); + cudaMemcpy(d_delimiter,delimiter,dellen+1,cudaMemcpyHostToDevice); + int tokens = 0; + if( maxsplit > 0 ) + tokens = maxsplit + 1; // makes consistent with Pandas + + // need to count how many output strings per string + unsigned int count = size(); + custring_view** d_strings = pImpl->getStringsPtr(); + rmm::device_vector counts(count,0); + int* d_counts = counts.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); + + // build int arrays to hold each string's split size + int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); + 
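+    // totalSizes = total number of tokens across all strings;
+    // 'sizes' holds each token's byte size, 'offsets' indexes each string's first
+    // token within 'sizes', and 'totals' accumulates the bytes needed per string.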
rmm::device_vector sizes(totalSizes,0), offsets(count,0), totals(count,0); + thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); + int* d_offsets = offsets.data().get(); + int* d_sizes = sizes.data().get(); + int* d_totals = totals.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_delimiter, dellen, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + int* dsizes = d_sizes + d_offsets[idx]; + int dcount = d_counts[idx]; + d_totals[idx] = dstr->split_size(d_delimiter,dellen,dsizes,dcount); + }); + // + cudaDeviceSynchronize(); + + // now build an array of custring_views* arrays for each value + int totalNewStrings = 0; + thrust::host_vector h_counts(counts); + thrust::host_vector h_totals(totals); + thrust::host_vector h_splits(count,nullptr); + thrust::host_vector h_buffers(count,nullptr); + for( unsigned int idx=0; idx < count; ++idx ) + { + int splitCount = h_counts[idx]; + if( splitCount==0 ) + { + results.push_back(0); + continue; + } + + NVStrings* splitResult = new NVStrings(splitCount); + results.push_back(splitResult); + h_splits[idx] = splitResult->pImpl->getStringsPtr(); + + int totalSize = h_totals[idx]; + char* d_buffer = 0; + RMM_ALLOC(&d_buffer,totalSize,0); + splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); + h_buffers[idx] = d_buffer; + + totalNewStrings += splitCount; + } + + // + rmm::device_vector splits(h_splits); + custring_view_array* d_splits = splits.data().get(); + rmm::device_vector buffers(h_buffers); + char** d_buffers = buffers.data().get(); + + // do the splits and fill in the arrays + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_delimiter, dellen, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + int d_count = d_counts[idx]; + if( d_count < 1 ) + return; + char* buffer = (char*)d_buffers[idx]; + int* dsizes = d_sizes + d_offsets[idx]; + custring_view_array d_strs = d_splits[idx]; + for( int i=0; i < d_count; ++i ) + { + int size = ALIGN_SIZE(dsizes[i]); + d_strs[i] = (custring_view*)buffer; + buffer += size; + } + dstr->split(d_delimiter,dellen,d_count,d_strs); + }); + + // + printCudaError(cudaDeviceSynchronize(),"nvs-split_record"); + RMM_FREE(d_delimiter,0); + return totalNewStrings; +} + +// +// Whitespace delimiter algorithm is very different. +// It follows the Python str.split algorithm as defined in Pandas: https://docs.python.org/3/library/stdtypes.html#str.split +// Paraphrased as follows (for null delimiter): +// Runs of consecutive whitespace are regarded as a single separator, +// and the result will contain no empty strings at the start orend if +// the string has leading or trailing whitespace. +// Also whitespace is not just space. +// The algorithm below uses the shortcut (<=' ') to catch \t\r\n or any other control character. +// The above statement does not account for maxplit as seen in the following examples where n=maxpslit. 
+// +// import pandas as pd +// pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc ']) +// print(pd_series.str.split(pat=None, expand=False)) +// 0 [] +// 1 None +// 2 [a, b] +// 3 [a, b] +// 4 [aa, bb] +// 5 [a, bbb, c] +// 6 [aa, b, ccc] +// +// print(pd_series.str.split(pat=None, n=1, expand=False)) +// 0 [] +// 1 None +// 2 [a, b] +// 3 [a, b ] +// 4 [aa, bb ] +// 5 [a, bbb c] +// 6 [aa, b ccc ] +// +// print(pd_series.str.split(pat=None, n=2, expand=False)) +// 0 [] +// 1 None +// 2 [a, b] +// 3 [a, b] +// 4 [aa, bb] +// 5 [a, bbb, c] +// 6 [aa, b, ccc ] +// +// Note: +// - lack of empty strings +// - trailing and leading characters are ignored (sometimes) +// - multiple whitespace characters are ignored (sometimes) +// +int NVStrings::split_record( int maxsplit, std::vector& results) +{ + auto execpol = rmm::exec_policy(0); + int tokens = 0; + if( maxsplit > 0 ) + tokens = maxsplit + 1; // makes consistent with Pandas + + // need to count how many output strings per string + unsigned int count = size(); + custring_view** d_strings = pImpl->getStringsPtr(); + rmm::device_vector counts(count,0); + int* d_counts = counts.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + whitespace_token_counter(d_strings,tokens,d_counts)); + //cudaDeviceSynchronize(); + + // build int arrays to hold each string's split size + int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); + rmm::device_vector sizes(totalSizes,0), offsets(count,0), totals(count,0); + thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); + int* d_offsets = offsets.data().get(); + int* d_sizes = sizes.data().get(); + int* d_totals = totals.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, tokens, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; // null string + int* dsizes = d_sizes + d_offsets[idx]; + int dcount = d_counts[idx]; + int bytes = 0, sidx = 0, spos = 0, nchars = dstr->chars_count(); + //printf("tokens=%d,dcount=%d,nchars=%d\n",tokens,dcount,nchars); + bool spaces = true; + for( int pos=0; (pos < nchars) && (sidx < dcount); ++pos ) + { + Char ch = dstr->at(pos); + if( spaces == (ch <= ' ') ) + { + if( spaces ) + spos = pos+1; + continue; + } + if( !spaces ) + { + if( (sidx+1)==tokens ) + break; + int size = dstr->substr_size(spos,pos-spos); + dsizes[sidx++] = size; + //printf("%d:pos=%d,spos=%d,size=%d\n",(sidx-1),pos,spos,size); + bytes += ALIGN_SIZE(size); + spos = pos + 1; + } + spaces = !spaces; + } + if( sidx < dcount ) + { + int size = 0; + if( spos < nchars ) + size = dstr->substr_size(spos,nchars-spos); + else + size = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); + dsizes[sidx] = size; + //printf("spos=%d,nchars=%d,size=%d\n",spos,nchars,size); + bytes += ALIGN_SIZE(size); + } + //printf("bytes=%d\n",bytes); + d_totals[idx] = bytes; + }); + + // + cudaDeviceSynchronize(); + + // now build an array of custring_views* arrays for each value + int totalNewStrings = 0; + thrust::host_vector h_counts(counts); + thrust::host_vector h_totals(totals); + thrust::host_vector h_splits(count,nullptr); + thrust::host_vector h_buffers(count,nullptr); + for( unsigned int idx=0; idx < count; ++idx ) + { + int splitCount = h_counts[idx]; + if( splitCount==0 ) + { + results.push_back(0); + continue; + } + + NVStrings* splitResult = new 
NVStrings(splitCount); + results.push_back(splitResult); + h_splits[idx] = splitResult->pImpl->getStringsPtr(); + + int totalSize = h_totals[idx]; + char* d_buffer = 0; + RMM_ALLOC(&d_buffer,totalSize,0); + splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); + h_buffers[idx] = d_buffer; + + totalNewStrings += splitCount; + } + + // + rmm::device_vector splits(h_splits); + custring_view_array* d_splits = splits.data().get(); + rmm::device_vector buffers(h_buffers); + char** d_buffers = buffers.data().get(); + + // do the splits and fill in the arrays + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, tokens, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; // null string + int dcount = d_counts[idx]; + char* buffer = (char*)d_buffers[idx]; + int* dsizes = d_sizes + d_offsets[idx]; + custring_view_array d_strs = d_splits[idx]; + int emptysize = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); + if( dcount==0 || dsizes[0]==emptysize ) + { + d_strs[0] = custring_view::create_from(buffer,buffer,0); + return; // empty string + } + for( int i=0; i < dcount; ++i ) + { + int size = ALIGN_SIZE(dsizes[i]); + d_strs[i] = (custring_view*)buffer; + buffer += size; + } + int sidx = 0, spos = 0, nchars = dstr->chars_count(); + //printf(">tokens=%d,dcount=%d,nchars=%d",tokens,dcount,nchars); + bool spaces = true; + for( int pos=0; (pos < nchars) && (sidx < dcount); ++pos ) + { + Char ch = dstr->at(pos); + if( spaces == (ch <= ' ') ) + { + if( spaces ) + spos = pos+1; + continue; + } + if( !spaces ) + { + if( (sidx+1)==tokens ) + break; + d_strs[sidx] = dstr->substr(spos,pos-spos,1,(void*)d_strs[sidx]); + //printf(">%d:pos=%d,spos=%d\n",sidx,pos,spos); + ++sidx; + spos = pos + 1; + } + spaces = !spaces; + } + if( (sidx < dcount) && (spos < nchars) ) + { + d_strs[sidx] = dstr->substr(spos,nchars-spos,1,(void*)d_strs[sidx]); + //printf(">%d:spos=%d,nchars=%d\n",sidx,spos,nchars); + } + }); + + // + printCudaError(cudaDeviceSynchronize(),"nvs-split_record_ws"); + return totalNewStrings; +} + +// +// This is just the split-from-the-right version of above. 
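+// For reference only (this example is not in the original source): Pandas applies
+// the maxsplit count from the right for rsplit, e.g.
+//   pd.Series(['a_b_c']).str.rsplit(pat='_', n=1, expand=False)
+//   0    [a_b, c]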
+// +int NVStrings::rsplit_record( const char* delimiter, int maxsplit, std::vector& results) +{ + if( delimiter==0 ) + return rsplit_record(maxsplit,results); + + auto execpol = rmm::exec_policy(0); + char* d_delimiter = 0; + unsigned int dellen = (unsigned int)strlen(delimiter); + RMM_ALLOC(&d_delimiter,dellen+1,0); + cudaMemcpy(d_delimiter,delimiter,dellen+1,cudaMemcpyHostToDevice); + int tokens = 0; + if( maxsplit > 0 ) + tokens = maxsplit + 1; // makes consistent with Pandas + + // need to count how many output strings per string + unsigned int count = size(); + custring_view** d_strings = pImpl->getStringsPtr(); + rmm::device_vector counts(count,0); + int* d_counts = counts.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); + + // build int arrays to hold each string's split size + int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); + rmm::device_vector sizes(totalSizes,0), offsets(count,0), totals(count,0); + thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); + int* d_offsets = offsets.data().get(); + int* d_sizes = sizes.data().get(); + int* d_totals = totals.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_delimiter, dellen, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + int dcount = d_counts[idx]; + int* dsizes = d_sizes + d_offsets[idx]; + d_totals[idx] = dstr->rsplit_size(d_delimiter,dellen,dsizes,dcount); + }); + cudaDeviceSynchronize(); + + // now build an array of custring_views* arrays for each value + int totalNewStrings = 0; + thrust::host_vector h_counts(counts); + thrust::host_vector h_totals(totals); + thrust::host_vector h_splits(count,nullptr); + thrust::host_vector h_buffers(count,nullptr); + for( int idx=0; idx < count; ++idx ) + { + int splitCount = h_counts[idx]; + if( splitCount==0 ) + { + results.push_back(0); + continue; + } + NVStrings* splitResult = new NVStrings(splitCount); + results.push_back(splitResult); + h_splits[idx] = splitResult->pImpl->getStringsPtr(); + + int totalSize = h_totals[idx]; + char* d_buffer = 0; + RMM_ALLOC(&d_buffer,totalSize,0); + splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); + h_buffers[idx] = d_buffer; + + totalNewStrings += splitCount; + } + + // + rmm::device_vector splits(h_splits); + custring_view_array* d_splits = splits.data().get(); + rmm::device_vector buffers(h_buffers); + char** d_buffers = buffers.data().get(); + + // do the splits and fill in the arrays + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_delimiter, dellen, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + int d_count = d_counts[idx]; + if( d_count < 1 ) + return; + char* buffer = (char*)d_buffers[idx]; + int* dsizes = d_sizes + d_offsets[idx]; + custring_view_array d_strs = d_splits[idx]; + for( int i=0; i < d_count; ++i ) + { + d_strs[i] = (custring_view*)buffer; + int size = ALIGN_SIZE(dsizes[i]); + buffer += size; + //printf("%d:%d=%d\n",(int)idx,i,size); + } + dstr->rsplit(d_delimiter,dellen,d_count,d_strs); + }); + + // + printCudaError(cudaDeviceSynchronize(),"nvs-rsplit_record"); + RMM_FREE(d_delimiter,0); + return totalNewStrings; +} + +// +// And the whitespace-delimited version of 
rsplit_record +// +int NVStrings::rsplit_record( int maxsplit, std::vector& results) +{ + auto execpol = rmm::exec_policy(0); + int tokens = 0; + if( maxsplit > 0 ) + tokens = maxsplit + 1; // makes consistent with Pandas + + // need to count how many output strings per string + unsigned int count = size(); + custring_view** d_strings = pImpl->getStringsPtr(); + rmm::device_vector counts(count,0); + int* d_counts = counts.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + whitespace_token_counter(d_strings,tokens,d_counts)); + + // build int arrays to hold each string's split size + int totalSizes = thrust::reduce(execpol->on(0), counts.begin(), counts.end()); + rmm::device_vector sizes(totalSizes,0), offsets(count,0), totals(count,0); + thrust::exclusive_scan(execpol->on(0),counts.begin(),counts.end(),offsets.begin()); + int* d_offsets = offsets.data().get(); + int* d_sizes = sizes.data().get(); + int* d_totals = totals.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, tokens, d_counts, d_offsets, d_sizes, d_totals] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + int* dsizes = d_sizes + d_offsets[idx]; + int dcount = d_counts[idx]; + int sidx = (dcount-1), nchars = dstr->chars_count(); + int bytes = 0, epos = nchars; + //printf("tokens=%d,dcount=%d,nchars=%d\n",tokens,dcount,nchars); + bool spaces = true; + for( int pos=nchars; (pos>0) && (sidx>=0); --pos ) + { + Char ch = dstr->at(pos-1); + if( spaces == (ch <= ' ') ) + { + if( spaces ) + epos = pos-1; + continue; + } + if( !spaces ) + { + if( (dcount-sidx)==tokens ) + break; + int size = dstr->substr_size(pos,epos-pos); + dsizes[sidx--] = size; + //printf("%d:pos=%d,epos=%d,size=%d\n",(sidx+1),pos,epos,size); + bytes += ALIGN_SIZE(size); + epos = pos-1; + } + spaces = !spaces; + } + if( sidx==0 ) + { + int size = 0; + if( epos > 0 ) + size = dstr->substr_size(0,epos); + else + size = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); + //printf("%d:epos=%d,size=%d\n",sidx,epos,size); + dsizes[sidx] = size; + bytes += ALIGN_SIZE(size); + } + //printf("bytes=%d\n",bytes); + d_totals[idx] = bytes; + }); + + cudaDeviceSynchronize(); + + // now build an array of custring_views* arrays for each value + int totalNewStrings = 0; + thrust::host_vector h_counts(counts); + thrust::host_vector h_totals(totals); + thrust::host_vector h_splits(count,nullptr); + thrust::host_vector h_buffers(count,nullptr); + for( int idx=0; idx < count; ++idx ) + { + int splitCount = h_counts[idx]; + if( splitCount==0 ) + { + results.push_back(0); + continue; + } + NVStrings* splitResult = new NVStrings(splitCount); + results.push_back(splitResult); + h_splits[idx] = splitResult->pImpl->getStringsPtr(); + + int totalSize = h_totals[idx]; + char* d_buffer = 0; + RMM_ALLOC(&d_buffer,totalSize,0); + splitResult->pImpl->setMemoryBuffer(d_buffer,totalSize); + h_buffers[idx] = d_buffer; + + totalNewStrings += splitCount; + } + + // + rmm::device_vector splits(h_splits); + custring_view_array* d_splits = splits.data().get(); + rmm::device_vector buffers(h_buffers); + char** d_buffers = buffers.data().get(); + + // do the splits and fill in the arrays + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, tokens, d_counts, d_buffers, d_sizes, d_offsets, d_splits] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + int dcount = d_counts[idx]; 
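+            // carve the per-string output buffer into dcount aligned custring_view
+            // slots, then fill the slots right-to-left by walking the characters in reverse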
+ char* buffer = (char*)d_buffers[idx]; + int* dsizes = d_sizes + d_offsets[idx]; + custring_view_array d_strs = d_splits[idx]; + int emptysize = (int)custring_view::alloc_size((unsigned)0,(unsigned)0); + if( dcount==0 || dsizes[0]==emptysize ) + { + d_strs[0] = custring_view::create_from(buffer,buffer,0); + return; // empty string + } + for( int i=0; i < dcount; ++i ) + { + int size = ALIGN_SIZE(dsizes[i]); + d_strs[i] = (custring_view*)buffer; + buffer += size; + } + int sidx = (dcount-1), nchars = dstr->chars_count(); + int epos = nchars; + //printf(">tokens=%d,dcount=%d,nchars=%d\n",tokens,dcount,nchars); + bool spaces = true; + for( int pos=nchars; (pos > 0) && (sidx >= 0); --pos ) + { + Char ch = dstr->at(pos-1); + if( spaces == (ch <= ' ') ) + { + if( spaces ) + epos = pos-1; + continue; + } + if( !spaces ) + { + if( (dcount-sidx)==tokens ) + break; + d_strs[sidx] = dstr->substr(pos,epos-pos,1,(void*)d_strs[sidx]); + //printf(">%d:pos=%d,epos=%d\n",sidx,pos,epos); + --sidx; + epos = pos-1; + } + spaces = !spaces; + } + if( (sidx>=0) && (epos > 0) ) + { + d_strs[sidx] = dstr->substr(0,epos,1,(void*)d_strs[sidx]); + //printf(">%d:epos=%d\n",sidx,epos); + } + }); + // + printCudaError(cudaDeviceSynchronize(),"nvs-rsplit_record_ws"); + return totalNewStrings; +} + +// +// This will create new columns by splitting the array of strings vertically. +// All the first tokens go in the first column, all the second tokens go in the second column, etc. +// It is comparable to Pandas split with expand=True but the rows/columns are transposed. +// Example: +// import pandas as pd +// pd_series = pd.Series(['', None, 'a_b', '_a_b_', '__aa__bb__', '_a__bbb___c', '_aa_b__ccc__']) +// print(pd_series.str.split(pat='_', expand=True)) +// 0 1 2 3 4 5 6 +// 0 '' None None None None None None +// 1 None None None None None None None +// 2 a b None None None None None +// 3 '' a b '' None None None +// 4 '' '' aa '' bb '' '' +// 5 '' a '' bbb '' '' c +// 6 '' aa b '' ccc '' '' +// +// print(pd_series.str.split(pat='_', n=1, expand=True)) +// 0 1 +// 0 '' None +// 1 None None +// 2 a b +// 3 '' a_b_ +// 4 '' _aa__bb__ +// 5 '' a__bbb___c +// 6 '' aa_b__ccc__ +// +// print(pd_series.str.split(pat='_', n=2, expand=True)) +// 0 1 2 +// 0 '' None None +// 1 None None None +// 2 a b None +// 3 '' a b_ +// 4 '' aa__bb__ +// 5 '' a _bbb___c +// 6 '' aa b__ccc__ +// +unsigned int NVStrings::split( const char* delimiter, int maxsplit, std::vector& results) +{ + if( delimiter==0 ) + return split(maxsplit,results); + auto execpol = rmm::exec_policy(0); + char* d_delimiter = 0; + unsigned int dellen = (unsigned int)strlen(delimiter); + RMM_ALLOC(&d_delimiter,dellen+1,0); + cudaMemcpy(d_delimiter,delimiter,dellen+1,cudaMemcpyHostToDevice); + int tokens = 0; + if( maxsplit > 0 ) + tokens = maxsplit + 1; // makes consistent with Pandas + + // need to count how many output strings per string + unsigned int count = size(); + custring_view_array d_strings = pImpl->getStringsPtr(); + rmm::device_vector counts(count,0); + int* d_counts = counts.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); + + int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); + // boundary case: if no columns, return one null column (issue #119) + if( columnsCount==0 ) + results.push_back(new NVStrings(count)); + + // create each column + for( int col=0; col < columnsCount; ++col ) + { + // first, build a vector 
of pair's' for each column + // each pair points to a string for this column for each row + //st = GetTime(); + rmm::device_vector< thrust::pair > indexes(count); + thrust::pair* d_indexes = indexes.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, col, d_delimiter, dellen, d_counts, d_indexes] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + d_indexes[idx].first = 0; // initialize to + d_indexes[idx].second = 0; // null string + if( !dstr ) + return; + // dcount already accounts for the maxsplit value + int dcount = d_counts[idx]; + if( col >= dcount ) + return; // passed the end for this string + // skip delimiters until we reach this column + int dchars = custring_view::chars_in_string(d_delimiter,dellen); + int spos = 0, nchars = dstr->chars_count(); + int epos = nchars; + for( int c=0; c < (dcount-1); ++c ) + { + epos = dstr->find(d_delimiter,dellen,spos); + if( epos < 0 ) + { + epos = nchars; + break; + } + if( c==col ) // found our column + break; + spos = epos + dchars; + epos = nchars; + } + // this will be the string for this column + if( spos < epos ) + { + spos = dstr->byte_offset_for(spos); // convert char pos + epos = dstr->byte_offset_for(epos); // to byte offset + d_indexes[idx].first = dstr->data() + spos; + d_indexes[idx].second = (epos-spos); + } + else + { // this will create empty string instead of null one + d_indexes[idx].first = dstr->data(); + } + }); + + cudaError_t err = cudaDeviceSynchronize(); + if( err != cudaSuccess ) + { + fprintf(stderr,"nvs-split(%s,%d), col=%d\n",delimiter,maxsplit,col); + printCudaError(err); + } + // + NVStrings* column = NVStrings::create_from_index((std::pair*)d_indexes,count); + results.push_back(column); + } + // + RMM_FREE(d_delimiter,0); + return (unsigned int)results.size(); +} + +// +// This is the whitespace-delimiter version of the column split function. +// Like the one above, it can be compared to Pandas split with expand=True but +// with the rows/columns transposed. +// +// import pandas as pd +// pd_series = pd.Series(['', None, 'a b', ' a b ', ' aa bb ', ' a bbb c', ' aa b ccc ']) +// print(pd_series.str.split(pat=None, expand=True)) +// 0 1 2 +// 0 None None None +// 1 None None None +// 2 a b None +// 3 a b None +// 4 aa bb None +// 5 a bbb c +// 6 aa b ccc +// +// print(pd_series.str.split(pat=None, n=1, expand=True)) +// 0 1 +// 0 None None +// 1 None None +// 2 a b +// 3 a b +// 4 aa bb +// 5 a bbb c +// 6 aa b ccc +// +// print(pd_series.str.split(pat=None, n=2, expand=True)) +// 0 1 2 +// 0 None None None +// 1 None None None +// 2 a b None +// 3 a b None +// 4 aa bb None +// 5 a bbb c +// 6 aa b ccc +// +// Like the split_record method, there are no empty strings here. 
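+// A minimal usage sketch (illustrative only, not from the original source;
+// 'strs' stands for an NVStrings instance created elsewhere):
+//   std::vector<NVStrings*> columns;
+//   unsigned int ncols = strs->split(2, columns); // whitespace split, maxsplit=2
+//   // columns[i] is the i-th token column across all rows (null where a row has no i-th token)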
+// +unsigned int NVStrings::split( int maxsplit, std::vector& results) +{ + auto execpol = rmm::exec_policy(0); + int tokens = 0; + if( maxsplit > 0 ) + tokens = maxsplit + 1; // makes consistent with Pandas + + // need to count how many output strings per string + unsigned int count = size(); + custring_view_array d_strings = pImpl->getStringsPtr(); + rmm::device_vector counts(count,0); + int* d_counts = counts.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + whitespace_token_counter(d_strings,tokens,d_counts)); + + int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); + // boundary case: if no columns, return one null column (issue #119) + if( columnsCount==0 ) + results.push_back(new NVStrings(count)); + + // create each column + for( int col=0; col < columnsCount; ++col ) + { + // first, build a vector of pair's' for each column + // each pair points to a string for this column for each row + //st = GetTime(); + rmm::device_vector< thrust::pair > indexes(count); + thrust::pair* d_indexes = indexes.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, col, tokens, d_counts, d_indexes] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + d_indexes[idx].first = 0; // initialize to + d_indexes[idx].second = 0; // null string + if( !dstr ) + return; // null string + int dcount = d_counts[idx]; + if( col >= dcount ) + return; + int c = 0, nchars = dstr->chars_count(); + int spos = 0, epos = nchars; + //printf(">%d:tokens=%d,dcount=%d,nchars=%d\n",col,tokens,dcount,nchars); + bool spaces = true; + for( int pos=0; pos < nchars; ++pos ) + { + Char ch = dstr->at(pos); + if( spaces == (ch <= ' ') ) + { + if( spaces ) + spos = pos+1; + else + epos = pos+1; + continue; + } + if( !spaces ) + { + epos = nchars; + if( (c+1)==tokens ) + break; + epos = pos; + if( c==col ) + break; + spos = pos+1; + epos = nchars; + ++c; + } + spaces = !spaces; + } + if( spos < epos ) + { + spos = dstr->byte_offset_for(spos); // convert char pos + epos = dstr->byte_offset_for(epos); // to byte offset + //printf(">%d:spos=%d,epos=%d\n",c,spos,epos); + d_indexes[idx].first = dstr->data() + spos; + d_indexes[idx].second = (epos-spos); + } + //else + //{ no empty strings in split-column-whitespace + // d_indexes[idx].first = dstr->data(); + //} + }); + + cudaError_t err = cudaDeviceSynchronize(); + if( err != cudaSuccess ) + { + fprintf(stderr,"nvs-split-ws(%d), col=%d\n",maxsplit,col); + printCudaError(err); + } + // + NVStrings* column = NVStrings::create_from_index((std::pair*)d_indexes,count); + results.push_back(column); + } + // + return (unsigned int)results.size(); +} +// +// The split-from-the-right version of split +// +unsigned int NVStrings::rsplit( const char* delimiter, int maxsplit, std::vector& results) +{ + if( delimiter==0 ) + return rsplit(maxsplit,results); + auto execpol = rmm::exec_policy(0); + char* d_delimiter = 0; + unsigned int dellen = (unsigned int)strlen(delimiter); + RMM_ALLOC(&d_delimiter,dellen+1,0); + cudaMemcpy(d_delimiter,delimiter,dellen+1,cudaMemcpyHostToDevice); + int tokens = 0; + if( maxsplit > 0 ) + tokens = maxsplit + 1; // makes consistent with Pandas + + // need to count how many output strings per string + unsigned int count = size(); + custring_view** d_strings = pImpl->getStringsPtr(); + rmm::device_vector counts(count,0); + int* d_counts = counts.data().get(); + thrust::for_each_n(execpol->on(0), 
thrust::make_counting_iterator(0), count, + token_counter(d_strings,d_delimiter,dellen,tokens,d_counts)); + + int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); + // boundary case: if no columns, return one null column (issue #119) + if( columnsCount==0 ) + results.push_back(new NVStrings(count)); + + // create each column + for( int col = 0; col < columnsCount; ++col ) + { + // first, build a vector of pair's' for each column + // each pair points to a string for this column for each row + rmm::device_vector< thrust::pair > indexes(count); + thrust::pair* d_indexes = indexes.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, col, d_delimiter, dellen, d_counts, d_indexes] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + d_indexes[idx].first = 0; // initialize to + d_indexes[idx].second = 0; // null string + if( !dstr ) + return; + // dcount already accounts for the maxsplit value + int dcount = d_counts[idx]; + if( col >= dcount ) + return; // passed the end for this string + // skip delimiters until we reach this column + int dchars = custring_view::chars_in_string(d_delimiter,dellen); + int spos = 0, nchars = dstr->chars_count(); + int epos = nchars, pos = dstr->size()-1; + for( int c=(dcount-1); c > 0; --c ) + { + spos = dstr->rfind(d_delimiter,dellen,0,epos); + if( spos < 0 ) + { + spos = 0; + break; + } + if( c==col ) // found our column + { + spos += dchars; // do not include delimiter + break; + } + epos = spos; + spos = 0; + } + // this will be the string for this column + if( spos < epos ) + { + spos = dstr->byte_offset_for(spos); // convert char pos + epos = dstr->byte_offset_for(epos); // to byte offset + d_indexes[idx].first = dstr->data() + spos; + d_indexes[idx].second = (epos-spos); + } + else + { // this will create empty string instead of null one + d_indexes[idx].first = dstr->data(); + } + }); + + cudaError_t err = cudaDeviceSynchronize(); + if( err != cudaSuccess ) + { + fprintf(stderr,"nvs-rsplit(%s,%d)\n",delimiter,maxsplit); + printCudaError(err); + } + // + NVStrings* column = NVStrings::create_from_index((std::pair*)d_indexes,count); + results.push_back(column); + } + // + RMM_FREE(d_delimiter,0); + return (unsigned int)results.size(); +} + +// +// The whitespace-delimited version of rsplit. 
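+// As above, runs of whitespace count as a single separator and the maxsplit
+// count is applied from the right-hand end of each string.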
+// +unsigned int NVStrings::rsplit( int maxsplit, std::vector& results) +{ + auto execpol = rmm::exec_policy(0); + int tokens = 0; + if( maxsplit > 0 ) + tokens = maxsplit + 1; // makes consistent with Pandas + + // need to count how many output strings per string + unsigned int count = size(); + custring_view** d_strings = pImpl->getStringsPtr(); + rmm::device_vector counts(count,0); + int* d_counts = counts.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + whitespace_token_counter(d_strings,tokens,d_counts)); + + int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() ); + // boundary case: if no columns, return one null column (issue #119) + if( columnsCount==0 ) + results.push_back(new NVStrings(count)); + + // create each column + for( int col = 0; col < columnsCount; ++col ) + { + // first, build a vector of pair's' for each column + // each pair points to a string for this column for each row + rmm::device_vector< thrust::pair > indexes(count); + thrust::pair* d_indexes = indexes.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, col, columnsCount, tokens, d_counts, d_indexes] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + d_indexes[idx].first = 0; // initialize to + d_indexes[idx].second = 0; // null string + if( !dstr ) + return; // null string + int dcount = d_counts[idx]; + if( col >= dcount ) + return; + int c = (dcount-1), nchars = dstr->chars_count(); + int spos = 0, epos = nchars; + //printf(">%d:tokens=%d,dcount=%d,nchars=%d\n",col,tokens,dcount,nchars); + bool spaces = true; + for( int pos=nchars; pos > 0; --pos ) + { + Char ch = dstr->at(pos-1); + if( spaces == (ch <= ' ') ) + { + if( spaces ) + epos = pos-1; + else + spos = pos-1; + continue; + } + if( !spaces ) + { + spos = 0; + if( (columnsCount-c)==tokens ) + break; + spos = pos; + if( c==col ) + break; + epos = pos-1; + spos = 0; + --c; + } + spaces = !spaces; + } + if( spos < epos ) + { + spos = dstr->byte_offset_for(spos); // convert char pos + epos = dstr->byte_offset_for(epos); // to byte offset + //printf(">%d:spos=%d,epos=%d\n",c,spos,epos); + d_indexes[idx].first = dstr->data() + spos; + d_indexes[idx].second = (epos-spos); + } + //else + //{ no empty strings in rsplit column whitespace + // d_indexes[idx].first = dstr->data(); + //} + }); + + cudaError_t err = cudaDeviceSynchronize(); + if( err != cudaSuccess ) + { + fprintf(stderr,"nvs-rsplit-ws(%d)\n",maxsplit); + printCudaError(err); + } + // + NVStrings* column = NVStrings::create_from_index((std::pair*)d_indexes,count); + results.push_back(column); + } + // + return (unsigned int)results.size(); +} + +// +// Partition is split the string at the first occurrence of delimiter, and return 3 elements containing +// the part before the delimiter, the delimiter itself, and the part after the delimiter. +// If the delimiter is not found, return 3 elements containing the string itself, followed by two empty strings. +// +// >>> import pandas as pd +// >>> strs = pd.Series(['héllo', None, 'a_bc_déf', 'a__bc', '_ab_cd', 'ab_cd_']) +// >>> strs.str.partition('_') +// 0 1 2 +// 0 héllo +// 1 None None None +// 2 a _ bc_déf +// 3 a _ _bc +// 4 _ ab_cd +// 5 ab _ cd_ +// +int NVStrings::partition( const char* delimiter, std::vector& results) +{ + if( delimiter==0 ) + return 0; + unsigned int bytes = (unsigned int)strlen(delimiter); + if( bytes==0 ) + return 0; // just return original list? 
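+    // every row produces exactly three output strings:
+    // [part before delimiter, delimiter, part after delimiter]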
+ + auto execpol = rmm::exec_policy(0); + // copy delimiter to device + char* d_delimiter = 0; + RMM_ALLOC(&d_delimiter,bytes,0); + cudaMemcpy(d_delimiter,delimiter,bytes,cudaMemcpyHostToDevice); + int d_asize = custring_view::alloc_size((char*)delimiter,bytes); + d_asize = ALIGN_SIZE(d_asize); + + unsigned int count = size(); + custring_view** d_strings = pImpl->getStringsPtr(); + // build int arrays to hold each string's partition sizes + int totalSizes = 2 * count; + rmm::device_vector sizes(totalSizes,0), totals(count,0); + int* d_sizes = sizes.data().get(); + int* d_totals = totals.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_delimiter, bytes, d_asize, d_sizes, d_totals] __device__(size_t idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + int* dsizes = &(d_sizes[idx*2]); + d_totals[idx] = dstr->split_size(d_delimiter,bytes,dsizes,2) + d_asize; + }); + + cudaDeviceSynchronize(); + + // build an output array of custring_views* arrays for each value + // there will always be 3 per string + thrust::host_vector h_totals(totals); + thrust::host_vector h_buffers(count,nullptr); + thrust::host_vector h_splits(count,nullptr); + for( int idx=0; idx < count; ++idx ) + { + NVStrings* result = new NVStrings(3); + results.push_back(result); + h_splits[idx] = result->pImpl->getStringsPtr(); + + int totalSize = h_totals[idx]; + char* d_buffer = 0; + RMM_ALLOC(&d_buffer,totalSize,0); + result->pImpl->setMemoryBuffer(d_buffer,totalSize); + h_buffers[idx] = d_buffer; + } + + rmm::device_vector splits(h_splits); + custring_view_array* d_splits = splits.data().get(); + rmm::device_vector buffers(h_buffers); + char** d_buffers = buffers.data().get(); + + // do the partition and fill in the arrays + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_delimiter, bytes, d_buffers, d_sizes, d_splits] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + char* buffer = (char*)d_buffers[idx]; + int* dsizes = &(d_sizes[idx*2]); + custring_view_array d_strs = d_splits[idx]; + + d_strs[0] = custring_view::create_from(buffer,0,0); + buffer += ALIGN_SIZE(dsizes[0]); + d_strs[1] = custring_view::create_from(buffer,0,0); + buffer += ALIGN_SIZE(dsizes[1]); + d_strs[2] = custring_view::create_from(buffer,0,0); + + // + int dcount = dstr->rsplit_size(d_delimiter,bytes,0,2); + dstr->split(d_delimiter,bytes,2,d_strs); + if( dcount==2 ) + { // insert delimiter element in the middle + custring_view* tmp = d_strs[1]; + d_strs[1] = custring_view::create_from(buffer,d_delimiter,bytes); + d_strs[2] = tmp; + } + }); + + printCudaError(cudaDeviceSynchronize(),"nvs-partition"); + RMM_FREE(d_delimiter,0); + return count; +} + +// +// This follows most of the same logic as partition above except that the delimiter +// search starts from the end of the string. Also, if no delimiter is found the +// resulting array includes two empty strings followed by the original string. +// +// >>> import pandas as pd +// >>> strs = pd.Series(['héllo', None, 'a_bc_déf', 'a__bc', '_ab_cd', 'ab_cd_']) +// >>> strs.str.rpartition('_') +// 0 1 2 +// 0 héllo +// 1 None None None +// 2 a_bc _ déf +// 3 a_ _ bc +// 4 _ab _ cd +// 5 ab_cd _ +// +int NVStrings::rpartition( const char* delimiter, std::vector& results) +{ + if( delimiter==0 ) + return 0; + unsigned int bytes = (unsigned int)strlen(delimiter); + if( bytes==0 ) + return 0; // just return original list? 
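+    // like partition(), each row yields three strings, but the delimiter search
+    // starts from the end of the string; if no delimiter is found the original
+    // string ends up in the last slot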
+ + auto execpol = rmm::exec_policy(0); + // copy delimiter to device + char* d_delimiter = 0; + RMM_ALLOC(&d_delimiter,bytes,0); + cudaMemcpy(d_delimiter,delimiter,bytes,cudaMemcpyHostToDevice); + int d_asize = custring_view::alloc_size((char*)delimiter,bytes); + d_asize = ALIGN_SIZE(d_asize); + + unsigned int count = size(); + custring_view** d_strings = pImpl->getStringsPtr(); + // build int arrays to hold each string's partition sizes + int totalSizes = 2 * count; + rmm::device_vector sizes(totalSizes,0), totals(count,0); + int* d_sizes = sizes.data().get(); + int* d_totals = totals.data().get(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_delimiter, bytes, d_asize, d_sizes, d_totals] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + int* dsizes = &(d_sizes[idx*2]); + //d_totals[idx] = dstr->rpartition_size(d_delimiter,bytes,dsizes); + d_totals[idx] = dstr->rsplit_size(d_delimiter,bytes,dsizes,2) + d_asize; + }); + + cudaDeviceSynchronize(); + + // now build an output array of custring_views* arrays for each value + // there will always be 3 per string + thrust::host_vector h_totals(totals); + thrust::host_vector h_buffers(count,nullptr); + thrust::host_vector h_splits(count,nullptr); + for( int idx=0; idx < count; ++idx ) + { + NVStrings* result = new NVStrings(3); + results.push_back(result); + h_splits[idx] = result->pImpl->getStringsPtr(); + + int totalSize = h_totals[idx]; + char* d_buffer = 0; + RMM_ALLOC(&d_buffer,totalSize,0); + result->pImpl->setMemoryBuffer(d_buffer,totalSize); + h_buffers[idx] = d_buffer; + } + + rmm::device_vector splits(h_splits); + custring_view_array* d_splits = splits.data().get(); + rmm::device_vector buffers(h_buffers); + char** d_buffers = buffers.data().get(); + + // do the partition and fill in the arrays + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_delimiter, bytes, d_buffers, d_sizes, d_splits] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( !dstr ) + return; + char* buffer = (char*)d_buffers[idx]; + int* dsizes = &(d_sizes[idx*2]); + custring_view_array d_strs = d_splits[idx]; + + d_strs[0] = custring_view::create_from(buffer,0,0); + buffer += ALIGN_SIZE(dsizes[0]); + d_strs[1] = custring_view::create_from(buffer,0,0); + buffer += ALIGN_SIZE(dsizes[1]); + d_strs[2] = custring_view::create_from(buffer,0,0); + + // + int dcount = dstr->rsplit_size(d_delimiter,bytes,0,2); + dstr->rsplit(d_delimiter,bytes,2,d_strs); + // reorder elements + if( dcount==1 ) + { // if only one element, it goes on the end + custring_view* tmp = d_strs[2]; + d_strs[2] = d_strs[0]; + d_strs[0] = tmp; + } + if( dcount==2 ) + { // insert delimiter element in the middle + custring_view* tmp = d_strs[1]; + d_strs[1] = custring_view::create_from(buffer,d_delimiter,bytes); + d_strs[2] = tmp; + } + }); + + printCudaError(cudaDeviceSynchronize(),"nvs-rpartition"); + RMM_FREE(d_delimiter,0); + return count; +} diff --git a/cuda_code/Normalization_16.cu b/cuda_code/Normalization_16.cu new file mode 100644 index 0000000000000000000000000000000000000000..23559fb16d4bcb229afbcffd9e9c3b747bec3fdc --- /dev/null +++ b/cuda_code/Normalization_16.cu @@ -0,0 +1,691 @@ +#include +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +namespace { + +ScalarType first_type() { + return ScalarType::Undefined; +} + +template +ScalarType first_type(const Tensor& arg, const Args&... 
parameters) { + return arg.defined() ? arg.scalar_type() : first_type(parameters...); +} + +// A transform is mixed type if the parameters are higher precision than the input +template +bool is_mixed_type(const Tensor& input, const Args&... parameters) { + const auto parameter_type = first_type(parameters...); + return ((parameter_type != ScalarType::Undefined) && + (parameter_type != input.scalar_type())); +} + +inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) { + return (self.is_contiguous(at::MemoryFormat::ChannelsLast) || + (self.is_contiguous() && self.strides()[1] == 1)); +} + +enum class Impl { + Contiguous, + ChannelsLast, + General, +}; + +inline Impl batch_norm_choose_impl(const Tensor& self) { + if (!at::cuda::detail::canUse32BitIndexMath(self)) { + return Impl::General; + } + + if (self.is_contiguous()) { + return self.strides()[1] == 1 ? Impl::ChannelsLast : Impl::Contiguous; + } + + if (self.is_contiguous(at::MemoryFormat::ChannelsLast)) { + return Impl::ChannelsLast; + } + + return Impl::General; +} + +inline Impl batch_norm_choose_impl(const Tensor& in1, const Tensor& in2) { + auto imp1 = batch_norm_choose_impl(in1); + if (imp1 == Impl::General) { + return imp1; + } + auto imp2 = batch_norm_choose_impl(in2); + return imp1 == imp2 ? imp1 : Impl::General; +} + +void batch_norm_elementwise( + const Tensor& out, const Tensor& self, const c10::optional& weight_opt, + const c10::optional& bias_opt, const Tensor& mean_, const Tensor& invstd_) { + switch (batch_norm_choose_impl(self)) { + case Impl::Contiguous: { + c10::MaybeOwned weight = at::borrow_from_optional_tensor(weight_opt); + c10::MaybeOwned bias = at::borrow_from_optional_tensor(bias_opt); + AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), + "batch_norm_elementwise_cuda", [&] { + using accscalar_t = at::acc_type; + const bool mixed_type = is_mixed_type(self, *weight, *bias); + if (mixed_type) { + batch_norm_elemt_cuda_template( + out, self, *weight, *bias, mean_, invstd_); + } else { + batch_norm_elemt_cuda_template( + out, self, *weight, *bias, mean_, invstd_); + } + }); + return; + } + case Impl::ChannelsLast: { + auto weight = at::borrow_from_optional_tensor(weight_opt); + auto bias = at::borrow_from_optional_tensor(bias_opt); + if (out.is_contiguous(at::MemoryFormat::ChannelsLast) && + (!weight->defined() || weight->is_contiguous()) && + (!bias->defined() || bias->is_contiguous()) && + (!mean_.defined() || mean_.is_contiguous()) && + (!invstd_.defined() || invstd_.is_contiguous())) { + batch_norm_elemt_channels_last_cuda_template( + out, self, *weight, *bias, mean_, invstd_); + return; + } + C10_FALLTHROUGH; + } + case Impl::General: { + const int64_t ndim = self.dim(); + DimVector sizes(ndim, 1), strides(ndim, 0); + // Helper to convert 1d tensors to an nd tensor that broadcasts with input + // All elements go into the channel dimension + auto as_nd = [&](const Tensor& t) { + TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); + sizes[1] = t.sizes()[0]; + strides[1] = t.strides()[0]; + return t.as_strided(sizes, strides); + }; + + auto weight = weight_opt.has_value() && weight_opt->defined() ? + as_nd(*weight_opt) : at::scalar_tensor(1, mean_.options()); + auto bias = bias_opt.has_value() && bias_opt->defined() ? 
+ as_nd(*bias_opt) : at::scalar_tensor(0, mean_.options()); + auto mean = as_nd(mean_); + auto invstd = as_nd(invstd_); + + auto iter = TensorIteratorConfig() + .add_output(out) + .add_input(self) + .add_input(weight) + .add_input(bias) + .add_input(mean) + .add_input(invstd) + .check_all_same_dtype(false) + .promote_inputs_to_common_dtype(false) + .build(); + + AT_DISPATCH_FLOATING_TYPES_AND2(kBFloat16, kHalf, self.scalar_type(), + "batch_norm_elementwise_cuda", [&] { + using acc_t = at::acc_type; + gpu_kernel(iter, [] GPU_LAMBDA (scalar_t input, acc_t weight, acc_t bias, + acc_t mean, acc_t invstd) -> scalar_t { + return ((input - mean) * invstd) * weight + bias; + }); + }); + return; + } + } +} + +Tensor batch_norm_elementwise_backward_train( + const Tensor& grad_out, const Tensor& input, const Tensor& mean, const Tensor& invstd, + const Tensor& weight, const Tensor& sum_dy, const Tensor& sum_dy_xmu) { + switch (batch_norm_choose_impl(input, grad_out)) { + case Impl::Contiguous: { + return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), + "batch_norm_backward_elemt", [&] { + using accscalar_t = at::acc_type; + const bool mixed_type = is_mixed_type(input, weight); + if (mixed_type) { + return batch_norm_backward_elemt_cuda_template( + grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); + } else { + return batch_norm_backward_elemt_cuda_template( + grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); + } + }); + } + case Impl::ChannelsLast: { + if ((!weight.defined() || weight.is_contiguous()) && + mean.is_contiguous() && invstd.is_contiguous()) { + return batch_norm_backward_elemt_channels_last_cuda_template( + grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu); + } + C10_FALLTHROUGH; + } + case Impl::General: { + const auto ndim = input.dim(); + DimVector sizes(ndim, 1), strides(ndim, 0); + auto as_nd = [&](const Tensor& t) { + TORCH_INTERNAL_ASSERT(t.defined() && t.dim() == 1); + sizes[1] = t.sizes()[0]; + strides[1] = t.strides()[0]; + return t.as_strided(sizes, strides); + }; + auto invstd_nd = as_nd(invstd); + auto mean_nd = as_nd(mean); + auto sum_dy_nd = as_nd(sum_dy); + auto sum_dy_xmu_nd = as_nd(sum_dy_xmu); + auto weight_nd = weight.defined() ? 
as_nd(weight) : + at::scalar_tensor(1.0, input.options().dtype(mean.scalar_type())); + + Tensor grad_input = at::empty(input.sizes(), grad_out.options()); + auto iter = TensorIteratorConfig() + .add_output(grad_input) + .add_input(grad_out) + .add_input(input) + .add_input(weight_nd) + .add_input(mean_nd) + .add_input(invstd_nd) + .add_input(sum_dy_xmu_nd) + .add_input(sum_dy_nd) + .check_all_same_dtype(false) + .promote_inputs_to_common_dtype(false) + .build(); + + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), + "batch_norm_eval_backward", [&]{ + using accscalar_t = at::acc_type; + auto norm_fct = static_cast(1.0 / (input.numel() /input.size(1)) ); + gpu_kernel(iter, [norm_fct] GPU_LAMBDA (scalar_t gO, scalar_t input, accscalar_t weight, + accscalar_t mean, accscalar_t invstd, + accscalar_t xmu, accscalar_t dy) -> scalar_t { + auto factor_1_c = invstd * invstd * xmu * norm_fct; + auto factor_2_c = weight * invstd; + auto m_dy_c = dy * norm_fct; + return (gO - m_dy_c - (input - mean) * factor_1_c) * factor_2_c; + }); + }); + return grad_input; + } + } + TORCH_INTERNAL_ASSERT(false); +} + +Tensor batch_norm_elementwise_backward_eval( + const Tensor& grad_out, const Tensor& input, + const Tensor& invstd, const Tensor& weight) { + const auto ndim = input.dim(); + DimVector shape(ndim, 1), strides(ndim, 0); + shape[1] = invstd.sizes()[0]; + strides[1] = invstd.strides()[0]; + auto invstd_nd = invstd.as_strided(shape, strides); + Tensor grad_input = at::empty(input.sizes(), grad_out.options()); + + if (weight.defined()) { + strides[1] = weight.strides()[0]; + auto weight_nd = weight.as_strided(shape, strides); + auto iter = TensorIteratorConfig() + .add_output(grad_input) + .add_input(grad_out) + .add_input(invstd_nd) + .add_input(weight_nd) + .check_all_same_dtype(false) + .promote_inputs_to_common_dtype(false) + .build(); + + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), + "batch_norm_eval_backward", [&]{ + using accscalar_t = at::acc_type; + gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd, accscalar_t weight) + -> scalar_t { + return gO * weight * invstd; + }); + }); + } else { + auto iter = TensorIteratorConfig() + .add_output(grad_input) + .add_input(grad_out) + .add_input(invstd_nd) + .check_all_same_dtype(false) + .promote_inputs_to_common_dtype(false) + .build(); + + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_out.scalar_type(), + "batch_norm_eval_backward", [&]{ + using accscalar_t = at::acc_type; + gpu_kernel(iter, [] GPU_LAMBDA (scalar_t gO, accscalar_t invstd) -> scalar_t { + return gO * invstd; + }); + }); + } + return grad_input; +} + + +void batch_norm_mean_var(const Tensor& self, Tensor& save_mean, Tensor& save_var) { + // NOTE: Epsilon is only used for InvStd, not Var. The value here is ignored. 
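+  // Note: invstd in this file is defined as 1 / sqrt(var + eps) (see batch_norm_calc_invstd
+  // further down), so a reduction that only produces the plain variance never reads epsilon;
+  // the value below is just a placeholder.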
+ const double dummy_epsilon = 1e-5; + switch (batch_norm_choose_impl(self)) { + case Impl::Contiguous: { + AT_DISPATCH_FLOATING_TYPES_AND2( + kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { + batch_norm_stats_cuda_template( + save_mean, save_var, self, dummy_epsilon); + }); + return; + } + case Impl::ChannelsLast: { + if ((!save_mean.defined() || save_mean.is_contiguous()) && + (!save_var.defined() || save_var.is_contiguous())) { + AT_DISPATCH_FLOATING_TYPES_AND2( + kHalf, kBFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { + batch_norm_stats_channels_last_cuda_template( + save_mean, save_var, self, dummy_epsilon); + }); + return; + } + C10_FALLTHROUGH; + } + case Impl::General: { + const int64_t ndim = self.dim(); + DimVector reduce_dims(ndim - 1); + reduce_dims[0] = 0; + for (int64_t i = 2; i < ndim; ++i) { + reduce_dims[i - 1] = i; + } + + // For some reason this isn't an actual operator but it exists anyway... + at::native::var_mean_out(save_var, save_mean, self, /*dims=*/reduce_dims, + /*unbiased=*/false, /*keepdim=*/false); + return; + } + } +} + +void batch_norm_update_stats( + const Tensor& save_mean, const Tensor& save_var, + const Tensor& running_mean, const Tensor& running_var, + double momentum_, int64_t N) { + + auto iter = TensorIteratorConfig() + .add_output(running_mean) + .add_output(running_var) + .add_input(save_mean) + .add_input(save_var) + .add_input(running_mean) + .add_input(running_var) + .check_all_same_dtype(false) + .promote_inputs_to_common_dtype(false) + .build(); + + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), + "batch_norm_update_stats_cuda", [&] { + using acc_t = at::acc_type; + const auto bessel_correction_factor = static_cast( + static_cast(N) / static_cast(N - 1)); + const auto momentum = static_cast(momentum_); + gpu_kernel_multiple_outputs( + iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) + -> thrust::tuple { + const auto unbiased_var = var * bessel_correction_factor; + return thrust::tuple{ + mean * momentum + (1 - momentum) * running_mean, + unbiased_var * momentum + (1 - momentum) * running_var, + }; + }); + }); +} + +void batch_norm_update_stats_and_invert( + const Tensor& save_mean, const Tensor& save_var, + const Tensor& running_mean, const Tensor& running_var, + double momentum_, double epsilon, int64_t N) { + + auto iter = TensorIteratorConfig() + .add_output(running_mean) + .add_output(running_var) + .add_output(save_var) + .add_input(save_mean) + .add_input(save_var) + .add_input(running_mean) + .add_input(running_var) + .check_all_same_dtype(false) + .promote_inputs_to_common_dtype(false) + .build(); + + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_mean.scalar_type(), + "batch_norm_update_stats_cuda", [&] { + using acc_t = at::acc_type; + const auto bessel_correction_factor = static_cast( + static_cast(N) / static_cast(N - 1)); + const auto eps = static_cast(epsilon); + const auto momentum = static_cast(momentum_); + gpu_kernel_multiple_outputs( + iter, [=] GPU_LAMBDA (acc_t mean, acc_t var, scalar_t running_mean, scalar_t running_var) + -> thrust::tuple { + const auto unbiased_var = var * bessel_correction_factor; + return thrust::tuple{ + mean * momentum + (1 - momentum) * running_mean, + unbiased_var * momentum + (1 - momentum) * running_var, + c10::cuda::compat::rsqrt(var + eps) + }; + }); + }); +} + +void batch_norm_calc_invstd(const Tensor& out_invstd, const Tensor& running_var, double epsilon) { + auto iter = 
TensorIteratorConfig() + .add_output(out_invstd) + .add_input(running_var) + .check_all_same_dtype(false) + .build(); + + AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, running_var.scalar_type(), + "batch_norm_invert_std_cuda", [&] { + using acc_t = at::acc_type; + auto eps = static_cast(epsilon); + gpu_kernel(iter, [eps] GPU_LAMBDA (scalar_t var) -> acc_t { + return c10::cuda::compat::rsqrt(var + eps); + }); + }); +} +} + +std::tuple batch_norm_cuda_out(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { + const bool has_running_mean = (running_mean_opt.has_value() && running_mean_opt->defined()); + const bool has_running_var = (running_var_opt.has_value() && running_var_opt->defined()); + TORCH_CHECK(has_running_mean == has_running_var); + + if (train) { + batch_norm_mean_var(self, save_mean, save_invstd); + if (has_running_mean) { + const int64_t N = self.numel() / save_mean.numel(); + batch_norm_update_stats_and_invert( + save_mean, save_invstd, *running_mean_opt, *running_var_opt, + momentum, epsilon, N); + } else { + batch_norm_calc_invstd(save_invstd, save_invstd, epsilon); + } + } else { + TORCH_CHECK(has_running_mean); + at::native::resize_output(save_mean, running_mean_opt->sizes()); + save_mean.copy_(*running_mean_opt, /*non_blocking=*/true); + batch_norm_calc_invstd(save_invstd, running_var_opt.value(), epsilon); + } + + batch_norm_elementwise(output, self, weight_opt, bias_opt, save_mean, save_invstd); + return std::tuple(output, save_mean, save_invstd); +} + +std::tuple batch_norm_cuda(const Tensor& self, const c10::optional& weight_opt, const c10::optional& bias_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, bool train, double momentum, double epsilon) { + auto output = at::empty_like(self, self.suggest_memory_format()); + int64_t n_input = self.size(1); + auto options = self.options().dtype( + at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); + auto save_mean = at::empty({n_input}, options); + auto save_invstd = at::empty({n_input}, options); + + at::native::batch_norm_cuda_out( + self, + weight_opt, + bias_opt, + running_mean_opt, + running_var_opt, + train, + momentum, + epsilon, + output, + save_mean, + save_invstd); + return std::make_tuple(output, save_mean, save_invstd); +} + +std::tuple batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& input, const c10::optional& weight_opt, const c10::optional& running_mean_opt, const c10::optional& running_var_opt, const c10::optional& save_mean_opt, const c10::optional& save_invstd_opt, bool train, double epsilon, std::array grad_input_mask) { + // See [Note: hacky wrapper removal for optional tensor] + c10::MaybeOwned weight = at::borrow_from_optional_tensor(weight_opt); + c10::MaybeOwned save_mean = at::borrow_from_optional_tensor(save_mean_opt); + c10::MaybeOwned save_invstd = at::borrow_from_optional_tensor(save_invstd_opt); + c10::MaybeOwned running_mean = at::borrow_from_optional_tensor(running_mean_opt); + c10::MaybeOwned running_var = at::borrow_from_optional_tensor(running_var_opt); + + const bool needs_reduction = train || grad_input_mask[1] || grad_input_mask[2]; + + // Fused reducion & elementwise kernel + if (needs_reduction && grad_input_mask[0] && + !batch_norm_use_channels_last_kernels(input) && + cuda::detail::canUse32BitIndexMath(input) && + 
cuda::detail::canUse32BitIndexMath(grad_out)) { + return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), + "batch_norm_backward_cuda", [&] { + using accscalar_t = at::acc_type; + const bool mixed_type = is_mixed_type(input, *weight, *running_mean, *running_var); + if (mixed_type) { + return batch_norm_backward_cuda_template( + grad_out, input, *weight, *running_mean, *running_var, + *save_mean, *save_invstd, train, epsilon, grad_input_mask); + } else { + return batch_norm_backward_cuda_template( + grad_out, input, *weight, *running_mean, *running_var, + *save_mean, *save_invstd, train, epsilon, grad_input_mask); + } + }); + } + + // NOTE: native_batch_norm always returns save_mean and save_invstd to be reused in backward. + // However, this is also called from cudnn_batch_norm in eval mode which doesn't give + // save_mean and save_invstd, so it needs recalculated. + const auto acc_type = at::toAccumulateType(input.scalar_type(), /*is_cuda=*/true); + Tensor mean; + if (save_mean->defined()) { + mean = *save_mean; + } else if (needs_reduction) { + TORCH_CHECK(!train && running_mean->defined()); + mean = (running_mean->scalar_type() == acc_type) ? + *running_mean : running_mean->to(acc_type); + } + + Tensor invstd; + if (save_invstd->defined()) { + invstd = *save_invstd; + } else { + TORCH_CHECK(!train && running_var->defined()); + auto n_channels = input.sizes()[1]; + invstd = at::empty({n_channels}, input.options().dtype(acc_type)); + batch_norm_calc_invstd(invstd, *running_var, epsilon); + } + + Tensor sum_dy, sum_dy_xmu, grad_weight, grad_bias; + if (needs_reduction) { + std::tie(sum_dy, sum_dy_xmu, grad_weight, grad_bias) = + batch_norm_backward_reduce_cuda( + grad_out, input, mean, invstd, *weight, + grad_input_mask[0], grad_input_mask[1], grad_input_mask[2]); + } + + Tensor grad_input; + if (grad_input_mask[0]) { + if (train) { + // NOTE: sum_dy and sum_dy_xmy are defined, as train implies needs_reduction + grad_input = batch_norm_elementwise_backward_train( + grad_out, input, mean, invstd, *weight, sum_dy, sum_dy_xmu); + } else { + grad_input = batch_norm_elementwise_backward_eval( + grad_out, input, invstd, *weight); + } + } + + return std::make_tuple(grad_input, grad_weight, grad_bias); +} + +std::tuple batch_norm_stats_cuda(const Tensor& self, double epsilon) { + auto options = self.options().dtype( + at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); + auto n_channels = self.size(1); + auto save_mean = at::empty({n_channels}, options); + auto save_invstd = at::empty({n_channels}, options); + + bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self); + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, + self.scalar_type(), "batch_norm_stats_cuda", [&] { + if (cuda::detail::canUse32BitIndexMath(self)) { + if (use_channels_last_kernel) { + batch_norm_stats_channels_last_cuda_template( + save_mean, save_invstd, self, epsilon); + } else { + batch_norm_stats_cuda_template( + save_mean, save_invstd, self, epsilon); + } + } else { + batch_norm_stats_cuda_template( + save_mean, save_invstd, self, epsilon); + } + }); + return std::tuple(save_mean, save_invstd); +} + +Tensor batch_norm_elemt_cuda( + const Tensor& self, const c10::optional& weight_opt, + const c10::optional& bias_opt, const Tensor& mean, + const Tensor& invstd, double epsilon) { + auto output = at::empty_like(self, self.suggest_memory_format()); + // FIXME: Epsilon parameter isn't required, we don't take the reciprocal + 
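+  // batch_norm_elemt consumes the invstd produced by batch_norm_stats, where epsilon has
+  // already been folded in as invstd = 1 / sqrt(var + eps), so the parameter is unused here
+  // and appears to be kept only for signature compatibility. Rough intended pairing of the
+  // two ops (a sketch, not code from this file):
+  //   auto stats = at::batch_norm_stats(input, eps);            // -> (mean, invstd)
+  //   auto out   = at::batch_norm_elemt(input, weight, bias,
+  //                                     std::get<0>(stats), std::get<1>(stats), eps);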
batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); + return output; +} + +Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, + const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) { + // FIXME: Epsilon parameter isn't required, we don't take the reciprocal + batch_norm_elementwise(output, self, weight_opt, bias_opt, mean, invstd); + return output; +} + +// accepting input(self) here to determine template data types, since running_mean/running_var are optional +std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) { + // See [Note: hacky wrapper removal for optional tensor] + c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); + const Tensor& running_mean = *running_mean_maybe_owned; + const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); + + std::vector<int64_t> counts(mean.size(0), count); + Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); + counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype()); + return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); +} + + +std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda( + const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { + // See [Note: hacky wrapper removal for optional tensor] + c10::MaybeOwned<Tensor> running_mean_maybe_owned = at::borrow_from_optional_tensor(running_mean_opt); + const Tensor& running_mean = *running_mean_maybe_owned; + const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); + + + auto scalar_type = running_mean.defined() ?
running_mean.scalar_type() : self.scalar_type(); + return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] { + using accscalar_t = at::acc_type; + if (cuda::detail::canUse32BitIndexMath(self)) { + return batch_norm_gather_stats_cuda_template(mean, invstd, running_mean, running_var, momentum, epsilon, counts); + } else { + return batch_norm_gather_stats_cuda_template(mean, invstd, running_mean, running_var, momentum, epsilon, counts); + } + }); +} + +std::tuple batch_norm_backward_reduce_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional& weight_opt, bool input_g, bool weight_g, bool bias_g) { + // See [Note: hacky wrapper removal for optional tensor] + c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); + const Tensor& weight = *weight_maybe_owned; + + if (at::cuda::detail::canUse32BitIndexMath(grad_output) && + batch_norm_use_channels_last_kernels(grad_output) && + batch_norm_use_channels_last_kernels(input) && + (!weight.defined() || weight.is_contiguous()) && + mean.is_contiguous() && invstd.is_contiguous()){ + return batch_norm_backward_reduce_cuda_channels_last_template( + grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); + } + + return AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "batch_norm_backward_reduce", [&] { + auto mean_st = mean.dtype(); + auto invstd_st = invstd.dtype(); + TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); + const bool mixed_type = is_mixed_type(input, weight); + using accscalar_t = at::acc_type; + + if (cuda::detail::canUse32BitIndexMath(grad_output)) { + if (mixed_type) { + return batch_norm_backward_reduce_cuda_template(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); + } else { + return batch_norm_backward_reduce_cuda_template(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); + } + } else { + if (mixed_type) { + return batch_norm_backward_reduce_cuda_template(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); + } else { + return batch_norm_backward_reduce_cuda_template(grad_output, input, mean, invstd, weight, input_g, weight_g, bias_g); + } + } + }); +} + +Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { + // See [Note: hacky wrapper removal for optional tensor] + c10::MaybeOwned weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt); + const Tensor& weight = *weight_maybe_owned; + + if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ + return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); + } + + return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { + auto mean_st = mean.dtype(); + auto invstd_st = invstd.dtype(); + TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); + bool is_half_float = std::is_same::value && mean_st == at::kFloat; + bool is_bfloat16_float = std::is_same::value && mean_st == at::kFloat; + using accscalar_t = at::acc_type; + if (cuda::detail::canUse32BitIndexMath(self)) { + if (is_half_float || 
is_bfloat16_float) { + return batch_norm_backward_elemt_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); + } else { + return batch_norm_backward_elemt_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); + } + } else { + if (is_half_float || is_bfloat16_float) { + return batch_norm_backward_elemt_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); + } else { + return batch_norm_backward_elemt_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); + } + } + }); +} + +std::tuple batch_norm_update_stats_cuda( + const Tensor& self, const c10::optional& running_mean_opt, + const c10::optional& running_var_opt, double momentum) { + c10::MaybeOwned running_mean = at::borrow_from_optional_tensor(running_mean_opt); + c10::MaybeOwned running_var = at::borrow_from_optional_tensor(running_var_opt); + + const int64_t n_input = self.size(1); + auto options = self.options().dtype( + at::toAccumulateType(self.scalar_type(), /*is_cuda=*/true)); + auto save_mean = at::empty({n_input}, options); + auto save_var = at::empty({n_input}, options); + + batch_norm_mean_var(self, save_mean, save_var); + TORCH_CHECK(running_mean->defined() == running_var->defined()); + if (running_mean->defined()) { + const int64_t N = self.numel() / save_mean.numel(); + batch_norm_update_stats(save_mean, save_var, *running_mean, *running_var, momentum, N); + } + return std::tuple(save_mean, save_var); +} + +} } // namespace at::native diff --git a/cuda_code/NumericLimits.test_2.cu b/cuda_code/NumericLimits.test_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..5bcd60381653bbd082b30066b6da6300698bbbd6 --- /dev/null +++ b/cuda_code/NumericLimits.test_2.cu @@ -0,0 +1,79 @@ +//---------------------------------*-CUDA-*----------------------------------// +// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers. +// See the top-level COPYRIGHT file for details. +// SPDX-License-Identifier: (Apache-2.0 OR MIT) +//---------------------------------------------------------------------------// +//! \file NumericLimits.test.cu +//---------------------------------------------------------------------------// +#include "NumericLimits.test.hh" + +#include "base/Assert.hh" +#include "base/KernelParamCalculator.cuda.hh" +#include "base/NumericLimits.hh" + +namespace celeritas_test +{ +//---------------------------------------------------------------------------// +// KERNELS +//---------------------------------------------------------------------------// + +template +__global__ void nl_test_kernel(NLTestOutput* data) +{ + using limits_t = celeritas::numeric_limits; + unsigned int local_thread_id + = celeritas::KernelParamCalculator::thread_id().get(); + if (local_thread_id == 0) + { + data->eps = limits_t::epsilon(); + } + else if (local_thread_id == 1) + { + data->nan = limits_t::quiet_NaN(); + } + else if (local_thread_id == 2) + { + data->inf = limits_t::infinity(); + } + else if (local_thread_id == 3) + { + data->max = limits_t::max(); + } +} + +//---------------------------------------------------------------------------// +// TESTING INTERFACE +//---------------------------------------------------------------------------// +//! 
Run on device and return results +template +NLTestOutput nl_test() +{ + // Allocate output data + NLTestOutput* result_device; + CELER_CUDA_CALL(cudaMalloc(&result_device, sizeof(NLTestOutput))); + + celeritas::KernelParamCalculator calc_launch_params; + + auto params = calc_launch_params(3); + nl_test_kernel<<>>(result_device); + CELER_CUDA_CALL(cudaDeviceSynchronize()); + + // Copy to host + NLTestOutput result; + CELER_CUDA_CALL(cudaMemcpy(&result, + result_device, + sizeof(NLTestOutput), + cudaMemcpyDeviceToHost)); + CELER_CUDA_CALL(cudaFree(result_device)); + return result; +} + +//---------------------------------------------------------------------------// +// EXPLICIT INSTANTIATION +//---------------------------------------------------------------------------// + +template NLTestOutput nl_test(); +template NLTestOutput nl_test(); + +//---------------------------------------------------------------------------// +} // namespace celeritas_test diff --git a/cuda_code/NvPipe_8.cu b/cuda_code/NvPipe_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..8326f3c2478a4b5be28d87002f70350600300f66 --- /dev/null +++ b/cuda_code/NvPipe_8.cu @@ -0,0 +1,1315 @@ +/* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of NVIDIA CORPORATION nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "NvPipe.h" + +#ifdef NVPIPE_WITH_ENCODER +#include "NvCodec/NvEncoder/NvEncoderCuda.h" +#endif + +#ifdef NVPIPE_WITH_DECODER +#include "NvCodec/NvDecoder/NvDecoder.h" +#endif + +#include "NvCodec/Utils/NvCodecUtils.h" + +#include +#include +#include +#include +#include + +#include +#include + +#ifdef NVPIPE_WITH_OPENGL +#include +#endif + +#ifdef NVPIPE_WITH_D3D11 +#include +#endif + +extern "C" { + +class Exception +{ +public: + Exception(const std::string& msg) : message(msg) {} + std::string getErrorString() const { return message; } +public: + std::string message; +}; + + +inline void CUDA_THROW(cudaError_t code, const std::string& errorMessage) +{ + if (cudaSuccess != code) { + throw Exception(errorMessage + " (Error " + std::to_string(code) + ": " + std::string(cudaGetErrorString(code)) + ")"); + } +} + +inline bool isDevicePointer(const void* ptr) +{ + struct cudaPointerAttributes attr; + const cudaError_t perr = cudaPointerGetAttributes(&attr, ptr); + +#if (CUDA_VERSION >= 10000) + return (perr == cudaSuccess) && (attr.type != cudaMemoryTypeHost); +#else + return (perr == cudaSuccess) && (attr.memoryType != cudaMemoryTypeHost); +#endif +} + +inline uint64_t getFrameSize(NvPipe_Format format, uint32_t width, uint32_t height) +{ + if (format == NVPIPE_BGRA32) + return width * height * 4; + else if (format == NVPIPE_UINT4) + return width * height / 2; + else if (format == NVPIPE_UINT8) + return width * height; + else if (format == NVPIPE_UINT16) + return width * height * 2; + else if (format == NVPIPE_UINT32) + return width * height * 4; + + return 0; +} + + +__global__ +void uint4_to_nv12(const uint8_t* src, uint32_t srcPitch, uint8_t* dst, uint32_t dstPitch, uint32_t width, uint32_t height) +{ + // one thread per pixel + const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; + const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; + + if (x < width && y < height) + { + const uint32_t i = y * srcPitch + x/2; + const uint32_t j = y * dstPitch + x; + + // Extend 4 bit to 8 bits + // Even thread: higher 4 bits, odd thread: lower 4 bits + dst[j] = (x & 1 == 1) ? 
(src[i] & 0xF) : ((src[i] & 0xF0) >> 4); + + // Blank UV channel + if (y < height / 2) + { + uint8_t* UV = dst + dstPitch * (height + y); + UV[x] = 0; + } + } +} + +__global__ +void nv12_to_uint4(const uint8_t* src, uint32_t srcPitch, uint8_t* dst, uint32_t dstPitch, uint32_t width, uint32_t height) +{ + // one thread per TWO pixels + const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; + const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; + + if (2 * x < width && y < height) + { + const uint32_t i = y * srcPitch + 2 * x; + const uint32_t j = y * dstPitch + x; + + // Merge lower 4 bits of two Y bytes to one output byte + uint8_t v = (src[i] & 0xF) << 4; + + if (2 * x + 1 < width) + v = v | (src[i+1] & 0xF); + + dst[j] = v; + } +} + +__global__ +void uint8_to_nv12(const uint8_t* src, uint32_t srcPitch, uint8_t* dst, uint32_t dstPitch, uint32_t width, uint32_t height) +{ + const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; + const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; + + if (x < width && y < height) + { + const uint32_t i = y * srcPitch + x; + const uint32_t j = y * dstPitch + x; + + // Copy grayscale image to Y channel + dst[j] = src[i]; + + // Blank UV channel + if (y < height / 2) + { + uint8_t* UV = dst + dstPitch * (height + y); + UV[x] = 0; + } + } +} + +__global__ +void nv12_to_uint8(const uint8_t* src, uint32_t srcPitch, uint8_t* dst, uint32_t dstPitch, uint32_t width, uint32_t height) +{ + const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; + const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; + + if (x < width && y < height) + { + const uint32_t i = y * srcPitch + x; + const uint32_t j = y * dstPitch + x; + + // Copy Y channel to grayscale image + dst[j] = src[i]; + + } +} + +__global__ +void uint16_to_nv12(const uint8_t* src, uint32_t srcPitch, uint8_t* dst, uint32_t dstPitch, uint32_t width, uint32_t height) +{ + const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; + const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; + + if (x < width && y < height) + { + const uint32_t i = y * srcPitch + 2 * x; + const uint32_t j = y * dstPitch + x; + + // Copy higher byte to left half of Y channel + dst[j] = src[i]; + + // Copy lower byte to right half of Y channel + dst[j + width] = src[i + 1]; + + // Blank UV channel + if (y < height / 2) + { + uint8_t* UV = dst + dstPitch * (height + y); + UV[2 * x + 0] = 0; + UV[2 * x + 1] = 0; + } + } +} + +__global__ +void nv12_to_uint16(const uint8_t* src, uint32_t srcPitch, uint8_t* dst, uint32_t dstPitch, uint32_t width, uint32_t height) +{ + const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; + const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; + + if (x < width && y < height) + { + const uint32_t i = y * srcPitch + x; + const uint32_t j = y * dstPitch + 2 * x; + + // Copy higher byte from left half of Y channel + dst[j] = src[i]; + + // Copy lower byte from right half of Y channel + dst[j + 1] = src[i + width]; + } +} + +__global__ +void uint32_to_nv12(const uint8_t* src, uint32_t srcPitch, uint8_t* dst, uint32_t dstPitch, uint32_t width, uint32_t height) +{ + const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; + const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; + + if (x < width && y < height) + { + const uint32_t i = y * srcPitch + 4 * x; + const uint32_t j = y * dstPitch + x; + + // Copy highest byte to left quarter of Y channel, + // ... 
+ // Copy lowest byte to right quarter of Y channel + dst[j] = src[i]; + dst[j + width] = src[i + 1]; + dst[j + 2 * width] = src[i + 2]; + dst[j + 3 * width] = src[i + 3]; + + // Blank UV channel + if (y < height / 2) + { + uint8_t* UV = dst + dstPitch * (height + y); + UV[4 * x + 0] = 0; + UV[4 * x + 1] = 0; + UV[4 * x + 2] = 0; + UV[4 * x + 3] = 0; + } + } +} + +__global__ +void nv12_to_uint32(const uint8_t* src, uint32_t srcPitch, uint8_t* dst, uint32_t dstPitch, uint32_t width, uint32_t height) +{ + const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; + const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; + + if (x < width && y < height) + { + const uint32_t i = y * srcPitch + x; + const uint32_t j = y * dstPitch + 4 * x; + + // Copy highest byte from left quarter of Y channel + // ... + // Copy lowest byte from right quarter of Y channel + dst[j] = src[i]; + dst[j + 1] = src[i + width]; + dst[j + 2] = src[i + 2 * width]; + dst[j + 3] = src[i + 3 * width]; + + } +} + +#ifdef NVPIPE_WITH_OPENGL +class GraphicsResourceRegistryGL +{ +public: + virtual ~GraphicsResourceRegistryGL() + { + // Unregister all + for (auto& r : this->registeredPBOs) + CUDA_THROW(cudaGraphicsUnregisterResource(r.second.graphicsResource), + "Failed to unregister PBO graphics resource"); + + for (auto& r : this->registeredTextures) + CUDA_THROW(cudaGraphicsUnregisterResource(r.second.graphicsResource), + "Failed to unregister texture graphics resource"); + } + + cudaGraphicsResource_t getTextureGraphicsResource(uint32_t texture, uint32_t target, uint32_t width, uint32_t height, uint32_t flags) + { + // Check if texture needs to be (re)registered + RegisteredTexture& reg = this->registeredTextures[texture]; + + if (reg.width != width || reg.height != height || reg.target != target) { + if (reg.graphicsResource) { + CUDA_THROW(cudaGraphicsUnregisterResource(reg.graphicsResource), + "Failed to unregister texture graphics resource"); + + reg.graphicsResource = nullptr; + } + + CUDA_THROW(cudaGraphicsGLRegisterImage(®.graphicsResource, texture, target, flags), + "Failed to register texture as graphics resource"); + + reg.width = width; + reg.height = height; + reg.target = target; + } + + return reg.graphicsResource; + } + + cudaGraphicsResource_t getPBOGraphicsResource(uint32_t pbo, uint32_t width, uint32_t height, uint32_t flags) + { + // Check if PBO needs to be (re)registered + RegisteredResource& reg = this->registeredPBOs[pbo]; + + if (reg.width != width || reg.height != height) { + if (reg.graphicsResource) { + CUDA_THROW(cudaGraphicsUnregisterResource(reg.graphicsResource), + "Failed to unregister PBO graphics resource"); + + reg.graphicsResource = nullptr; + } + + CUDA_THROW(cudaGraphicsGLRegisterBuffer(®.graphicsResource, pbo, flags), + "Failed to register PBO as graphics resource"); + + reg.width = width; + reg.height = height; + } + + return reg.graphicsResource; + } + +private: + struct RegisteredTexture + { + cudaGraphicsResource_t graphicsResource = nullptr; + uint32_t width = 0; + uint32_t height = 0; + uint32_t target = 0; + }; + std::unordered_map registeredTextures; + + struct RegisteredResource + { + cudaGraphicsResource_t graphicsResource = nullptr; + uint32_t width = 0; + uint32_t height = 0; + }; + std::unordered_map registeredPBOs; +}; +#endif + +#ifdef NVPIPE_WITH_D3D11 +class GraphicsResourceRegistryD3D11 +{ +public: + virtual ~GraphicsResourceRegistryD3D11() + { + // TODO: Is this needed? 
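+        // Note: cudaThreadSynchronize() is deprecated in current CUDA toolkits and is
+        // equivalent to cudaDeviceSynchronize(); it is presumably called here so that any
+        // outstanding work has finished before the resources are unregistered below.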
+ cudaThreadSynchronize(); + + // Unregister all + for (auto& r : this->registeredTextures) + { + if (r.second.graphicsResource) + { + CUDA_THROW(cudaGraphicsUnregisterResource(r.second.graphicsResource), + "Failed to unregister D3D11 texture graphics resource"); + + r.first->Release(); + } + } + } + + cudaGraphicsResource_t getTextureGraphicsTextureD3D11(ID3D11Texture2D* texture, uint32_t width, uint32_t height, uint32_t flags) + { + // Check if D3D11 texture needs to be (re)registered + RegisteredResource& reg = this->registeredTextures[texture]; + + if (reg.width != width || reg.height != height) + { + if (reg.graphicsResource) + { + CUDA_THROW(cudaGraphicsUnregisterResource(reg.graphicsResource), + "Failed to unregister D3D11 graphics resource"); + + reg.graphicsResource = nullptr; + + texture->Release(); + } + + CUDA_THROW(cudaGraphicsD3D11RegisterResource(®.graphicsResource, texture, flags), + "Failed to register D3D11 texture as graphics resource"); + + texture->AddRef(); + + reg.width = width; + reg.height = height; + } + + return reg.graphicsResource; + } +private: + struct RegisteredResource + { + cudaGraphicsResource_t graphicsResource = nullptr; + uint32_t width = 0; + uint32_t height = 0; + }; + std::unordered_map registeredTextures; +}; +#endif + +class GraphicsResourceRegistryBase {}; + +class GraphicsResourceRegistry +: public GraphicsResourceRegistryBase +#ifdef NVPIPE_WITH_OPENGL +, public GraphicsResourceRegistryGL +#endif +#ifdef NVPIPE_WITH_D3D11 +, public GraphicsResourceRegistryD3D11 +#endif +{ +}; + +#ifdef NVPIPE_WITH_ENCODER +/** + * @brief Encoder implementation. + */ +class Encoder +{ +public: + Encoder(NvPipe_Format format, NvPipe_Codec codec, NvPipe_Compression compression, uint64_t bitrate, uint32_t targetFrameRate) + { + this->format = format; + this->codec = codec; + this->compression = compression; + this->bitrate = bitrate; + this->targetFrameRate = targetFrameRate; + + this->recreate(1920, 1080); + } + + ~Encoder() + { + // Free temporary device memory + if (this->deviceBuffer) + cudaFree(this->deviceBuffer); + } + + void setBitrate(uint64_t bitrate, uint32_t targetFrameRate) + { + NV_ENC_CONFIG config; + memset(&config, 0, sizeof(config)); + config.version = NV_ENC_CONFIG_VER; + config.rcParams.averageBitRate = bitrate; + + NV_ENC_RECONFIGURE_PARAMS reconfigureParams; + memset(&reconfigureParams, 0, sizeof(reconfigureParams)); + reconfigureParams.version = NV_ENC_RECONFIGURE_PARAMS_VER; + reconfigureParams.resetEncoder = 1; + reconfigureParams.forceIDR = 1; + reconfigureParams.reInitEncodeParams.encodeConfig = &config; + + encoder->GetInitializeParams(&reconfigureParams.reInitEncodeParams); + reconfigureParams.reInitEncodeParams.frameRateNum = targetFrameRate; + reconfigureParams.reInitEncodeParams.frameRateDen = 1; + + encoder->Reconfigure(&reconfigureParams); + + this->bitrate = bitrate; + this->targetFrameRate = targetFrameRate; + } + + uint64_t encode(const void* src, uint64_t srcPitch, uint8_t *dst, uint64_t dstSize, uint32_t width, uint32_t height, bool forceIFrame) + { + // Recreate encoder if size changed + if (this->format == NVPIPE_UINT16) + this->recreate(width * 2, height); // split into two adjecent tiles in Y channel + else if (this->format == NVPIPE_UINT32) + this->recreate(width * 4, height); // split into four adjecent tiles in Y channel + else + this->recreate(width, height); + + // RGBA can be directly copied from host or device + if (this->format == NVPIPE_BGRA32) + { + const NvEncInputFrame* f = this->encoder->GetNextInputFrame(); + 
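+            // The encoder's input surface can be pitch-padded, so a 2D copy is used here:
+            // width * 4 payload bytes are copied per row while source and destination advance
+            // by srcPitch and f->pitch respectively; the memcpy kind below selects between a
+            // host and a device source.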
CUDA_THROW(cudaMemcpy2D(f->inputPtr, f->pitch, src, srcPitch, width * 4, height, isDevicePointer(src) ? cudaMemcpyDeviceToDevice : cudaMemcpyHostToDevice), + "Failed to copy input frame"); + } + // Other formats need to be copied to the device and converted + else + { + // Copy to device if necessary + bool copyToDevice = !isDevicePointer(src); + if (copyToDevice) + { + this->recreateDeviceBuffer(width, height); + CUDA_THROW(cudaMemcpy(this->deviceBuffer, src, getFrameSize(this->format, width, height), cudaMemcpyHostToDevice), + "Failed to copy input frame"); + } + + // Convert + const NvEncInputFrame* f = this->encoder->GetNextInputFrame(); + + if (this->format == NVPIPE_UINT4) + { + // one thread per pixel (extract 4 bit and copy to 8 bit) + dim3 gridSize(width / 16 + 1, height / 2 + 1); + dim3 blockSize(16, 2); + + uint4_to_nv12<<>>((uint8_t*) (copyToDevice ? this->deviceBuffer : src), srcPitch, (uint8_t*) f->inputPtr, f->pitch, width, height); + } + else if (this->format == NVPIPE_UINT8) + { + // one thread per pixel (copy 8 bit) + dim3 gridSize(width / 16 + 1, height / 2 + 1); + dim3 blockSize(16, 2); + + uint8_to_nv12<<>>((uint8_t*) (copyToDevice ? this->deviceBuffer : src), srcPitch, (uint8_t*) f->inputPtr, f->pitch, width, height); + } + else if (this->format == NVPIPE_UINT16) + { + // one thread per pixel (split 16 bit into 2x 8 bit) + dim3 gridSize(width / 16 + 1, height / 2 + 1); + dim3 blockSize(16, 2); + + uint16_to_nv12<<>>((uint8_t*) (copyToDevice ? this->deviceBuffer : src), srcPitch, (uint8_t*) f->inputPtr, f->pitch, width, height); + } + else if (this->format == NVPIPE_UINT32) + { + // one thread per pixel (split 32 bit into 4x 8 bit) + dim3 gridSize(width / 16 + 1, height / 2 + 1); + dim3 blockSize(16, 2); + + uint32_to_nv12<<>>((uint8_t*) (copyToDevice ? 
this->deviceBuffer : src), srcPitch, (uint8_t*) f->inputPtr, f->pitch, width, height); + } + } + + // Encode + return this->encode(dst, dstSize, forceIFrame); + } + +#ifdef NVPIPE_WITH_OPENGL + + uint64_t encodeTexture(uint32_t texture, uint32_t target, uint8_t* dst, uint64_t dstSize, uint32_t width, uint32_t height, bool forceIFrame) + { + if (this->format != NVPIPE_BGRA32) + throw Exception("The OpenGL interface only supports the BGRA32 format"); + + // Recreate encoder if size changed + this->recreate(width, height); + + // Map texture and copy input to encoder + cudaGraphicsResource_t resource = this->registry.getTextureGraphicsResource(texture, target, width, height, cudaGraphicsRegisterFlagsReadOnly); + CUDA_THROW(cudaGraphicsMapResources(1, &resource), + "Failed to map texture graphics resource"); + cudaArray_t array; + CUDA_THROW(cudaGraphicsSubResourceGetMappedArray(&array, resource, 0, 0), + "Failed get texture graphics resource array"); + + const NvEncInputFrame* f = this->encoder->GetNextInputFrame(); + CUDA_THROW(cudaMemcpy2DFromArray(f->inputPtr, f->pitch, array, 0, 0, width * 4, height, cudaMemcpyDeviceToDevice), + "Failed to copy from texture array"); + + // Encode + uint64_t size = this->encode(dst, dstSize, forceIFrame); + + // Unmap texture + CUDA_THROW(cudaGraphicsUnmapResources(1, &resource), + "Failed to unmap texture graphics resource"); + + return size; + } + + uint64_t encodePBO(uint32_t pbo, uint8_t* dst, uint64_t dstSize, uint32_t width, uint32_t height, bool forceIFrame) + { + if (this->format != NVPIPE_BGRA32) + throw Exception("The OpenGL interface only supports the BGRA32 format"); + + // Map PBO and copy input to encoder + cudaGraphicsResource_t resource = this->registry.getPBOGraphicsResource(pbo, width, height, cudaGraphicsRegisterFlagsReadOnly); + CUDA_THROW(cudaGraphicsMapResources(1, &resource), + "Failed to map PBO graphics resource"); + void* pboPointer; + size_t pboSize; + CUDA_THROW(cudaGraphicsResourceGetMappedPointer(&pboPointer, &pboSize, resource), + "Failed to get mapped PBO pointer"); + + // Encode + uint64_t size = this->encode(pboPointer, width * 4, dst, dstSize, width, height, forceIFrame); + + // Unmap PBO + CUDA_THROW(cudaGraphicsUnmapResources(1, &resource), + "Failed to unmap PBO graphics resource"); + + return size; + } + +#endif + +#ifdef NVPIPE_WITH_D3D11 + + uint64_t encodeTextureD3D11(ID3D11Texture2D* texture, uint8_t* dst, uint64_t dstSize, bool forceIFrame) + { + if (this->format != NVPIPE_BGRA32) + throw Exception("The D3D11 interface only supports the BGRA32 format"); + + D3D11_TEXTURE2D_DESC desc; + texture->GetDesc(&desc); + + uint32_t width = desc.Width; + uint32_t height = desc.Height; + + // Recreate encoder if size changed + this->recreate(width, height); + + // Map texture and copy input to encoder + cudaGraphicsResource_t resource = this->registry.getTextureGraphicsTextureD3D11(texture, width, height, cudaGraphicsRegisterFlagsNone); + CUDA_THROW(cudaGraphicsMapResources(1, &resource), + "Failed to map texture graphics resource"); + cudaArray_t array; + CUDA_THROW(cudaGraphicsSubResourceGetMappedArray(&array, resource, 0, 0), + "Failed get texture graphics resource array"); + + const NvEncInputFrame* f = this->encoder->GetNextInputFrame(); + CUDA_THROW(cudaMemcpy2DFromArray(f->inputPtr, f->pitch, array, 0, 0, width * 4, height, cudaMemcpyDeviceToDevice), + "Failed to copy from texture array"); + + // Encode + uint64_t size = this->encode(dst, dstSize, forceIFrame); + + // Unmap texture + 
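+        // The mapped array is only read by the cudaMemcpy2DFromArray above; once the frame has
+        // been copied into the encoder's input buffer, releasing the mapping returns ownership
+        // of the texture to D3D11.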
CUDA_THROW(cudaGraphicsUnmapResources(1, &resource), + "Failed to unmap texture graphics resource"); + + return size; + } +#endif +private: + void recreate(uint32_t width, uint32_t height) + { + // Only recreate if necessary + if (width == this->width && height == this->height) + return; + + this->width = width; + this->height = height; + + // Ensure we have a CUDA context + CUDA_THROW(cudaDeviceSynchronize(), + "Failed to synchronize device"); + CUcontext cudaContext; + cuCtxGetCurrent(&cudaContext); + + // Create encoder + try + { + NV_ENC_BUFFER_FORMAT bufferFormat = (this->format == NVPIPE_BGRA32) ? NV_ENC_BUFFER_FORMAT_ARGB : NV_ENC_BUFFER_FORMAT_NV12; + this->encoder = std::unique_ptr(new NvEncoderCuda(cudaContext, width, height, bufferFormat, 0)); + + NV_ENC_INITIALIZE_PARAMS initializeParams = { NV_ENC_INITIALIZE_PARAMS_VER }; + NV_ENC_CONFIG encodeConfig = { NV_ENC_CONFIG_VER }; + initializeParams.encodeConfig = &encodeConfig; + + GUID codecGUID = (this->codec == NVPIPE_HEVC) ? NV_ENC_CODEC_HEVC_GUID : NV_ENC_CODEC_H264_GUID; + + GUID presetGUID = NV_ENC_PRESET_LOW_LATENCY_HQ_GUID; + if (this->compression == NVPIPE_LOSSLESS) + presetGUID = NV_ENC_PRESET_LOSSLESS_DEFAULT_GUID; // NV_ENC_PRESET_LOSSLESS_HP_GUID + + encoder->CreateDefaultEncoderParams(&initializeParams, codecGUID, presetGUID); + + initializeParams.encodeWidth = width; + initializeParams.encodeHeight = height; + initializeParams.frameRateNum = this->targetFrameRate; + initializeParams.frameRateDen = 1; + initializeParams.enablePTD = 1; + + encodeConfig.gopLength = NVENC_INFINITE_GOPLENGTH; // No B-frames + encodeConfig.frameIntervalP = 1; + + if (this->codec == NVPIPE_H264) + encodeConfig.encodeCodecConfig.h264Config.idrPeriod = NVENC_INFINITE_GOPLENGTH; + else if (this->codec == NVPIPE_HEVC) + encodeConfig.encodeCodecConfig.hevcConfig.idrPeriod = NVENC_INFINITE_GOPLENGTH; + + if (this->compression == NVPIPE_LOSSY) + { + encodeConfig.rcParams.averageBitRate = this->bitrate; + encodeConfig.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ; + encodeConfig.rcParams.vbvBufferSize = encodeConfig.rcParams.averageBitRate * initializeParams.frameRateDen / initializeParams.frameRateNum; // bitrate / framerate = one frame + encodeConfig.rcParams.maxBitRate = encodeConfig.rcParams.averageBitRate; + encodeConfig.rcParams.vbvInitialDelay = encodeConfig.rcParams.vbvBufferSize; + } + + encoder->CreateEncoder(&initializeParams); + } + catch (NVENCException& e) + { + throw Exception("Failed to create encoder (" + e.getErrorString() + ")"); + } + } + + uint64_t encode(uint8_t* dst, uint64_t dstSize, bool forceIFrame) + { + std::vector> packets; + + try + { + if (forceIFrame) + { + NV_ENC_PIC_PARAMS params = {}; + params.encodePicFlags = NV_ENC_PIC_FLAG_FORCEIDR | NV_ENC_PIC_FLAG_OUTPUT_SPSPPS; + + this->encoder->EncodeFrame(packets, ¶ms); + } + else + { + this->encoder->EncodeFrame(packets); + } + } + catch (NVENCException& e) + { + throw Exception("Encode failed (" + e.getErrorString() + ")"); + } + + // Copy output + uint64_t size = 0; + for (auto& p : packets) + { + if (size + p.size() <= dstSize) + { + memcpy(dst + size, p.data(), p.size()); + size += p.size(); + } + else + { + throw Exception("Encode output buffer overflow"); + } + } + + return size; + } + + void recreateDeviceBuffer(uint32_t width, uint32_t height) + { + // (Re)allocate temporary device memory if necessary + uint64_t requiredSize = getFrameSize(this->format, width, height); + + if (this->deviceBufferSize < requiredSize) + { + if (this->deviceBuffer) + 
cudaFree(this->deviceBuffer); + + this->deviceBufferSize = requiredSize; + CUDA_THROW(cudaMalloc(&this->deviceBuffer, this->deviceBufferSize), + "Failed to allocate temporary device memory"); + } + } + +private: + NvPipe_Format format; + NvPipe_Codec codec; + NvPipe_Compression compression; + uint64_t bitrate; + uint32_t targetFrameRate; + uint32_t width = 0; + uint32_t height = 0; + + std::unique_ptr encoder; + + void* deviceBuffer = nullptr; + uint64_t deviceBufferSize = 0; + + GraphicsResourceRegistry registry; +}; +#endif + + +#ifdef NVPIPE_WITH_DECODER +/** + * @brief Decoder implementation. + */ +class Decoder +{ +public: + Decoder(NvPipe_Format format, NvPipe_Codec codec) + { + this->format = format; + this->codec = codec; + + this->recreate(1920, 1080); + } + + ~Decoder() + { + // Free temporary device memory + if (this->deviceBuffer) + cudaFree(this->deviceBuffer); + } + + uint64_t decode(const uint8_t* src, uint64_t srcSize, void* dst, uint32_t width, uint32_t height) + { + // Recreate decoder if size changed + if (this->format == NVPIPE_UINT16) + this->recreate(width * 2, height); // split into two adjecent tiles in Y channel + else if (this->format == NVPIPE_UINT32) + this->recreate(width * 4, height); // split into four adjecent tiles in Y channel + else + this->recreate(width, height); + + // Decode + uint8_t* decoded = this->decode(src, srcSize); + + if (nullptr != decoded) + { + // Allocate temporary device buffer if we need to copy to the host eventually + bool copyToHost = !isDevicePointer(dst); + if (copyToHost) + this->recreateDeviceBuffer(width, height); + + // Convert to output format + uint8_t* dstDevice = (uint8_t*) (copyToHost ? this->deviceBuffer : dst); + + if (this->format == NVPIPE_BGRA32) + { + Nv12ToBgra32(decoded, width, dstDevice, width * 4, width, height); + } + else if (this->format == NVPIPE_UINT4) + { + // one thread per TWO pixels (merge 2x4 bit to one byte per thread) + dim3 gridSize(width / 16 / 2 + 1, height / 2 + 1); + dim3 blockSize(16, 2); + + nv12_to_uint4<<>>(decoded, this->decoder->GetDeviceFramePitch(), dstDevice, width / 2, width, height); + } + else if (this->format == NVPIPE_UINT8) + { + // one thread per pixel (copy 8 bit) + dim3 gridSize(width / 16 + 1, height / 2 + 1); + dim3 blockSize(16, 2); + + nv12_to_uint8<<>>(decoded, this->decoder->GetDeviceFramePitch(), dstDevice, width, width, height); + } + else if (this->format == NVPIPE_UINT16) + { + // one thread per pixel (merge 2x8 bit into 16 bit pixels) + dim3 gridSize(width / 16 + 1, height / 2 + 1); + dim3 blockSize(16, 2); + + nv12_to_uint16<<>>(decoded, this->decoder->GetDeviceFramePitch(), dstDevice, width * 2, width, height); + } + else if (this->format == NVPIPE_UINT32) + { + // one thread per pixel (merge 4x8 bit into 32 bit pixels) + dim3 gridSize(width / 16 + 1, height / 2 + 1); + dim3 blockSize(16, 2); + + nv12_to_uint32<<>>(decoded, this->decoder->GetDeviceFramePitch(), dstDevice, width * 4, width, height); + } + + // Copy to host if necessary + if (copyToHost) + CUDA_THROW(cudaMemcpy(dst, this->deviceBuffer, getFrameSize(this->format, width, height), cudaMemcpyDeviceToHost), + "Failed to copy output to host memory"); + + return getFrameSize(this->format, width, height); + } + + return 0; + } + +#ifdef NVPIPE_WITH_OPENGL + + uint64_t decodeTexture(const uint8_t* src, uint64_t srcSize, uint32_t texture, uint32_t target, uint32_t width, uint32_t height) + { + if (this->format != NVPIPE_BGRA32) + throw Exception("The OpenGL interface only supports the BGRA32 format"); + + // 
Recreate decoder if size changed + this->recreate(width, height); + + // Decode + uint8_t* decoded = this->decode(src, srcSize); + + if (nullptr != decoded) + { + // Convert to RGBA + this->recreateDeviceBuffer(width, height); + Nv12ToBgra32(decoded, width, (uint8_t*) this->deviceBuffer, width * 4, width, height); + + // Copy output to texture + cudaGraphicsResource_t resource = this->registry.getTextureGraphicsResource(texture, target, width, height, cudaGraphicsRegisterFlagsWriteDiscard); + CUDA_THROW(cudaGraphicsMapResources(1, &resource), + "Failed to map texture graphics resource"); + cudaArray_t array; + CUDA_THROW(cudaGraphicsSubResourceGetMappedArray(&array, resource, 0, 0), + "Failed get texture graphics resource array"); + CUDA_THROW(cudaMemcpy2DToArray(array, 0, 0, this->deviceBuffer, width * 4, width * 4, height, cudaMemcpyDeviceToDevice), + "Failed to copy to texture array"); + CUDA_THROW(cudaGraphicsUnmapResources(1, &resource), + "Failed to unmap texture graphics resource"); + + return width * height * 4; + } + + return 0; + } + + uint64_t decodePBO(const uint8_t* src, uint64_t srcSize, uint32_t pbo, uint32_t width, uint32_t height) + { + if (this->format != NVPIPE_BGRA32) + throw Exception("The OpenGL interface only supports the BGRA32 format"); + + // Map PBO for output + cudaGraphicsResource_t resource = this->registry.getPBOGraphicsResource(pbo, width, height, cudaGraphicsRegisterFlagsWriteDiscard); + CUDA_THROW(cudaGraphicsMapResources(1, &resource), + "Failed to map PBO graphics resource"); + void* pboPointer; + size_t pboSize; + CUDA_THROW(cudaGraphicsResourceGetMappedPointer(&pboPointer, &pboSize, resource), + "Failed to get mapped PBO pointer"); + + // Decode + uint64_t size = this->decode(src, srcSize, pboPointer, width, height); + + // Unmap PBO + CUDA_THROW(cudaGraphicsUnmapResources(1, &resource), + "Failed to unmap PBO graphics resource"); + + return size; + } + +#endif + +private: + void recreate(uint32_t width, uint32_t height) + { + // Only recreate if necessary + if (width == this->width && height == this->height) + return; + + this->width = width; + this->height = height; + + // Ensure we have a CUDA context + CUDA_THROW(cudaDeviceSynchronize(), + "Failed to synchronize device"); + CUcontext cudaContext; + cuCtxGetCurrent(&cudaContext); + + // Create decoder + try + { + this->decoder = std::unique_ptr(new NvDecoder(cudaContext, width, height, true, (this->codec == NVPIPE_HEVC) ? cudaVideoCodec_HEVC : cudaVideoCodec_H264, nullptr, true)); + } + catch (NVDECException& e) + { + throw Exception("Failed to create decoder (" + e.getErrorString() + ")"); + } + } + + uint8_t* decode(const uint8_t* src, uint64_t srcSize) + { + int numFramesDecoded = 0; + uint8_t **decodedFrames; + int64_t *timeStamps; + + try + { + // Some cuvid implementations have one frame latency. Refeed frame into pipeline in this case. + const uint32_t DECODE_TRIES = 3; + for (uint32_t i = 0; (i < DECODE_TRIES) && (numFramesDecoded <= 0); ++i) + this->decoder->Decode(src, srcSize, &decodedFrames, &numFramesDecoded, CUVID_PKT_ENDOFPICTURE, &timeStamps, this->n++); + } + catch (NVDECException& e) + { + throw Exception("Decode failed (" + e.getErrorString() + ")"); + } + + if (numFramesDecoded <= 0) + { + throw Exception("No frame decoded (Decoder expects encoded bitstream for a single complete frame. 
Accumulating partial data or combining multiple frames is not supported.)"); + } + + return decodedFrames[numFramesDecoded - 1]; + } + + void recreateDeviceBuffer(uint32_t width, uint32_t height) + { + // (Re)allocate temporary device memory if necessary + uint64_t requiredSize = getFrameSize(this->format, width, height); + + if (this->deviceBufferSize < requiredSize) + { + if (this->deviceBuffer) + cudaFree(this->deviceBuffer); + + this->deviceBufferSize = requiredSize; + CUDA_THROW(cudaMalloc(&this->deviceBuffer, this->deviceBufferSize), + "Failed to allocate temporary device memory"); + } + } + +private: + NvPipe_Format format; + NvPipe_Codec codec; + uint32_t width = 0; + uint32_t height = 0; + + std::unique_ptr decoder; + int64_t n = 0; + + void* deviceBuffer = nullptr; + uint64_t deviceBufferSize = 0; + +#ifdef NVPIPE_WITH_OPENGL + GraphicsResourceRegistry registry; +#endif +}; + +#endif + + + + +// --------- Exported C API --------- + +// NvPipe was originally developed as a C++ library. +// However, for compatibility reasons its functionality is now exposed as a plain C API. + +struct Instance +{ +#ifdef NVPIPE_WITH_ENCODER + std::unique_ptr encoder; +#endif + +#ifdef NVPIPE_WITH_DECODER + std::unique_ptr decoder; +#endif + + std::string error; +}; + +std::string sharedError; // shared error code for create functions (NOT threadsafe) + + +#ifdef NVPIPE_WITH_ENCODER + +NVPIPE_EXPORT NvPipe* NvPipe_CreateEncoder(NvPipe_Format format, NvPipe_Codec codec, NvPipe_Compression compression, uint64_t bitrate, uint32_t targetFrameRate) +{ + Instance* instance = new Instance(); + + try + { + instance->encoder = std::unique_ptr(new Encoder(format, codec, compression, bitrate, targetFrameRate)); + } + catch (Exception& e) + { + sharedError = e.getErrorString(); + delete instance; + return nullptr; + } + + return instance; +} + +NVPIPE_EXPORT void NvPipe_SetBitrate(NvPipe* nvp, uint64_t bitrate, uint32_t targetFrameRate) +{ + Instance* instance = static_cast(nvp); + if (!instance->encoder) + { + instance->error = "Invalid NvPipe encoder."; + return; + } + + try + { + return instance->encoder->setBitrate(bitrate, targetFrameRate); + } + catch (Exception& e) + { + instance->error = e.getErrorString(); + } +} + +NVPIPE_EXPORT uint64_t NvPipe_Encode(NvPipe* nvp, const void* src, uint64_t srcPitch, uint8_t* dst, uint64_t dstSize, uint32_t width, uint32_t height, bool forceIFrame) +{ + Instance* instance = static_cast(nvp); + if (!instance->encoder) + { + instance->error = "Invalid NvPipe encoder."; + return 0; + } + + try + { + return instance->encoder->encode(src, srcPitch, dst, dstSize, width, height, forceIFrame); + } + catch (Exception& e) + { + instance->error = e.getErrorString(); + return 0; + } +} + +#ifdef NVPIPE_WITH_OPENGL + +NVPIPE_EXPORT uint64_t NvPipe_EncodeTexture(NvPipe* nvp, uint32_t texture, uint32_t target, uint8_t* dst, uint64_t dstSize, uint32_t width, uint32_t height, bool forceIFrame) +{ + Instance* instance = static_cast(nvp); + if (!instance->encoder) + { + instance->error = "Invalid NvPipe encoder."; + return 0; + } + + try + { + return instance->encoder->encodeTexture(texture, target, dst, dstSize, width, height, forceIFrame); + } + catch (Exception& e) + { + instance->error = e.getErrorString(); + return 0; + } +} + +NVPIPE_EXPORT uint64_t NvPipe_EncodePBO(NvPipe* nvp, uint32_t pbo, uint8_t* dst, uint64_t dstSize, uint32_t width, uint32_t height, bool forceIFrame) +{ + Instance* instance = static_cast(nvp); + if (!instance->encoder) + { + instance->error = "Invalid NvPipe 
encoder."; + return 0; + } + + try + { + return instance->encoder->encodePBO(pbo, dst, dstSize, width, height, forceIFrame); + } + catch (Exception& e) + { + instance->error = e.getErrorString(); + return 0; + } +} + +#endif + +#endif + +#ifdef NVPIPE_WITH_DECODER + +NVPIPE_EXPORT NvPipe* NvPipe_CreateDecoder(NvPipe_Format format, NvPipe_Codec codec) +{ + Instance* instance = new Instance(); + + try + { + instance->decoder = std::unique_ptr(new Decoder(format, codec)); + } + catch (Exception& e) + { + sharedError = e.getErrorString(); + delete instance; + return nullptr; + } + + return instance; +} + +NVPIPE_EXPORT uint64_t NvPipe_Decode(NvPipe* nvp, const uint8_t* src, uint64_t srcSize, void* dst, uint32_t width, uint32_t height) +{ + Instance* instance = static_cast(nvp); + if (!instance->decoder) + { + instance->error = "Invalid NvPipe decoder."; + return 0; + } + + try + { + return instance->decoder->decode(src, srcSize, dst, width, height); + } + catch (Exception& e) + { + instance->error = e.getErrorString(); + return 0; + } +} + +#ifdef NVPIPE_WITH_OPENGL + +NVPIPE_EXPORT uint64_t NvPipe_DecodeTexture(NvPipe* nvp, const uint8_t* src, uint64_t srcSize, uint32_t texture, uint32_t target, uint32_t width, uint32_t height) +{ + Instance* instance = static_cast(nvp); + if (!instance->decoder) + { + instance->error = "Invalid NvPipe decoder."; + return 0; + } + + try + { + return instance->decoder->decodeTexture(src, srcSize, texture, target, width, height); + } + catch (Exception& e) + { + instance->error = e.getErrorString(); + return 0; + } +} + +NVPIPE_EXPORT uint64_t NvPipe_DecodePBO(NvPipe* nvp, const uint8_t* src, uint64_t srcSize, uint32_t pbo, uint32_t width, uint32_t height) +{ + Instance* instance = static_cast(nvp); + if (!instance->decoder) + { + instance->error = "Invalid NvPipe decoder."; + return 0; + } + + try + { + return instance->decoder->decodePBO(src, srcSize, pbo, width, height); + } + catch (Exception& e) + { + instance->error = e.getErrorString(); + return 0; + } +} + +#endif + +#endif + +NVPIPE_EXPORT void NvPipe_Destroy(NvPipe* nvp) +{ + Instance* instance = static_cast(nvp); + delete instance; +} + +NVPIPE_EXPORT const char* NvPipe_GetError(NvPipe* nvp) +{ + if (nullptr == nvp) + return sharedError.c_str(); + + Instance* instance = static_cast(nvp); + return instance->error.c_str(); +} + +#ifdef NVPIPE_WITH_D3D11 + +NVPIPE_EXPORT uint64_t NvPipe_EncodeTextureD3D11(NvPipe* nvp, ID3D11Texture2D* texture, uint8_t* dst, uint64_t dstSize, bool forceIFrame) +{ + Instance* instance = static_cast(nvp); + if (!instance->encoder) + { + instance->error = "Invalid NvPipe encoder."; + return 0; + } + + try + { + return instance->encoder->encodeTextureD3D11(texture, dst, dstSize, forceIFrame); + } + catch (Exception& e) + { + instance->error = e.getErrorString(); + return 0; + } +} + +#endif + +} // extern "C" + + + + + + + + + + + + + diff --git a/cuda_code/OneDimElasticityModule_1.cu b/cuda_code/OneDimElasticityModule_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..aae4984f4b87f2cd850f11623588af2042fb036b --- /dev/null +++ b/cuda_code/OneDimElasticityModule_1.cu @@ -0,0 +1,194 @@ +#include +#include "Node.h" +#include "OneDimElasticityModule.h" + +namespace dyno +{ + template + __global__ void ODE_SolveElasticityWithPBD( + DArray position_new, + DArray position, + DArray mass, + Real distance, + Real lambda_prime) + { + int pId = threadIdx.x + (blockIdx.x * blockDim.x); + if (pId >= position.size()) return; + + Coord delta_p = Coord(0); + + + if 
(pId < position.size() - 1) + { + Coord p1 = position[pId]; + Coord p2 = position[pId + 1]; + + Real w1 = Real(1) / mass[pId]; + Real w2 = Real(1) / mass[pId + 1]; + + Coord d12 = p1 - p2; + Real d12_norm = d12.norm(); + Coord n_12 = d12; + if (d12_norm > EPSILON) + { + n_12.normalize(); + } + else + { + n_12 = Coord(1, 0, 0);; + } + //Coord n_12 = d12_norm > EPSILON ? d12.normalize() : Coord(1, 0, 0); + + delta_p += -w1 / (w1 + w2)*(d12_norm - distance)*n_12; + //Coord delta_p2 = w2 / (w1 + w2)*(d12_norm - distance)*n_12; + } + + if (pId >= 1) + { + Coord p0 = position[pId - 1]; + Coord p1 = position[pId]; + + Real w0 = Real(1) / mass[pId - 1]; + Real w1 = Real(1) / mass[pId]; + + Coord d01 = p0 - p1; + Real d01_norm = d01.norm(); + Coord n_01 = d01; + if (d01_norm > EPSILON) + { + n_01.normalize(); + } + else + { + n_01 = Coord(1, 0, 0); + } + //Coord n_01 = d01_norm > EPSILON ? d01.normalize() : Coord(1, 0, 0); + + //Coord delta_p0 = -w0 / (w0 + w1)*(d01_norm - distance)*n_01; + delta_p += w1 / (w0 + w1)*(d01_norm - distance)*n_01; + } + + position_new[pId] = position[pId] + lambda_prime *delta_p; + } + + template + __global__ void ODE_UpdateVelocity( + DArray velArr, + DArray prePos, + DArray curPos, + Real dt) + { + int pId = threadIdx.x + (blockIdx.x * blockDim.x); + if (pId >= velArr.size()) return; + + velArr[pId] += (curPos[pId] - prePos[pId]) / dt; + } + + template + OneDimElasticityModule::OneDimElasticityModule() + : ConstraintModule() + { + this->attachField(&m_distance, "distance", "The sampling distance!", false); + this->attachField(&m_lambda, "lambda", "Material stiffness!", false); + this->attachField(&m_iterNum, "Iterations", "Iteration Number", false); + + this->attachField(&m_position, "position", "Storing the particle positions!", false); + this->attachField(&m_velocity, "velocity", "Storing the particle velocities!", false); + + m_distance.setValue(0.005); + m_lambda.setValue(0.1); + m_iterNum.setValue(10); + } + + + template + OneDimElasticityModule::~OneDimElasticityModule() + { + } + + template + void OneDimElasticityModule::solveElasticity() + { + //Save new positions + m_position_old.assign(m_position.getData()); + + int itor = 0; + Real lambda_prime = 1 - pow(1 - m_lambda.getData(), 1 / Real(m_iterNum.getData())); + while (itor < m_iterNum.getData()) + { + m_position_buf.assign(m_position.getData()); + + int num = m_position.getElementCount(); + uint pDims = cudaGridSize(num, BLOCK_SIZE); + + ODE_SolveElasticityWithPBD << > > ( + m_position.getData(), + m_position_buf, + m_mass.getData(), + m_distance.getData(), + lambda_prime); + + itor++; + } + + this->updateVelocity(); + } + + template + void OneDimElasticityModule::updateVelocity() + { + int num = m_position.getElementCount(); + uint pDims = cudaGridSize(num, BLOCK_SIZE); + + Real dt = 0.001; + + ODE_UpdateVelocity << > > ( + m_velocity.getData(), + m_position_old, + m_position.getData(), + dt); + cuSynchronize(); + } + + + template + bool OneDimElasticityModule::constrain() + { + this->solveElasticity(); + + return true; + } + + + template + bool OneDimElasticityModule::initializeImpl() + { + if (m_distance.isEmpty() || m_position.isEmpty() || m_velocity.isEmpty()) + { + std::cout << "Exception: " << std::string("ElasticityModule's fields are not fully initialized!") << "\n"; + return false; + } + + int num = m_position.getElementCount(); + +// m_invK.resize(num); +// m_weights.resize(num); +// m_displacement.resize(num); +// +// m_F.resize(num); +// + m_position_old.resize(num); + 
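+		// m_position_old caches the positions at the start of the step (consumed by
+		// ODE_UpdateVelocity to recover velocities), while m_position_buf holds the
+		// previous Jacobi iterate that ODE_SolveElasticityWithPBD reads. That kernel
+		// applies the usual PBD distance-constraint projection,
+		//     dp_i = -w_i / (w_i + w_j) * (|p_i - p_j| - d) * n_ij,
+		// scaled by lambda_prime = 1 - (1 - lambda)^(1 / iterNum), as computed in
+		// solveElasticity() above.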
m_position_buf.resize(num); +// m_bulkCoefs.resize(num); +// +// resetRestShape(); +// +// this->computeMaterialStiffness(); +// +// Function1Pt::copy(m_position_old, m_position.getData()); + + return true; + } + + DEFINE_CLASS(OneDimElasticityModule); +} \ No newline at end of file diff --git a/cuda_code/OptiX7Test_2.cu b/cuda_code/OptiX7Test_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..8383687b767ba4311e51ed30cb5c58730c123d28 --- /dev/null +++ b/cuda_code/OptiX7Test_2.cu @@ -0,0 +1,392 @@ +#include + +#include "scuda.h" +#include "squad.h" +#include "qat4.h" + +// simulation +#include +#include "qsim.h" +#include "qevent.h" + +#include "csg_intersect_node.h" +#include "csg_intersect_tree.h" + +#include "Binding.h" +#include "Params.h" + +extern "C" { __constant__ Params params ; } + + +/** +trace : pure function, with no use of params, everything via args +------------------------------------------------------------------- + +**/ + +static __forceinline__ __device__ void trace( + OptixTraversableHandle handle, + float3 ray_origin, + float3 ray_direction, + float tmin, + float tmax, + float3* normal, + float* t, + unsigned* identity, + unsigned* boundary, + float* spare1, + float* spare2 + ) // pure +{ + const unsigned SBToffset = 0u ; + const unsigned SBTstride = 1u ; + const unsigned missSBTIndex = 0u ; + const float rayTime = 0.0f ; + + unsigned p0, p1, p2, p3 ; + unsigned p4, p5, p6, p7 ; + + optixTrace( + handle, + ray_origin, + ray_direction, + tmin, + tmax, + rayTime, + OptixVisibilityMask( 1 ), + OPTIX_RAY_FLAG_NONE, + SBToffset, + SBTstride, + missSBTIndex, + p0, p1, p2, p3, + p4, p5, p6, p7 + ); + + normal->x = uint_as_float( p0 ); + normal->y = uint_as_float( p1 ); + normal->z = uint_as_float( p2 ); + *t = uint_as_float( p3 ); + *identity = p4 ; + *boundary = p5 ; + *spare1 = uint_as_float( p6 ); + *spare2 = uint_as_float( p7 ); + // max of 8, perhaps need f_theta, f_phi ? +} + + +__forceinline__ __device__ uchar4 make_color( const float3& normal, unsigned identity ) // pure +{ + //float scale = iidx % 2u == 0u ? 0.5f : 1.f ; + float scale = 1.f ; + return make_uchar4( + static_cast( clamp( normal.x, 0.0f, 1.0f ) *255.0f )*scale , + static_cast( clamp( normal.y, 0.0f, 1.0f ) *255.0f )*scale , + static_cast( clamp( normal.z, 0.0f, 1.0f ) *255.0f )*scale , + 255u + ); +} + + +/** +render : non-pure, uses params for viewpoint inputs and pixels output +----------------------------------------------------------------------- + +**/ + +static __forceinline__ __device__ void render( const uint3& idx, const uint3& dim ) +{ + float2 d = 2.0f * make_float2( + static_cast( idx.x ) / static_cast( dim.x ), + static_cast( idx.y ) / static_cast( dim.y ) + ) - 1.0f; + + const bool yflip = true ; + if(yflip) d.y = -d.y ; + + const unsigned cameratype = params.cameratype ; + const float3 dxyUV = d.x * params.U + d.y * params.V ; + // cameratype 0u:perspective, 1u:orthographic + const float3 origin = cameratype == 0u ? params.eye : params.eye + dxyUV ; + const float3 direction = cameratype == 0u ? 
normalize( dxyUV + params.W ) : normalize( params.W ) ; + + float t = 0.f ; + float3 normal = make_float3( 0.5f, 0.5f, 0.5f ); + unsigned identity = 0u ; + unsigned boundary = 0u ; + float spare1 = 0.f ; + float spare2 = 0.f ; + + trace( + params.handle, + origin, + direction, + params.tmin, + params.tmax, + &normal, + &t, + &identity, + &boundary, + &spare1, + &spare2 + ); + + float3 position = origin + t*direction ; + float3 diddled_normal = normalize(normal)*0.5f + 0.5f ; // lightens render, with mid-grey "pedestal" + uchar4 color = make_color( diddled_normal, identity ); + + unsigned index = idx.y * params.width + idx.x ; + + params.pixels[index] = color ; + params.isect[index] = make_float4( position.x, position.y, position.z, uint_as_float(identity)) ; +} + +/** +simulate : uses params for input: gensteps, seeds and output photons +---------------------------------------------------------------------- + +**/ + +static __forceinline__ __device__ void simulate( const uint3& idx, const uint3& dim ) +{ + qevent* evt = params.evt ; + if (idx.x >= evt->num_photon) return; + + unsigned photon_id = idx.x ; + unsigned genstep_id = evt->seed[photon_id] ; + const quad6& gs = evt->genstep[genstep_id] ; + + qsim* sim = params.sim ; + curandState rng = sim->rngstate[photon_id] ; // TODO: skipahead using an event_id + quad4 p ; + sim->generate_photon(p, rng, gs, photon_id, genstep_id ); + + float3 origin = make_float3( p.q0.f.x, p.q0.f.y, p.q0.f.z ) ; + float3 direction = make_float3( p.q1.f.x, p.q1.f.y, p.q1.f.z ) ; + + float t = 0.f ; + float3 normal = make_float3( 0.5f, 0.5f, 0.5f ); + unsigned identity = 0u ; + unsigned boundary = 0u ; + + float spare1 = 0.f ; + float spare2 = 0.f ; + + trace( + params.handle, + origin, + direction, + params.tmin, + params.tmax, + &normal, + &t, + &identity, + &boundary, + &spare1, + &spare2 + ); + + float3 position = origin + t*direction ; + + + float wx = float(params.cegs.x) ; + float wz = float(params.cegs.z) ; + float fx = 0.5f*(1.f+(position.x - params.center_extent.x)/(wx*params.center_extent.w)) ; // 0.f -> 1.f + float fz = 0.5f*(1.f+(position.z - params.center_extent.z)/(wz*params.center_extent.w)) ; // 0.f -> 1.f + unsigned ix = fx > 0.f && fx < 1.f ? unsigned( fx*params.width ) : 0 ; + unsigned iz = fz > 0.f && fz < 1.f ? unsigned( fz*params.height ) : 0 ; + + //float cos_theta = dot(normal,direction); + // + // * cos_theta "sign/orient-ing the boundary" up here in raygen unlike oxrap/cu/closest_hit_propagate.cu, + // avoids having to pass the information from lower level + // + // * for angular efficiency need intersection point in object frame to get the angles + // + + p.q0.f.x = position.x ; + p.q0.f.y = position.y ; + p.q0.f.z = position.z ; + p.q0.f.w = spare1 ; + + /* + p.q1.f.x = direction.x ; + p.q1.f.y = direction.y ; + p.q1.f.z = direction.z ; + p.q1.f.w = spare2 ; + */ + p.q1.f.x = fx ; + p.q1.f.y = fz ; + p.q1.i.z = ix ; + p.q1.i.w = iz ; + + p.q2.f.x = params.tmin ; + p.q2.f.y = params.tmax ; + p.q2.f.z = t ; + p.q2.u.w = boundary ; + + p.q3.f.x = normal.x ; + p.q3.f.y = normal.y ; + p.q3.f.z = normal.z ; + p.q3.u.w = identity ; + + evt->photon[photon_id] = p ; + + + // Compose frames of pixels, isect and "fphoton" within the cegs window + // using the positions of the intersect "photons". + // Note that multiple threads may be writing to the same pixel + // that is apparently not a problem, just which does it is uncontrolled. 
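+    // The guarded writes below simply let the last thread that lands on a pixel win,
+    // which is acceptable for this debug composition. If a deterministic winner were
+    // ever needed, one untested alternative (pixel_keys is a hypothetical u64 buffer,
+    // not part of Params) would be a 64-bit atomicMin keyed on distance:
+    //     unsigned long long key =
+    //         (static_cast<unsigned long long>(__float_as_uint(t)) << 32) | photon_id;
+    //     atomicMin(&pixel_keys[index], key);
+    // Non-negative t values order correctly through their uint bit patterns.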
+ + + unsigned index = iz * params.width + ix ; + if( index > 0 ) + { + params.pixels[index] = make_uchar4( 255u, 0u, 0u, 255u) ; + params.isect[index] = make_float4( position.x, position.y, position.z, uint_as_float(identity)) ; + params.fphoton[index] = p ; + } +} + +/** +**/ + +extern "C" __global__ void __raygen__rg() +{ + const uint3 idx = optixGetLaunchIndex(); + const uint3 dim = optixGetLaunchDimensions(); + switch( params.raygenmode ) + { + case 0: render( idx, dim ) ; break ; + case 1: simulate( idx, dim ) ; break ; + } +} + + +/** +*setPayload* is used from __closesthit__ and __miss__ providing communication to __raygen__ optixTrace call +**/ +static __forceinline__ __device__ void setPayload( float3 normal, float t, unsigned identity, unsigned boundary, float spare1, float spare2 ) // pure? +{ + optixSetPayload_0( float_as_uint( normal.x ) ); + optixSetPayload_1( float_as_uint( normal.y ) ); + optixSetPayload_2( float_as_uint( normal.z ) ); + optixSetPayload_3( float_as_uint( t ) ); + optixSetPayload_4( identity ); + optixSetPayload_5( boundary ); + optixSetPayload_6( float_as_uint(spare1) ); + optixSetPayload_7( float_as_uint(spare2) ); + // maximum of 8 payload values configured in PIP::PIP +} + + +extern "C" __global__ void __miss__ms() +{ + MissData* ms = reinterpret_cast( optixGetSbtDataPointer() ); + float3 normal = make_float3( ms->r, ms->g, ms->b ); // hmm: this is render specific, but easily ignored + float t = 0.f ; + unsigned identity = 0u ; + unsigned boundary = 0u ; + setPayload( normal, t, identity, boundary, 0.f, 0.f ); +} + +/** +__closesthit__ch : pass attributes from __intersection__ into setPayload +============================================================================ + +optixGetInstanceId + flat instance_idx over all transforms in the single IAS, + JUNO maximum ~50,000 (fits with 0xffff = 65535) + +optixGetPrimitiveIndex + local index of AABB within the GAS, + instanced solids adds little to the number of AABB, + most come from unfortunate repeated usage of prims in the non-instanced global + GAS with repeatIdx 0 (JUNO up to ~4000) + +optixGetRayTmax + In intersection and CH returns the current smallest reported hitT or the tmax passed into rtTrace + if no hit has been reported + + +**/ + +extern "C" __global__ void __closesthit__ch() +{ + const float3 local_normal = // geometry object frame normal at intersection point + make_float3( + uint_as_float( optixGetAttribute_0() ), + uint_as_float( optixGetAttribute_1() ), + uint_as_float( optixGetAttribute_2() ) + ); + + + const float t = uint_as_float( optixGetAttribute_3() ) ; + unsigned boundary = optixGetAttribute_4() ; + + const float3 local_point = // geometry object frame normal at intersection point + make_float3( + uint_as_float( optixGetAttribute_5() ), + uint_as_float( optixGetAttribute_6() ), + uint_as_float( optixGetAttribute_7() ) + ); + + const float spare1 = optixGetRayTmax() ; + const float spare2 = local_point.x ; + + //unsigned instance_index = optixGetInstanceIndex() ; 0-based index within IAS + unsigned instance_id = optixGetInstanceId() ; // user supplied instanceId, see IAS_Builder::Build and InstanceId.h + unsigned prim_idx = optixGetPrimitiveIndex() ; // see GAS_Builder::MakeCustomPrimitivesBI_11N (1+index-of-CSGPrim within CSGSolid/GAS) + unsigned identity = (( prim_idx & 0xffff ) << 16 ) | ( instance_id & 0xffff ) ; + + float3 normal = optixTransformNormalFromObjectToWorldSpace( local_normal ) ; + + setPayload( normal, t, identity, boundary, spare1, spare2 ); // communicate to 
raygen +} + + +/** +__intersection__is +---------------------- + +HitGroupData provides the numNode and nodeOffset of the intersected CSGPrim. +Which Prim gets intersected relies on the CSGPrim::setSbtIndexOffset + +**/ +extern "C" __global__ void __intersection__is() +{ + HitGroupData* hg = (HitGroupData*)optixGetSbtDataPointer(); + int numNode = hg->numNode ; // equivalent of CSGPrim + int nodeOffset = hg->nodeOffset ; + + const CSGNode* node = params.node + nodeOffset ; + const float4* plan = params.plan ; + const qat4* itra = params.itra ; + + const float t_min = optixGetRayTmin() ; + const float3 ray_origin = optixGetObjectRayOrigin(); + const float3 ray_direction = optixGetObjectRayDirection(); + + float4 isect ; // .xyz normal .w distance + if(intersect_prim(isect, numNode, node, plan, itra, t_min , ray_origin, ray_direction )) + { + const unsigned hitKind = 0u ; // only 8bit : could use to customize how attributes interpreted + unsigned a0, a1, a2, a3, a4, a5, a6, a7 ; + const unsigned boundary = node->boundary() ; // all nodes of tree have same boundary + + float3 local_point = ray_origin + isect.w*ray_direction ; + + a0 = float_as_uint( isect.x ); // isect.xyz is object frame normal of geometry at intersection point + a1 = float_as_uint( isect.y ); + a2 = float_as_uint( isect.z ); + a3 = float_as_uint( isect.w ) ; // perhaps no need to pass the "t", should be standard access to "t" + a4 = boundary ; + a5 = float_as_uint( local_point.x ); + a6 = float_as_uint( local_point.y ); + a7 = float_as_uint( local_point.z ); + + optixReportIntersection( isect.w, hitKind, a0, a1, a2, a3, a4, a5, a6, a7 ); + // IS:optixReportIntersection writes the attributes that can be read in CH and AH programs + // max 8 attribute registers, see PIP::PIP, communicate to __closesthit__ch + } +} +// story begins with intersection diff --git a/cuda_code/OpticsCompensation.cu b/cuda_code/OpticsCompensation.cu new file mode 100644 index 0000000000000000000000000000000000000000..d0c7753e7d6db5cb70d6a569c51ae8931a1ad99b --- /dev/null +++ b/cuda_code/OpticsCompensation.cu @@ -0,0 +1,36 @@ +#include "AUCUDA.cuh" +#include "OpticsCompensation_func.cuh" + +/* Luaからデータを受け取ってそのままCore関数を呼び出す */ +int OpticsCompensation(lua_State *L){ + int r = OpticsCompensation_Core(L); + return r; + // Lua 側での戻り値の個数を返す +} + +/* Luaからデータを受け取ってそのままCore関数(Direct)を呼び出す */ +int OpticsCompensation_Direct(lua_State *L){ + int r = OpticsCompensation_Direct_Core(L); + return r; + // Lua 側での戻り値の個数を返す +} + +static luaL_Reg OpticsCompensation_s[] = { + {"info", info}, + { "OpticsCompensation", OpticsCompensation }, + { "OpticsCompensation_Direct", OpticsCompensation_Direct }, + { NULL, NULL } +}; + +/* +ここでdllを定義します +別のものを作る場合は +OpticsCompensation_s +の部分を新しい名前に変えてください +*/ +extern "C"{ + __declspec(dllexport) int luaopen_OpticsCompensation_s(lua_State *L) { + luaL_register(L, "OpticsCompensation_s", OpticsCompensation_s); + return 1; +} +} diff --git a/cuda_code/POCS_TV_6.cu b/cuda_code/POCS_TV_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..2934b2d419db67ae9d70f068552e0346115cf69a --- /dev/null +++ b/cuda_code/POCS_TV_6.cu @@ -0,0 +1,340 @@ +/*------------------------------------------------------------------------- + * + * CUDA functions for Steepest descend in POCS-type algorithms. + * + * This file will iteratively minimize by stepest descend the total variation + * of the input image, with the parameters given, using GPUs. 
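+ *
+ * In outline, each iteration of pocs_tv() below computes
+ *     g = grad TV(f)            (gradientTV kernel)
+ *     g = g / ||g||_2           (reduceNorm2 / reduceSum, then divideArrayScalar)
+ *     f = f - alpha * g         (multiplyArrayScalar followed by substractArrays)
+ * and this is repeated maxIter times.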
+ * + * CODE by Ander Biguri + * +--------------------------------------------------------------------------- +--------------------------------------------------------------------------- +Copyright (c) 2015, University of Bath and CERN- European Organization for +Nuclear Research +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + --------------------------------------------------------------------------- + +Contact: tigre.toolbox@gmail.com +Codes : https://github.com/CERN/TIGRE +--------------------------------------------------------------------------- + */ + + + + + + + +#define MAXTHREADS 1024 + +#include "POCS_TV.hpp" + + + + +#define cudaCheckErrors(msg) \ +do { \ + cudaError_t __err = cudaGetLastError(); \ + if (__err != cudaSuccess) { \ + mexPrintf("ERROR in: %s \n",msg);\ + mexErrMsgIdAndTxt("err",cudaGetErrorString(__err));\ + } \ +} while (0) + +// CUDA kernels +//https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927 + __global__ void divideArrayScalar(float* vec,float scalar,const size_t n) + { + unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x; + for(; i= 0 && z= 0 && y= 0 && x= cols || y >= rows || z >= depth ) + return; + + float df[3] ={0,0,0}; + float dfi[3]={0,0,0}; // dfi== \partial f_{i+1,j,k} + float dfj[3]={0,0,0}; + float dfk[3]={0,0,0}; + gradient(f,df ,z ,y ,x , depth,rows,cols); + gradient(f,dfi ,z ,y ,x+1, depth,rows,cols); + gradient(f,dfj ,z ,y+1,x , depth,rows,cols); + gradient(f,dfk ,z+1,y ,x , depth,rows,cols); + float eps=0.00000001; //% avoid division by zero + dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps) + -dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I coudl precompute this, but if I do then Id need to recompute the gradient. 
+ -dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps) + -dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps); + + } + + __device__ void warpReduce(volatile float *sdata, size_t tid) { + sdata[tid] += sdata[tid + 32]; + sdata[tid] += sdata[tid + 16]; + sdata[tid] += sdata[tid + 8]; + sdata[tid] += sdata[tid + 4]; + sdata[tid] += sdata[tid + 2]; + sdata[tid] += sdata[tid + 1]; + } + + __global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){ + extern __shared__ volatile float sdata[]; + //http://stackoverflow.com/a/35133396/1485872 + size_t tid = threadIdx.x; + size_t i = blockIdx.x*blockDim.x + tid; + size_t gridSize = blockDim.x*gridDim.x; + float mySum = 0; + float value=0; + while (i < n) { + value=g_idata[i]; //avoid reading twice + mySum += value*value; + i += gridSize; + } + sdata[tid] = mySum; + __syncthreads(); + + if (tid < 512) + sdata[tid] += sdata[tid + 512]; + __syncthreads(); + if (tid < 256) + sdata[tid] += sdata[tid + 256]; + __syncthreads(); + + if (tid < 128) + sdata[tid] += sdata[tid + 128]; + __syncthreads(); + + if (tid < 64) + sdata[tid] += sdata[tid + 64]; + __syncthreads(); + + +#if (__CUDA_ARCH__ >= 300) + if ( tid < 32 ) + { + mySum = sdata[tid] + sdata[tid + 32]; + for (int offset = warpSize/2; offset > 0; offset /= 2) { + mySum += __shfl_down(mySum, offset); + } + } +#else + if (tid < 32) { + warpReduce(sdata, tid); + mySum = sdata[0]; + } +#endif + if (tid == 0) g_odata[blockIdx.x] = mySum; + } + __global__ void reduceSum(float *g_idata, float *g_odata, size_t n){ + extern __shared__ volatile float sdata[]; + //http://stackoverflow.com/a/35133396/1485872 + size_t tid = threadIdx.x; + size_t i = blockIdx.x*blockDim.x + tid; + size_t gridSize = blockDim.x*gridDim.x; + float mySum = 0; + // float value=0; + while (i < n) { + mySum += g_idata[i]; + i += gridSize; + } + sdata[tid] = mySum; + __syncthreads(); + + if (tid < 512) + sdata[tid] += sdata[tid + 512]; + __syncthreads(); + if (tid < 256) + sdata[tid] += sdata[tid + 256]; + __syncthreads(); + + if (tid < 128) + sdata[tid] += sdata[tid + 128]; + __syncthreads(); + + if (tid < 64) + sdata[tid] += sdata[tid + 64]; + __syncthreads(); + + +#if (__CUDA_ARCH__ >= 300) + if ( tid < 32 ) + { + mySum = sdata[tid] + sdata[tid + 32]; + for (int offset = warpSize/2; offset > 0; offset /= 2) { + mySum += __shfl_down(mySum, offset); + } + } +#else + if (tid < 32) { + warpReduce(sdata, tid); + mySum = sdata[0]; + } +#endif + if (tid == 0) g_odata[blockIdx.x] = mySum; + } + + + + +// main function + void pocs_tv(const float* img,float* dst,float alpha,const long* image_size, int maxIter){ + + + size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ; + size_t mem_size = sizeof(float) * total_pixels; + + float *d_image, *d_dimgTV,*d_norm2aux,*d_norm2; + // memory for image + cudaMalloc(&d_image, mem_size); + cudaCheckErrors("Malloc Image error"); + cudaMemcpy(d_image, img, mem_size, cudaMemcpyHostToDevice); + cudaCheckErrors("Memory Malloc and Memset: SRC"); + // memory for df + cudaMalloc(&d_dimgTV, mem_size); + cudaCheckErrors("Memory Malloc and Memset: TV"); + + cudaMalloc(&d_norm2, mem_size); + cudaCheckErrors("Memory Malloc and Memset: TV"); + + // memory for L2norm auxiliar + cudaMalloc(&d_norm2aux, sizeof(float)*(total_pixels + MAXTHREADS - 1) / MAXTHREADS); + cudaCheckErrors("Memory Malloc and Memset: NORMAux"); + + + + // For the gradient + dim3 blockGrad(10, 10, 10); + dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, 
(image_size[2]+blockGrad.z-1)/blockGrad.z); + + // For the reduction + float sumnorm2; + + + + for(unsigned int i=0;i>>(d_image,d_dimgTV,image_size[2], image_size[1],image_size[0]); + cudaCheckErrors("Gradient"); +// cudaMemcpy(dst, d_dimgTV, mem_size, cudaMemcpyDeviceToHost); + + + cudaMemcpy(d_norm2, d_dimgTV, mem_size, cudaMemcpyDeviceToDevice); + cudaCheckErrors("Copy from gradient call error"); + // Compute the L2 norm of the gradint. For that, reduction is used. + //REDUCE + size_t dimblockRed = MAXTHREADS; + size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS; + reduceNorm2 << > >(d_norm2, d_norm2aux, total_pixels); + cudaCheckErrors("reduce1"); + if (dimgridRed > 1) { + reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float) >> >(d_norm2aux, d_norm2, dimgridRed); + cudaCheckErrors("reduce2"); + cudaMemcpy(&sumnorm2, d_norm2, sizeof(float), cudaMemcpyDeviceToHost); + cudaCheckErrors("cudaMemcpy"); + + } + else { + cudaMemcpy(&sumnorm2, d_norm2aux, sizeof(float), cudaMemcpyDeviceToHost); + cudaCheckErrors("cudaMemcpy"); + } + //mexPrintf("%f ",sqrt(sumnorm2)); + //NOMRALIZE + //in a Tesla, maximum blocks =15 SM * 4 blocks/SM + divideArrayScalar <<<60,MAXTHREADS>>>(d_dimgTV,sqrt(sumnorm2),total_pixels); + cudaCheckErrors("Division error"); + //MULTIPLY HYPERPARAMETER + multiplyArrayScalar<<<60,MAXTHREADS>>>(d_dimgTV,alpha, total_pixels); + cudaCheckErrors("Multiplication error"); + //SUBSTRACT GRADIENT + substractArrays <<<60,MAXTHREADS>>>(d_image,d_dimgTV, total_pixels); + cudaCheckErrors("Substraction error"); + sumnorm2=0; + } + + cudaCheckErrors("TV minimization"); + + cudaMemcpy(dst, d_image, mem_size, cudaMemcpyDeviceToHost); + cudaCheckErrors("Copy result back"); + + cudaFree(d_image); + cudaFree(d_norm2aux); + cudaFree(d_dimgTV); + cudaFree(d_norm2); + + cudaCheckErrors("Memory free"); + cudaDeviceReset(); + } + diff --git a/cuda_code/PQScanMultiPassNoPrecomputed_6.cu b/cuda_code/PQScanMultiPassNoPrecomputed_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..57030c9e3454d08ea33a8691ccbe6871544b9b74 --- /dev/null +++ b/cuda_code/PQScanMultiPassNoPrecomputed_6.cu @@ -0,0 +1,594 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace faiss { namespace gpu { + +// This must be kept in sync with PQCodeDistances.cu +bool isSupportedNoPrecomputedSubDimSize(int dims) { + switch (dims) { + case 1: + case 2: + case 3: + case 4: + case 6: + case 8: + case 10: + case 12: + case 16: + case 20: + case 24: + case 28: + case 32: + return true; + default: + // FIXME: larger sizes require too many registers - we need the + // MM implementation working + return false; + } +} + +template +struct LoadCodeDistances { + static inline __device__ void load(LookupT* smem, + LookupT* codes, + int numCodes) { + constexpr int kWordSize = sizeof(LookupVecT) / sizeof(LookupT); + + // We can only use the vector type if the data is guaranteed to be + // aligned. The codes are innermost, so if it is evenly divisible, + // then any slice will be aligned. 
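+    // Concretely: when numCodes is a multiple of kWordSize (e.g. 4 floats per float4),
+    // the bulk of the table is streamed as LookupVecT words, kUnroll per thread per
+    // trip, and only the remainder falls back to scalar loads; otherwise the whole copy
+    // takes the scalar (potentially unaligned) path below.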
+ if (numCodes % kWordSize == 0) { + // Load the data by float4 for efficiency, and then handle any remainder + // limitVec is the number of whole vec words we can load, in terms + // of whole blocks performing the load + constexpr int kUnroll = 2; + int limitVec = numCodes / (kUnroll * kWordSize * blockDim.x); + limitVec *= kUnroll * blockDim.x; + + LookupVecT* smemV = (LookupVecT*) smem; + LookupVecT* codesV = (LookupVecT*) codes; + + for (int i = threadIdx.x; i < limitVec; i += kUnroll * blockDim.x) { + LookupVecT vals[kUnroll]; + +#pragma unroll + for (int j = 0; j < kUnroll; ++j) { + vals[j] = + LoadStore::load(&codesV[i + j * blockDim.x]); + } + +#pragma unroll + for (int j = 0; j < kUnroll; ++j) { + LoadStore::store(&smemV[i + j * blockDim.x], vals[j]); + } + } + + // This is where we start loading the remainder that does not evenly + // fit into kUnroll x blockDim.x + int remainder = limitVec * kWordSize; + + for (int i = remainder + threadIdx.x; i < numCodes; i += blockDim.x) { + smem[i] = codes[i]; + } + } else { + // Potential unaligned load + constexpr int kUnroll = 4; + + int limit = utils::roundDown(numCodes, kUnroll * blockDim.x); + + int i = threadIdx.x; + for (; i < limit; i += kUnroll * blockDim.x) { + LookupT vals[kUnroll]; + +#pragma unroll + for (int j = 0; j < kUnroll; ++j) { + vals[j] = codes[i + j * blockDim.x]; + } + +#pragma unroll + for (int j = 0; j < kUnroll; ++j) { + smem[i + j * blockDim.x] = vals[j]; + } + } + + for (; i < numCodes; i += blockDim.x) { + smem[i] = codes[i]; + } + } + } +}; + +template +__global__ void +pqScanNoPrecomputedMultiPass(Tensor queries, + Tensor pqCentroids, + Tensor topQueryToCentroid, + Tensor codeDistances, + void** listCodes, + int* listLengths, + Tensor prefixSumOffsets, + Tensor distance) { + const auto codesPerSubQuantizer = pqCentroids.getSize(2); + + // Where the pq code -> residual distance is stored + extern __shared__ char smemCodeDistances[]; + LookupT* codeDist = (LookupT*) smemCodeDistances; + + // Each block handles a single query + auto queryId = blockIdx.y; + auto probeId = blockIdx.x; + + // This is where we start writing out data + // We ensure that before the array (at offset -1), there is a 0 value + int outBase = *(prefixSumOffsets[queryId][probeId].data() - 1); + float* distanceOut = distance[outBase].data(); + + auto listId = topQueryToCentroid[queryId][probeId]; + // Safety guard in case NaNs in input cause no list ID to be generated + if (listId == -1) { + return; + } + + unsigned char* codeList = (unsigned char*) listCodes[listId]; + int limit = listLengths[listId]; + + constexpr int kNumCode32 = NumSubQuantizers <= 4 ? 
1 : + (NumSubQuantizers / 4); + unsigned int code32[kNumCode32]; + unsigned int nextCode32[kNumCode32]; + + // We double-buffer the code loading, which improves memory utilization + if (threadIdx.x < limit) { + LoadCode32::load(code32, codeList, threadIdx.x); + } + + LoadCodeDistances::load( + codeDist, + codeDistances[queryId][probeId].data(), + codeDistances.getSize(2) * codeDistances.getSize(3)); + + // Prevent WAR dependencies + __syncthreads(); + + // Each thread handles one code element in the list, with a + // block-wide stride + for (int codeIndex = threadIdx.x; + codeIndex < limit; + codeIndex += blockDim.x) { + // Prefetch next codes + if (codeIndex + blockDim.x < limit) { + LoadCode32::load( + nextCode32, codeList, codeIndex + blockDim.x); + } + + float dist = 0.0f; + +#pragma unroll + for (int word = 0; word < kNumCode32; ++word) { + constexpr int kBytesPerCode32 = + NumSubQuantizers < 4 ? NumSubQuantizers : 4; + + if (kBytesPerCode32 == 1) { + auto code = code32[0]; + dist = ConvertTo::to(codeDist[code]); + + } else { +#pragma unroll + for (int byte = 0; byte < kBytesPerCode32; ++byte) { + auto code = getByte(code32[word], byte * 8, 8); + + auto offset = + codesPerSubQuantizer * (word * kBytesPerCode32 + byte); + + dist += ConvertTo::to(codeDist[offset + code]); + } + } + } + + // Write out intermediate distance result + // We do not maintain indices here, in order to reduce global + // memory traffic. Those are recovered in the final selection step. + distanceOut[codeIndex] = dist; + + // Rotate buffers +#pragma unroll + for (int word = 0; word < kNumCode32; ++word) { + code32[word] = nextCode32[word]; + } + } +} + +void +runMultiPassTile(Tensor& queries, + Tensor& centroids, + Tensor& pqCentroidsInnermostCode, + NoTypeTensor<4, true>& codeDistances, + Tensor& topQueryToCentroid, + Tensor& bitset, + bool useFloat16Lookup, + int bytesPerCode, + int numSubQuantizers, + int numSubQuantizerCodes, + thrust::device_vector& listCodes, + thrust::device_vector& listIndices, + IndicesOptions indicesOptions, + thrust::device_vector& listLengths, + Tensor& thrustMem, + Tensor& prefixSumOffsets, + Tensor& allDistances, + Tensor& heapDistances, + Tensor& heapIndices, + int k, + Tensor& outDistances, + Tensor& outIndices, + cudaStream_t stream) { + // Calculate offset lengths, so we know where to write out + // intermediate results + runCalcListOffsets(topQueryToCentroid, listLengths, prefixSumOffsets, + thrustMem, stream); + + // Calculate residual code distances, since this is without + // precomputed codes + runPQCodeDistances(pqCentroidsInnermostCode, + queries, + centroids, + topQueryToCentroid, + codeDistances, + useFloat16Lookup, + stream); + + // Convert all codes to a distance, and write out (distance, + // index) values for all intermediate results + { + auto kThreadsPerBlock = 256; + + auto grid = dim3(topQueryToCentroid.getSize(1), + topQueryToCentroid.getSize(0)); + auto block = dim3(kThreadsPerBlock); + + // pq centroid distances + auto smem = useFloat16Lookup ? 
sizeof(half) : sizeof(float); + + smem *= numSubQuantizers * numSubQuantizerCodes; + FAISS_ASSERT(smem <= getMaxSharedMemPerBlockCurrentDevice()); + +#define RUN_PQ_OPT(NUM_SUB_Q, LOOKUP_T, LOOKUP_VEC_T) \ + do { \ + auto codeDistancesT = codeDistances.toTensor(); \ + \ + pqScanNoPrecomputedMultiPass \ + <<>>( \ + queries, \ + pqCentroidsInnermostCode, \ + topQueryToCentroid, \ + codeDistancesT, \ + listCodes.data().get(), \ + listLengths.data().get(), \ + prefixSumOffsets, \ + allDistances); \ + } while (0) + +#define RUN_PQ(NUM_SUB_Q) \ + do { \ + if (useFloat16Lookup) { \ + RUN_PQ_OPT(NUM_SUB_Q, half, Half8); \ + } else { \ + RUN_PQ_OPT(NUM_SUB_Q, float, float4); \ + } \ + } while (0) + + switch (bytesPerCode) { + case 1: + RUN_PQ(1); + break; + case 2: + RUN_PQ(2); + break; + case 3: + RUN_PQ(3); + break; + case 4: + RUN_PQ(4); + break; + case 8: + RUN_PQ(8); + break; + case 12: + RUN_PQ(12); + break; + case 16: + RUN_PQ(16); + break; + case 20: + RUN_PQ(20); + break; + case 24: + RUN_PQ(24); + break; + case 28: + RUN_PQ(28); + break; + case 32: + RUN_PQ(32); + break; + case 40: + RUN_PQ(40); + break; + case 48: + RUN_PQ(48); + break; + case 56: + RUN_PQ(56); + break; + case 64: + RUN_PQ(64); + break; + case 96: + RUN_PQ(96); + break; + default: + FAISS_ASSERT(false); + break; + } + +#undef RUN_PQ +#undef RUN_PQ_OPT + } + + CUDA_TEST_ERROR(); + + // k-select the output in chunks, to increase parallelism + runPass1SelectLists(listIndices, + indicesOptions, + prefixSumOffsets, + topQueryToCentroid, + bitset, + allDistances, + topQueryToCentroid.getSize(1), + k, + false, // L2 distance chooses smallest + heapDistances, + heapIndices, + stream); + + // k-select final output + auto flatHeapDistances = heapDistances.downcastInner<2>(); + auto flatHeapIndices = heapIndices.downcastInner<2>(); + + runPass2SelectLists(flatHeapDistances, + flatHeapIndices, + listIndices, + indicesOptions, + prefixSumOffsets, + topQueryToCentroid, + k, + false, // L2 distance chooses smallest + outDistances, + outIndices, + stream); +} + +void runPQScanMultiPassNoPrecomputed(Tensor& queries, + Tensor& centroids, + Tensor& pqCentroidsInnermostCode, + Tensor& topQueryToCentroid, + Tensor& bitset, + bool useFloat16Lookup, + int bytesPerCode, + int numSubQuantizers, + int numSubQuantizerCodes, + thrust::device_vector& listCodes, + thrust::device_vector& listIndices, + IndicesOptions indicesOptions, + thrust::device_vector& listLengths, + int maxListLength, + int k, + // output + Tensor& outDistances, + // output + Tensor& outIndices, + GpuResources* res) { + constexpr int kMinQueryTileSize = 8; + constexpr int kMaxQueryTileSize = 128; + constexpr int kThrustMemSize = 16384; + + int nprobe = topQueryToCentroid.getSize(1); + + auto& mem = res->getMemoryManagerCurrentDevice(); + auto stream = res->getDefaultStreamCurrentDevice(); + + // Make a reservation for Thrust to do its dirty work (global memory + // cross-block reduction space); hopefully this is large enough. + DeviceTensor thrustMem1( + mem, {kThrustMemSize}, stream); + DeviceTensor thrustMem2( + mem, {kThrustMemSize}, stream); + DeviceTensor* thrustMem[2] = + {&thrustMem1, &thrustMem2}; + + // How much temporary storage is available? + // If possible, we'd like to fit within the space available. 
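+  // The tile size chosen below is roughly sizeAvailable / sizePerQuery, clamped to
+  // [kMinQueryTileSize, kMaxQueryTileSize], so that the double-buffered temporaries
+  // (prefix-sum offsets, code distances, intermediate distances and the two heap
+  // selection passes) for one tile of queries fit in the scratch memory when possible.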
+ size_t sizeAvailable = mem.getSizeAvailable(); + + // We run two passes of heap selection + // This is the size of the first-level heap passes + constexpr int kNProbeSplit = 8; + int pass2Chunks = std::min(nprobe, kNProbeSplit); + + size_t sizeForFirstSelectPass = + pass2Chunks * k * (sizeof(float) + sizeof(int)); + + // How much temporary storage we need per each query + size_t sizePerQuery = + 2 * // streams + ((nprobe * sizeof(int) + sizeof(int)) + // prefixSumOffsets + nprobe * maxListLength * sizeof(float) + // allDistances + // residual distances + nprobe * numSubQuantizers * numSubQuantizerCodes * sizeof(float) + + sizeForFirstSelectPass); + + int queryTileSize = (int) (sizeAvailable / sizePerQuery); + + if (queryTileSize < kMinQueryTileSize) { + queryTileSize = kMinQueryTileSize; + } else if (queryTileSize > kMaxQueryTileSize) { + queryTileSize = kMaxQueryTileSize; + } + + // FIXME: we should adjust queryTileSize to deal with this, since + // indexing is in int32 + FAISS_ASSERT(queryTileSize * nprobe * maxListLength < + std::numeric_limits::max()); + + // Temporary memory buffers + // Make sure there is space prior to the start which will be 0, and + // will handle the boundary condition without branches + DeviceTensor prefixSumOffsetSpace1( + mem, {queryTileSize * nprobe + 1}, stream); + DeviceTensor prefixSumOffsetSpace2( + mem, {queryTileSize * nprobe + 1}, stream); + + DeviceTensor prefixSumOffsets1( + prefixSumOffsetSpace1[1].data(), + {queryTileSize, nprobe}); + DeviceTensor prefixSumOffsets2( + prefixSumOffsetSpace2[1].data(), + {queryTileSize, nprobe}); + DeviceTensor* prefixSumOffsets[2] = + {&prefixSumOffsets1, &prefixSumOffsets2}; + + // Make sure the element before prefixSumOffsets is 0, since we + // depend upon simple, boundary-less indexing to get proper results + CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace1.data(), + 0, + sizeof(int), + stream)); + CUDA_VERIFY(cudaMemsetAsync(prefixSumOffsetSpace2.data(), + 0, + sizeof(int), + stream)); + + int codeDistanceTypeSize = useFloat16Lookup ? 
sizeof(half) : sizeof(float); + + int totalCodeDistancesSize = + queryTileSize * nprobe * numSubQuantizers * numSubQuantizerCodes * + codeDistanceTypeSize; + + DeviceTensor codeDistances1Mem( + mem, {totalCodeDistancesSize}, stream); + NoTypeTensor<4, true> codeDistances1( + codeDistances1Mem.data(), + codeDistanceTypeSize, + {queryTileSize, nprobe, numSubQuantizers, numSubQuantizerCodes}); + + DeviceTensor codeDistances2Mem( + mem, {totalCodeDistancesSize}, stream); + NoTypeTensor<4, true> codeDistances2( + codeDistances2Mem.data(), + codeDistanceTypeSize, + {queryTileSize, nprobe, numSubQuantizers, numSubQuantizerCodes}); + + NoTypeTensor<4, true>* codeDistances[2] = + {&codeDistances1, &codeDistances2}; + + DeviceTensor allDistances1( + mem, {queryTileSize * nprobe * maxListLength}, stream); + DeviceTensor allDistances2( + mem, {queryTileSize * nprobe * maxListLength}, stream); + DeviceTensor* allDistances[2] = + {&allDistances1, &allDistances2}; + + DeviceTensor heapDistances1( + mem, {queryTileSize, pass2Chunks, k}, stream); + DeviceTensor heapDistances2( + mem, {queryTileSize, pass2Chunks, k}, stream); + DeviceTensor* heapDistances[2] = + {&heapDistances1, &heapDistances2}; + + DeviceTensor heapIndices1( + mem, {queryTileSize, pass2Chunks, k}, stream); + DeviceTensor heapIndices2( + mem, {queryTileSize, pass2Chunks, k}, stream); + DeviceTensor* heapIndices[2] = + {&heapIndices1, &heapIndices2}; + + auto streams = res->getAlternateStreamsCurrentDevice(); + streamWait(streams, {stream}); + + int curStream = 0; + + for (int query = 0; query < queries.getSize(0); query += queryTileSize) { + int numQueriesInTile = + std::min(queryTileSize, queries.getSize(0) - query); + + auto prefixSumOffsetsView = + prefixSumOffsets[curStream]->narrowOutermost(0, numQueriesInTile); + + auto codeDistancesView = + codeDistances[curStream]->narrowOutermost(0, numQueriesInTile); + auto coarseIndicesView = + topQueryToCentroid.narrowOutermost(query, numQueriesInTile); + auto queryView = + queries.narrowOutermost(query, numQueriesInTile); + + auto heapDistancesView = + heapDistances[curStream]->narrowOutermost(0, numQueriesInTile); + auto heapIndicesView = + heapIndices[curStream]->narrowOutermost(0, numQueriesInTile); + + auto outDistanceView = + outDistances.narrowOutermost(query, numQueriesInTile); + auto outIndicesView = + outIndices.narrowOutermost(query, numQueriesInTile); + + runMultiPassTile(queryView, + centroids, + pqCentroidsInnermostCode, + codeDistancesView, + coarseIndicesView, + bitset, + useFloat16Lookup, + bytesPerCode, + numSubQuantizers, + numSubQuantizerCodes, + listCodes, + listIndices, + indicesOptions, + listLengths, + *thrustMem[curStream], + prefixSumOffsetsView, + *allDistances[curStream], + heapDistancesView, + heapIndicesView, + k, + outDistanceView, + outIndicesView, + streams[curStream]); + + curStream = (curStream + 1) % 2; + } + + streamWait({stream}, streams); +} + +} } // namespace diff --git a/cuda_code/PageKernels.cu b/cuda_code/PageKernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..938764fcfe79858eaba715d9684f8f00e828f6d0 --- /dev/null +++ b/cuda_code/PageKernels.cu @@ -0,0 +1,750 @@ +#include +#include +#include + +#include "PageKernels.cuh" +#include "CVoxelFunctions.cuh" +#include "CMatrixFunctions.cuh" +#include "CAABBFunctions.cuh" +#include "COpenGLTypes.h" +#include "CAtomicPageAlloc.cuh" +#include "GIVoxelPages.h" + +#define GI_MAX_JOINT_COUNT 63 + +__global__ void FilterVoxels(// Voxel System + CVoxelPage* gVoxelPages, + // Dense Data 
from OGL + uint32_t& gAllocator, + uint2* gDenseData, + uint32_t segmentOffset, + // Limits + uint32_t cascadeId, + uint32_t gridSize) +{ + unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x; + uint2 data = gDenseData[globalId]; + + // Skip if data is uninteresting + if(data.x == 0x0 && data.y == 0x0) return; + + //DEBUG + //printf("%d %d\n", data.y, data.x); return; + + // Allocate a position and find your location + uint32_t position = atomicAdd(&gAllocator, 1); + position += segmentOffset * GIVoxelPages::SegmentSize; + + uint32_t pageId = position / GIVoxelPages::PageSize; + uint32_t pageLocalId = position % GIVoxelPages::PageSize; + + uint32_t pageLocalSegmentId = pageLocalId / GIVoxelPages::SegmentSize; + uint32_t segmentLocalId = pageLocalId % GIVoxelPages::SegmentSize; + + const CVoxelPage& gPage = gVoxelPages[pageId]; + + // Fake a entity + if(segmentLocalId == 0) + { + CSegmentInfo info = {}; + info.batchId = 0; + info.objId = 0; + info.objectSegmentId = 0; + info.packed = PackSegmentInfo(cascadeId, + CObjectType::STATIC, + CSegmentOccupation::OCCUPIED, + true); + + gPage.dSegmentInfo[pageLocalSegmentId] = info; + } + + // Pos + int3 voxelId; + voxelId.z = (globalId / gridSize / gridSize) % gridSize; + voxelId.y = (globalId / gridSize ) % gridSize; + voxelId.x = (globalId ) % gridSize; + gPage.dGridVoxPos[pageLocalId] = PackVoxPos(voxelId); + + // Normal + gPage.dGridVoxNorm[pageLocalId] = (data.y & 0x00FFFFFF); + + // Write color + gPage.dGridVoxOccupancy[pageLocalId] = data.x; + + // Clean up dense structure + gDenseData[globalId] = uint2{0x0, 0x0}; +} + +__global__ void ClearPages(CVoxelPage* gVoxelPages) +{ + unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x; + unsigned int pageId = globalId / GIVoxelPages::PageSize; + unsigned int pageLocalId = globalId % GIVoxelPages::PageSize; + unsigned int pageLocalSegmentId = pageLocalId / GIVoxelPages::SegmentSize; + unsigned int segmentLocalVoxId = pageLocalId % GIVoxelPages::SegmentSize; + + if(segmentLocalVoxId == 0) + { + CSegmentInfo info = {}; + gVoxelPages[pageId].dSegmentInfo[pageLocalSegmentId] = info; + } + gVoxelPages[pageId].dGridVoxNorm[pageLocalId] = 0x0; + gVoxelPages[pageId].dGridVoxPos[pageLocalId] = 0x0; + gVoxelPages[pageId].dGridVoxOccupancy[pageLocalId] = 0x0; +} + +__global__ void CountVoxelsInPageSystem(uint32_t* gCounter, + // Voxel Cache + const BatchVoxelCache* gBatchVoxelCache, + // Voxel Pages + const CVoxelPageConst* gVoxelPages, + // Limits + const uint32_t batchCount) +{ + __shared__ CSegmentInfo sSegInfo; + __shared__ CMeshVoxelInfo sMeshVoxelInfo; + + unsigned int blockLocalId = threadIdx.x; + unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x; + unsigned int pageId = globalId / GIVoxelPages::PageSize; + unsigned int pageLocalId = globalId % GIVoxelPages::PageSize; + unsigned int pageLocalSegmentId = pageLocalId / GIVoxelPages::SegmentSize; + unsigned int segmentLocalVoxId = pageLocalId % GIVoxelPages::SegmentSize; + + // Get Segments Obj Information Struct + CObjectType objType; + CSegmentOccupation occupation; + uint8_t cascadeId; + bool firstOccurance; + if(blockLocalId == 0) + { + // Load to smem + // Todo split this into the threadss + sSegInfo = gVoxelPages[pageId].dSegmentInfo[pageLocalSegmentId]; + ExpandSegmentInfo(cascadeId, objType, occupation, firstOccurance, sSegInfo.packed); + } + __syncthreads(); + if(blockLocalId != 0) + { + ExpandSegmentInfo(cascadeId, objType, occupation, firstOccurance, sSegInfo.packed); + } + // Full Block Cull + if(occupation == 
CSegmentOccupation::EMPTY) return; + assert(occupation != CSegmentOccupation::MARKED_FOR_CLEAR); + + // If segment is not empty + // Load Block Constants + if(blockLocalId == 0) + { + // TODO: Re-write this to be more multi-thread loadable + sMeshVoxelInfo = gBatchVoxelCache[cascadeId * batchCount + sSegInfo.batchId].dMeshVoxelInfo[sSegInfo.objId]; + } + __syncthreads(); + + // Voxel Ids + const uint32_t objectLocalVoxelId = sSegInfo.objectSegmentId * GIVoxelPages::SegmentSize + segmentLocalVoxId; + + // Cull threads + // Edge case where last segment do not always full + if(objectLocalVoxelId >= sMeshVoxelInfo.voxCount) return; + + atomicAdd(gCounter, 1); +} + +__global__ void InitializePage(unsigned char* emptySegments, const size_t pageCount) +{ + size_t sizePerPage = GIVoxelPages::PageSize * + (sizeof(CVoxelPos) + + sizeof(CVoxelNorm) + + sizeof(CVoxelOccupancy)) + + + GIVoxelPages::SegmentSize * + (sizeof(unsigned char) + + sizeof(CSegmentInfo)); + + unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x; + unsigned int pageLocalSegmentId = globalId % GIVoxelPages::SegmentPerPage; + unsigned int pageId = globalId / GIVoxelPages::SegmentPerPage; + + // Cull if out of bounds + if(globalId >= pageCount * GIVoxelPages::SegmentPerPage) return; + emptySegments[pageId * sizePerPage + pageLocalSegmentId] = GIVoxelPages::SegmentPerPage - pageLocalSegmentId - 1; +} + +inline __device__ unsigned int WarpAggragateIndex(unsigned int& gAtomicIndex) +{ + unsigned int activeThreads = __ballot_sync(0xFFFFFFFF, 0x1); + unsigned int incrementCount = __popc(activeThreads); + unsigned int leader = __ffs(activeThreads) - 1; + unsigned int warpLocalId = threadIdx.x % warpSize; + + unsigned int baseIndex; + if(warpLocalId == leader) + baseIndex = atomicAdd(&gAtomicIndex, incrementCount); + baseIndex = __shfl_sync(0xFFFFFFFF, baseIndex, leader); + return baseIndex + __popc(activeThreads & ((1 << warpLocalId) - 1)); +} + +__global__ void CopyPage(// OGL Buffer + VoxelPosition* gVoxelPosition, + unsigned int* gVoxelRender, + unsigned int& gAtomicIndex, + // Voxel Cache + const BatchVoxelCache* gBatchVoxelCache, + // Voxel Pages + const CVoxelPageConst* gVoxelPages, + // + const uint32_t batchCount, + const uint32_t selectedCascade, + const VoxelRenderType renderType, + bool useCache) +{ + // Shared Memory for generic data + __shared__ CSegmentInfo sSegInfo; + __shared__ CMeshVoxelInfo sMeshVoxelInfo; + + unsigned int blockLocalId = threadIdx.x; + unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x; + unsigned int pageId = globalId / GIVoxelPages::PageSize; + unsigned int pageLocalId = globalId % GIVoxelPages::PageSize; + unsigned int pageLocalSegmentId = pageLocalId / GIVoxelPages::SegmentSize; + unsigned int segmentLocalVoxId = pageLocalId % GIVoxelPages::SegmentSize; + + // Get Segments Obj Information Struct + CObjectType objType; + CSegmentOccupation occupation; + uint8_t cascadeId; + bool firstOccurance; + if(blockLocalId == 0) + { + // Load to shared memory + sSegInfo = gVoxelPages[pageId].dSegmentInfo[pageLocalSegmentId]; + ExpandSegmentInfo(cascadeId, objType, occupation, firstOccurance, sSegInfo.packed); + } + __syncthreads(); + if(blockLocalId != 0) + { + ExpandSegmentInfo(cascadeId, objType, occupation, firstOccurance, sSegInfo.packed); + } + + // Full Block Cull + if(cascadeId < selectedCascade) return; + if(occupation == CSegmentOccupation::EMPTY) return; + assert(occupation != CSegmentOccupation::MARKED_FOR_CLEAR); + + if(blockLocalId == 0) + { + sMeshVoxelInfo = 
gBatchVoxelCache[cascadeId * batchCount + sSegInfo.batchId].dMeshVoxelInfo[sSegInfo.objId]; + } + __syncthreads(); + + // Now Copy If individual voxel is valid + CVoxelNorm voxNorm = gVoxelPages[pageId].dGridVoxNorm[pageLocalId]; + if(voxNorm != 0xFFFFFFFF) + { + // Get Index + unsigned int index = atomicAdd(&gAtomicIndex, 1); + //unsigned int index = WarpAggragateIndex(gAtomicIndex); + + // Get Data + if(!useCache && renderType != VoxelRenderType::NORMAL) + { + voxNorm = gVoxelPages[pageId].dGridVoxOccupancy[pageLocalId]; + } + else if(renderType != VoxelRenderType::NORMAL) + { + // Find your opengl data and voxel cache + // then find appropriate albedo + const uint16_t& batchId = sSegInfo.batchId; + const BatchVoxelCache& batchCache = gBatchVoxelCache[cascadeId * batchCount + batchId]; + const uint32_t objectLocalVoxelId = sSegInfo.objectSegmentId * GIVoxelPages::SegmentSize + segmentLocalVoxId; + const uint32_t batchLocalVoxelId = objectLocalVoxelId + sMeshVoxelInfo.voxOffset; + + voxNorm = batchCache.dVoxelAlbedo[batchLocalVoxelId]; + } + + // Inject Voxel Pos + CVoxelPos voxPos = gVoxelPages[pageId].dGridVoxPos[pageLocalId]; + voxPos |= (cascadeId & 0x00000003) << 30; + + gVoxelPosition[index] = voxPos; + gVoxelRender[index] = voxNorm; + } +} + +__global__ void VoxelDeallocate(// Voxel System + CVoxelPage* gVoxelPages, + const CVoxelGrid* gGridInfos, + // Helper Structures + ushort2* gSegmentAllocInfo, + const CSegmentInfo* gSegmentInfo, + // Per Object Related + const BatchOGLData* gBatchOGLData, + // Limits + const uint32_t totalSegments) +{ + unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x; + + // Now Thread Scheme changes per objectSegment + if(globalId >= totalSegments) return; + + // Unpack segmentInfo + const CSegmentInfo segInfo = gSegmentInfo[globalId]; + uint8_t cascadeNo = ExpandOnlyCascadeNo(segInfo.packed); + const CVoxelGrid cascadeGrid = gGridInfos[cascadeNo]; + + // Intersection Check + const uint32_t transformId = gBatchOGLData[segInfo.batchId].dModelTransformIndices[segInfo.objId]; + const CMatrix4x4 transform = gBatchOGLData[segInfo.batchId].dModelTransforms[transformId].transform; + const CAABB objAABB = gBatchOGLData[segInfo.batchId].dAABBs[segInfo.objId]; + bool intersects = CheckGridVoxIntersect(cascadeGrid, objAABB, transform); + + // Check if this object is not allocated + ushort2 objAlloc = gSegmentAllocInfo[globalId]; + if(!intersects && objAlloc.x != 0xFFFF) + { + // "Dealocate" + assert(ExpandOnlyOccupation(gVoxelPages[objAlloc.x].dSegmentInfo[objAlloc.y].packed) == CSegmentOccupation::OCCUPIED); + unsigned int size = AtomicDealloc(&(gVoxelPages[objAlloc.x].dEmptySegmentStackSize), GIVoxelPages::SegmentPerPage); + assert(size != GIVoxelPages::SegmentPerPage); + if(size != GIVoxelPages::SegmentPerPage) + { + unsigned int location = size; + gVoxelPages[objAlloc.x].dEmptySegmentPos[location] = objAlloc.y; + + CSegmentInfo segObjId = {}; + gVoxelPages[objAlloc.x].dSegmentInfo[objAlloc.y] = segObjId; + gSegmentAllocInfo[globalId] = ushort2{0xFFFF, 0xFFFF}; + } + } +} + +__global__ void VoxelAllocate(// Voxel System + CVoxelPage* gVoxelPages, + const CVoxelGrid* gGridInfos, + // Helper Structures + ushort2* gSegmentAllocInfo, + const CSegmentInfo* gSegmentInfo, + // Per Object Related + const BatchOGLData* gBatchOGLData, + // Limits + const uint32_t totalSegments, + const uint32_t pageAmount) +{ + unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x; + + // Now Thread Scheme changes per objectSegment + if(globalId >= totalSegments) return; 
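+	// Allocation mirrors VoxelDeallocate above: every page keeps a small stack of free
+	// segment slots (dEmptySegmentPos, with dEmptySegmentStackSize as the stack pointer);
+	// AtomicAlloc pops a slot here and AtomicDealloc pushes it back on release, so
+	// segments can be claimed and freed without any page-wide locking.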
+ + // Unpack segmentInfo + const CSegmentInfo segInfo = gSegmentInfo[globalId]; + uint8_t cascadeNo = ExpandOnlyCascadeNo(segInfo.packed); + const CVoxelGrid cascadeGrid = gGridInfos[cascadeNo]; + + // Intersection Check + const uint32_t transformId = gBatchOGLData[segInfo.batchId].dModelTransformIndices[segInfo.objId]; + const CMatrix4x4 transform = gBatchOGLData[segInfo.batchId].dModelTransforms[transformId].transform; + const CAABB objAABB = gBatchOGLData[segInfo.batchId].dAABBs[segInfo.objId]; + bool intersects = CheckGridVoxIntersect(cascadeGrid, objAABB, transform); + + // Check if this object is not allocated + ushort2 objAlloc = gSegmentAllocInfo[globalId]; + + if(intersects && objAlloc.x == 0xFFFF) + { + // "Allocate" + // Check page by page + for(unsigned int i = 0; i < pageAmount; i++) + { + unsigned int size = AtomicAlloc(&(gVoxelPages[i].dEmptySegmentStackSize)); + if(size != 0) + { + unsigned int location = gVoxelPages[i].dEmptySegmentPos[size - 1]; + assert(ExpandOnlyOccupation(gVoxelPages[i].dSegmentInfo[location].packed) == CSegmentOccupation::EMPTY); + gSegmentAllocInfo[globalId] = ushort2 + { + static_cast(i), + static_cast(location) + }; + gVoxelPages[i].dSegmentInfo[location] = segInfo; + return; + } + } + } +} + +inline __device__ void LoadTransformData(// Shared Mem + CMatrix4x4* sTransformMatrices, + CMatrix3x3* sRotationMatrices, + uint8_t* sMatrixLookup, + // Object Transform Matrix + const BatchOGLData& gBatchOGLData, + // Current Voxel Weight + const uchar4& voxelWeightIndex, + // Object Type that will be broadcasted + const CObjectType& objType, + const uint16_t& objId, + const uint16_t& transformId) +{ + unsigned int blockLocalId = threadIdx.x; + + // Here we will load transform and rotation matrices + // Each thread will load 1 float. 
There is two 4x4 matrix + // 32 floats will be loaded + // Just enough for a warp to do the work + // Load matrices (4 byte load by each thread sequential no bank conflict) + const CModelTransform& objectMT = gBatchOGLData.dModelTransforms[transformId]; + float* sTrans = reinterpret_cast(&sTransformMatrices[0]); + float* sRot = reinterpret_cast(&sRotationMatrices[0]); + if(blockLocalId < 16) + { + const float* objectTransform = reinterpret_cast(&objectMT.transform); + sTrans[blockLocalId] = objectTransform[blockLocalId]; + } + else if(blockLocalId < 25) + { + unsigned int rotationId = blockLocalId - 16; + unsigned int columnId = rotationId / 3; + unsigned int rowId = rotationId % 3; + + const float* objectRotation = reinterpret_cast(&objectMT.rotation); + sRot[columnId * 4 + rowId] = objectRotation[columnId * 4 + rowId]; + } + + // Load Joint Transforms if Skeletal Object + if(objType == CObjectType::SKEL_DYNAMIC) + { + // All valid objects will request matrix load + // then entire block will try to load it + // Max skeleton bone count is 64 + // Worst case 64 * 16 = 1024 float will be loaded to sMem + // Some blocks will load twice + // However its extremely rare (even impossible case) + // In a realistic scenario (and if a segment holds adjacent voxels) + // And if max bone influence per vertex is around 4 + // there should be at most 8 + + // Matrix Lookup Initialize + if(blockLocalId < GI_MAX_JOINT_COUNT) + sMatrixLookup[blockLocalId] = 0; + __syncthreads(); + + if(voxelWeightIndex.x != 0xFF) sMatrixLookup[voxelWeightIndex.x] = 1; + if(voxelWeightIndex.y != 0xFF) sMatrixLookup[voxelWeightIndex.y] = 1; + if(voxelWeightIndex.z != 0xFF) sMatrixLookup[voxelWeightIndex.z] = 1; + if(voxelWeightIndex.w != 0xFF) sMatrixLookup[voxelWeightIndex.w] = 1; + __syncthreads(); + + // Lookup Tables are Loaded + // Theorethical 63 Matrices will be loaded + // Each thread will load 1 float we need 1024 threads + unsigned int iterationCount = (GI_MAX_JOINT_COUNT * 16) / blockDim.x; + for(unsigned int i = 0; i < iterationCount; i++) + { + unsigned int floatId = blockLocalId + (blockDim.x * i); + + // Transformation + if(floatId < GI_MAX_JOINT_COUNT * 16) + { + unsigned int matrixId = (floatId / 16); + unsigned int matrixLocalFloatId = floatId % 16; + if(sMatrixLookup[matrixId] == 1) + { + const CMatrix4x4& jointT = gBatchOGLData.dJointTransforms[matrixId].transform; + const float* jointTFloat = reinterpret_cast(&jointT); + float* sTrans = reinterpret_cast(&sTransformMatrices[matrixId + 1]); + + sTrans[matrixLocalFloatId] = jointTFloat[matrixLocalFloatId]; + } + } + // Rotation + if(floatId < GI_MAX_JOINT_COUNT * 9) + { + unsigned int matrixId = (floatId / 9); + unsigned int matrixLocalFloatId = floatId % 9; + if(sMatrixLookup[matrixId] == 1) + { + const CMatrix4x4& jointRot = gBatchOGLData.dJointTransforms[matrixId].rotation; + const float* jointRotFloat = reinterpret_cast(&jointRot); + float* sRot = reinterpret_cast(&sRotationMatrices[matrixId + 1]); + + unsigned int column = matrixLocalFloatId / 3; + unsigned int row = matrixLocalFloatId % 3; + sRot[column * 4 + row] = jointRotFloat[column * 4 + row]; + } + } + } + } + // We write to shared mem sync between warps + __syncthreads(); +} + +__global__ void VoxelTransform(// Voxel Pages + CVoxelPage* gVoxelPages, + const CVoxelGrid* gGridInfos, + // OGL Related + const BatchOGLData* gBatchOGLData, + // Voxel Cache Related + const BatchVoxelCache* gBatchVoxelCache, + // Limits + const uint32_t batchCount) +{ + // Cache Loading + // Shared Memory which used 
for transform rendering + __shared__ CMatrix4x4 sTransformMatrices[GI_MAX_JOINT_COUNT + 1]; // First index holds model matrix + __shared__ CMatrix3x3 sRotationMatrices[GI_MAX_JOINT_COUNT + 1]; + __shared__ uint8_t sMatrixLookup[GI_MAX_JOINT_COUNT + 1]; // Extra 4 Byte for alignment + // Shared Memory for generic data + __shared__ CSegmentInfo sSegInfo; + __shared__ CVoxelGrid sGridInfo; + __shared__ uint32_t sObjTransformId; + __shared__ CMeshVoxelInfo sMeshVoxelInfo; + + unsigned int blockLocalId = threadIdx.x; + unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x; + unsigned int pageId = globalId / GIVoxelPages::PageSize; + unsigned int pageLocalId = globalId % GIVoxelPages::PageSize; + unsigned int pageLocalSegmentId = pageLocalId / GIVoxelPages::SegmentSize; + unsigned int segmentLocalVoxId = pageLocalId % GIVoxelPages::SegmentSize; + + // Get Segments Obj Information Struct + CObjectType objType; + CSegmentOccupation occupation; + uint8_t cascadeId; + bool firstOccurance; + if(blockLocalId == 0) + { + // Load to smem + // Todo split this into the threadss + sSegInfo = gVoxelPages[pageId].dSegmentInfo[pageLocalSegmentId]; + ExpandSegmentInfo(cascadeId, objType, occupation, firstOccurance, sSegInfo.packed); + } + __syncthreads(); + if(blockLocalId != 0) + { + ExpandSegmentInfo(cascadeId, objType, occupation, firstOccurance, sSegInfo.packed); + } + // Full Block Cull + if(occupation == CSegmentOccupation::EMPTY) return; + assert(occupation != CSegmentOccupation::MARKED_FOR_CLEAR); + + // If segment is not empty + // Load Block Constants + if(blockLocalId == 0) + { + // TODO: Re-write this to be more multi-thread loadable + sObjTransformId = gBatchOGLData[sSegInfo.batchId].dModelTransformIndices[sSegInfo.objId]; + sMeshVoxelInfo = gBatchVoxelCache[cascadeId * batchCount + sSegInfo.batchId].dMeshVoxelInfo[sSegInfo.objId]; + sGridInfo = gGridInfos[cascadeId]; + } + __syncthreads(); + + // Find your opengl data and voxel cache + const uint16_t& batchId = sSegInfo.batchId; + const uint16_t& objectId = sSegInfo.objId; + const BatchOGLData& batchOGLData = gBatchOGLData[batchId]; + const BatchVoxelCache& batchCache = gBatchVoxelCache[cascadeId * batchCount + batchId]; + + // Voxel Ids + const uint32_t objectLocalVoxelId = sSegInfo.objectSegmentId * GIVoxelPages::SegmentSize + segmentLocalVoxId; + const uint32_t batchLocalVoxelId = objectLocalVoxelId + sMeshVoxelInfo.voxOffset; + + // Load weights if necessary + CVoxelWeights weights = {{0x00, 0x00, 0x00, 0x00},{0xFF, 0xFF, 0xFF, 0xFF}}; + if(objectLocalVoxelId < sMeshVoxelInfo.voxCount && objType == CObjectType::SKEL_DYNAMIC) + { + weights = batchCache.dVoxelWeight[batchLocalVoxelId]; + } + + // Segment is occupied so load matrices before culling unused warps + LoadTransformData(// Shared Mem + sTransformMatrices, + sRotationMatrices, + sMatrixLookup, + // OGL + batchOGLData, + // Weight Index + weights.weightIndex, + // Object Type that will be broadcasted + objType, + objectId, + sObjTransformId); + + // Cull threads + // Edge case where last segment do not always full + if(objectLocalVoxelId >= sMeshVoxelInfo.voxCount) + { + gVoxelPages[pageId].dGridVoxPos[pageLocalId] = 0xFFFFFFFF; + gVoxelPages[pageId].dGridVoxNorm[pageLocalId] = 0xFFFFFFFF; + return; + } + + // Fetch NormalPos from cache + int3 voxPos; + float3 normal; + voxPos = ExpandVoxPos(batchCache.dVoxelPos[batchLocalVoxelId]); + normal = ExpandVoxNormal(batchCache.dVoxelNorm[batchLocalVoxelId]); + + // Fetch AABB min, transform and span + float4 objAABBMin = 
batchOGLData.dAABBs[objectId].min; + + // Generate World Position + // start with object space position + float3 worldPos; + worldPos.x = objAABBMin.x + voxPos.x * sGridInfo.span; + worldPos.y = objAABBMin.y + voxPos.y * sGridInfo.span; + worldPos.z = objAABBMin.z + voxPos.z * sGridInfo.span; + + // Joint Transformations + if(objType == CObjectType::SKEL_DYNAMIC) + { + float4 weightUnorm; + weightUnorm.x = static_cast(weights.weight.x) / 255.0f; + weightUnorm.y = static_cast(weights.weight.y) / 255.0f; + weightUnorm.z = static_cast(weights.weight.z) / 255.0f; + weightUnorm.w = static_cast(weights.weight.w) / 255.0f; + + //if(threadIdx.x == 0) + // printf("x %d, y %d, z %d, w %d\n", + // weights.weightIndex.x, + // weights.weightIndex.y, + // weights.weightIndex.z, + // weights.weightIndex.w); + + // Nyra Char Related Assert + //assert(weights.weightIndex.x <= 24); + //assert(weights.weightIndex.y <= 24); + //assert(weights.weightIndex.z <= 24); + //assert(weights.weightIndex.w <= 24); + + float3 pos = {0.0f, 0.0f, 0.0f}; + float3 p = MultMatrix(worldPos, sTransformMatrices[weights.weightIndex.x + 1]); + //float3 p = MultMatrix(worldPos, batchOGLData.dJointTransforms[weights.weightIndex.x].transform); + pos.x += weightUnorm.x * p.x; + pos.y += weightUnorm.x * p.y; + pos.z += weightUnorm.x * p.z; + + p = MultMatrix(worldPos, sTransformMatrices[weights.weightIndex.y + 1]); + //p = MultMatrix(worldPos, batchOGLData.dJointTransforms[weights.weightIndex.y].transform); + pos.x += weightUnorm.y * p.x; + pos.y += weightUnorm.y * p.y; + pos.z += weightUnorm.y * p.z; + + p = MultMatrix(worldPos, sTransformMatrices[weights.weightIndex.z + 1]); + //p = MultMatrix(worldPos, batchOGLData.dJointTransforms[weights.weightIndex.z].transform); + pos.x += weightUnorm.z * p.x; + pos.y += weightUnorm.z * p.y; + pos.z += weightUnorm.z * p.z; + + p = MultMatrix(worldPos, sTransformMatrices[weights.weightIndex.w + 1]); + //p = MultMatrix(worldPos, batchOGLData.dJointTransforms[weights.weightIndex.w].transform); + pos.x += weightUnorm.w * p.x; + pos.y += weightUnorm.w * p.y; + pos.z += weightUnorm.w * p.z; + + worldPos = pos; + + float3 norm = {0.0f, 0.0f, 0.0f}; + float3 n = MultMatrix(normal, sRotationMatrices[weights.weightIndex.x + 1]); + norm.x += weightUnorm.x * n.x; + norm.y += weightUnorm.x * n.y; + norm.z += weightUnorm.x * n.z; + + n = MultMatrix(normal, sRotationMatrices[weights.weightIndex.y + 1]); + norm.x += weightUnorm.y * n.x; + norm.y += weightUnorm.y * n.y; + norm.z += weightUnorm.y * n.z; + + n = MultMatrix(normal, sRotationMatrices[weights.weightIndex.z + 1]); + norm.x += weightUnorm.z * n.x; + norm.y += weightUnorm.z * n.y; + norm.z += weightUnorm.z * n.z; + + n = MultMatrix(normal, sRotationMatrices[weights.weightIndex.w + 1]); + norm.x += weightUnorm.w * n.x; + norm.y += weightUnorm.w * n.y; + norm.z += weightUnorm.w * n.z; + + normal = norm; + } + + // Model Transformations + MultMatrixSelf(worldPos, sTransformMatrices[0]); + MultMatrixSelf(normal, sRotationMatrices[0]); + //// Unoptimized Matrix Load + //CMatrix4x4 transform = gObjTransforms[segObj.batchId][gObjTransformIds[segObj.batchId][segObj.objId]].transform; + //CMatrix4x4 rotation = gObjTransforms[segObj.batchId][gObjTransformIds[segObj.batchId][segObj.objId]].transform; + //MultMatrixSelf(worldPos, transform); + //MultMatrixSelf(normal, rotation); + + // Reconstruct Voxel Indices relative to the new pos of the grid + worldPos.x -= sGridInfo.position.x; + worldPos.y -= sGridInfo.position.y; + worldPos.z -= sGridInfo.position.z; + + 
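// worldPos is now expressed relative to the grid origin. The block below culls voxels that
// fall outside the cascade (or, for non-first-occurrence segments, inside the inner
// quarter-to-three-quarter band that a finer cascade presumably covers) and then quantizes
// the remaining positions to integer voxel indices plus a fractional occupancy weight.
// A minimal sketch of that quantization, using a hypothetical standalone helper with the
// same math as the code below:
//
//     __device__ int3 GridLocalToVoxelSketch(const float3& gridLocal, float span)
//     {
//         float invSpan = 1.0f / span;
//         return make_int3(static_cast<int>(gridLocal.x * invSpan),
//                          static_cast<int>(gridLocal.y * invSpan),
//                          static_cast<int>(gridLocal.z * invSpan));
//     }
//
// For example, with span = 2.0f a grid-local coordinate of 7.3f lands in voxel index 3,
// with a fractional weight of fmodf(7.3f, 2.0f) / 2.0f = 0.65f along that axis.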
bool outOfBounds; + outOfBounds = (worldPos.x < 0.0f) || (worldPos.x >= (sGridInfo.dimension.x) * sGridInfo.span); + outOfBounds |= (worldPos.y < 0.0f) || (worldPos.y >= (sGridInfo.dimension.y) * sGridInfo.span); + outOfBounds |= (worldPos.z < 0.0f) || (worldPos.z >= (sGridInfo.dimension.z) * sGridInfo.span); + + // If its mip dont update inner cascade + bool inInnerCascade = false; + if(!firstOccurance) // Only do inner culling if object is not first occurance in hierarchy (base level voxel data of the object + { + inInnerCascade = (worldPos.x > (sGridInfo.dimension.x) * sGridInfo.span * 0.25f) && + (worldPos.x < (sGridInfo.dimension.x) * sGridInfo.span * 0.75f); + + inInnerCascade &= (worldPos.y > (sGridInfo.dimension.y) * sGridInfo.span * 0.25f) && + (worldPos.y < (sGridInfo.dimension.y) * sGridInfo.span * 0.75f); + + inInnerCascade &= (worldPos.z > (sGridInfo.dimension.z) * sGridInfo.span * 0.25f) && + (worldPos.z < (sGridInfo.dimension.z) * sGridInfo.span * 0.75f); + } + outOfBounds |= inInnerCascade; + + // Voxel Space + float invSpan = 1.0f / sGridInfo.span; + voxPos.x = static_cast(worldPos.x * invSpan); + voxPos.y = static_cast(worldPos.y * invSpan); + voxPos.z = static_cast(worldPos.z * invSpan); + + // Calculate VoxelWeights + float3 volumeWeight; + volumeWeight.x = fmodf(worldPos.x, sGridInfo.span) * invSpan; + volumeWeight.y = fmodf(worldPos.y, sGridInfo.span) * invSpan; + volumeWeight.z = fmodf(worldPos.z, sGridInfo.span) * invSpan; + + //volumeWeight.x = volumeWeight.x - static_cast(voxPos.x); + //volumeWeight.y = volumeWeight.y - static_cast(voxPos.y); + //volumeWeight.z = volumeWeight.z - static_cast(voxPos.z); + + //volumeWeight.x = 1.0f; + //volumeWeight.y = 1.0f; + //volumeWeight.z = 1.0f; + + //uint3 neigbourBits; + //neigbourBits.x = (volumeWeight.x > 0) ? 1 : 0; + //neigbourBits.y = (volumeWeight.y > 0) ? 1 : 0; + //neigbourBits.z = (volumeWeight.z > 0) ? 
1 : 0; + + // Outer Bound Check + outOfBounds |= (voxPos.x >= sGridInfo.dimension.x); + outOfBounds |= (voxPos.y >= sGridInfo.dimension.y); + outOfBounds |= (voxPos.z >= sGridInfo.dimension.z); + + // Now Write + // Discard the out of bound voxels + //outOfBounds = false; + if(!outOfBounds) + { + // Write to page + gVoxelPages[pageId].dGridVoxPos[pageLocalId] = PackVoxPos(voxPos); + gVoxelPages[pageId].dGridVoxNorm[pageLocalId] = PackVoxNormal(normal); + gVoxelPages[pageId].dGridVoxOccupancy[pageLocalId] = PackOccupancy(volumeWeight); + } + else + { + gVoxelPages[pageId].dGridVoxPos[pageLocalId] = 0xFFFFFFFF; + gVoxelPages[pageId].dGridVoxNorm[pageLocalId] = 0xFFFFFFFF; + } +} \ No newline at end of file diff --git a/cuda_code/ParaCellsError.cu b/cuda_code/ParaCellsError.cu new file mode 100644 index 0000000000000000000000000000000000000000..70e7896e940a311ae40724e2457f31571939fc51 --- /dev/null +++ b/cuda_code/ParaCellsError.cu @@ -0,0 +1,47 @@ +#include "ParaCellsError.cuh" + +#include +#include +#include + +ParaCellsError::ParaCellsError() +{ + message = NULL; +} + +ParaCellsError::ParaCellsError(const char *message, const char *file, int line, int isFromCUDA) +{ + const char *err; + if (isFromCUDA) + { + err = "[ParaCells Error][CUDA] "; + } + else + { + err = "[ParaCells Error] "; + } + + char line_str[10]; + //itoa(line, line_str, 10); + sprintf(line_str, "%d", line); + this->message = (char *)malloc(sizeof(char)*(strlen(err) + strlen(file) + strlen(line_str) + strlen(message) + 6)); + strcpy(this->message, err); + strcat(this->message, file); + strcat(this->message, "("); + strcat(this->message, line_str); + strcat(this->message, "): "); + strcat(this->message, message); +} + +char* ParaCellsError::getMessage() +{ + return message; +} + +ParaCellsError::~ParaCellsError() +{ + if (message) + { + //free(message); + } +} diff --git a/cuda_code/PowKernel.cu b/cuda_code/PowKernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..b6e893dfc0fc92d212bc2dd96fdae32ba58dc737 --- /dev/null +++ b/cuda_code/PowKernel.cu @@ -0,0 +1,187 @@ +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +namespace { + + +// SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt. +// So we need to define the functions with the explicit function signatures. +// As for pow, the following signatures are defined as the device function: +// pow(float, int) +// pow(double, int) +// pow(float, float) +// pow(double, double) +// As for sqrt, the following signatures are defined as the device function: +// sqrt(float) +// sqrt(double) +// As for inverse sqrt, we must define it explicitly in MSVC, otherwise the static cast will be +// applied to the result of the inline function, and thus the result is incorrect. +// e.g. 
if we use 1.0 / sqrt(2) for 2 ^ (-0.5) in MSVC, we get +// int(2 ^ (-0.5)) = int(1.0 / sqrt(2)) = int(1.0 / int(1.414)) = int(1.0 / 1) = 1 +// However, the correct result is +// int(2 ^ (-0.5)) = int(1.0 / 1.414) = 0 +#ifdef _MSC_VER +// Functions for pow +// pow for at::Half +static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) { + return static_cast(std::pow(static_cast(base), static_cast(exp))); +} +// pow for at::BFloat16 +static inline __host__ __device__ at::BFloat16 pow_(at::BFloat16 base, at::BFloat16 exp) { + return static_cast(std::pow(static_cast(base), static_cast(exp))); +} +// pow (floating, floating/int) +template +static inline __host__ __device__ typename std::enable_if::value && (std::is_same::value || std::is_same::value), Base_type>::type + pow_(Base_type base, Exp_type exp) { + return std::pow(base, exp); +} +// pow (integral, integral) +template +static inline __host__ __device__ typename std::enable_if::value && std::is_same::value, Base_type>::type + pow_(Base_type base, Exp_type exp) { + return native::powi(base, exp); +} +// pow (Otherwise) +template +static inline __host__ __device__ typename std::enable_if::value && !std::is_same::value, Base_type>::type + pow_(Base_type base, Exp_type exp) { + return static_cast(std::pow(static_cast(base), static_cast(exp))); +} +// pow (Complex) +template +static inline __host__ __device__ B complex_pow_(B base, E exp) { + return std::pow(base, exp); +} +// Functions for sqrt +// sqrt (floating) +template +static inline __host__ __device__ typename std::enable_if::value, T>::type sqrt_(T x) { + return std::sqrt(x); +} +// sqrt (integral) +template +static inline __host__ __device__ typename std::enable_if::value, T>::type sqrt_(T x) { + return static_cast(std::sqrt(static_cast(x))); +} +// Function for inverse sqrt +// invsqrt (floating) +template +static inline __host__ __device__ typename std::enable_if::value, T>::type invsqrt_(T x) { + return 1.0 / std::sqrt(x); +} +// invsqrt (integral) +template +static inline __host__ __device__ typename std::enable_if::value, T>::type invsqrt_(T x) { + return static_cast(1.0 / std::sqrt(static_cast(x))); +} +#else +template +static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) { + return ::pow(base, exp); +} +template +static inline __host__ __device__ T sqrt_(T x) { + return ::sqrt(x); +} +template +static inline __host__ __device__ T invsqrt_(T x) { + return 1.0 / ::sqrt(x); +} +// pow (Otherwise) +template +static inline __host__ __device__ B complex_pow_(B base, E exp) { + return std::pow(base, exp); +} +#endif + +void pow_tensor_tensor_kernel(TensorIteratorBase& iter) { + if (isComplexType(iter.dtype())) { + AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() { + gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t { + return complex_pow_(base, exp); + }); + }); + } else if (isFloatingType(iter.dtype())) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "pow_cuda", [&]() { + gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t { + return pow_(base, exp); + }); + }); + } else { + AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() { + gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t { + return native::powi(base, exp); + }); + }); + } +} + +template +void pow_tensor_scalar_kernel_impl(TensorIteratorBase& iter, + Exp_type exp) { + const auto d_exp = static_cast(exp); + if (d_exp 
== 0.5) { + gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { + return sqrt_(base); + }); + } else if (d_exp == 2) { + gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { + return base * base; + }); + } else if (d_exp == 3) { + gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { + return base * base * base; + }); + } else if (d_exp == -0.5) { + gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { + return invsqrt_(base); + }); + } else if (d_exp == -1) { + gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { + return 1.0 / base; + }); + } else if (d_exp == -2) { + gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { + return 1.0 / (base * base); + }); + } else { + gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type { + return pow_(base, exp); + }); + } +} + +void pow_tensor_scalar_kernel(TensorIteratorBase& iter, const Scalar& exp_scalar) { + if (isComplexType(iter.dtype()) || exp_scalar.isComplex()) { + AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "pow_cuda", [&]() { + const auto exp = exp_scalar.to(); + gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base) -> scalar_t { + return complex_pow_(base, exp); + }); + }); + } else if (isFloatingType(iter.dtype()) || exp_scalar.isIntegral(false)) { + AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.dtype(), "pow_cuda", [&]() { + const auto exp = exp_scalar.to(); + pow_tensor_scalar_kernel_impl(iter, exp); + }); + } else { + const auto exp = exp_scalar.to(); + AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "pow_cuda", [&]() { + pow_tensor_scalar_kernel_impl(iter, exp); + }); + } +} + +} // anonymous namespace + +REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel); +REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel); + +}} // namespace at::native diff --git a/cuda_code/PreScale.cu b/cuda_code/PreScale.cu new file mode 100644 index 0000000000000000000000000000000000000000..27e72c2c2c8a7a2f72bfac515034c7a233839c08 --- /dev/null +++ b/cuda_code/PreScale.cu @@ -0,0 +1,262 @@ +#include + +/*#define N_BINS 16 +#define DIV_VAL_HIST 16 // GRAY_LEVELS / 16 = 256 / 16 + +#define N_THREADS_SCALE 16 +#define VER_HIST_SMEM_SIZE 256 // N_BINS x N_THREADS_SCALE = 16 x 16 +*/ + +#define N_BINS 8 +#define DIV_VAL_HIST 32 + +#define N_THREADS_SCALE 16 +#define VER_HIST_SMEM_SIZE 128 + + +unsigned int *d_im0, *d_im1; +float *h_scales, *d_scales; +float *h_diffScaleVerHists, *d_diffScaleVerHists; + +float sy; +float syDiff; + + +// ========================================= PRIVATE UTILS ========================================= + + +void checkErrorScale(const char *msg) { + cudaError_t err = cudaGetLastError(); + if(cudaSuccess != err) { + fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } +} + + +// ========================================= KERNELS ========================================= + + +__global__ void kernel_calcDiffScaleVerHists(float *diffScaleVerHists, unsigned int *im0, unsigned int *im1, int imH, int imW, + float *scales, int nScales) { // scale < 1: zoom out, scale > 1: zoom in + + int globalIdx = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; + + int iThScale = globalIdx / imW; + int iThColumn = globalIdx % imW; + float scale = scales[iThScale]; + + + // ---------- define shared memories ---------- + __shared__ int smem_verHist0[VER_HIST_SMEM_SIZE]; // with size = N_BINS * VER_HIST_N_THREADS_SCALE = 16 bins * 16 threads = 256 (one thread processes one column) + __shared__ int smem_verHist1[VER_HIST_SMEM_SIZE]; + + 
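// Shared-memory layout note: each of the two histograms above holds
// VER_HIST_SMEM_SIZE = N_BINS * N_THREADS_SCALE counters (8 x 16 = 128 with the current
// defines) and is indexed bin-major, exactly as the accumulation loop below does:
//
//     smemPos = binIdx * N_THREADS_SCALE + threadIdx.x;
//
// so every thread (one image column per thread) owns its own column of bin counters.
// The increments therefore need no atomics, and since the 16 threads always address 16
// distinct words, the updates should also be free of shared-memory bank conflicts.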
__shared__ float smem_diffVerHist[N_THREADS_SCALE]; + + + // init shared memories to 0 + #pragma unroll + for(int i = 0; i < N_BINS; i++) { + int smemPos = __mul24(i, N_THREADS_SCALE) + threadIdx.x; + smem_verHist0[smemPos] = 0; + smem_verHist1[smemPos] = 0; + } + __syncthreads(); + + + + // ---------- calculate vertical histograms of 2 images and store to shared memories ---------- + int interpolatedImH = -1; + int startPos = -1; + + // determine zoom out or zoom in + float scaleUp = 1 - (scale - 1); + + if(scale < 1) { // zoom out + interpolatedImH = int(imH * scale); // for im0 // interpolatedImgH < imH + startPos = int(imH * (1 - scale)/2); // for im1 + } else { // zoom in + startPos = int(imH * (1 - scaleUp)/2); // for im0 + interpolatedImH = int(imH * scaleUp); // for img1 + } + + // calc hist. + int tmp = __mul24(startPos, imW) + iThColumn; + + #pragma unroll + for(int i = 0; i < interpolatedImH; i++) { + int interpolatedI = -1; + + if(scale < 1) { // zoom out + interpolatedI = int(i/scale); // i * (1 / scale) // interpolatedI > i + } else { // zoom in + interpolatedI = int(i/scaleUp); + } + + int imPos0 = __mul24(interpolatedI, imW) + iThColumn; + int imPos1 = __mul24(i, imW) + tmp; + //int imgPos1 = (i + startPos) * imgW + idx; // = i*imgW + startPos*imgW + idx = i*imgW + tmp; + + int binIdx0 = im0[imPos0] / DIV_VAL_HIST; int smemPos0 = __mul24(binIdx0, N_THREADS_SCALE) + threadIdx.x; smem_verHist0[smemPos0]++; + int binIdx1 = im1[imPos1] / DIV_VAL_HIST; int smemPos1 = __mul24(binIdx1, N_THREADS_SCALE) + threadIdx.x; smem_verHist1[smemPos1]++; + } + __syncthreads(); + + + + // ---------- calculate the difference btw vertical histograms and store to global memory ---------- + int diff = 0; + + #pragma unroll + for(int i = 0; i < N_BINS; i++) { + int smemPos = __mul24(i, N_THREADS_SCALE) + threadIdx.x; + diff += abs(smem_verHist0[smemPos] - smem_verHist1[smemPos]); + } + smem_diffVerHist[threadIdx.x] = 1 - diff*0.5/imH; + __syncthreads(); + + diffScaleVerHists[globalIdx] = smem_diffVerHist[threadIdx.x]; +} + + +// ========================================================================================================================== + + +void initScale(int *img0, int *img1, int imH, int imW, + double scaleFrom, double scaleTo, double scaleRes, int nScales) { + + int fourBytes = sizeof(int); + + + // ----- init arrays representing for 2 input images ----- + size_t memSizeOfImage = imH * imW * fourBytes; + cudaMalloc((void **) &d_im0, memSizeOfImage); cudaMemcpy(d_im0, img0, memSizeOfImage, cudaMemcpyHostToDevice); + cudaMalloc((void **) &d_im1, memSizeOfImage); cudaMemcpy(d_im1, img1, memSizeOfImage, cudaMemcpyHostToDevice); + + + // ----- init an array of scales ----- + size_t memSizeOfNScales = nScales * fourBytes; + h_scales = (float *)malloc(memSizeOfNScales); + + int count = 0; + for(double i = scaleFrom; i <= scaleTo; i += scaleRes) // i < 1: zoom out, i > 1: zoom in + h_scales[count++] = float(i); + + cudaMalloc((void **) &d_scales, memSizeOfNScales); + cudaMemcpy(d_scales, h_scales, memSizeOfNScales, cudaMemcpyHostToDevice); + + + // ----- init array for the different btw 2 vertical histograms ----- + size_t memSizeOfDiffScaleVerHists = nScales * imW * fourBytes; + cudaMalloc((void **) &d_diffScaleVerHists, memSizeOfDiffScaleVerHists); + h_diffScaleVerHists = (float *)malloc(memSizeOfDiffScaleVerHists); +} + + +void calcDiffScaleVerHists(int imH, int imW, int nScales) { + int nThreads = N_THREADS_SCALE; + int nBlocks = (imW / nThreads) * nScales; // imageWidth chia het cho 
16 + + kernel_calcDiffScaleVerHists<<>>(d_diffScaleVerHists, d_im0, d_im1, imH, imW, d_scales, nScales); checkErrorScale("kernel_calcDiffScaleVerHists"); + cudaThreadSynchronize(); checkErrorScale("kernel_calcDiffScaleVerHists cudaThreadSynchronize"); + + size_t memSizeOfDiffScaleVerHists = nScales * imW * sizeof(float); + cudaMemcpy(h_diffScaleVerHists, d_diffScaleVerHists, memSizeOfDiffScaleVerHists, cudaMemcpyDeviceToHost); checkErrorScale("copy from DEV to HOST"); +} + + +float calcGlobalDiffScaleVerHist(int imW, int iThScale) { + float diff = 0; + for(int i = 0; i < imW; i++) + diff += h_diffScaleVerHists[iThScale * imW + i]; + return diff / imW; +} + + +// -------------------------------------------------------------------- + + +/*void estimateScale(int *img0, int *img1, int imH, int imW, + double scaleFrom, double scaleTo, double scaleRes) { + + // initialization + int nScales = int((scaleTo - scaleFrom) / scaleRes + 1); + initScale(img0, img1, imH, imW, scaleFrom, scaleTo, scaleRes, nScales); // co phan nay la ton > 30 % thoi gian + + // calculate sy + calcDiffScaleVerHists(imH, imW, nScales); + + syDiff = -1.0; + for(int iThScale = 0; iThScale < nScales; iThScale++) { // i < 1: zoom out, i > 1: zoom in + float diff = calcGlobalDiffScaleVerHist(imW, iThScale); + if (syDiff < diff) { + syDiff = diff; + sy = float(scaleFrom + iThScale*scaleRes); + } + } +}*/ + + +void releaseScale() { + cudaFree(d_im0); cudaFree(d_im1); + free(h_scales); cudaFree(d_scales); + free(h_diffScaleVerHists); cudaFree(d_diffScaleVerHists); +} + + +// -------------------------------------------------------------------- + + +extern "C" float getSy() { return sy; } + + +extern "C" float runScaleEst(int *img0, int *img1, int imH, int imW, + double scaleFrom, double scaleTo, double scaleRes) { + + // ---------------------- initialization ---------------------- + int nScales = int((scaleTo - scaleFrom) / scaleRes + 1); + initScale(img0, img1, imH, imW, scaleFrom, scaleTo, scaleRes, nScales); // co phan nay la ton > 30 % thoi gian + // ------------------------------------------------------------ + + + // computation time measure + cudaEvent_t timeStart, timeStop; + + float elapsedTime; + cudaEventCreate(&timeStart); + cudaEventCreate(&timeStop); + cudaEventRecord(timeStart, 0); + + + // ---------------------- calculate sy ---------------------- + calcDiffScaleVerHists(imH, imW, nScales); + + syDiff = -1.0; + for(int iThScale = 0; iThScale < nScales; iThScale++) { // i < 1: zoom out, i > 1: zoom in + float diff = calcGlobalDiffScaleVerHist(imW, iThScale); + if (syDiff < diff) { + syDiff = diff; + sy = float(scaleFrom + iThScale*scaleRes); + } + } + // ----------------------------------------------------------- + + + cudaEventRecord(timeStop, 0); + cudaEventSynchronize(timeStop); + cudaEventElapsedTime(&elapsedTime, timeStart, timeStop); + cudaEventDestroy(timeStart); + cudaEventDestroy(timeStop); + + // print results to file + FILE *fp; + fp = fopen("result.txt", "a"); + //fprintf(fp, "sy=%f, syDiff=%f, t=%f \n", getSy(), syDiff, elapsedTime); + fprintf(fp, "sy=%1.3f, t=%1.3f ", getSy(), elapsedTime); + fclose(fp); + + + releaseScale(); + + return elapsedTime; +} \ No newline at end of file diff --git a/cuda_code/QuEST_gpu_10.cu b/cuda_code/QuEST_gpu_10.cu new file mode 100644 index 0000000000000000000000000000000000000000..f264d4753bb9a1a907d12d487a4d27ea5fef6f17 --- /dev/null +++ b/cuda_code/QuEST_gpu_10.cu @@ -0,0 +1,2234 @@ +// Distributed under MIT licence. 
See https://github.com/aniabrown/QuEST_GPU/blob/master/LICENCE.txt for details + +/** @file + * An implementation of the pure backend in ../QuEST_ops_pure.h for a GPU environment. + */ + +# include "../QuEST.h" +# include "../QuEST_precision.h" +# include "../mt19937ar.h" + +# include +# include +# include + +# define REDUCE_SHARED_SIZE 512 +# define DEBUG 0 + + +static __device__ int extractBit (int locationOfBitFromRight, long long int theEncodedNumber) +{ + return (theEncodedNumber & ( 1LL << locationOfBitFromRight )) >> locationOfBitFromRight; +} + +#ifdef __cplusplus +extern "C" { +#endif + + + + + + +void statevec_setAmps(Qureg qureg, long long int startInd, qreal* reals, qreal* imags, long long int numAmps) { + + cudaDeviceSynchronize(); + cudaMemcpy( + qureg.deviceStateVec.real + startInd, + reals, + numAmps * sizeof(*(qureg.deviceStateVec.real)), + cudaMemcpyHostToDevice); + cudaMemcpy( + qureg.deviceStateVec.imag + startInd, + imags, + numAmps * sizeof(*(qureg.deviceStateVec.real)), + cudaMemcpyHostToDevice); +} + + +/** works for both statevectors and density matrices */ +void statevec_cloneQureg(Qureg targetQureg, Qureg copyQureg) { + + // copy copyQureg's GPU statevec to targetQureg's GPU statevec + cudaDeviceSynchronize(); + cudaMemcpy( + targetQureg.deviceStateVec.real, + copyQureg.deviceStateVec.real, + targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.real)), + cudaMemcpyDeviceToDevice); + cudaMemcpy( + targetQureg.deviceStateVec.imag, + copyQureg.deviceStateVec.imag, + targetQureg.numAmpsPerChunk*sizeof(*(targetQureg.deviceStateVec.imag)), + cudaMemcpyDeviceToDevice); +} + +__global__ void densmatr_initPureStateKernel( + long long int numPureAmps, + qreal *targetVecReal, qreal *targetVecImag, + qreal *copyVecReal, qreal *copyVecImag) +{ + // this is a particular index of the pure copyQureg + long long int index = blockIdx.x*blockDim.x + threadIdx.x; + if (index>=numPureAmps) return; + + qreal realRow = copyVecReal[index]; + qreal imagRow = copyVecImag[index]; + for (long long int col=0; col < numPureAmps; col++) { + qreal realCol = copyVecReal[col]; + qreal imagCol = - copyVecImag[col]; // minus for conjugation + targetVecReal[col*numPureAmps + index] = realRow*realCol - imagRow*imagCol; + targetVecImag[col*numPureAmps + index] = realRow*imagCol + imagRow*realCol; + } +} + +void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(copyQureg.numAmpsPerChunk)/threadsPerCUDABlock); + densmatr_initPureStateKernel<<>>( + copyQureg.numAmpsPerChunk, + targetQureg.deviceStateVec.real, targetQureg.deviceStateVec.imag, + copyQureg.deviceStateVec.real, copyQureg.deviceStateVec.imag); +} + +__global__ void densmatr_initPlusStateKernel(long long int stateVecSize, qreal probFactor, qreal *stateVecReal, qreal *stateVecImag){ + long long int index; + + index = blockIdx.x*blockDim.x + threadIdx.x; + if (index>=stateVecSize) return; + + stateVecReal[index] = probFactor; + stateVecImag[index] = 0.0; +} + +void densmatr_initPlusState(Qureg qureg) +{ + qreal probFactor = 1.0/((qreal) (1LL << qureg.numQubitsRepresented)); + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); + densmatr_initPlusStateKernel<<>>( + qureg.numAmpsPerChunk, + probFactor, + qureg.deviceStateVec.real, + qureg.deviceStateVec.imag); +} + +__global__ void densmatr_initClassicalStateKernel( + long long int 
densityNumElems, + qreal *densityReal, qreal *densityImag, + long long int densityInd) +{ + // initialise the state to all zeros + long long int index = blockIdx.x*blockDim.x + threadIdx.x; + if (index >= densityNumElems) return; + + densityReal[index] = 0.0; + densityImag[index] = 0.0; + + if (index==densityInd){ + // classical state has probability 1 + densityReal[densityInd] = 1.0; + densityImag[densityInd] = 0.0; + } +} + +void densmatr_initClassicalState(Qureg qureg, long long int stateInd) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); + + // index of the desired state in the flat density matrix + long long int densityDim = 1LL << qureg.numQubitsRepresented; + long long int densityInd = (densityDim + 1)*stateInd; + + // identical to pure version + densmatr_initClassicalStateKernel<<>>( + qureg.numAmpsPerChunk, + qureg.deviceStateVec.real, + qureg.deviceStateVec.imag, densityInd); +} + +void statevec_createQureg(Qureg *qureg, int numQubits, QuESTEnv env) +{ + // allocate CPU memory + long long int numAmps = 1L << numQubits; + long long int numAmpsPerRank = numAmps/env.numRanks; + qureg->stateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.real)); + qureg->stateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->stateVec.imag)); + if (env.numRanks>1){ + qureg->pairStateVec.real = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.real)); + qureg->pairStateVec.imag = (qreal*) malloc(numAmpsPerRank * sizeof(qureg->pairStateVec.imag)); + } + + // check cpu memory allocation was successful + if ( (!(qureg->stateVec.real) || !(qureg->stateVec.imag)) + && numAmpsPerRank ) { + printf("Could not allocate memory!\n"); + exit (EXIT_FAILURE); + } + if ( env.numRanks>1 && (!(qureg->pairStateVec.real) || !(qureg->pairStateVec.imag)) + && numAmpsPerRank ) { + printf("Could not allocate memory!\n"); + exit (EXIT_FAILURE); + } + + qureg->numQubitsInStateVec = numQubits; + qureg->numAmpsPerChunk = numAmpsPerRank; + qureg->numAmpsTotal = numAmps; + qureg->chunkId = env.rank; + qureg->numChunks = env.numRanks; + qureg->isDensityMatrix = 0; + + // allocate GPU memory + cudaMalloc(&(qureg->deviceStateVec.real), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.real))); + cudaMalloc(&(qureg->deviceStateVec.imag), qureg->numAmpsPerChunk*sizeof(*(qureg->deviceStateVec.imag))); + cudaMalloc(&(qureg->firstLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)REDUCE_SHARED_SIZE)*sizeof(qreal)); + cudaMalloc(&(qureg->secondLevelReduction), ceil(qureg->numAmpsPerChunk/(qreal)(REDUCE_SHARED_SIZE*REDUCE_SHARED_SIZE))* + sizeof(qreal)); + + // check gpu memory allocation was successful + if (!(qureg->deviceStateVec.real) || !(qureg->deviceStateVec.imag)){ + printf("Could not allocate memory on GPU!\n"); + exit (EXIT_FAILURE); + } + +} + +void statevec_destroyQureg(Qureg qureg, QuESTEnv env) +{ + // Free CPU memory + free(qureg.stateVec.real); + free(qureg.stateVec.imag); + if (env.numRanks>1){ + free(qureg.pairStateVec.real); + free(qureg.pairStateVec.imag); + } + + // Free GPU memory + cudaFree(qureg.deviceStateVec.real); + cudaFree(qureg.deviceStateVec.imag); +} + +int GPUExists(void){ + int deviceCount, device; + int gpuDeviceCount = 0; + struct cudaDeviceProp properties; + cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount); + if (cudaResultCode != cudaSuccess) deviceCount = 0; + /* machines with no GPUs can still report one emulation device */ + for (device = 0; device < 
deviceCount; ++device) { + cudaGetDeviceProperties(&properties, device); + if (properties.major != 9999) { /* 9999 means emulation only */ + ++gpuDeviceCount; + } + } + if (gpuDeviceCount) return 1; + else return 0; +} + +QuESTEnv createQuESTEnv(void) { + // init MPI environment + if (!GPUExists()){ + printf("Trying to run GPU code with no GPU available\n"); + exit(EXIT_FAILURE); + } + + QuESTEnv env; + env.rank=0; + env.numRanks=1; + + seedQuESTDefault(); + + return env; +} + +void syncQuESTEnv(QuESTEnv env){ + cudaDeviceSynchronize(); +} + +int syncQuESTSuccess(int successCode){ + return successCode; +} + +void destroyQuESTEnv(QuESTEnv env){ + // MPI finalize goes here in MPI version. Call this function anyway for consistency +} + +void reportQuESTEnv(QuESTEnv env){ + printf("EXECUTION ENVIRONMENT:\n"); + printf("Running locally on one node with GPU\n"); + printf("Number of ranks is %d\n", env.numRanks); +# ifdef _OPENMP + printf("OpenMP enabled\n"); + printf("Number of threads available is %d\n", omp_get_max_threads()); +# else + printf("OpenMP disabled\n"); +# endif +} + +void getEnvironmentString(QuESTEnv env, Qureg qureg, char str[200]){ + sprintf(str, "%dqubits_GPU_noMpi_noOMP", qureg.numQubitsInStateVec); +} + +void copyStateToGPU(Qureg qureg) +{ + if (DEBUG) printf("Copying data to GPU\n"); + cudaMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real, + qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice); + cudaMemcpy(qureg.deviceStateVec.real, qureg.stateVec.real, + qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyHostToDevice); + cudaMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag, + qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyHostToDevice); + cudaMemcpy(qureg.deviceStateVec.imag, qureg.stateVec.imag, + qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyHostToDevice); + if (DEBUG) printf("Finished copying data to GPU\n"); +} + +void copyStateFromGPU(Qureg qureg) +{ + cudaDeviceSynchronize(); + if (DEBUG) printf("Copying data from GPU\n"); + cudaMemcpy(qureg.stateVec.real, qureg.deviceStateVec.real, + qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.real)), cudaMemcpyDeviceToHost); + cudaMemcpy(qureg.stateVec.imag, qureg.deviceStateVec.imag, + qureg.numAmpsPerChunk*sizeof(*(qureg.deviceStateVec.imag)), cudaMemcpyDeviceToHost); + if (DEBUG) printf("Finished copying data from GPU\n"); +} + +/** Print the current state vector of probability amplitudes for a set of qubits to standard out. + For debugging purposes. Each rank should print output serially. 
Only print output for systems <= 5 qubits + */ +void statevec_reportStateToScreen(Qureg qureg, QuESTEnv env, int reportRank){ + long long int index; + int rank; + copyStateFromGPU(qureg); + if (qureg.numQubitsInStateVec<=5){ + for (rank=0; rank + index = blockIdx.x*blockDim.x + threadIdx.x; + if (index>=stateVecSize) return; + stateVecReal[index] = 0.0; + stateVecImag[index] = 0.0; + + if (index==0){ + // zero state |0000..0000> has probability 1 + stateVecReal[0] = 1.0; + stateVecImag[0] = 0.0; + } +} + +void statevec_initZeroState(Qureg qureg) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); + statevec_initZeroStateKernel<<>>( + qureg.numAmpsPerChunk, + qureg.deviceStateVec.real, + qureg.deviceStateVec.imag); +} + +__global__ void statevec_initPlusStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ + long long int index; + + index = blockIdx.x*blockDim.x + threadIdx.x; + if (index>=stateVecSize) return; + + qreal normFactor = 1.0/sqrt((qreal)stateVecSize); + stateVecReal[index] = normFactor; + stateVecImag[index] = 0.0; +} + +void statevec_initPlusState(Qureg qureg) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); + statevec_initPlusStateKernel<<>>( + qureg.numAmpsPerChunk, + qureg.deviceStateVec.real, + qureg.deviceStateVec.imag); +} + +__global__ void statevec_initClassicalStateKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, long long int stateInd){ + long long int index; + + // initialise the state to |stateInd> + index = blockIdx.x*blockDim.x + threadIdx.x; + if (index>=stateVecSize) return; + stateVecReal[index] = 0.0; + stateVecImag[index] = 0.0; + + if (index==stateInd){ + // classical state has probability 1 + stateVecReal[stateInd] = 1.0; + stateVecImag[stateInd] = 0.0; + } +} +void statevec_initClassicalState(Qureg qureg, long long int stateInd) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); + statevec_initClassicalStateKernel<<>>( + qureg.numAmpsPerChunk, + qureg.deviceStateVec.real, + qureg.deviceStateVec.imag, stateInd); +} + +__global__ void statevec_initStateDebugKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag){ + long long int index; + + index = blockIdx.x*blockDim.x + threadIdx.x; + if (index>=stateVecSize) return; + + stateVecReal[index] = (index*2.0)/10.0; + stateVecImag[index] = (index*2.0+1.0)/10.0; +} + +void statevec_initStateDebug(Qureg qureg) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock); + statevec_initStateDebugKernel<<>>( + qureg.numAmpsPerChunk, + qureg.deviceStateVec.real, + qureg.deviceStateVec.imag); +} + +__global__ void statevec_initStateOfSingleQubitKernel(long long int stateVecSize, qreal *stateVecReal, qreal *stateVecImag, int qubitId, int outcome){ + long long int index; + int bit; + + index = blockIdx.x*blockDim.x + threadIdx.x; + if (index>=stateVecSize) return; + + qreal normFactor = 1.0/sqrt((qreal)stateVecSize/2); + bit = extractBit(qubitId, index); + if (bit==outcome) { + stateVecReal[index] = normFactor; + stateVecImag[index] = 0.0; + } else { + stateVecReal[index] = 0.0; + stateVecImag[index] = 0.0; + } +} + +void statevec_initStateOfSingleQubit(Qureg *qureg, int qubitId, int 
outcome) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg->numAmpsPerChunk)/threadsPerCUDABlock); + statevec_initStateOfSingleQubitKernel<<>>(qureg->numAmpsPerChunk, qureg->deviceStateVec.real, qureg->deviceStateVec.imag, qubitId, outcome); +} + +// returns 1 if successful, else 0 +int statevec_initStateFromSingleFile(Qureg *qureg, char filename[200], QuESTEnv env){ + long long int chunkSize, stateVecSize; + long long int indexInChunk, totalIndex; + + chunkSize = qureg->numAmpsPerChunk; + stateVecSize = chunkSize*qureg->numChunks; + + qreal *stateVecReal = qureg->stateVec.real; + qreal *stateVecImag = qureg->stateVec.imag; + + FILE *fp; + char line[200]; + + fp = fopen(filename, "r"); + if (fp == NULL) + return 0; + + indexInChunk = 0; totalIndex = 0; + while (fgets(line, sizeof(char)*200, fp) != NULL && totalIndexchunkId){ + # if QuEST_PREC==1 + sscanf(line, "%f, %f", &(stateVecReal[indexInChunk]), + &(stateVecImag[indexInChunk])); + # elif QuEST_PREC==2 + sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]), + &(stateVecImag[indexInChunk])); + # elif QuEST_PREC==4 + sscanf(line, "%lf, %lf", &(stateVecReal[indexInChunk]), + &(stateVecImag[indexInChunk])); + # endif + indexInChunk += 1; + } + totalIndex += 1; + } + } + fclose(fp); + copyStateToGPU(*qureg); + + // indicate success + return 1; +} + +int statevec_compareStates(Qureg mq1, Qureg mq2, qreal precision){ + qreal diff; + int chunkSize = mq1.numAmpsPerChunk; + + copyStateFromGPU(mq1); + copyStateFromGPU(mq2); + + for (int i=0; iprecision) return 0; + diff = mq1.stateVec.imag[i] - mq2.stateVec.imag[i]; + if (diff<0) diff *= -1; + if (diff>precision) return 0; + } + return 1; +} + +__global__ void statevec_compactUnitaryKernel (Qureg qureg, const int rotQubit, Complex alpha, Complex beta){ + // ----- sizes + long long int sizeBlock, // size of blocks + sizeHalfBlock; // size of blocks halved + // ----- indices + long long int thisBlock, // current block + indexUp,indexLo; // current index and corresponding index in lower half block + + // ----- temp variables + qreal stateRealUp,stateRealLo, // storage for previous state values + stateImagUp,stateImagLo; // (used in updates) + // ----- temp variables + long long int thisTask; // task based approach for expose loop with small granularity + const long long int numTasks=qureg.numAmpsPerChunk>>1; + + sizeHalfBlock = 1LL << rotQubit; // size of blocks halved + sizeBlock = 2LL * sizeHalfBlock; // size of blocks + + // ---------------------------------------------------------------- // + // rotate // + // ---------------------------------------------------------------- // + + //! 
fix -- no necessary for GPU version + qreal *stateVecReal = qureg.deviceStateVec.real; + qreal *stateVecImag = qureg.deviceStateVec.imag; + qreal alphaImag=alpha.imag, alphaReal=alpha.real; + qreal betaImag=beta.imag, betaReal=beta.real; + + thisTask = blockIdx.x*blockDim.x + threadIdx.x; + if (thisTask>=numTasks) return; + + thisBlock = thisTask / sizeHalfBlock; + indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; + indexLo = indexUp + sizeHalfBlock; + + // store current state vector values in temp variables + stateRealUp = stateVecReal[indexUp]; + stateImagUp = stateVecImag[indexUp]; + + stateRealLo = stateVecReal[indexLo]; + stateImagLo = stateVecImag[indexLo]; + + // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] + stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp + - betaReal*stateRealLo - betaImag*stateImagLo; + stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp + - betaReal*stateImagLo + betaImag*stateRealLo; + + // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] + stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + + alphaReal*stateRealLo + alphaImag*stateImagLo; + stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + + alphaReal*stateImagLo - alphaImag*stateRealLo; +} + +void statevec_compactUnitary(Qureg qureg, const int targetQubit, Complex alpha, Complex beta) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); + statevec_compactUnitaryKernel<<>>(qureg, targetQubit, alpha, beta); +} + +__global__ void statevec_controlledCompactUnitaryKernel (Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta){ + // ----- sizes + long long int sizeBlock, // size of blocks + sizeHalfBlock; // size of blocks halved + // ----- indices + long long int thisBlock, // current block + indexUp,indexLo; // current index and corresponding index in lower half block + + // ----- temp variables + qreal stateRealUp,stateRealLo, // storage for previous state values + stateImagUp,stateImagLo; // (used in updates) + // ----- temp variables + long long int thisTask; // task based approach for expose loop with small granularity + const long long int numTasks=qureg.numAmpsPerChunk>>1; + int controlBit; + + sizeHalfBlock = 1LL << targetQubit; // size of blocks halved + sizeBlock = 2LL * sizeHalfBlock; // size of blocks + + // ---------------------------------------------------------------- // + // rotate // + // ---------------------------------------------------------------- // + + //! 
fix -- no necessary for GPU version + qreal *stateVecReal = qureg.deviceStateVec.real; + qreal *stateVecImag = qureg.deviceStateVec.imag; + qreal alphaImag=alpha.imag, alphaReal=alpha.real; + qreal betaImag=beta.imag, betaReal=beta.real; + + thisTask = blockIdx.x*blockDim.x + threadIdx.x; + if (thisTask>=numTasks) return; + + thisBlock = thisTask / sizeHalfBlock; + indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; + indexLo = indexUp + sizeHalfBlock; + + controlBit = extractBit(controlQubit, indexUp); + if (controlBit){ + // store current state vector values in temp variables + stateRealUp = stateVecReal[indexUp]; + stateImagUp = stateVecImag[indexUp]; + + stateRealLo = stateVecReal[indexLo]; + stateImagLo = stateVecImag[indexLo]; + + // state[indexUp] = alpha * state[indexUp] - conj(beta) * state[indexLo] + stateVecReal[indexUp] = alphaReal*stateRealUp - alphaImag*stateImagUp + - betaReal*stateRealLo - betaImag*stateImagLo; + stateVecImag[indexUp] = alphaReal*stateImagUp + alphaImag*stateRealUp + - betaReal*stateImagLo + betaImag*stateRealLo; + + // state[indexLo] = beta * state[indexUp] + conj(alpha) * state[indexLo] + stateVecReal[indexLo] = betaReal*stateRealUp - betaImag*stateImagUp + + alphaReal*stateRealLo + alphaImag*stateImagLo; + stateVecImag[indexLo] = betaReal*stateImagUp + betaImag*stateRealUp + + alphaReal*stateImagLo - alphaImag*stateRealLo; + } +} + +void statevec_controlledCompactUnitary(Qureg qureg, const int controlQubit, const int targetQubit, Complex alpha, Complex beta) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); + statevec_controlledCompactUnitaryKernel<<>>(qureg, controlQubit, targetQubit, alpha, beta); +} + +__global__ void statevec_unitaryKernel(Qureg qureg, const int targetQubit, ComplexMatrix2 u){ + // ----- sizes + long long int sizeBlock, // size of blocks + sizeHalfBlock; // size of blocks halved + // ----- indices + long long int thisBlock, // current block + indexUp,indexLo; // current index and corresponding index in lower half block + + // ----- temp variables + qreal stateRealUp,stateRealLo, // storage for previous state values + stateImagUp,stateImagLo; // (used in updates) + // ----- temp variables + long long int thisTask; // task based approach for expose loop with small granularity + const long long int numTasks=qureg.numAmpsPerChunk>>1; + + sizeHalfBlock = 1LL << targetQubit; // size of blocks halved + sizeBlock = 2LL * sizeHalfBlock; // size of blocks + + // ---------------------------------------------------------------- // + // rotate // + // ---------------------------------------------------------------- // + + //! 
fix -- no necessary for GPU version + qreal *stateVecReal = qureg.deviceStateVec.real; + qreal *stateVecImag = qureg.deviceStateVec.imag; + + thisTask = blockIdx.x*blockDim.x + threadIdx.x; + if (thisTask>=numTasks) return; + + thisBlock = thisTask / sizeHalfBlock; + indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; + indexLo = indexUp + sizeHalfBlock; + + // store current state vector values in temp variables + stateRealUp = stateVecReal[indexUp]; + stateImagUp = stateVecImag[indexUp]; + + stateRealLo = stateVecReal[indexLo]; + stateImagLo = stateVecImag[indexLo]; + + // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] + stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; + stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; + + // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] + stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; + stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; +} + +void statevec_unitary(Qureg qureg, const int targetQubit, ComplexMatrix2 u) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); + statevec_unitaryKernel<<>>(qureg, targetQubit, u); +} + +__global__ void statevec_controlledUnitaryKernel(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u){ + // ----- sizes + long long int sizeBlock, // size of blocks + sizeHalfBlock; // size of blocks halved + // ----- indices + long long int thisBlock, // current block + indexUp,indexLo; // current index and corresponding index in lower half block + + // ----- temp variables + qreal stateRealUp,stateRealLo, // storage for previous state values + stateImagUp,stateImagLo; // (used in updates) + // ----- temp variables + long long int thisTask; // task based approach for expose loop with small granularity + const long long int numTasks=qureg.numAmpsPerChunk>>1; + + int controlBit; + + sizeHalfBlock = 1LL << targetQubit; // size of blocks halved + sizeBlock = 2LL * sizeHalfBlock; // size of blocks + + // ---------------------------------------------------------------- // + // rotate // + // ---------------------------------------------------------------- // + + //! 
fix -- no necessary for GPU version + qreal *stateVecReal = qureg.deviceStateVec.real; + qreal *stateVecImag = qureg.deviceStateVec.imag; + + thisTask = blockIdx.x*blockDim.x + threadIdx.x; + if (thisTask>=numTasks) return; + + thisBlock = thisTask / sizeHalfBlock; + indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; + indexLo = indexUp + sizeHalfBlock; + + // store current state vector values in temp variables + stateRealUp = stateVecReal[indexUp]; + stateImagUp = stateVecImag[indexUp]; + + stateRealLo = stateVecReal[indexLo]; + stateImagLo = stateVecImag[indexLo]; + + controlBit = extractBit(controlQubit, indexUp); + if (controlBit){ + // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] + stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; + stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; + + // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] + stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; + stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; + } +} + +void statevec_controlledUnitary(Qureg qureg, const int controlQubit, const int targetQubit, ComplexMatrix2 u) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); + statevec_controlledUnitaryKernel<<>>(qureg, controlQubit, targetQubit, u); +} + +__global__ void statevec_multiControlledUnitaryKernel(Qureg qureg, long long int mask, const int targetQubit, ComplexMatrix2 u){ + // ----- sizes + long long int sizeBlock, // size of blocks + sizeHalfBlock; // size of blocks halved + // ----- indices + long long int thisBlock, // current block + indexUp,indexLo; // current index and corresponding index in lower half block + + // ----- temp variables + qreal stateRealUp,stateRealLo, // storage for previous state values + stateImagUp,stateImagLo; // (used in updates) + // ----- temp variables + long long int thisTask; // task based approach for expose loop with small granularity + const long long int numTasks=qureg.numAmpsPerChunk>>1; + + + sizeHalfBlock = 1LL << targetQubit; // size of blocks halved + sizeBlock = 2LL * sizeHalfBlock; // size of blocks + + // ---------------------------------------------------------------- // + // rotate // + // ---------------------------------------------------------------- // + + //! 
fix -- no necessary for GPU version + qreal *stateVecReal = qureg.deviceStateVec.real; + qreal *stateVecImag = qureg.deviceStateVec.imag; + + thisTask = blockIdx.x*blockDim.x + threadIdx.x; + if (thisTask>=numTasks) return; + + thisBlock = thisTask / sizeHalfBlock; + indexUp = thisBlock*sizeBlock + thisTask%sizeHalfBlock; + indexLo = indexUp + sizeHalfBlock; + + if (mask == (mask & indexUp) ){ + // store current state vector values in temp variables + stateRealUp = stateVecReal[indexUp]; + stateImagUp = stateVecImag[indexUp]; + + stateRealLo = stateVecReal[indexLo]; + stateImagLo = stateVecImag[indexLo]; + + // state[indexUp] = u00 * state[indexUp] + u01 * state[indexLo] + stateVecReal[indexUp] = u.r0c0.real*stateRealUp - u.r0c0.imag*stateImagUp + + u.r0c1.real*stateRealLo - u.r0c1.imag*stateImagLo; + stateVecImag[indexUp] = u.r0c0.real*stateImagUp + u.r0c0.imag*stateRealUp + + u.r0c1.real*stateImagLo + u.r0c1.imag*stateRealLo; + + // state[indexLo] = u10 * state[indexUp] + u11 * state[indexLo] + stateVecReal[indexLo] = u.r1c0.real*stateRealUp - u.r1c0.imag*stateImagUp + + u.r1c1.real*stateRealLo - u.r1c1.imag*stateImagLo; + stateVecImag[indexLo] = u.r1c0.real*stateImagUp + u.r1c0.imag*stateRealUp + + u.r1c1.real*stateImagLo + u.r1c1.imag*stateRealLo; + } +} + +void statevec_multiControlledUnitary(Qureg qureg, int *controlQubits, int numControlQubits, const int targetQubit, ComplexMatrix2 u) +{ + int threadsPerCUDABlock, CUDABlocks; + long long int mask=0; + for (int i=0; i>1)/threadsPerCUDABlock); + statevec_multiControlledUnitaryKernel<<>>(qureg, mask, targetQubit, u); +} + +__global__ void statevec_pauliXKernel(Qureg qureg, const int targetQubit){ + // ----- sizes + long long int sizeBlock, // size of blocks + sizeHalfBlock; // size of blocks halved + // ----- indices + long long int thisBlock, // current block + indexUp,indexLo; // current index and corresponding index in lower half block + + // ----- temp variables + qreal stateRealUp, // storage for previous state values + stateImagUp; // (used in updates) + // ----- temp variables + long long int thisTask; // task based approach for expose loop with small granularity + const long long int numTasks=qureg.numAmpsPerChunk>>1; + + sizeHalfBlock = 1LL << targetQubit; // size of blocks halved + sizeBlock = 2LL * sizeHalfBlock; // size of blocks + + // ---------------------------------------------------------------- // + // rotate // + // ---------------------------------------------------------------- // + + //! 
+    qreal *stateVecReal = qureg.deviceStateVec.real;
+    qreal *stateVecImag = qureg.deviceStateVec.imag;
+
+    thisTask = blockIdx.x*blockDim.x + threadIdx.x;
+    if (thisTask>=numTasks) return;
+
+    thisBlock = thisTask / sizeHalfBlock;
+    indexUp   = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
+    indexLo   = indexUp + sizeHalfBlock;
+
+    // store current state vector values in temp variables
+    stateRealUp = stateVecReal[indexUp];
+    stateImagUp = stateVecImag[indexUp];
+
+    stateVecReal[indexUp] = stateVecReal[indexLo];
+    stateVecImag[indexUp] = stateVecImag[indexLo];
+
+    stateVecReal[indexLo] = stateRealUp;
+    stateVecImag[indexLo] = stateImagUp;
+}
+
+void statevec_pauliX(Qureg qureg, const int targetQubit)
+{
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
+    statevec_pauliXKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit);
+}
+
+__global__ void statevec_pauliYKernel(Qureg qureg, const int targetQubit, const int conjFac){
+
+    long long int sizeHalfBlock = 1LL << targetQubit;
+    long long int sizeBlock     = 2LL * sizeHalfBlock;
+    long long int numTasks      = qureg.numAmpsPerChunk >> 1;
+    long long int thisTask      = blockIdx.x*blockDim.x + threadIdx.x;
+    if (thisTask>=numTasks) return;
+
+    long long int thisBlock = thisTask / sizeHalfBlock;
+    long long int indexUp   = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
+    long long int indexLo   = indexUp + sizeHalfBlock;
+    qreal stateRealUp, stateImagUp;
+
+    qreal *stateVecReal = qureg.deviceStateVec.real;
+    qreal *stateVecImag = qureg.deviceStateVec.imag;
+    stateRealUp = stateVecReal[indexUp];
+    stateImagUp = stateVecImag[indexUp];
+
+    // update under +-{{0, -i}, {i, 0}}
+    stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
+    stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
+    stateVecReal[indexLo] = conjFac * -stateImagUp;
+    stateVecImag[indexLo] = conjFac * stateRealUp;
+}
+
+void statevec_pauliY(Qureg qureg, const int targetQubit)
+{
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
+    statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, 1);
+}
+
+void statevec_pauliYConj(Qureg qureg, const int targetQubit)
+{
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
+    statevec_pauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, -1);
+}
+
+__global__ void statevec_controlledPauliYKernel(Qureg qureg, const int controlQubit, const int targetQubit, const int conjFac)
+{
+    long long int index;
+    long long int sizeBlock, sizeHalfBlock;
+    long long int stateVecSize;
+    int controlBit;
+
+    qreal stateRealUp, stateImagUp;
+    long long int thisBlock, indexUp, indexLo;
+    sizeHalfBlock = 1LL << targetQubit;
+    sizeBlock     = 2LL * sizeHalfBlock;
+
+    stateVecSize = qureg.numAmpsPerChunk;
+    qreal *stateVecReal = qureg.deviceStateVec.real;
+    qreal *stateVecImag = qureg.deviceStateVec.imag;
+
+    index = blockIdx.x*blockDim.x + threadIdx.x;
+    if (index>=(stateVecSize>>1)) return;
+    thisBlock = index / sizeHalfBlock;
+    indexUp   = thisBlock*sizeBlock + index%sizeHalfBlock;
+    indexLo   = indexUp + sizeHalfBlock;
+
+    controlBit = extractBit(controlQubit, indexUp);
+    if (controlBit){
+
+        stateRealUp = stateVecReal[indexUp];
+        stateImagUp = stateVecImag[indexUp];
+
+        // update under +-{{0, -i}, {i, 0}}
+        stateVecReal[indexUp] = conjFac * stateVecImag[indexLo];
+        stateVecImag[indexUp] = conjFac * -stateVecReal[indexLo];
+        stateVecReal[indexLo] = conjFac * -stateImagUp;
+        stateVecImag[indexLo] = conjFac * stateRealUp;
+    }
+}
+
+void statevec_controlledPauliY(Qureg qureg, const int controlQubit, const int targetQubit)
+{
+    int conjFactor = 1;
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
+    statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor);
+}
+
+void statevec_controlledPauliYConj(Qureg qureg, const int controlQubit, const int targetQubit)
+{
+    int conjFactor = -1;
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
+    statevec_controlledPauliYKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit, conjFactor);
+}
+
+__global__ void statevec_phaseShiftByTermKernel(Qureg qureg, const int targetQubit, qreal cosAngle, qreal sinAngle) {
+
+    long long int sizeBlock, sizeHalfBlock;
+    long long int thisBlock, indexUp,indexLo;
+
+    qreal stateRealLo, stateImagLo;
+    long long int thisTask;
+    const long long int numTasks = qureg.numAmpsPerChunk >> 1;
+
+    sizeHalfBlock = 1LL << targetQubit;
+    sizeBlock     = 2LL * sizeHalfBlock;
+
+    qreal *stateVecReal = qureg.deviceStateVec.real;
+    qreal *stateVecImag = qureg.deviceStateVec.imag;
+
+    thisTask = blockIdx.x*blockDim.x + threadIdx.x;
+    if (thisTask>=numTasks) return;
+    thisBlock = thisTask / sizeHalfBlock;
+    indexUp   = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
+    indexLo   = indexUp + sizeHalfBlock;
+
+    stateRealLo = stateVecReal[indexLo];
+    stateImagLo = stateVecImag[indexLo];
+
+    stateVecReal[indexLo] = cosAngle*stateRealLo - sinAngle*stateImagLo;
+    stateVecImag[indexLo] = sinAngle*stateRealLo + cosAngle*stateImagLo;
+}
+
+void statevec_phaseShiftByTerm(Qureg qureg, const int targetQubit, Complex term)
+{
+    qreal cosAngle = term.real;
+    qreal sinAngle = term.imag;
+
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
+    statevec_phaseShiftByTermKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit, cosAngle, sinAngle);
+}
+
+__global__ void statevec_controlledPhaseShiftKernel(Qureg qureg, const int idQubit1, const int idQubit2, qreal cosAngle, qreal sinAngle)
+{
+    long long int index;
+    long long int stateVecSize;
+    int bit1, bit2;
+    qreal stateRealLo, stateImagLo;
+
+    stateVecSize = qureg.numAmpsPerChunk;
+    qreal *stateVecReal = qureg.deviceStateVec.real;
+    qreal *stateVecImag = qureg.deviceStateVec.imag;
+
+    index = blockIdx.x*blockDim.x + threadIdx.x;
+    if (index>=stateVecSize) return;
+
+    bit1 = extractBit (idQubit1, index);
+    bit2 = extractBit (idQubit2, index);
+    if (bit1 && bit2) {
+        stateRealLo = stateVecReal[index];
+        stateImagLo = stateVecImag[index];
+
+        stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
+        stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
+    }
+}
+
+void statevec_controlledPhaseShift(Qureg qureg, const int idQubit1, const int idQubit2, qreal angle)
+{
+    qreal cosAngle = cos(angle);
+    qreal sinAngle = sin(angle);
+
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
+    statevec_controlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2, cosAngle, sinAngle);
+}
+
+__global__ void statevec_multiControlledPhaseShiftKernel(Qureg qureg, long long int mask, qreal cosAngle, qreal sinAngle) {
+    qreal stateRealLo, stateImagLo;
+    long long int index;
+    long long int stateVecSize;
+
+    stateVecSize = qureg.numAmpsPerChunk;
+    qreal *stateVecReal = qureg.deviceStateVec.real;
+    qreal *stateVecImag = qureg.deviceStateVec.imag;
+
+    index = blockIdx.x*blockDim.x + threadIdx.x;
+    if (index>=stateVecSize) return;
+
+    if (mask == (mask & index) ){
+        stateRealLo = stateVecReal[index];
+        stateImagLo = stateVecImag[index];
+        stateVecReal[index] = cosAngle*stateRealLo - sinAngle*stateImagLo;
+        stateVecImag[index] = sinAngle*stateRealLo + cosAngle*stateImagLo;
+    }
+}
+
+void statevec_multiControlledPhaseShift(Qureg qureg, int *controlQubits, int numControlQubits, qreal angle)
+{
+    qreal cosAngle = cos(angle);
+    qreal sinAngle = sin(angle);
+
+    long long int mask=0;
+    for (int i=0; i<numControlQubits; i++)
+        mask = mask | (1LL<<controlQubits[i]);
+
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
+    statevec_multiControlledPhaseShiftKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask, cosAngle, sinAngle);
+}
+
+qreal densmatr_calcTotalProb(Qureg qureg) {
+
+    // computes the trace using Kahan summation
+    qreal pTotal=0;
+    qreal y, t, c;
+    c = 0;
+
+    long long int numCols = 1LL << qureg.numQubitsRepresented;
+    long long diagIndex;
+
+    copyStateFromGPU(qureg);
+
+    for (int col=0; col< numCols; col++) {
+        diagIndex = col*(numCols + 1);
+        y = qureg.stateVec.real[diagIndex] - c;
+        t = pTotal + y;
+        c = ( t - pTotal ) - y; // brackets are important
+        pTotal = t;
+    }
+
+    return pTotal;
+}
+
+qreal statevec_calcTotalProb(Qureg qureg){
+    /* IJB - implemented using Kahan summation for greater accuracy at a slight floating
+       point operation overhead. For more details see https://en.wikipedia.org/wiki/Kahan_summation_algorithm */
+    /* Don't change the bracketing in this routine! */
+    qreal pTotal=0;
+    qreal y, t, c;
+    long long int index;
+    long long int numAmpsPerRank = qureg.numAmpsPerChunk;
+
+    copyStateFromGPU(qureg);
+
+    c = 0.0;
+    for (index=0; index<numAmpsPerRank; index++){
+        // Perform pTotal+=qureg.stateVec.real[index]*qureg.stateVec.real[index]; by Kahan
+        y = qureg.stateVec.real[index]*qureg.stateVec.real[index] - c;
+        t = pTotal + y;
+        c = ( t - pTotal ) - y;
+        pTotal = t;
+
+        // Perform pTotal+=qureg.stateVec.imag[index]*qureg.stateVec.imag[index]; by Kahan
+        y = qureg.stateVec.imag[index]*qureg.stateVec.imag[index] - c;
+        t = pTotal + y;
+        c = ( t - pTotal ) - y;
+        pTotal = t;
+    }
+    return pTotal;
+}
+
+__global__ void statevec_controlledPhaseFlipKernel(Qureg qureg, const int idQubit1, const int idQubit2)
+{
+    long long int index;
+    long long int stateVecSize;
+    int bit1, bit2;
+
+    stateVecSize = qureg.numAmpsPerChunk;
+    qreal *stateVecReal = qureg.deviceStateVec.real;
+    qreal *stateVecImag = qureg.deviceStateVec.imag;
+
+    index = blockIdx.x*blockDim.x + threadIdx.x;
+    if (index>=stateVecSize) return;
+
+    bit1 = extractBit (idQubit1, index);
+    bit2 = extractBit (idQubit2, index);
+    if (bit1 && bit2) {
+        stateVecReal [index] = - stateVecReal [index];
+        stateVecImag [index] = - stateVecImag [index];
+    }
+}
+
+void statevec_controlledPhaseFlip(Qureg qureg, const int idQubit1, const int idQubit2)
+{
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
+    statevec_controlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, idQubit1, idQubit2);
+}
+
+__global__ void statevec_multiControlledPhaseFlipKernel(Qureg qureg, long long int mask)
+{
+    long long int index;
+    long long int stateVecSize;
+
+    stateVecSize = qureg.numAmpsPerChunk;
+    qreal *stateVecReal = qureg.deviceStateVec.real;
+    qreal *stateVecImag = qureg.deviceStateVec.imag;
+
+    index = blockIdx.x*blockDim.x + threadIdx.x;
+    if (index>=stateVecSize) return;
+
+    if (mask == (mask & index) ){
+        stateVecReal [index] = - stateVecReal [index];
+        stateVecImag [index] = - stateVecImag [index];
+    }
+}
+
+void statevec_multiControlledPhaseFlip(Qureg qureg, int *controlQubits, int numControlQubits)
+{
+    int threadsPerCUDABlock, CUDABlocks;
+    long long int mask=0;
+    for (int i=0; i<numControlQubits; i++)
+        mask = mask | (1LL<<controlQubits[i]);
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
+    statevec_multiControlledPhaseFlipKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, mask);
+}
+
+
+__global__ void statevec_hadamardKernel (Qureg qureg, const int targetQubit){
+    // ----- sizes
+    long long int sizeBlock,     // size of blocks
+        sizeHalfBlock;           // size of blocks halved
+    // ----- indices
+    long long int thisBlock,     // current block
+        indexUp,indexLo;         // current index and corresponding index in lower half block
+
+    // ----- temp variables
+    qreal stateRealUp,stateRealLo,    // storage for previous state values
+        stateImagUp,stateImagLo;      // (used in updates)
+    // ----- temp variables
+    long long int thisTask;      // task-based approach to expose the loop with small granularity
+    const long long int numTasks=qureg.numAmpsPerChunk>>1;
+
+    sizeHalfBlock = 1LL << targetQubit;    // size of blocks halved
+    sizeBlock     = 2LL * sizeHalfBlock;   // size of blocks
+
+    // ---------------------------------------------------------------- //
+    //            rotate                                                 //
+    // ---------------------------------------------------------------- //
+
+    //! fix -- not necessary for GPU version
+    qreal *stateVecReal = qureg.deviceStateVec.real;
+    qreal *stateVecImag = qureg.deviceStateVec.imag;
+
+    qreal recRoot2 = 1.0/sqrt(2.0);
+
+    thisTask = blockIdx.x*blockDim.x + threadIdx.x;
+    if (thisTask>=numTasks) return;
+
+    thisBlock = thisTask / sizeHalfBlock;
+    indexUp   = thisBlock*sizeBlock + thisTask%sizeHalfBlock;
+    indexLo   = indexUp + sizeHalfBlock;
+
+    // store current state vector values in temp variables
+    stateRealUp = stateVecReal[indexUp];
+    stateImagUp = stateVecImag[indexUp];
+
+    stateRealLo = stateVecReal[indexLo];
+    stateImagLo = stateVecImag[indexLo];
+
+    stateVecReal[indexUp] = recRoot2*(stateRealUp + stateRealLo);
+    stateVecImag[indexUp] = recRoot2*(stateImagUp + stateImagLo);
+
+    stateVecReal[indexLo] = recRoot2*(stateRealUp - stateRealLo);
+    stateVecImag[indexLo] = recRoot2*(stateImagUp - stateImagLo);
+}
+
+void statevec_hadamard(Qureg qureg, const int targetQubit)
+{
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock);
+    statevec_hadamardKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, targetQubit);
+}
+
+__global__ void statevec_controlledNotKernel(Qureg qureg, const int controlQubit, const int targetQubit)
+{
+    long long int index;
+    long long int sizeBlock,     // size of blocks
+        sizeHalfBlock;           // size of blocks halved
+    long long int stateVecSize;
+    int controlBit;
+
+    // ----- temp variables
+    qreal stateRealUp,           // storage for previous state values
+        stateImagUp;             // (used in updates)
+    long long int thisBlock,     // current block
+        indexUp,indexLo;         // current index and corresponding index in lower half block
+    sizeHalfBlock = 1LL << targetQubit;    // size of blocks halved
+    sizeBlock     = 2LL * sizeHalfBlock;   // size of blocks
+
+    stateVecSize = qureg.numAmpsPerChunk;
+    qreal *stateVecReal = qureg.deviceStateVec.real;
+    qreal *stateVecImag = qureg.deviceStateVec.imag;
+
+    index = blockIdx.x*blockDim.x + threadIdx.x;
+    if (index>=(stateVecSize>>1)) return;
+    thisBlock = index / sizeHalfBlock;
+    indexUp   = thisBlock*sizeBlock + index%sizeHalfBlock;
+    indexLo   = indexUp + sizeHalfBlock;
+
+    controlBit = extractBit(controlQubit, indexUp);
+    if (controlBit){
+        stateRealUp = stateVecReal[indexUp];
+        stateImagUp = stateVecImag[indexUp];
+
+        stateVecReal[indexUp] = stateVecReal[indexLo];
+        stateVecImag[indexUp] = stateVecImag[indexLo];
+
+        stateVecReal[indexLo] = stateRealUp;
+        stateVecImag[indexLo] = stateImagUp;
+    }
+}
+
+void statevec_controlledNot(Qureg qureg, const int controlQubit, const int targetQubit)
+{
+    int threadsPerCUDABlock, CUDABlocks;
+    threadsPerCUDABlock = 128;
+    CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk)/threadsPerCUDABlock);
+    statevec_controlledNotKernel<<<CUDABlocks, threadsPerCUDABlock>>>(qureg, controlQubit, targetQubit);
+}
+
+__device__ __host__ unsigned int log2Int( unsigned int x )
+{
+    unsigned int ans = 0 ;
+    while( x>>=1 ) ans++;
+    return ans ;
+}
+
+__device__ void reduceBlock(qreal *arrayIn, qreal *reducedArray, int length){
+    int i, l, r;
+    int threadMax, maxDepth;
+    threadMax = length/2;
+    maxDepth = log2Int(length/2);
+
+    // pairwise tree reduction: each pass folds the upper half of the array onto the lower half
+    for (i=0; i<maxDepth+1; i++){
+        if (threadIdx.x<threadMax){
+            l = threadIdx.x;
+            r = l + threadMax;
+            arrayIn[l] = arrayIn[r] + arrayIn[l];
+        }
+        threadMax = threadMax >> 1;
+        __syncthreads(); // optimise -- use warp shuffle instead
+    }
+
+    if (threadIdx.x==0)
reducedArray[blockIdx.x] = arrayIn[0]; +} + +__global__ void copySharedReduceBlock(qreal*arrayIn, qreal *reducedArray, int length){ + extern __shared__ qreal tempReductionArray[]; + int blockOffset = blockIdx.x*length; + tempReductionArray[threadIdx.x*2] = arrayIn[blockOffset + threadIdx.x*2]; + tempReductionArray[threadIdx.x*2+1] = arrayIn[blockOffset + threadIdx.x*2+1]; + __syncthreads(); + reduceBlock(tempReductionArray, reducedArray, length); +} + +__global__ void densmatr_findProbabilityOfZeroKernel( + Qureg qureg, const int measureQubit, qreal *reducedArray +) { + // run by each thread + // use of block here refers to contiguous amplitudes where measureQubit = 0, + // (then =1) and NOT the CUDA block, which is the partitioning of CUDA threads + + long long int densityDim = 1LL << qureg.numQubitsRepresented; + long long int numTasks = densityDim >> 1; + long long int sizeHalfBlock = 1LL << (measureQubit); + long long int sizeBlock = 2LL * sizeHalfBlock; + + long long int thisBlock; // which block this thread is processing + long long int thisTask; // which part of the block this thread is processing + long long int basisIndex; // index of this thread's computational basis state + long long int densityIndex; // " " index of |basis>=numTasks) return; + thisBlock = thisTask / sizeHalfBlock; + basisIndex = thisBlock*sizeBlock + thisTask%sizeHalfBlock; + densityIndex = (densityDim + 1) * basisIndex; + + // record the probability in the CUDA-BLOCK-wide array + qreal prob = qureg.deviceStateVec.real[densityIndex]; // im[densityIndex] assumed ~ 0 + tempReductionArray[threadIdx.x] = prob; + + // sum the probs collected by this CUDA-BLOCK's threads into a per-CUDA-BLOCK array + __syncthreads(); + if (threadIdx.x>1; + // (good for shared memory parallelism) + + extern __shared__ qreal tempReductionArray[]; + + // ---------------------------------------------------------------- // + // dimensions // + // ---------------------------------------------------------------- // + sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, + // and then the number to skip + sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) + + // ---------------------------------------------------------------- // + // find probability // + // ---------------------------------------------------------------- // + + // + // --- task-based shared-memory parallel implementation + // + + qreal *stateVecReal = qureg.deviceStateVec.real; + qreal *stateVecImag = qureg.deviceStateVec.imag; + + thisTask = blockIdx.x*blockDim.x + threadIdx.x; + if (thisTask>=numTasks) return; + + thisBlock = thisTask / sizeHalfBlock; + index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; + qreal realVal, imagVal; + realVal = stateVecReal[index]; + imagVal = stateVecImag[index]; + tempReductionArray[threadIdx.x] = realVal*realVal + imagVal*imagVal; + __syncthreads(); + + if (threadIdx.x> 1; // half of the diagonal has measureQubit=0 + + int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; + int maxReducedPerLevel = REDUCE_SHARED_SIZE; + int firstTime = 1; + + while (numValuesToReduce > 1) { + + // need less than one CUDA-BLOCK to reduce + if (numValuesToReduce < maxReducedPerLevel) { + valuesPerCUDABlock = numValuesToReduce; + numCUDABlocks = 1; + } + // otherwise use only full CUDA-BLOCKS + else { + valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory + numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); + } + + sharedMemSize = 
valuesPerCUDABlock*sizeof(qreal); + + // spawn threads to sum the probs in each block + if (firstTime) { + densmatr_findProbabilityOfZeroKernel<<>>( + qureg, measureQubit, qureg.firstLevelReduction); + firstTime = 0; + + // sum the block probs + } else { + cudaDeviceSynchronize(); + copySharedReduceBlock<<>>( + qureg.firstLevelReduction, + qureg.secondLevelReduction, valuesPerCUDABlock); + cudaDeviceSynchronize(); + swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); + } + + numValuesToReduce = numValuesToReduce/maxReducedPerLevel; + } + + qreal zeroProb; + cudaMemcpy(&zeroProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); + return zeroProb; +} + +qreal statevec_findProbabilityOfZero(Qureg qureg, const int measureQubit) +{ + long long int numValuesToReduce = qureg.numAmpsPerChunk>>1; + int valuesPerCUDABlock, numCUDABlocks, sharedMemSize; + qreal stateProb=0; + int firstTime=1; + int maxReducedPerLevel = REDUCE_SHARED_SIZE; + + while(numValuesToReduce>1){ + if (numValuesToReduce>>( + qureg, measureQubit, qureg.firstLevelReduction); + firstTime=0; + } else { + cudaDeviceSynchronize(); + copySharedReduceBlock<<>>( + qureg.firstLevelReduction, + qureg.secondLevelReduction, valuesPerCUDABlock); + cudaDeviceSynchronize(); + swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); + } + numValuesToReduce = numValuesToReduce/maxReducedPerLevel; + } + cudaMemcpy(&stateProb, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); + return stateProb; +} + +qreal statevec_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome) +{ + qreal outcomeProb = statevec_findProbabilityOfZero(qureg, measureQubit); + if (outcome==1) + outcomeProb = 1.0 - outcomeProb; + return outcomeProb; +} + +qreal densmatr_calcProbOfOutcome(Qureg qureg, const int measureQubit, int outcome) +{ + qreal outcomeProb = densmatr_findProbabilityOfZero(qureg, measureQubit); + if (outcome==1) + outcomeProb = 1.0 - outcomeProb; + return outcomeProb; +} + + +/** computes either a real or imag term in the inner product */ +__global__ void statevec_calcInnerProductKernel( + int getRealComp, + qreal* vecReal1, qreal* vecImag1, qreal* vecReal2, qreal* vecImag2, + long long int numTermsToSum, qreal* reducedArray) +{ + long long int index = blockIdx.x*blockDim.x + threadIdx.x; + if (index >= numTermsToSum) return; + + // choose whether to calculate the real or imaginary term of the inner product + qreal innerProdTerm; + if (getRealComp) + innerProdTerm = vecReal1[index]*vecReal2[index] + vecImag1[index]*vecImag2[index]; + else + innerProdTerm = vecReal1[index]*vecImag2[index] - vecImag1[index]*vecReal2[index]; + + // array of each thread's collected probability, to be summed + extern __shared__ qreal tempReductionArray[]; + tempReductionArray[threadIdx.x] = innerProdTerm; + __syncthreads(); + + // every second thread reduces + if (threadIdx.x 1) { + if (numValuesToReduce < maxReducedPerLevel) { + valuesPerCUDABlock = numValuesToReduce; + numCUDABlocks = 1; + } + else { + valuesPerCUDABlock = maxReducedPerLevel; + numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); + } + sharedMemSize = valuesPerCUDABlock*sizeof(qreal); + if (firstTime) { + statevec_calcInnerProductKernel<<>>( + getRealComp, + bra.deviceStateVec.real, bra.deviceStateVec.imag, + ket.deviceStateVec.real, ket.deviceStateVec.imag, + numValuesToReduce, + bra.firstLevelReduction); + firstTime = 0; + } else { + cudaDeviceSynchronize(); + copySharedReduceBlock<<>>( + 
bra.firstLevelReduction, + bra.secondLevelReduction, valuesPerCUDABlock); + cudaDeviceSynchronize(); + swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); + } + numValuesToReduce = numValuesToReduce/maxReducedPerLevel; + } + cudaMemcpy(&innerProdReal, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); + + // compute imag component of inner product + getRealComp = 0; + numValuesToReduce = bra.numAmpsPerChunk; + maxReducedPerLevel = REDUCE_SHARED_SIZE; + firstTime = 1; + while (numValuesToReduce > 1) { + if (numValuesToReduce < maxReducedPerLevel) { + valuesPerCUDABlock = numValuesToReduce; + numCUDABlocks = 1; + } + else { + valuesPerCUDABlock = maxReducedPerLevel; + numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); + } + sharedMemSize = valuesPerCUDABlock*sizeof(qreal); + if (firstTime) { + statevec_calcInnerProductKernel<<>>( + getRealComp, + bra.deviceStateVec.real, bra.deviceStateVec.imag, + ket.deviceStateVec.real, ket.deviceStateVec.imag, + numValuesToReduce, + bra.firstLevelReduction); + firstTime = 0; + } else { + cudaDeviceSynchronize(); + copySharedReduceBlock<<>>( + bra.firstLevelReduction, + bra.secondLevelReduction, valuesPerCUDABlock); + cudaDeviceSynchronize(); + swapDouble(&(bra.firstLevelReduction), &(bra.secondLevelReduction)); + } + numValuesToReduce = numValuesToReduce/maxReducedPerLevel; + } + cudaMemcpy(&innerProdImag, bra.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); + + // return complex + Complex innerProd; + innerProd.real = innerProdReal; + innerProd.imag = innerProdImag; + return innerProd; +} + +/** computes one term of (vec^*T) dens * vec */ +__global__ void densmatr_calcFidelityKernel(Qureg dens, Qureg vec, long long int dim, qreal* reducedArray) { + + // figure out which density matrix row to consider + long long int col; + long long int row = blockIdx.x*blockDim.x + threadIdx.x; + if (row >= dim) return; + + qreal* densReal = dens.deviceStateVec.real; + qreal* densImag = dens.deviceStateVec.imag; + qreal* vecReal = vec.deviceStateVec.real; + qreal* vecImag = vec.deviceStateVec.imag; + + // compute the row-th element of the product dens*vec + qreal prodReal = 0; + qreal prodImag = 0; + for (col=0LL; col < dim; col++) { + qreal densElemReal = densReal[dim*col + row]; + qreal densElemImag = densImag[dim*col + row]; + + prodReal += densElemReal*vecReal[col] - densElemImag*vecImag[col]; + prodImag += densElemReal*vecImag[col] + densElemImag*vecReal[col]; + } + + // multiply with row-th elem of (vec^*) + qreal termReal = prodImag*vecImag[row] + prodReal*vecReal[row]; + + // imag of every term should be zero, because each is a valid fidelity calc of an eigenstate + //qreal termImag = prodImag*vecReal[row] - prodReal*vecImag[row]; + + extern __shared__ qreal tempReductionArray[]; + tempReductionArray[threadIdx.x] = termReal; + __syncthreads(); + + // every second thread reduces + if (threadIdx.x 1) { + + // need less than one CUDA-BLOCK to reduce + if (numValuesToReduce < maxReducedPerLevel) { + valuesPerCUDABlock = numValuesToReduce; + numCUDABlocks = 1; + } + // otherwise use only full CUDA-BLOCKS + else { + valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory + numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); + } + // dictates size of reduction array + sharedMemSize = valuesPerCUDABlock*sizeof(qreal); + + // spawn threads to sum the probs in each block + // store the reduction in the pureState array + if (firstTime) { + densmatr_calcFidelityKernel<<>>( + qureg, 
pureState, densityDim, pureState.firstLevelReduction); + firstTime = 0; + + // sum the block probs + } else { + cudaDeviceSynchronize(); + copySharedReduceBlock<<>>( + pureState.firstLevelReduction, + pureState.secondLevelReduction, valuesPerCUDABlock); + cudaDeviceSynchronize(); + swapDouble(&(pureState.firstLevelReduction), &(pureState.secondLevelReduction)); + } + + numValuesToReduce = numValuesToReduce/maxReducedPerLevel; + } + + qreal fidelity; + cudaMemcpy(&fidelity, pureState.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); + return fidelity; +} + + +__global__ void densmatr_calcPurityKernel(qreal* vecReal, qreal* vecImag, long long int numAmpsToSum, qreal *reducedArray) { + + // figure out which density matrix term this thread is assigned + long long int index = blockIdx.x*blockDim.x + threadIdx.x; + if (index >= numAmpsToSum) return; + + qreal term = vecReal[index]*vecReal[index] + vecImag[index]*vecImag[index]; + + // array of each thread's collected probability, to be summed + extern __shared__ qreal tempReductionArray[]; + tempReductionArray[threadIdx.x] = term; + __syncthreads(); + + // every second thread reduces + if (threadIdx.x 1) { + + // need less than one CUDA-BLOCK to reduce + if (numValuesToReduce < maxReducedPerLevel) { + valuesPerCUDABlock = numValuesToReduce; + numCUDABlocks = 1; + } + // otherwise use only full CUDA-BLOCKS + else { + valuesPerCUDABlock = maxReducedPerLevel; // constrained by shared memory + numCUDABlocks = ceil((qreal)numValuesToReduce/valuesPerCUDABlock); + } + // dictates size of reduction array + sharedMemSize = valuesPerCUDABlock*sizeof(qreal); + + // spawn threads to sum the probs in each block + if (firstTime) { + densmatr_calcPurityKernel<<>>( + qureg.deviceStateVec.real, qureg.deviceStateVec.imag, + numValuesToReduce, qureg.firstLevelReduction); + firstTime = 0; + + // sum the block probs + } else { + cudaDeviceSynchronize(); + copySharedReduceBlock<<>>( + qureg.firstLevelReduction, + qureg.secondLevelReduction, valuesPerCUDABlock); + cudaDeviceSynchronize(); + swapDouble(&(qureg.firstLevelReduction), &(qureg.secondLevelReduction)); + } + + numValuesToReduce = numValuesToReduce/maxReducedPerLevel; + } + + qreal traceDensSquared; + cudaMemcpy(&traceDensSquared, qureg.firstLevelReduction, sizeof(qreal), cudaMemcpyDeviceToHost); + return traceDensSquared; +} + +__global__ void statevec_collapseToKnownProbOutcomeKernel(Qureg qureg, int measureQubit, int outcome, qreal totalProbability) +{ + // ----- sizes + long long int sizeBlock, // size of blocks + sizeHalfBlock; // size of blocks halved + // ----- indices + long long int thisBlock, // current block + index; // current index for first half block + // ----- measured probability + qreal renorm; // probability (returned) value + // ----- temp variables + long long int thisTask; // task based approach for expose loop with small granularity + // (good for shared memory parallelism) + long long int numTasks=qureg.numAmpsPerChunk>>1; + + // ---------------------------------------------------------------- // + // dimensions // + // ---------------------------------------------------------------- // + sizeHalfBlock = 1LL << (measureQubit); // number of state vector elements to sum, + // and then the number to skip + sizeBlock = 2LL * sizeHalfBlock; // size of blocks (pairs of measure and skip entries) + + // ---------------------------------------------------------------- // + // find probability // + // ---------------------------------------------------------------- // + + // + // --- 
task-based shared-memory parallel implementation + // + renorm=1/sqrt(totalProbability); + qreal *stateVecReal = qureg.deviceStateVec.real; + qreal *stateVecImag = qureg.deviceStateVec.imag; + + thisTask = blockIdx.x*blockDim.x + threadIdx.x; + if (thisTask>=numTasks) return; + thisBlock = thisTask / sizeHalfBlock; + index = thisBlock*sizeBlock + thisTask%sizeHalfBlock; + + if (outcome==0){ + stateVecReal[index]=stateVecReal[index]*renorm; + stateVecImag[index]=stateVecImag[index]*renorm; + + stateVecReal[index+sizeHalfBlock]=0; + stateVecImag[index+sizeHalfBlock]=0; + } else if (outcome==1){ + stateVecReal[index]=0; + stateVecImag[index]=0; + + stateVecReal[index+sizeHalfBlock]=stateVecReal[index+sizeHalfBlock]*renorm; + stateVecImag[index+sizeHalfBlock]=stateVecImag[index+sizeHalfBlock]*renorm; + } +} + +/* + * outcomeProb must accurately be the probability of that qubit outcome in the state-vector, or + * else the state-vector will lose normalisation + */ +void statevec_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb) +{ + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil((qreal)(qureg.numAmpsPerChunk>>1)/threadsPerCUDABlock); + statevec_collapseToKnownProbOutcomeKernel<<>>(qureg, measureQubit, outcome, outcomeProb); +} + +/** Maps thread ID to a |..0..><..0..| state and then locates |0><1|, |1><0| and |1><1| */ +__global__ void densmatr_collapseToKnownProbOutcomeKernel( + qreal outcomeProb, qreal* vecReal, qreal *vecImag, long long int numBasesToVisit, + long long int part1, long long int part2, long long int part3, + long long int rowBit, long long int colBit, long long int desired, long long int undesired) +{ + long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; + if (scanInd >= numBasesToVisit) return; + + long long int base = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); + + // renormalise desired outcome + vecReal[base + desired] /= outcomeProb; + vecImag[base + desired] /= outcomeProb; + + // kill undesired outcome + vecReal[base + undesired] = 0; + vecImag[base + undesired] = 0; + + // kill |..0..><..1..| states + vecReal[base + colBit] = 0; + vecImag[base + colBit] = 0; + vecReal[base + rowBit] = 0; + vecImag[base + rowBit] = 0; +} + +/** This involves finding |...i...><...j...| states and killing those where i!=j */ +void densmatr_collapseToKnownProbOutcome(Qureg qureg, const int measureQubit, int outcome, qreal outcomeProb) { + + int rowQubit = measureQubit + qureg.numQubitsRepresented; + + int colBit = 1LL << measureQubit; + int rowBit = 1LL << rowQubit; + + long long int numBasesToVisit = qureg.numAmpsPerChunk/4; + long long int part1 = colBit -1; + long long int part2 = (rowBit >> 1) - colBit; + long long int part3 = numBasesToVisit - (rowBit >> 1); + + long long int desired, undesired; + if (outcome == 0) { + desired = 0; + undesired = colBit | rowBit; + } else { + desired = colBit | rowBit; + undesired = 0; + } + + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil(numBasesToVisit / (qreal) threadsPerCUDABlock); + densmatr_collapseToKnownProbOutcomeKernel<<>>( + outcomeProb, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBasesToVisit, + part1, part2, part3, rowBit, colBit, desired, undesired); +} + +__global__ void densmatr_addDensityMatrixKernel(Qureg combineQureg, qreal otherProb, Qureg otherQureg, long long int numAmpsToVisit) { + + long long int ampInd = blockIdx.x*blockDim.x + threadIdx.x; + if (ampInd >= 
numAmpsToVisit) return; + + combineQureg.deviceStateVec.real[ampInd] *= 1-otherProb; + combineQureg.deviceStateVec.imag[ampInd] *= 1-otherProb; + + combineQureg.deviceStateVec.real[ampInd] += otherProb*otherQureg.deviceStateVec.real[ampInd]; + combineQureg.deviceStateVec.imag[ampInd] += otherProb*otherQureg.deviceStateVec.imag[ampInd]; +} + +void densmatr_addDensityMatrix(Qureg combineQureg, qreal otherProb, Qureg otherQureg) { + + long long int numAmpsToVisit = combineQureg.numAmpsPerChunk; + + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); + densmatr_addDensityMatrixKernel<<>>( + combineQureg, otherProb, otherQureg, numAmpsToVisit + ); +} + +/** Called once for every 4 amplitudes in density matrix + * Works by establishing the |..0..><..0..| state (for its given index) then + * visiting |..1..><..0..| and |..0..><..1..|. Labels |part1 X pa>= numAmpsToVisit) return; + + long long int ampInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); + vecReal[ampInd + colBit] *= fac; + vecImag[ampInd + colBit] *= fac; + vecReal[ampInd + rowBit] *= fac; + vecImag[ampInd + rowBit] *= fac; +} + +void densmatr_oneQubitDephase(Qureg qureg, const int targetQubit, qreal dephase) { + + if (dephase == 0) + return; + + long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; + + int rowQubit = targetQubit + qureg.numQubitsRepresented; + long long int colBit = 1LL << targetQubit; + long long int rowBit = 1LL << rowQubit; + + long long int part1 = colBit - 1; + long long int part2 = (rowBit >> 1) - colBit; + long long int part3 = numAmpsToVisit - (rowBit >> 1); + qreal dephFac = 1 - dephase; + + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); + densmatr_oneQubitDephaseKernel<<>>( + dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, + part1, part2, part3, colBit, rowBit); +} + +/** Called 12 times for every 16 amplitudes in density matrix + * Each sums from the |..0..0..><..0..0..| index to visit either + * |..0..0..><..0..1..|, |..0..0..><..1..0..|, |..0..0..><..1..1..|, |..0..1..><..0..0..| + * etc and so on to |..1..1..><..1..0|. Labels |part1 0 part2 0 par>= numAmpsToVisit) return; + + // sets meta in 1...14 excluding 5, 10, creating bit string DCBA for |..D..C..><..B..A| + int meta = 1 + (outerInd/numBackgroundStates); + if (meta > 4) meta++; + if (meta > 9) meta++; + + long long int shift = rowBit2*((meta>>3)%2) + rowBit1*((meta>>2)%2) + colBit2*((meta>>1)%2) + colBit1*(meta%2); + long long int scanInd = outerInd % numBackgroundStates; + long long int stateInd = ( + shift + + (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4)); + + vecReal[stateInd] *= fac; + vecImag[stateInd] *= fac; +} + +// @TODO is separating these 12 amplitudes really faster than letting every 16th base modify 12 elems? 
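+// (For reference on the indexing above: meta = 1..14, skipping 5 and 10, maps onto the bit
+//  string DCBA = (rowBit2, rowBit1, colBit2, colBit1). The excluded patterns 0000, 0101,
+//  1010 and 1111 are exactly the four blocks whose row and column bits agree on both target
+//  qubits, which dephasing leaves untouched; the remaining 12 blocks are scaled by fac,
+//  e.g. meta=3 (0011) selects the |..0..0..><..1..1..| block.)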
+void densmatr_twoQubitDephase(Qureg qureg, int qubit1, int qubit2, qreal dephase) { + + if (dephase == 0) + return; + + // assumes qubit2 > qubit1 + + int rowQubit1 = qubit1 + qureg.numQubitsRepresented; + int rowQubit2 = qubit2 + qureg.numQubitsRepresented; + + long long int colBit1 = 1LL << qubit1; + long long int rowBit1 = 1LL << rowQubit1; + long long int colBit2 = 1LL << qubit2; + long long int rowBit2 = 1LL << rowQubit2; + + long long int part1 = colBit1 - 1; + long long int part2 = (colBit2 >> 1) - colBit1; + long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); + long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); + long long int part5 = (qureg.numAmpsPerChunk/16) - (rowBit2 >> 3); + qreal dephFac = 1 - dephase; + + // refers to states |a 0 b 0 c>>>( + dephFac, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numBackgroundStates, numAmpsToVisit, + part1, part2, part3, part4, part5, colBit1, rowBit1, colBit2, rowBit2); +} + +/** Works like oneQubitDephase but modifies every other element, and elements are averaged in pairs */ +__global__ void densmatr_oneQubitDepolariseKernel( + qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, + long long int part1, long long int part2, long long int part3, + long long int bothBits) +{ + long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; + if (scanInd >= numAmpsToVisit) return; + + long long int baseInd = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2); + long long int targetInd = baseInd + bothBits; + + qreal realAvDepol = depolLevel * 0.5 * (vecReal[baseInd] + vecReal[targetInd]); + qreal imagAvDepol = depolLevel * 0.5 * (vecImag[baseInd] + vecImag[targetInd]); + + vecReal[baseInd] *= 1 - depolLevel; + vecImag[baseInd] *= 1 - depolLevel; + vecReal[targetInd] *= 1 - depolLevel; + vecImag[targetInd] *= 1 - depolLevel; + + vecReal[baseInd] += realAvDepol; + vecImag[baseInd] += imagAvDepol; + vecReal[targetInd] += realAvDepol; + vecImag[targetInd] += imagAvDepol; +} + +void densmatr_oneQubitDepolarise(Qureg qureg, const int targetQubit, qreal depolLevel) { + + if (depolLevel == 0) + return; + + densmatr_oneQubitDephase(qureg, targetQubit, depolLevel); + + long long int numAmpsToVisit = qureg.numAmpsPerChunk/4; + int rowQubit = targetQubit + qureg.numQubitsRepresented; + + long long int colBit = 1LL << targetQubit; + long long int rowBit = 1LL << rowQubit; + long long int bothBits = colBit | rowBit; + + long long int part1 = colBit - 1; + long long int part2 = (rowBit >> 1) - colBit; + long long int part3 = numAmpsToVisit - (rowBit >> 1); + + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); + densmatr_oneQubitDepolariseKernel<<>>( + depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, + part1, part2, part3, bothBits); +} + +/** Called once for every 16 amplitudes */ +__global__ void densmatr_twoQubitDepolariseKernel( + qreal depolLevel, qreal* vecReal, qreal *vecImag, long long int numAmpsToVisit, + long long int part1, long long int part2, long long int part3, + long long int part4, long long int part5, + long long int rowCol1, long long int rowCol2) +{ + long long int scanInd = blockIdx.x*blockDim.x + threadIdx.x; + if (scanInd >= numAmpsToVisit) return; + + // index of |..0..0..><..0..0| + long long int ind00 = (scanInd&part1) + ((scanInd&part2)<<1) + ((scanInd&part3)<<2) + ((scanInd&part4)<<3) + ((scanInd&part5)<<4); + long long int ind01 = ind00 + rowCol1; + long long int 
ind10 = ind00 + rowCol2; + long long int ind11 = ind00 + rowCol1 + rowCol2; + + qreal realAvDepol = depolLevel * 0.25 * ( + vecReal[ind00] + vecReal[ind01] + vecReal[ind10] + vecReal[ind11]); + qreal imagAvDepol = depolLevel * 0.25 * ( + vecImag[ind00] + vecImag[ind01] + vecImag[ind10] + vecImag[ind11]); + + qreal retain = 1 - depolLevel; + vecReal[ind00] *= retain; vecImag[ind00] *= retain; + vecReal[ind01] *= retain; vecImag[ind01] *= retain; + vecReal[ind10] *= retain; vecImag[ind10] *= retain; + vecReal[ind11] *= retain; vecImag[ind11] *= retain; + + vecReal[ind00] += realAvDepol; vecImag[ind00] += imagAvDepol; + vecReal[ind01] += realAvDepol; vecImag[ind01] += imagAvDepol; + vecReal[ind10] += realAvDepol; vecImag[ind10] += imagAvDepol; + vecReal[ind11] += realAvDepol; vecImag[ind11] += imagAvDepol; +} + +void densmatr_twoQubitDepolarise(Qureg qureg, int qubit1, int qubit2, qreal depolLevel) { + + if (depolLevel == 0) + return; + + // assumes qubit2 > qubit1 + + densmatr_twoQubitDephase(qureg, qubit1, qubit2, depolLevel); + + int rowQubit1 = qubit1 + qureg.numQubitsRepresented; + int rowQubit2 = qubit2 + qureg.numQubitsRepresented; + + long long int colBit1 = 1LL << qubit1; + long long int rowBit1 = 1LL << rowQubit1; + long long int colBit2 = 1LL << qubit2; + long long int rowBit2 = 1LL << rowQubit2; + + long long int rowCol1 = colBit1 | rowBit1; + long long int rowCol2 = colBit2 | rowBit2; + + long long int numAmpsToVisit = qureg.numAmpsPerChunk/16; + long long int part1 = colBit1 - 1; + long long int part2 = (colBit2 >> 1) - colBit1; + long long int part3 = (rowBit1 >> 2) - (colBit2 >> 1); + long long int part4 = (rowBit2 >> 3) - (rowBit1 >> 2); + long long int part5 = numAmpsToVisit - (rowBit2 >> 3); + + int threadsPerCUDABlock, CUDABlocks; + threadsPerCUDABlock = 128; + CUDABlocks = ceil(numAmpsToVisit / (qreal) threadsPerCUDABlock); + densmatr_twoQubitDepolariseKernel<<>>( + depolLevel, qureg.deviceStateVec.real, qureg.deviceStateVec.imag, numAmpsToVisit, + part1, part2, part3, part4, part5, rowCol1, rowCol2); +} + +void seedQuESTDefault(){ + // init MT random number generator with three keys -- time, pid and a hash of hostname + // for the MPI version, it is ok that all procs will get the same seed as random numbers will only be + // used by the master process + + unsigned long int key[3]; + getQuESTDefaultSeedKey(key); + init_by_array(key, 3); +} + + +#ifdef __cplusplus +} +#endif diff --git a/cuda_code/QuadtreeCreation.cu b/cuda_code/QuadtreeCreation.cu new file mode 100644 index 0000000000000000000000000000000000000000..8baddb8938980cc943f316dece7508047f1b466c --- /dev/null +++ b/cuda_code/QuadtreeCreation.cu @@ -0,0 +1,514 @@ +#ifndef __CUDACC__ + #define __CUDACC__ +#endif + +#include "cuda/csm/QuadTreeCreation.cuh" + +#include +#include +#include +#include +#include +#include + +#include "csm/CudaHelpers.h" + +namespace Quadtree { + +typedef float flag_t; +#define EMPTY_FLAG 0.0f +#define LEAF_FLAG 1.0f +#define INNER_FLAG 2.0f + + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +__device__ +void sortValues(float* vals1, bool* isMin1, float* vals2, bool* isMin2) +{ + +#define IS_LOWER(V,F,i,j) (V[(i)] < V[(j)] || (V[(i)] == V[(j)] && F[(i)] && !F[(j)])) +#define COPY(Vsrc, Fsrc, Vdst, Fdst, src, dst) Vdst[(dst)] = Vsrc[(src)]; Fdst[(dst)] = Fsrc[(src)]; + + //////////////// Sort pairs of values + for (int i = 0; i < 4; ++i) + { + if (IS_LOWER(vals1, isMin1, i * 2, i * 2 + 
1)) + { + COPY(vals1, isMin1, vals2, isMin2, i * 2, i * 2); + COPY(vals1, isMin1, vals2, isMin2, i * 2 + 1, i * 2 + 1); + } + else + { + COPY(vals1, isMin1, vals2, isMin2, i * 2, i * 2 + 1); + COPY(vals1, isMin1, vals2, isMin2, i * 2 + 1, i * 2); + } + } + + //////////////// Sort sets of 4 values + for (int i = 0; i < 2; ++i) + { + int idx1 = i * 4; + int idx2 = i * 4 + 2; + int idx1Limit = idx1 + 2; + int idx2Limit = idx2 + 2; + int dstIndex = i * 4; + + for (int j = 0; j < 4; ++j) + { + if (idx1 >= idx1Limit) { + COPY(vals2, isMin2, vals1, isMin1, idx2, dstIndex); + idx2++; + } + else if (idx2 >= idx2Limit || IS_LOWER(vals2, isMin2, idx1, idx2)) { + COPY(vals2, isMin2, vals1, isMin1, idx1, dstIndex); + idx1++; + } + else { + COPY(vals2, isMin2, vals1, isMin1, idx2, dstIndex); + idx2++; + } + + dstIndex++; + } + } + + //////////////// Sort set of 8 values + { + int idx1 = 0; + int idx2 = 4; + int idx1Limit = 4; + int idx2Limit = 8; + int dstIndex = 0; + + for (int j = 0; j < 8; ++j) + { + if (idx1 >= idx1Limit) { + COPY(vals1, isMin1, vals2, isMin2, idx2, dstIndex); + idx2++; + } + else if (idx2 >= idx2Limit || IS_LOWER(vals1, isMin1, idx1, idx2)) { + COPY(vals1, isMin1, vals2, isMin2, idx1, dstIndex); + idx1++; + } + else { + COPY(vals1, isMin1, vals2, isMin2, idx2, dstIndex); + idx2++; + } + + dstIndex++; + } + } + +#undef IS_LOWER +#undef COPY + + } + + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + + inline __device__ void count_ivals(const float2& ival, const float2& a, const float2& b, const float2& c, unsigned int& count) + { + count = 1; + if (a.x <= ival.x && a.y >= ival.x) + count++; + if (b.x <= ival.x && b.y >= ival.x) + count++; + if (c.x <= ival.x && c.y >= ival.x) + count++; + + } + + __global__ + void createLevel(const unsigned int level, + const bool preserveBottomLevel, + cudaTextureObject_t prevMin, + cudaTextureObject_t prevMax, + cudaSurfaceObject_t prevValue, + uint32_t* prevExists, + cudaSurfaceObject_t nextMin, + cudaSurfaceObject_t nextMax, + cudaSurfaceObject_t nextValue, + uint32_t* nextExists, + uint32_t* totalCount, + uint8_t* levelExists, + dim3 texDim) + { + int x = blockIdx.x * blockDim.x + threadIdx.x; + int y = blockIdx.y * blockDim.y + threadIdx.y; + + if (x >= texDim.x || y >= texDim.y) + return; + + // Get the bounds of the four children of the current node + const int2 tl = make_int2(2 * x , 2 * y); + const int2 tr = make_int2(2 * x + 1, 2 * y); + const int2 bl = make_int2(2 * x , 2 * y + 1); + const int2 br = make_int2(2 * x + 1, 2 * y + 1); + + const float2 tlBounds = make_float2(tex2D(prevMin, tl.x, tl.y), tex2D(prevMax, tl.x, tl.y)); // 2 x READ + const float2 trBounds = make_float2(tex2D(prevMin, tr.x, tr.y), tex2D(prevMax, tr.x, tr.y)); // 2 x READ + const float2 blBounds = make_float2(tex2D(prevMin, bl.x, bl.y), tex2D(prevMax, bl.x, bl.y)); // 2 x READ + const float2 brBounds = make_float2(tex2D(prevMin, br.x, br.y), tex2D(prevMax, br.x, br.y)); // 2 x READ + + // // Add a minimum interval depth + //float bias = 0.00001; + //tlBounds.y = max(tlBounds.x + bias, tlBounds.y); + //trBounds.y = max(trBounds.x + bias, trBounds.y); + //blBounds.y = max(blBounds.x + bias, blBounds.y); + //brBounds.y = max(brBounds.x + bias, brBounds.y); + + // Compute minimum and maximum of the 4 intervals + const float gmin = max(max(tlBounds.x, trBounds.x), max(blBounds.x, brBounds.x)); + const float gmax = min(min(tlBounds.y, trBounds.y), min(blBounds.y, 
brBounds.y)); + + // Find a new best value for the 4 intervals, also find the min/max values of the intervals covered by the best value + float newValue = 0.0f; + float minValue = 0.0; + float maxValue = 1.0; + + if (gmin <= gmax) + { + newValue = (gmin + gmax) * 0.5f; + minValue = gmin; + maxValue = gmax; + } + else + { + unsigned int bestcounter = 0; + unsigned int counter = 0; + + count_ivals(tlBounds, trBounds, blBounds, brBounds, counter); + if (counter > bestcounter) + { + bestcounter = counter; + newValue = tlBounds.x; + } + count_ivals(trBounds, tlBounds, blBounds, brBounds, counter); + if (counter > bestcounter) + { + bestcounter = counter; + newValue = trBounds.x; + } + count_ivals(blBounds, tlBounds, trBounds, brBounds, counter); + if (counter > bestcounter) + { + bestcounter = counter; + newValue = blBounds.x; + } + count_ivals(brBounds, tlBounds, trBounds, blBounds, counter); + if (counter > bestcounter) + { + bestcounter = counter; + newValue = brBounds.x; + } + + if (newValue >= tlBounds.x && newValue <= tlBounds.y) + { + minValue = max(minValue, tlBounds.x); + maxValue = min(maxValue, tlBounds.y); + } + if (newValue >= trBounds.x && newValue <= trBounds.y) + { + minValue = max(minValue, trBounds.x); + maxValue = min(maxValue, trBounds.y); + } + if (newValue >= blBounds.x && newValue <= blBounds.y) + { + minValue = max(minValue, blBounds.x); + maxValue = min(maxValue, blBounds.y); + } + if (newValue >= brBounds.x && newValue <= brBounds.y) + { + minValue = max(minValue, brBounds.x); + maxValue = min(maxValue, brBounds.y); + } + } + + // Compute whether the child exists (parent doesn't cover it) + bool tlExists = tlBounds.x > newValue || tlBounds.y < newValue; + bool trExists = trBounds.x > newValue || trBounds.y < newValue; + bool blExists = blBounds.x > newValue || blBounds.y < newValue; + bool brExists = brBounds.x > newValue || brBounds.y < newValue; + + ////////////////////////// Flags update + // Update the lowest leaf level below this node + size_t index = y * texDim.x + x; + size_t tlIndex = tl.y * texDim.x * 2 + tl.x; + size_t trIndex = tr.y * texDim.x * 2 + tr.x; + size_t blIndex = bl.y * texDim.x * 2 + bl.x; + size_t brIndex = br.y * texDim.x * 2 + br.x; + + uint32_t tlCount, trCount, blCount, brCount; + if (level == 1 && !preserveBottomLevel) { + tlCount = 1; + trCount = 1; + blCount = 1; + brCount = 1; + } else { + tlCount = prevExists[tlIndex]; // 1 x READ + trCount = prevExists[trIndex]; // 1 x READ + blCount = prevExists[blIndex]; // 1 x READ + brCount = prevExists[brIndex]; // 1 x READ + } + + tlCount -= (tlExists || tlCount > 1) ? 0 : 1; + trCount -= (trExists || trCount > 1) ? 0 : 1; + blCount -= (blExists || blCount > 1) ? 0 : 1; + brCount -= (brExists || brCount > 1) ? 0 : 1; + + prevExists[tlIndex] = tlCount ? 1 : 0; // 1 x WRITE + prevExists[trIndex] = trCount ? 1 : 0; // 1 x WRITE + prevExists[blIndex] = blCount ? 1 : 0; // 1 x WRITE + prevExists[brIndex] = brCount ? 1 : 0; // 1 x WRITE + + if (texDim.x > 1) { + nextExists[index] = tlCount + trCount + blCount + brCount + 1; // 1 x WRITE + } else { + nextExists[index] = 1; // 1 x WRITE + totalCount[0] = tlCount + trCount + blCount + brCount + 1; + } + + // Update values of the 4 children (set child to empty(=-1) if parent covers it) + float tlPrev = tlExists ? (tlBounds.x + tlBounds.y) * 0.5f : -1.f; + float trPrev = trExists ? (trBounds.x + trBounds.y) * 0.5f : -1.f; + float blPrev = blExists ? (blBounds.x + blBounds.y) * 0.5f : -1.f; + float brPrev = brExists ? 
(brBounds.x + brBounds.y) * 0.5f : -1.f; + + if (tlCount) + surf2Dwrite(tlPrev, prevValue, tl.x * sizeof(float), tl.y); + if (trCount) + surf2Dwrite(trPrev, prevValue, tr.x * sizeof(float), tr.y); + if (blCount) + surf2Dwrite(blPrev, prevValue, bl.x * sizeof(float), bl.y); + if (brCount) + surf2Dwrite(brPrev, prevValue, br.x * sizeof(float), br.y); + + if (tlCount || trCount || blCount || brCount) + levelExists[level - 1] = 1; + + // If this is the highest level, set the value to the average of the current min/max + if (texDim.x == 1) + surf2Dwrite(0.5f * (minValue + maxValue), nextValue, 0, 0); + + // Store the min/value for the node on the next level + surf2Dwrite(minValue, nextMin, x * sizeof(float), y); + surf2Dwrite(maxValue, nextMax, x * sizeof(float), y); + } + + void + createQuadtreeLevel(const unsigned int level, + const bool preserveBottomLevel, + cudaTextureObject_t prevMin, + cudaTextureObject_t prevMax, + cudaSurfaceObject_t prevValue, + uint32_t* prevExists, + cudaSurfaceObject_t nextMin, + cudaSurfaceObject_t nextMax, + cudaSurfaceObject_t nextValue, + uint32_t* nextExists, + uint32_t* totalCount, + uint8_t* levelExists, + dim3 texDim) + { + dim3 thread(min(32, texDim.x), min(32, texDim.y)); + const int extraX = texDim.x % thread.x ? 1 : 0; + const int extraY = texDim.y % thread.y ? 1 : 0; + dim3 block(texDim.x / thread.x + extraX, texDim.y / thread.y + extraY); + createLevel <<< block, thread >>> (level, + preserveBottomLevel, + prevMin, + prevMax, + prevValue, + prevExists, + nextMin, + nextMax, + nextValue, + nextExists, + totalCount, + levelExists, + texDim); + } + + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + + + __global__ + void + fillIndices(const uint32_t level, + const uint32_t threadNum, + const uint32_t* exists, + const uint32_t* offset, + uint32_t* outputLevel, + uint32_t* outputLocalIndexVector, + int32_t* outputGlobalIndexVector) + { + const int i = blockIdx.x * blockDim.x + threadIdx.x; + + if (i >= threadNum) + return; + + if (!exists[i]) { + outputGlobalIndexVector[i] = -1; + return; + } + + int outputIndex = (*offset) + outputGlobalIndexVector[i]; + + outputLevel[outputIndex] = level; + outputLocalIndexVector[outputIndex] = i; + + outputGlobalIndexVector[i] = outputIndex; + } + + + + + + + void + fillNodeIndices(const uint32_t level, + const uint32_t levelsize, + const uint32_t* existsVector, + const uint32_t* offset, + uint32_t* outputLevel, + uint32_t* outputLocalIndexVector, + int32_t* outputGlobalIndexVector) + { + uint32_t threadNum = levelsize*levelsize; + cuda_kernel_launch_size_t kernel = computeLaunchSize1D(threadNum); + fillIndices <<< kernel.block, kernel.thread >>> (level, + threadNum, + existsVector, + //countVector, + offset, + outputLevel, + outputLocalIndexVector, + outputGlobalIndexVector); + } + + + + + ///////////////////////////////////////////////////////////////////////////////////////////////////////// + +#define INNER_BIT (0x0) +#define LEAF_BIT (0x1) +#define EMPTY_BIT (0x2) + + __global__ + void createNodes(const unsigned int level, + const bool preserveBottomLevel, + const unsigned int lowestlevel, + const size_t levelsize, + const uint32_t* levels, + const uint32_t* lidx, + const int32_t* gidx, + const uint32_t* exists, + cudaTextureObject_t values, + cudaTextureObject_t mins, + cudaTextureObject_t maxs, + const size_t numnodes, + const uint32_t root_idx, + const uint32_t* bottomLevelPtrs, + gpu_node_t* nodes) + { + const 
int i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= numnodes) + return; + + // Get level and check if it matches the node associated with the thread + const uint32_t tlevel = levels[i]; + if (level != tlevel) + return; + + // Get local index + const uint32_t idx = lidx[i]; + + // Fill in node information + gpu_node_t node; + // node.value = tex1Dfetch(values, idx); + + const uint32_t yidx = idx / levelsize; + const uint32_t xidx = idx - yidx * levelsize; + node.value = tex2D(values, xidx, yidx); + +//!! Only allow if we need min and max +#if 1 + node.min = tex2D(mins, xidx, yidx); + node.max = tex2D(maxs, xidx, yidx); +#endif + + bool isleaf = true; + + if (preserveBottomLevel && level == lowestlevel + 1) + { + unsigned int childofs[4][2] = { { 0, 0 }, { 1, 0 }, { 0, 1 }, { 1, 1 } }; + for (size_t k = 0; k < 4; ++k) + { + const uint32_t y = idx / levelsize; + const uint32_t x = idx - y * levelsize; + const uint32_t childidx = (2 * y + childofs[k][1]) * levelsize * 2 + (2 * x + childofs[k][0]); + node.childrenList[k] = exists[childidx] ? int32_t(bottomLevelPtrs[childidx]) : -1; + if (node.childrenList[k] != -1) + { + isleaf = false; + } + } + } + else if (level > lowestlevel) + { + unsigned int childofs[4][2] = { { 0, 0 }, { 1, 0 }, { 0, 1 }, { 1, 1 } }; + for (size_t k = 0; k < 4; ++k) + { + const uint32_t y = idx / levelsize; + const uint32_t x = idx - y * levelsize; + const uint32_t childidx = (2 * y + childofs[k][1]) * levelsize * 2 + (2 * x + childofs[k][0]); + node.childrenList[k] = gidx[childidx]; + + if (node.childrenList[k] != -1) + { + node.childrenList[k] += root_idx; + isleaf = false; + } + } + } + else + { + for (size_t k = 0; k < 4; ++k) + node.childrenList[k] = -1; + } + + if (isleaf) + node.flags = LEAF_BIT; + else + node.flags = (node.value == -1.0f) ? EMPTY_BIT : INNER_BIT; + + // Save node + nodes[i] = node; + } + + void + createQuadtreeNodes(const uint32_t level, + const bool preserveBottomLevel, + const uint32_t lowestlevel, + const size_t levelsize, + const uint32_t* d_levels, + const uint32_t* d_lidx, + const int32_t* d_gidx, + const uint32_t* d_exists, + cudaTextureObject_t values, + cudaTextureObject_t mins, + cudaTextureObject_t maxs, + const size_t numnodes, + const uint32_t root_idx, + const uint32_t* bottomLevelPtrs, + gpu_node_t* d_nodes) + { + const unsigned int numThreads = 1024; + dim3 thread(numThreads, 1, 1); + const int extra = numnodes % thread.x ? 1 : 0; + dim3 block(numnodes / thread.x + extra, 1, 1); + createNodes <<< block, thread >>> (level, preserveBottomLevel, lowestlevel, levelsize, d_levels, d_lidx, d_gidx, d_exists, values, mins, maxs, numnodes, root_idx, bottomLevelPtrs, d_nodes); + } + +} \ No newline at end of file diff --git a/cuda_code/RBFCalGPUHelper.cu b/cuda_code/RBFCalGPUHelper.cu new file mode 100644 index 0000000000000000000000000000000000000000..e5c17ae17b33dab0ff5518de5ffe3bc4ab310526 --- /dev/null +++ b/cuda_code/RBFCalGPUHelper.cu @@ -0,0 +1,146 @@ + +#include "kernelCalGPUHelper.h" +/* + * @brief: compute one Hessian row + * @param: pfDevSamples: data of samples. One dimension array represents a matrix + * @param: pfDevTransSamples: transpose data of samples + * @param: pfDevHessianRows: a Hessian row. 
the final result of this function + * @param: nNumofSamples: the number of samples + * @param: nNumofDim: the number of dimensions for samples + * @param: nStartRow: the Hessian row to be computed + */ +__device__ void RBFOneRow(real *pfDevSamples, real *pfDevTransSamples, + real *pfDevHessianRows, int nNumofSamples, int nNumofDim, + int nStartRow, real fGamma) +{ + int nThreadId = threadIdx.x; + int nBlockSize = blockDim.x; + int nGlobalIndex = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;//global index for thread + extern __shared__ real fSampleValue[]; + + int nTempPos = 0; + real fKernelValue = 0; + int nRemainDim = nNumofDim; + + if(nThreadId >= nBlockSize) + { + return; + } + + //when the # of dimension is huge, we process a part of dimension. one thread per kernel value + for(int j = 0; nRemainDim > 0; j++) + { + //if(nThreadId == 0) + //{ + //starting position of a sample, + j * nNumofValuesInShdMem is for case that dimension is too large + nTempPos = nStartRow * nNumofDim + j * nBlockSize; + //} + //__syncthreads(); + + //in the case, nThreadId < nMaxNumofThreads + //load (part of or all of) the sample values into shared memory + if(nThreadId < nRemainDim) + { + fSampleValue[nThreadId] = pfDevSamples[nTempPos + nThreadId]; + } + __syncthreads(); //synchronize threads within a block + + /* start compute kernel value */ + if(nGlobalIndex < nNumofSamples) + { + real fTempSampleValue; + real fDiff; + //when the block size is larger than remaining dim, k is bounded by nRemainDim + //when the nRemainDim is larger than block size, k is bounded by nBlockSize + for(int k = 0; (k < nBlockSize) && (k < nRemainDim); k++) + { + nTempPos = (nNumofDim - nRemainDim + k) * nNumofSamples + nGlobalIndex; + fTempSampleValue = pfDevTransSamples[nTempPos]; //transpose sample + fDiff = fSampleValue[k] - fTempSampleValue; + + fKernelValue += (fDiff * fDiff); + } + } + + nRemainDim -= nBlockSize; + //synchronize threads within block to avoid modifying shared memory + __syncthreads(); + }//end computing one kernel value + + //load the element to global + if(nGlobalIndex < nNumofSamples) + { + fKernelValue = fKernelValue * fGamma; + fKernelValue = -fKernelValue; //Gaussian kernel use "-gamma" + if(sizeof(real) == sizeof(double)) + fKernelValue = exp(fKernelValue); + else + fKernelValue = expf(fKernelValue); + + pfDevHessianRows[nGlobalIndex] = fKernelValue; + } +} + + +//a few blocks compute one row of the Hessian matrix. The # of threads invovled in a row is equal to the # of samples +//one thread an element of the row +//the # of thread is equal to the # of dimensions or the available size of shared memory +__global__ void RBFKernel(real *pfDevSamples, real *pfDevTransSamples, real *pfDevHessianRows, + int nNumofSamples, int nNumofDim, int nNumofRows, int nStartRow, real fGamma) +{ + real *pfDevTempHessianRow; + + //for(int i = 0; i < nNumofRows; i++) + { + //pointer to a hessian row + //pfDevTempHessianRow = pfDevHessianRows + i * nNumofSamples; + pfDevTempHessianRow = pfDevHessianRows + blockIdx.z * nNumofSamples; + + RBFOneRow(pfDevSamples, pfDevTransSamples, pfDevTempHessianRow, + nNumofSamples, nNumofDim, nStartRow + blockIdx.z, fGamma); + //nStartRow++;//increase to next row + } //end computing n rows of Hessian Matrix + +} + +//a few blocks compute one row of the Hessian matrix. 
The # of threads invovled in a row is equal to the # of samples +//one thread an element of the row +//the # of thread is equal to the # of dimensions or the available size of shared memory +__global__ void ObtainRBFKernel(real *pfDevHessianRows, real *pfDevSelfDot, int nNumofSamples, + int nNumofRows, real fGamma, int nStartRow, int nStartCol) +{ + int nGlobalIndex = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;//global index for thread + if(nGlobalIndex < nNumofSamples * nNumofRows) + { + int nRow = (nGlobalIndex / nNumofSamples + nStartRow); + int nCol = (nGlobalIndex % nNumofSamples + nStartCol); + + + float fKernelValue = (pfDevSelfDot[nRow] + pfDevSelfDot[nCol] -pfDevHessianRows[nGlobalIndex] * 2.f ) * fGamma; + fKernelValue = -fKernelValue; //Gaussian kernel use "-gamma" + if(sizeof(real) == sizeof(double)) + fKernelValue = exp(fKernelValue); + else + fKernelValue = expf(fKernelValue); + + //if(nGlobalIndex == 299 * nNumofSamples + 100) + // printf("%f, %f, %f, %f, %d\n", pfDevSelfDot[nCol], pfDevSelfDot[nRow], pfDevHessianRows[nGlobalIndex], fKernelValue, nGlobalIndex); + + pfDevHessianRows[nGlobalIndex] = fKernelValue; + } +} + +//a few blocks compute one row of the Hessian matrix. The # of threads involved in a row is equal to the # of samples +//one thread an element of the row +//the # of thread is equal to the # of dimensions or the available size of shared memory +__global__ void UpdateDiag(real *pfDevHessianRows, int nNumofSamples, int nNumofRows) +{ + int nGlobalIndex = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;//global index for thread + if(nGlobalIndex < nNumofSamples * nNumofRows) + { + int nRow = nGlobalIndex / nNumofSamples; + int nCol = nGlobalIndex % nNumofSamples; + if(nRow == nCol) + pfDevHessianRows[nGlobalIndex] = 1; + } +} diff --git a/cuda_code/RMSDKernels_1.cu b/cuda_code/RMSDKernels_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..5d57fbc79445f80e411789e1d2ccf7c8a818f6ce --- /dev/null +++ b/cuda_code/RMSDKernels_1.cu @@ -0,0 +1,61 @@ +#include "RMSDKernels.h" +#include + +template +__global__ void cuda_correlationMatrix( T *d_coords1, T *d_coords2, double *RMat, int *num_atoms, int atoms_stride){ + uint batch_idx = blockIdx.x; + uint i = threadIdx.x; + uint j = threadIdx.y; + int r_index = 9*batch_idx + 3*i+j; + int n_atoms = num_atoms[batch_idx]; + T *coords1 = d_coords1 + batch_idx*atoms_stride*3; + T *coords2 = d_coords2 + batch_idx*atoms_stride*3; + + RMat[r_index] = 0.0; + for(int k=0; k +__global__ void cuda_computeR2( T *d_coordinates, int num_atoms, double *R2){ + int dim_index = threadIdx.x; + R2[dim_index] = 0.0; + for(int i=0; i +void gpu_correlationMatrix(T *d_coords1, T *d_coords2, double *TMat, int *num_atoms, int batch_size, int atoms_stride){ + double *RMat; + cudaMalloc( &RMat, batch_size*9*sizeof(double)); + dim3 coords_dim(3, 3, 1); + cuda_correlationMatrix<<>>(d_coords1, d_coords2, RMat, num_atoms, atoms_stride); + cuda_TMatrix<<>>(RMat, TMat); + cudaFree(RMat); +} + +template +void gpu_computeR2( T *d_coordinates, int num_atoms, double *R2){ + cuda_computeR2<<<1,3>>>( d_coordinates, num_atoms, R2); +} + + +template void gpu_correlationMatrix(float*, float*, double*, int*, int, int); +template void gpu_correlationMatrix(double*, double*, double*, int*, int, int); + +template void gpu_computeR2(float*, int, double*); +template void gpu_computeR2(double*, int, double*); \ No newline at end of file diff --git a/cuda_code/Randperm_5.cu b/cuda_code/Randperm_5.cu new file mode 
100644 index 0000000000000000000000000000000000000000..1702be2fd2859fcb8a5b0617a062327bb4d52ef7 --- /dev/null +++ b/cuda_code/Randperm_5.cu @@ -0,0 +1,83 @@ +#include +#include +#include +#include +#include + +#include + +namespace at { +namespace native { + +Tensor& randperm_out_cuda(int64_t n, c10::optional generator, Tensor& result) { + TORCH_CHECK(n >= 0, "n must be non-negative, got", n); + TORCH_CHECK(!generator.has_value() || (generator.has_value() && result.device() == generator->device()), "Expected a '", result.device(), "' generator device but found '", generator->device(), "'"); + check_supported_max_int_with_precision(n, result); + + result.resize_({n}); + + if (n < 30000) { // For small inputs, we offload it to CPU instead. + auto result_cpu = at::empty({n}, result.options().device(kCPU)); + randperm_out(result_cpu, n, generator); + return result.copy_(result_cpu); + } + +#if 0 + // This if condition should never be true because if n >= 30000 and the tensor has a Half type, + // check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here + // for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this. + if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid. + auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA))); + return result.copy_(randperm_out_cuda(result_float, n, generator)); + } +#endif + + // Generate random values for the keys array + AT_DISPATCH_ALL_TYPES( + result.scalar_type(), "randperm_out_cuda", [&] { + TORCH_CHECK(n <= std::numeric_limits::max(), + "randperm of tensors larger than INT_MAX is not supported yet in pytorch"); + + auto keys = at::empty(result.sizes(), result.options()).random_(generator); + auto range = at::arange(n, result.options()); + auto keys_tmp = at::empty_like(keys); + + // shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it + // points to a new tensor. 
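      // The shuffle itself is done by sorting: every position gets a random key,
      // and radix-sorting the (key, index) pairs by key leaves the indices of
      // `range` in random order in `shuffled_data`.  That is also why the
      // cub::DeviceRadixSort::SortPairs call below appears twice, following the
      // usual CUB pattern: the first call, made with a null temp-storage pointer,
      // only reports the required scratch size in temp_storage_bytes, and the
      // second call performs the actual sort in the allocated buffer.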
+ Tensor shuffled; + scalar_t *shuffled_data; + if (result.is_contiguous()) { + shuffled_data = result.data_ptr(); + } else { + shuffled = at::empty(n, result.options()); + shuffled_data = shuffled.data_ptr(); + } + + // Use the sorted order of keys to rearrange the result array + size_t temp_storage_bytes = 0; + + cub::DeviceRadixSort::SortPairs( + nullptr, temp_storage_bytes, + keys.data_ptr(), keys_tmp.data_ptr(), + range.data_ptr(), shuffled_data, n, + 0, sizeof(scalar_t) * 8, at::cuda::getCurrentCUDAStream()); + auto& allocator = *::c10::cuda::CUDACachingAllocator::get(); + auto dataPtr = allocator.allocate(temp_storage_bytes); + cub::DeviceRadixSort::SortPairs( + dataPtr.get(), temp_storage_bytes, + keys.data_ptr(), keys_tmp.data_ptr(), + range.data_ptr(), shuffled_data, n, + 0, sizeof(scalar_t) * 8, at::cuda::getCurrentCUDAStream()); + + if (!result.is_contiguous()) { + result.copy_(shuffled); + } + } + ); + + return result; +} + + + +}} // namespace at::native diff --git a/cuda_code/Raster_1.cu b/cuda_code/Raster_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..c06d0b2d5de4978156a9f39e5bedd41522dba35e --- /dev/null +++ b/cuda_code/Raster_1.cu @@ -0,0 +1,123 @@ +#include "Raster.cuh" +namespace MNN { +namespace CUDA { + +// Blit don't care offset +template +__global__ void blit(const T *input, T *output, + int sizeZ, int sizeY, int sizeX, + int strideZ, int strideY, int strideX, + int dstStrideZ, int dstStrideY, int dstStrideX + ) { + int total = sizeZ * sizeY * sizeX; + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { + int x = i % sizeX; + int tmp = i / sizeX; + int y = tmp % sizeY; + int z = tmp / sizeY; + int srcOffset = z * strideZ + y * strideY + x * strideX; + int dstOffset = z * dstStrideZ + y * dstStrideY + x * dstStrideX; + output[dstOffset] = input[srcOffset]; + } +} + +void RasterBlit(uint8_t* output, const uint8_t* input, const Tensor::InsideDescribe::Region& reg, int bytes, CUDARuntime* runtime) { + int count = reg.size[0] * reg.size[1] * reg.size[2]; + int block_num = runtime->blocks_num(count); + int threads_num = runtime->threads_num(); + switch (bytes) { + case 4: + blit<<>>((const float*)input, (float*)output, + reg.size[0], reg.size[1], reg.size[2], + reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], + reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); + break; + case 2: + blit<<>>((const int16_t*)input, (int16_t*)output, + reg.size[0], reg.size[1], reg.size[2], + reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], + reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); + break; + case 1: + blit<<>>((const int8_t*)input, (int8_t*)output, + reg.size[0], reg.size[1], reg.size[2], + reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], + reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2]); + break; + default: + break; + } +} + +template +__global__ void pack_c4(const T *input, T *output, int inside, int axis, int outside, int axisC4) { + int total = inside * axis * outside; + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { + int x = i % inside; + int tmp = i / inside; + int y = tmp % axis; + int z = tmp / axis; + int y4 = y / 4; + int yR = y % 4; + int dstOffset = 4 * (z * axisC4 * inside + y4 * inside + x) + yR; + output[dstOffset] = input[i]; + } +} + +void PackC4(uint8_t* output, const uint8_t* input, int inside, int axis, int outside, int bytes, CUDARuntime* runtime) { + auto packAxis = (axis + 3) / 4; + if 
(axis % 4 != 0) { + runtime->memset(output, 0, inside * packAxis * 4 * outside * bytes); + } + int block_num = runtime->blocks_num(inside * axis * outside); + int threads_num = runtime->threads_num(); + switch (bytes) { + case 4: + pack_c4<<>>((const float*)input, (float*)output, inside, axis, outside, packAxis); + break; + case 2: + pack_c4<<>>((const int16_t*)input, (int16_t*)output, inside, axis, outside, packAxis); + break; + case 1: + pack_c4<<>>((const int8_t*)input, (int8_t*)output, inside, axis, outside, packAxis); + break; + default: + break; + } +} + +template +__global__ void unpack_c4(const T *input, T *output, int inside, int axis, int outside, int axisC4) { + int total = inside * axis * outside; + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { + int x = i % inside; + int tmp = i / inside; + int y = tmp % axis; + int z = tmp / axis; + int y4 = y / 4; + int yR = y % 4; + int srcOffset = 4 * (z * axisC4 * inside + y4 * inside + x) + yR; + output[i] = input[srcOffset]; + } +} +void UnpackC4(uint8_t* output, const uint8_t* input, int inside, int axis, int outside, int bytes, CUDARuntime* runtime) { + auto packAxis = (axis + 3) / 4; + int block_num = runtime->blocks_num(inside * axis * outside); + int threads_num = runtime->threads_num(); + switch (bytes) { + case 4: + unpack_c4<<>>((const float*)input, (float*)output, inside, axis, outside, packAxis); + break; + case 2: + unpack_c4<<>>((const int16_t*)input, (int16_t*)output, inside, axis, outside, packAxis); + break; + case 1: + unpack_c4<<>>((const int8_t*)input, (int8_t*)output, inside, axis, outside, packAxis); + break; + default: + break; + } +} + +} +} diff --git a/cuda_code/ReactorSpectrumPdf.cu b/cuda_code/ReactorSpectrumPdf.cu new file mode 100644 index 0000000000000000000000000000000000000000..94dc06f8c56d8935455572d46126deff0f0f90d4 --- /dev/null +++ b/cuda_code/ReactorSpectrumPdf.cu @@ -0,0 +1,70 @@ +/*****************************************************************************/ +// Author: Xuefeng Ding +// Insitute: Gran Sasso Science Institute, L'Aquila, 67100, Italy +// Date: 2018 April 7th +// Version: v1.0 +// Description: GooStats, a statistical analysis toolkit that runs on GPU. +// +// All rights reserved. 2018 copyrighted. 
+/*****************************************************************************/ +#include "ReactorSpectrumPdf.h" + +EXEC_TARGET fptype device_ReactorSpectrum (fptype* evt, fptype* p, unsigned int* indices) { + const fptype E = evt[RO_CACHE(indices[2 + RO_CACHE(indices[0])])]; // in MeV + /***************** neutrino spectrum [#neutrino per (fission x MeV)] **********************/ + const fptype U235 = RO_CACHE(p[RO_CACHE(indices[1])]); + const fptype U238 = RO_CACHE(p[RO_CACHE(indices[2])]); + const fptype Pu239 = RO_CACHE(p[RO_CACHE(indices[3])]); + //const fptype Pu241 = RO_CACHE(p[RO_CACHE(indices[4])]); + const fptype Pu241 = 1-U235-U238-Pu239; + const unsigned int cIndex = RO_CACHE(indices[5]); + const fptype *U235p = functorConstants+cIndex; + const fptype *U238p = functorConstants+cIndex+3; + const fptype *Pu239p = functorConstants+cIndex+6; + const fptype *Pu241p = functorConstants+cIndex+9; + const fptype phiU235 = EXP(RO_CACHE(U235p[0]) + RO_CACHE(U235p[1])*E + RO_CACHE(U235p[2])*E*E); + const fptype phiU238 = EXP(RO_CACHE(U238p[0]) + RO_CACHE(U238p[1])*E + RO_CACHE(U238p[2])*E*E); + const fptype phiPu239 = EXP(RO_CACHE(Pu239p[0]) + RO_CACHE(Pu239p[1])*E + RO_CACHE(Pu239p[2])*E*E); + const fptype phiPu241 = EXP(RO_CACHE(Pu241p[0]) + RO_CACHE(Pu241p[1])*E + RO_CACHE(Pu241p[2])*E*E); + const fptype nuEdNdE = phiU235*U235+phiU238*U238+phiPu239*Pu239+phiPu241*Pu241; // unit: #neutrino per (fission x MeV) + /******************************************************************************************/ + const fptype power = RO_CACHE(functorConstants[cIndex+12]); // unit: GWth + // M. F. James, “Energy released in fission,” J. Nucl. Energy, vol. 23, no. 9, pp. 517–536, Nov. 1969. + const fptype unitE = U235*201.7+U238*205.0+Pu239*210.0+Pu241*212.4; // unit: MeV + const fptype distance = RO_CACHE(functorConstants[cIndex+13]); // in cm (converted already) + // unit: neutrino / cm^2 / day + // power in GW + const fptype fissionRate = power*5.392661498e26/* GWxday -> MeV *//unitE; // unit: fission per day + const fptype ret = fissionRate*nuEdNdE/(4*3.1415926535*distance*distance); // unit: #neutrino per (MeV x day x cm^2) +#ifdef RPF_CHECK + if(THREADIDX==0) + printf("%d %lf -> (%lf / %lf %lf %lf) phi %lf Pth %lf unitE %lf L %lf %le\n",THREADIDX, E, + U235,U235p[0],U235p[1],U235p[2], + nuEdNdE,power,unitE,distance,ret); +#endif + return ret; +} + +MEM_DEVICE device_function_ptr ptr_to_ReactorSpectrum = device_ReactorSpectrum; + +__host__ ReactorSpectrumPdf::ReactorSpectrumPdf (std::string n, Variable *_x, + const std::vector &fractions,const std::vector &coefficients,fptype power,fptype distance /*km*/) +: GooPdf(_x, n) +{ + std::vector pindices; + for(auto fraction : fractions) + pindices.push_back(registerParameter(fraction)); + GET_FUNCTION_ADDR(ptr_to_ReactorSpectrum); + pindices.push_back(registerConstants(14));/*6*/ + fptype toCopy[12]; + for(int i = 0;i<12;++i) toCopy[i] = coefficients[i]; + MEMCPY_TO_SYMBOL(functorConstants, toCopy, 12*sizeof(fptype), cIndex*sizeof(fptype), cudaMemcpyHostToDevice); + MEMCPY_TO_SYMBOL(functorConstants, &power, sizeof(fptype), (cIndex+12)*sizeof(fptype), cudaMemcpyHostToDevice); + distance*=100000; // km to cm + MEMCPY_TO_SYMBOL(functorConstants, &distance, sizeof(fptype), (cIndex+13)*sizeof(fptype), cudaMemcpyHostToDevice); + initialise(pindices); +} +__host__ fptype ReactorSpectrumPdf::normalise () const { + host_normalisation[parameters] = 1.0; + return 1; +} diff --git a/cuda_code/ReduceMax_1.cu b/cuda_code/ReduceMax_1.cu new file mode 100644 index 
0000000000000000000000000000000000000000..ecbc5ed53f0bbb6be1d531e8e1176b939e9d652c --- /dev/null +++ b/cuda_code/ReduceMax_1.cu @@ -0,0 +1,727 @@ +/* NiuTrans.Tensor - an open-source tensor library + * Copyright (C) 2017, Natural Language Processing Lab, Northestern University. + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24 +*/ + +#include "../../XDevice.h" +#include "../../XTensor.h" +#include "../../XUtility.h" +#include "ReduceMax.h" +#include "ReduceMax.cuh" + +namespace nts{ // namespace nts(NiuTrans.Tensor) + +#ifdef USE_CUDA + + +/* +use PTX code to reduce float data +*/ +#define SHLFUNCFLOAT(funcName, reducePTXOp) \ +__device__ __forceinline__ \ +float funcName(float input) \ +{ \ + float output; \ + asm volatile( \ + "{" \ + ".reg .f32 r0;" \ + ".reg .pred p;" \ + "shfl.sync.down.b32 r0, %1, 0x10, 0x1f,0xffffffff;" \ + "setp."#reducePTXOp".f32 p,%1,r0;" \ + "@p mov.f32 %1,r0;" \ + "shfl.sync.down.b32 r0, %1, 0x8, 0xf,0xffffffff;" \ + "setp."#reducePTXOp".f32 p,%1,r0;" \ + "@p mov.f32 %1,r0;" \ + "shfl.sync.down.b32 r0, %1, 0x4, 0x7,0xffffffff;" \ + "setp."#reducePTXOp".f32 p,%1,r0;" \ + "@p mov.f32 %1,r0;" \ + "shfl.sync.down.b32 r0, %1, 0x2, 0x3,0xffffffff;" \ + "setp."#reducePTXOp".f32 p,%1,r0;" \ + "@p mov.f32 %1,r0;" \ + "shfl.sync.down.b32 r0, %1, 0x1, 0x1,0xffffffff;" \ + "setp."#reducePTXOp".f32 p, %1, r0; " \ + "@p mov.f32 %1,r0;" \ + "mov.f32 %0,%1;" \ + "}" \ + : "=f"(output) : "f"(input)); \ + return output; \ +} + +SHLFUNCFLOAT(shflDownReduceMax, lt) +SHLFUNCFLOAT(shflDownReduceMin, gt) + +/* +use PTX code to reduce int data +*/ +#define SHLFUNCINT(funcName, reducePTXOp) \ +__device__ __forceinline__ \ +int funcName(int input) \ +{ \ + int output; \ + asm volatile( \ + "{" \ + ".reg .s32 r0;" \ + ".reg .pred p;" \ + "shfl.sync.down.b32 r0, %1, 0x10, 0x1f,0xffffffff;" \ + "setp."#reducePTXOp".s32 p,%1,r0;" \ + "@p mov.s32 %1,r0;" \ + "shfl.sync.down.b32 r0, %1, 0x8, 0xf,0xffffffff;" \ + "setp."#reducePTXOp".s32 p,%1,r0;" \ + "@p mov.s32 %1,r0;" \ + "shfl.sync.down.b32 r0, %1, 0x4, 0x7,0xffffffff;" \ + "setp."#reducePTXOp".s32 p,%1,r0;" \ + "@p mov.s32 %1,r0;" \ + "shfl.sync.down.b32 r0, %1, 0x2, 0x3,0xffffffff;" \ + "setp."#reducePTXOp".s32 p,%1,r0;" \ + "@p mov.s32 %1,r0;" \ + "shfl.sync.down.b32 r0, %1, 0x1, 0x1,0xffffffff;" \ + "setp."#reducePTXOp".s32 p, %1, r0; " \ + "@p mov.s32 %1,r0;" \ + "mov.s32 %0,%1;" \ + "}" \ + : "=r"(output) : "r"(input)); \ + return output; \ +} + +SHLFUNCINT(shflDownReduceMax, lt) +SHLFUNCINT(shflDownReduceMin, gt) + +/* +reduce a tensor to another that keeps the max value along a dimension - slow version +Given a block of data, we go over each dimension i in the stride and we have +sum_i = max_{0<=j> input - the input array (representing a tensor) +>> output - the sum over each block. 
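   The inline PTX reductions above (shflDownReduceMax / shflDownReduceMin) do the
   same job as the warp-shuffle intrinsics; a minimal sketch of the float "max"
   variant written with __shfl_down_sync, using an illustrative name, looks like:

   __device__ __forceinline__ float warpReduceMaxSketch(float v)
   {
       // fold the upper half of the warp onto the lower half: 32 -> 16 -> ... -> 1
       for (int offset = 16; offset > 0; offset >>= 1)
           v = fmaxf(v, __shfl_down_sync(0xffffffff, v, offset));
       return v;   // lane 0 ends up holding the maximum of the whole warp
   }

   The fast kernels further below combine this warp-level step with shared memory
   to reduce across warps; the reduced values always land in output.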
NOTE: output is also an array +>> stride - stride that we need to move to the next item +>> strideNum - how many strides we need to finish the reduce +>> reducedStrideNum - the number of strides after reducation +>> blockSize - size of the block (i.e., stride * strideNum) +>> blockNum - how many blocks +*/ +#define KERNELREDUCEFUN3(funName, opName, initData) \ + __global__ \ +void funName(DTYPE * input, DTYPE * output, \ + int stride, int strideNum, int reducedStrideNum, \ + int blockSize, int blockNum) \ +{ \ + __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; \ + \ + int idx = threadIdx.x * blockDim.y + threadIdx.y; \ + unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; \ + unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; \ + \ + if(i >= stride * blockNum) \ + return; \ + \ + __syncthreads(); \ + \ + int k = i / stride; \ + int iOffset = i % stride; \ + \ + DTYPE value = (i < stride * blockNum && j < strideNum) ? \ + input[blockSize * k + stride * j + iOffset] : initData; \ + \ + /* load data into the shared mem */ \ + iData[threadIdx.x * blockDim.y + threadIdx.y] = value; \ + \ + __syncthreads(); \ + \ + /* do reduction in shared mem */ \ + for (unsigned int s = blockDim.y/2; s > 0; s >>= 1){ \ + if(threadIdx.y < s){ \ + iData[idx] = opName(iData[idx + s], iData[idx]); \ + } \ + \ + __syncthreads(); \ + } \ + \ + /* write result for this block to the output array */ \ + if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) \ + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = iData[threadIdx.x * blockDim.y]; \ + \ +} + +KERNELREDUCEFUN3(KernelReduceMax, MAX, FLOAT_MIN) +KERNELREDUCEFUN3(KernelReduceMin, MIN, MAX_FLOAT) + +/* +reduce a tensor to another that keeps the max value along a dimension - slow version +Given a block of data, we go over each dimension i in the stride and we have +sum_i = max_{0<=j> input - the input array (representing a tensor) +>> output - the sum over each block. NOTE: output is also an array +>> stride - stride that we need to move to the next item +>> strideNum - how many strides we need to finish the reduce +>> reducedStrideNum - the number of strides after reducation +>> blockSize - size of the block (i.e., stride * strideNum) +>> blockNum - how many blocks +*/ +__global__ +void KernelReduceMax(__half * input, __half * output, + int stride, int strideNum, int reducedStrideNum, + int blockSize, int blockNum) +{ + int idx = threadIdx.x * blockDim.y + threadIdx.y; + unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; + unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; + + if (i >= stride * blockNum) + return; + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + __shared__ __half iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE / 2]; +#else + __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE / 2]; +#endif + + __syncthreads(); + + int k = i / stride; + int iOffset = i % stride; + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + __half value = (i < stride * blockNum && j < strideNum) ? + input[blockSize * k + stride * j + iOffset] : __half(FLOAT16_MIN); +#else + DTYPE value = (i < stride * blockNum && j < strideNum) ? 
+ __half2float(input[blockSize * k + stride * j + iOffset]) : FLOAT_MIN; +#endif + + /* load data into the shared mem */ + iData[threadIdx.x * blockDim.y + threadIdx.y] = value; + + __syncthreads(); + + /* do reduction in shared mem */ + for (unsigned int s = blockDim.y / 2; s > 0; s >>= 1) { + if (threadIdx.y < s && iData[idx] < iData[idx + s]) { + iData[idx] = iData[idx + s]; + } + + __syncthreads(); + } + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + /* write result for this block to the output array */ + if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = iData[threadIdx.x * blockDim.y]; +#else + /* write result for this block to the output array */ + if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = __half(iData[threadIdx.x * blockDim.y]); +#endif + + } + +/* +reduce a tensor to another that keeps the max value along a dimension - fast version +>> input - the input array (representing a tensor) +>> output - the sum over each block. NOTE: output is also an array +>> stride - stride that we need to move to the next item +>> strideNum - how many strides we need to finish the reduce +>> reducedStrideNum - the number of strides after reducation +>> blockSize - size of the block (i.e., stride * strideNum) +>> blockNum - how many blocks +*/ +#define KERNELREDUCEFUN4(funName, opName, opFuncName, initData) \ +template __global__ \ +void funName(DTYPE * input, DTYPE * output, \ + int stride, int strideNum, int reducedStrideNum, \ + int blockSize, int blockNum) \ +{ \ + __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; \ + \ + unsigned int tid = threadIdx.y; \ + unsigned int j = blockIdx.y * (blockDim.y * 2) + threadIdx.y; \ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; \ + \ + if(i >= stride * blockNum) \ + return; \ + \ + __syncthreads(); \ + \ + /* first level reduction */ \ + int k = i / stride; \ + int iOffset = i % stride; \ + \ + DTYPE * data = iData + threadIdx.x * blockDim.y; \ + DTYPE * inputData = input + k * blockSize; \ + DTYPE value = j < strideNum ? inputData[j * stride + iOffset] : initData; \ + DTYPE value2 = j + blockDim.y < strideNum ? inputData[(j + blockDim.y) * stride + iOffset]: initData; \ + \ + value = opName(value, value2); \ + value = opFuncName(value); \ + if ((tid & 0x1f) == 0) \ + data[tid / 32] = value; \ + __syncthreads(); \ + \ + if (tid < 32) { \ + if (tid < blockDim.y / 32) \ + value = data[tid]; \ + else \ + value = initData; \ + value = opFuncName(value); \ + if (tid == 0 && blockIdx.y < reducedStrideNum) \ + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = value; \ + } \ +} + +KERNELREDUCEFUN4(KernelReduceMaxFast, MAX, shflDownReduceMax, FLOAT_MIN) +KERNELREDUCEFUN4(KernelReduceMinFast, MIN, shflDownReduceMin, MAX_FLOAT) + +/* +reduce a tensor to another that keeps the max value along a dimension - fast version +>> input - the input array (representing a tensor) +>> output - the sum over each block. 
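   One detail of the fast variants that is easy to miss: the grid is laid out so
   that each thread covers two elements of the reduced dimension, blockDim.y
   apart, and the first comparison is folded into the load itself, which halves
   the number of in-block reduction steps. In sketch form, using the same names
   as the kernels here:

       unsigned int j = blockIdx.y * (blockDim.y * 2) + threadIdx.y;
       value  = (j              < strideNum) ? inputData[ j               * stride + iOffset] : FLOAT16_MIN;
       value2 = (j + blockDim.y < strideNum) ? inputData[(j + blockDim.y) * stride + iOffset] : FLOAT16_MIN;
       data[tid] = MAX(value, value2);   // one level of the tree is done during the load

   After this first step the per-block maximum is reduced as usual and written to
   output.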
NOTE: output is also an array +>> stride - stride that we need to move to the next item +>> strideNum - how many strides we need to finish the reduce +>> reducedStrideNum - the number of strides after reducation +>> blockSize - size of the block (i.e., stride * strideNum) +>> blockNum - how many blocks +*/ +template __global__ +void KernelReduceMaxFast(__half * input, __half * output, + int stride, int strideNum, int reducedStrideNum, + int blockSize, int blockNum) +{ + unsigned int tid = threadIdx.y; + unsigned int j = blockIdx.y * (blockDim.y * 2) + threadIdx.y; + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if (i >= stride * blockNum) + return; + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + __shared__ __half iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; +#else + __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; +#endif + + __syncthreads(); + + /* first level reduction */ + int k = i / stride; + int iOffset = i % stride; + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + __half * data = iData + threadIdx.x * blockDim.y; + __half * inputData = input + k * blockSize; + __half value = j < strideNum ? inputData[j * stride + iOffset] : __half(FLOAT16_MIN); + __half value2 = j + blockDim.y < strideNum ? inputData[(j + blockDim.y) * stride + iOffset] : __half(FLOAT16_MIN); +#else + DTYPE * data = iData + threadIdx.x * blockDim.y; + __half * inputData = input + k * blockSize; + DTYPE value = j < strideNum ? __half2float(inputData[j * stride + iOffset]) : FLOAT_MIN; + DTYPE value2 = j + blockDim.y < strideNum ? __half2float(inputData[(j + blockDim.y) * stride + iOffset]) : FLOAT_MIN; +#endif + + /* load data into the shared mem */ + data[tid] = MAX(value, value2); + + __syncthreads(); + + /* unroll the warp */ + + if (goodSize >= 512) { if (tid < 256) { if (data[tid] < data[tid + 256]) data[tid] = data[tid + 256]; } __syncthreads(); } + if (goodSize >= 256) { if (tid < 128) { if (data[tid] < data[tid + 128]) data[tid] = data[tid + 128]; } __syncthreads(); } + if (goodSize >= 128) { if (tid < 64) { if (data[tid] < data[tid + 64]) data[tid] = data[tid + 64]; } __syncthreads(); } + if (goodSize >= 64) { if (tid < 32) { if (data[tid] < data[tid + 32]) data[tid] = data[tid + 32]; } __syncthreads(); } + if (goodSize >= 32) { if (tid < 16) { if (data[tid] < data[tid + 16]) data[tid] = data[tid + 16]; } __syncthreads(); } + if (goodSize >= 16) { if (tid < 8) { if (data[tid] < data[tid + 8]) data[tid] = data[tid + 8]; } __syncthreads(); } + if (goodSize >= 8) { if (tid < 4) { if (data[tid] < data[tid + 4]) data[tid] = data[tid + 4]; } __syncthreads(); } + if (goodSize >= 4) { if (tid < 2) { if (data[tid] < data[tid + 2]) data[tid] = data[tid + 2]; } __syncthreads(); } + if (goodSize >= 2) { if (tid < 1) { if (data[tid] < data[tid + 1]) data[tid] = data[tid + 1]; } __syncthreads(); } + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + /* write result for this block to the output array */ + if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = data[0]; +#else + /* write result for this block to the output array */ + if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = __float2half(data[0]); +#endif +} + +/* +reduce a tensor to another that keeps the max value along a dimension - simple and fast version +*/ +__global__ +void KernelReduceMaxSimpleFast(DTYPE * input, DTYPE * output, + int stride, int strideNum, int blockSize, int blockNum) +{ 
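    /* Simple version: one thread produces one reduced element.  The thread walks
       down its column in steps of `stride` while keeping a running maximum, and
       the walk is unrolled four elements at a time whenever strideNum is a
       multiple of 4. */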
+ unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i >= stride) + return; + + int blockIndex = i / blockSize; + int offset = i % blockSize; + + DTYPE * ip = input + blockIndex * blockSize + offset; + DTYPE * op = output + blockIndex * stride + offset; + + DTYPE max = DTYPE_MIN; + if(strideNum % 4 == 0){ + int stride2 = stride + stride; + int stride3 = stride2 + stride; + int stride4 = stride3 + stride; + for(int k = 0; k < blockSize; k += stride4){ + DTYPE m = MAX(MAX(ip[k], ip[k + stride]), MAX(ip[k + stride2], ip[k + stride3])); + max = MAX(max, m); + } + } + else{ + for (int k = 0; k < blockSize; k += stride) + max = MAX(max, ip[k]); + } + + __syncthreads(); + + op[offset] = max; +} + +/* +according the GPU's sm number allocation warp num +*/ +inline void continuousStorageThreadAllocation(dim3& grid, dim3& block, long long vectorNum, int vectorSize) +{ + int warpNum = 4; + if (vectorNum < 20 * 8){ + warpNum = 8; + if (vectorNum < 20 * 4){ + warpNum = 16; + if (warpNum < 20 * 2) + warpNum = 32; + } + } + int minWarpNum = vectorSize / 32; + if (vectorSize % 32 != 0) minWarpNum++; + warpNum = min(warpNum, minWarpNum); + + grid.x = (unsigned int)vectorNum; + grid.y = 1; + grid.z = 1; + block.x = 1; + block.y = warpNum * 32; + block.z = 1; +} + +/* +adjust threads.x number then we can use warp optimization +*/ +inline void adjustThreadForUseWarpOptimization(dim3& blocks, dim3& threads) +{ + if (threads.x > 1) { + blocks.x *= threads.x; + threads.x = 1; + } + if (threads.y < 32) + threads.y = 32; +} + +/* +In some case,we use less block to imporve efficiency +*/ +#define KERNELREDUCEFUN2(funName, opName, opFuncName, initData) \ +__global__ \ +void funName(DTYPE * input, DTYPE * output, int strideNum, int blockNum) \ +{ \ + int idx = threadIdx.x % 32; \ + int idy = (blockIdx.x * blockDim.x + threadIdx.x) / 32; \ + \ + int startIndex = idy * strideNum; \ + DTYPE threadMax = initData; \ + for (int i = idx; i < strideNum; i += 32) { \ + threadMax = opName(input[startIndex + i], threadMax); \ + } \ + threadMax = opFuncName(threadMax); \ + if (idx == 0) \ + output[idy] = threadMax; \ +} + +KERNELREDUCEFUN2(KernelReduceMaxOpLessBlocks, MAX, shflDownReduceMax, FLOAT_MIN) +KERNELREDUCEFUN2(KernelReduceMinOpLessBlocks, MIN, shflDownReduceMin, MAX_FLOAT) + + +/* +we use PTX code reduce +*/ +#define KERNELREDUCEFUN1(funName, opName, opFuncName, initData) \ +__global__ \ +void funName(DTYPE * input, DTYPE * output,int stride, int strideNum, \ + int reducedStrideNum,int blockSize, int blockNum) \ +{ \ + __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK / 32]; \ + \ + unsigned int tid = threadIdx.y; \ + unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; \ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; \ + if (i >= stride * blockNum) \ + return; \ + \ + /* first level reduction */ \ + int k = i / stride; \ + int iOffset = i % stride; \ + \ + DTYPE threadMax = initData; \ + \ + DTYPE * data = iData + threadIdx.x * blockDim.y; \ + DTYPE * inputData = input + k * blockSize; \ + for (int it = j; it < strideNum; it += blockDim.y){ \ + threadMax = opName(inputData[it * stride + iOffset], threadMax); \ + } \ + \ + __syncthreads(); \ + threadMax = opFuncName(threadMax); \ + if ((tid & 0x1f) == 0) \ + data[tid / 32] = threadMax; \ + \ + __syncthreads(); \ + /* use one warp to reduce remaining data */ \ + if (tid < 32){ \ + if (tid < blockDim.y / 32) \ + threadMax = data[tid]; \ + else threadMax = initData; \ + threadMax = opFuncName(threadMax); \ + if (tid == 0 && blockIdx.y < 
reducedStrideNum) \ + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = threadMax; \ + } \ +} + +KERNELREDUCEFUN1(KernelReduceMaxOp, MAX, shflDownReduceMax, FLOAT_MIN) +KERNELREDUCEFUN1(KernelReduceMinOp, MIN, shflDownReduceMin, MAX_FLOAT) + +/* +get the max-valued items along a dimension of the tensor (cuda version). +For a 1-dimensional data array a, +sum_i = max_{0<=j> input - the input tensor +>> output - the output tensor +>> dim - which dimension to reduce +*/ +#define _CUDAREDUCE(_funcName, _reduceFunc1, _reduceFunc2, _reduceFunc3, _reduceFun4) \ +void _funcName(const XTensor * input, XTensor * output, int dim) \ +{ \ + CheckNTErrors(input && output, "Empty input or output tensors!"); \ + CheckNTErrors(input->order == output->order + 1, "Incorrect tensor sizes!"); \ + CheckNTErrors(input->order > dim && dim >=0, "Illegal dimension to reduce!"); \ + CheckNTErrors(input->dataType == output->dataType, "Unmatched data types!"); \ + \ + for(int i = 0; i < input->order; i++){ \ + if(i < dim){ \ + CheckNTErrors(input->dimSize[i] == output->dimSize[i], "Unmatched tensors!"); \ + } \ + else if(i > dim){ \ + CheckNTErrors(input->dimSize[i] == output->dimSize[i - 1], "Unmatched tensors!"); \ + } \ + } \ + \ + int cudaGridSize[3]; \ + int cudaBlockSize[3]; \ + int iter = 0; \ + int stride = 1; \ + int strideNum = input->dimSize[dim]; \ + int blockSize = 1; \ + int blockNum = 1; \ + \ + for (int i = 0; i < input->order; i++) { \ + if (i < dim) \ + blockNum *= input->dimSize[i]; \ + else if (i > dim) \ + stride *= input->dimSize[i]; \ + } \ + blockSize = stride * strideNum; \ + \ + int devID = input->devID; \ + \ + \ + int devIDBackup; \ + ProtectCudaDev(input->devID, devIDBackup); \ + \ + if (stride == 1 && blockNum >= 10) { \ + dim3 grids; \ + dim3 blocks; \ + continuousStorageThreadAllocation(grids, blocks, (long long)blockNum, strideNum);printf("%d %d %d %d\n", grids.x, grids.y, blocks.x, blocks.y); \ + if (blocks.y >= 128) { \ + _reduceFunc1 <<>> ((DTYPE *)input->data, (DTYPE*)output->data, stride, strideNum, grids.y, blockSize, blockNum); \ + } \ + else { \ + if (blockNum % 4 != 0) blockNum = (int)(blockNum / 4) + 1; \ + else blockNum = blockNum / 4; \ + _reduceFunc2 <<>> ((DTYPE *)input->data, (DTYPE*)output->data, strideNum, blockNum); \ + } \ + } \ + else { \ + XMem * mem = input->mem; \ + GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + int bufSize = input->unitSize * cudaGridSize[0] * stride * blockNum * 2; \ + DTYPE * buf = mem != NULL ? (DTYPE*)mem->AllocBuf(mem->devID, bufSize) : (DTYPE*)XMemAlloc(devID, bufSize); \ + DTYPE * buf1 = buf; \ + DTYPE * buf2 = buf + cudaGridSize[0] * stride * blockNum; \ + do { \ + if (input->dataType == DEFAULT_DTYPE) { \ + DTYPE * iData = NULL; \ + DTYPE * oData = NULL; \ + if (iter == 0) { \ + iData = (DTYPE*)input->data; \ + oData = buf1; \ + } \ + else if (iter % 2 == 1) { \ + iData = buf1; \ + oData = buf2; \ + } \ + else { \ + iData = buf2; \ + oData = buf1; \ + } \ + \ + /* unroll the reduction procedure. The code is messy but it is faster. 
*/ \ + if (strideNum < 32) { \ + GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \ + if (cudaGridSize[0] == 1) \ + oData = (DTYPE*)output->data; \ + _reduceFunc3 <<>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \ + } \ + else if (strideNum < 128) { \ + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 64), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \ + if (cudaGridSize[0] == 1) \ + oData = (DTYPE*)output->data; \ + CheckNTErrors(cudaBlockSize[0] >= 64, "Incorrect thread number when calling the cuda kernel!"); \ + adjustThreadForUseWarpOptimization(blocks, threads); \ + _reduceFun4<64> <<>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \ + } \ + else if (strideNum < 256) { \ + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 128), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \ + if (cudaGridSize[0] == 1) \ + oData = (DTYPE*)output->data; \ + CheckNTErrors(cudaBlockSize[0] >= 128, "Incorrect thread number when calling the cuda kernel!"); \ + adjustThreadForUseWarpOptimization(blocks, threads); \ + _reduceFun4<128> <<>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \ + } \ + else if (strideNum < 512) { \ + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 256), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \ + if (cudaGridSize[0] == 1) \ + oData = (DTYPE*)output->data; \ + CheckNTErrors(cudaBlockSize[0] >= 256, "Incorrect thread number when calling the cuda kernel!"); \ + adjustThreadForUseWarpOptimization(blocks, threads); \ + _reduceFun4<256> <<>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \ + } \ + else { \ + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 512), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \ + if (cudaGridSize[0] == 1) \ + oData = (DTYPE*)output->data; \ + CheckNTErrors(cudaBlockSize[0] >= 512, "Incorrect thread number when calling the cuda kernel!"); \ + adjustThreadForUseWarpOptimization(blocks, threads); \ + _reduceFun4<512> <<>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \ + } \ + } \ + else if (input->dataType == X_FLOAT16) { \ + __half * buf1ft16 = (__half *)buf1; \ + __half * buf2ft16 = (__half *)buf2; \ + __half * iData = NULL; \ + __half * oData = NULL; \ + if (iter == 0) { \ + iData = (__half*)input->data; \ + oData = buf1ft16; \ + } \ + else if (iter % 2 == 1) { \ + iData = buf1ft16; \ + oData = buf2ft16; \ + } \ + else { \ + iData = buf2ft16; \ + oData = buf1ft16; \ + } \ + \ + /* unroll the reduction procedure. The code is messy but it is faster. 
*/ \ + if (strideNum < 32) { \ + GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \ + if (cudaGridSize[0] == 1) \ + oData = (__half*)output->data; \ + KernelReduceMax <<>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \ + } \ + else if (strideNum < 128) { \ + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 64), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \ + if (cudaGridSize[0] == 1) \ + oData = (__half*)output->data; \ + CheckNTErrors(cudaBlockSize[0] >= 64, "Incorrect thread number when calling the cuda kernel!"); \ + KernelReduceMaxFast<64> <<>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \ + } \ + else if (strideNum < 256) { \ + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 128), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \ + if (cudaGridSize[0] == 1) \ + oData = (__half*)output->data; \ + CheckNTErrors(cudaBlockSize[0] >= 128, "Incorrect thread number when calling the cuda kernel!"); \ + KernelReduceMaxFast<128> <<>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \ + } \ + else if (strideNum < 512) { \ + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 256), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \ + if (cudaGridSize[0] == 1) \ + oData = (__half*)output->data; \ + CheckNTErrors(cudaBlockSize[0] >= 256, "Incorrect thread number when calling the cuda kernel!"); \ + KernelReduceMaxFast<256> <<>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \ + } \ + else { \ + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 512), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \ + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \ + if (cudaGridSize[0] == 1) \ + oData = (__half*)output->data; \ + CheckNTErrors(cudaBlockSize[0] >= 512, "Incorrect thread number when calling the cuda kernel!"); \ + KernelReduceMaxFast<512> <<>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \ + } \ + } \ + \ + strideNum = cudaGridSize[0]; \ + blockSize = cudaGridSize[0]; \ + \ + iter++; \ + \ + } while (strideNum > 1); \ + \ + \ + \ + if (mem != NULL) \ + mem->ReleaseBuf(mem->devID, bufSize); \ + else \ + XMemFree(input->devID, buf); \ + } \ + BacktoCudaDev(input->devID, devIDBackup); \ +} + +_CUDAREDUCE(_CudaReduceMax, KernelReduceMaxOp, KernelReduceMaxOpLessBlocks, KernelReduceMax, KernelReduceMaxFast) +_CUDAREDUCE(_CudaReduceMin, KernelReduceMinOp, KernelReduceMinOpLessBlocks, KernelReduceMin, KernelReduceMinFast) + + +#endif // USE_CUDA + +} // namespace nts(NiuTrans.Tensor) \ No newline at end of file diff --git a/cuda_code/ReduceNormKernel_2.cu b/cuda_code/ReduceNormKernel_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..15a747f2a86485319d340079ad3da7d888f53d7e --- /dev/null +++ b/cuda_code/ReduceNormKernel_2.cu @@ -0,0 +1,51 @@ +#define TORCH_ASSERT_NO_OPERATORS +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +// This reduction accumulates results as the type `acc_t`. 
By default, when +// `scalar_t` is complex, `acc_t` is the downgraded real number type. +// Otherwise, `acc_t` and `scalar_t` are the same type. +template ::type, typename out_t=typename scalar_value_type::type> +void norm_kernel_cuda_impl(TensorIterator& iter, double p) { + if (p == static_cast(0)) { + gpu_reduce_kernel(iter, NormZeroOps(), 0); + } else if (p == static_cast(1)) { + gpu_reduce_kernel(iter, NormOneOps(), 0); + } else if (p == static_cast(2)) { + gpu_reduce_kernel(iter, NormTwoOps(), 0); + } else if (p == static_cast(INFINITY)) { + gpu_reduce_kernel(iter, AbsMaxOps(), 0); + } else if (p == static_cast(-INFINITY)) { + gpu_reduce_kernel(iter, AbsMinOps(), std::numeric_limits::infinity()); + } else { + gpu_reduce_kernel(iter, NormOps{ acc_t(p) }, 0); + } +} + +void norm_launch_kernel(TensorIterator& iter, double ord) { + if (iter.dtype(0) == kHalf) { + return norm_kernel_cuda_impl(iter, ord); + } else if (iter.input_dtype() == kHalf && iter.dtype(0) == kFloat) { + // type promotion that does cast and reduction in a single kernel + return norm_kernel_cuda_impl(iter, ord); + } + else if(iter.dtype(0) == kBFloat16) { + return norm_kernel_cuda_impl(iter, ord); + } else if (iter.input_dtype() == kBFloat16 && iter.dtype(0) == kFloat) { + // type promotion that does cast and reduction in a single kernel + return norm_kernel_cuda_impl(iter, ord); + } + AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.input_dtype(), "norm_cuda", [&] { + norm_kernel_cuda_impl(iter, ord); + }); +} + +}} // namespace at::native diff --git a/cuda_code/ReduceSumProdKernel_3.cu b/cuda_code/ReduceSumProdKernel_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..6506d6d005127f4c60b1c8d0e43b53061e929a09 --- /dev/null +++ b/cuda_code/ReduceSumProdKernel_3.cu @@ -0,0 +1,67 @@ +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +template +void sum_kernel_impl(TensorIterator& iter) { + gpu_reduce_kernel(iter, func_wrapper ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t { + return a + b; + })); +} + +template +void prod_kernel_impl(TensorIterator& iter) { + gpu_reduce_kernel(iter, func_wrapper ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t { + return a * b; + }), 1); +} + +static void sum_kernel_cuda(TensorIterator& iter) { + if (iter.dtype() == kHalf) { + return sum_kernel_impl(iter); + } else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) { + // type promotion that does cast and reduction in a single kernel + return sum_kernel_impl(iter); + } + #ifdef __HIP_PLATFORM_HCC__ + else if (iter.dtype() == kBFloat16) { + return sum_kernel_impl(iter); + } else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) { + // type promotion that does cast and reduction in a single kernel + return sum_kernel_impl(iter); + } + #endif + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(ScalarType::Bool, iter.dtype(), "sum_cuda", [&]() { + sum_kernel_impl(iter); + }); +} + +static void prod_kernel_cuda(TensorIterator& iter) { + if (iter.dtype() == kHalf) { + return prod_kernel_impl(iter); + } else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) { + // type promotion that does cast and reduction in a single kernel + return prod_kernel_impl(iter); + } + #ifdef __HIP_PLATFORM_HCC__ + else if (iter.dtype() == kBFloat16) { + return prod_kernel_impl(iter); + } else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) { + // type promotion that does cast and reduction in a single kernel + return prod_kernel_impl(iter); + } + #endif + AT_DISPATCH_ALL_TYPES(iter.dtype(), 
"prod_cuda", [&]() { + prod_kernel_impl(iter); + }); +} + +REGISTER_DISPATCH(sum_stub, &sum_kernel_cuda); +REGISTER_DISPATCH(prod_stub, &prod_kernel_cuda); + +}} // namespace at::native diff --git a/cuda_code/ReduceSum_1.cu b/cuda_code/ReduceSum_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..eaa8ae3a7a520b292dc9978d2eb36d1dc55b89c6 --- /dev/null +++ b/cuda_code/ReduceSum_1.cu @@ -0,0 +1,919 @@ +/* NiuTrans.Tensor - an open-source tensor library + * Copyright (C) 2017, Natural Language Processing Lab, Northestern University. + * All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24 +*/ + +#include "../../XDevice.h" +#include "../../XUtility.h" +#include "ReduceSum.cuh" + +namespace nts{ // namespace nts(NiuTrans.Tensor) + +#ifdef USE_CUDA + +/* +use PTX code to reduce float data +*/ +__device__ __forceinline__ +float shflDownReduceSum(float input) +{ + float output; + asm volatile( + "{" + ".reg .f32 r0;" + "shfl.sync.down.b32 r0, %1, 0x10, 0x1f,0xffffffff;" + "add.f32 %1, r0, %1;" + "shfl.sync.down.b32 r0, %1, 0x8, 0xf,0xffffffff;" + "add.f32 %1, r0, %1;" + "shfl.sync.down.b32 r0, %1, 0x4, 0x7,0xffffffff;" + "add.f32 %1, r0, %1;" + "shfl.sync.down.b32 r0, %1, 0x2, 0x3,0xffffffff;" + "add.f32 %1, r0, %1;" + "shfl.sync.down.b32 r0, %1, 0x1, 0x1,0xffffffff;" + "add.f32 %0, r0, %1;" + "}" + : "=f"(output) : "f"(input)); + return output; +} + +/* +use PTX code to reduce int data +*/ +__device__ __forceinline__ +int shflDownReduceSum(int input) +{ + int output; + asm volatile( + "{" + ".reg .s32 r0;" + "shfl.sync.down.b32 r0, %1, 0x10, 0x1f,0xffffffff;" + "add.s32 %1, r0, %1;" + "shfl.sync.down.b32 r0, %1, 0x8, 0xf,0xffffffff;" + "add.s32 %1, r0, %1;" + "shfl.sync.down.b32 r0, %1, 0x4, 0x7,0xffffffff;" + "add.s32 %1, r0, %1;" + "shfl.sync.down.b32 r0, %1, 0x2, 0x3,0xffffffff;" + "add.s32 %1, r0, %1;" + "shfl.sync.down.b32 r0, %1, 0x1, 0x1,0xffffffff;" + "add.s32 %0, r0, %1;" + "}" + : "=r"(output) : "r"(input)); + return output; +} + + +/* +reduce a tensor to another that keeps the sum along a dimension - slow version +Given a block of data, we go over each dimension i in the stride and we have +sum_i = sum_{0<=j> input - the input array (representing a tensor) +>> output - the sum over each block. 
NOTE: output is also an array +>> stride - stride that we need to move to the next item +>> strideNum - how many strides we need to finish the reduce +>> reducedStrideNum - the number of strides after reducation +>> blockSize - size of the block (i.e., stride * strideNum) +>> blockNum - how many blocks +>> shift - the bias imposed on the input +>> power - power of the item in the array +>> isExp - specify if we perform exp() on the input +*/ + __global__ +void KernelReduceSum(DTYPE * input, DTYPE * output, + int stride, int strideNum, int reducedStrideNum, + int blockSize, int blockNum, + DTYPE * shift, DTYPE power, bool isExp) +{ + __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; + __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + + int idx = threadIdx.y * blockDim.x + threadIdx.x; + unsigned int i = blockIdx.y*blockDim.y + threadIdx.y; + unsigned int j = blockIdx.x*blockDim.x + threadIdx.x; + + if(i >= stride * blockNum) + return; + + if(threadIdx.x == 0) + bias[threadIdx.y] = shift != NULL ? shift[i] : 0; + + __syncthreads(); + + int k = i / stride; + int iOffset = i % stride; + bool isValid = (i < stride * blockNum && j < strideNum); + + DTYPE value = isValid ? input[blockSize * k + stride * j + iOffset] - bias[threadIdx.y] : 0; + + if(power != (DTYPE)1.0){ + if(power == (DTYPE)2.0) + value = value * value; + else if(power == (DTYPE)0.5) + value = sqrt(value); + else + value = pow(value, power); + } + + if(isExp && isValid) + value = exp(value); + + /* load data into the shared mem */ + iData[threadIdx.y * blockDim.x + threadIdx.x] = value; + + __syncthreads(); + + /* do reduction in shared mem */ + for (unsigned int s = blockDim.x/2; s > 0; s >>= 1){ + if (threadIdx.x < s) + iData[idx] += iData[idx + s]; + + __syncthreads(); + } + /* write result for this block to the output array */ + if (threadIdx.x == 0 && blockIdx.x < reducedStrideNum) + output[(k * reducedStrideNum + blockIdx.x) * stride + iOffset] = iData[threadIdx.y * blockDim.x]; +} + + /* +reduce a tensor to another that keeps the sum along a dimension - slow version +This is for float16 reduction. +Given a block of data, we go over each dimension i in the stride and we have +sum_i = sum_{0<=j> input - the input array (representing a tensor) +>> output - the sum over each block. NOTE: output is also an array +>> stride - stride that we need to move to the next item +>> strideNum - how many strides we need to finish the reduce +>> reducedStrideNum - the number of strides after reducation +>> blockSize - size of the block (i.e., stride * strideNum) +>> blockNum - how many blocks +>> shift - the bias imposed on the input +>> power - power of the item in the array +>> isExp - specify if we perform exp() on the input +*/ + __global__ +void KernelReduceSum(__half * input, __half * output, + int stride, int strideNum, int reducedStrideNum, + int blockSize, int blockNum, + __half * shift, __half power, bool isExp) +{ + int idx = threadIdx.x * blockDim.y + threadIdx.y; + unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; + unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; + + if(i >= stride * blockNum) + return; + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + __shared__ __half iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; + __shared__ __half bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + + if(threadIdx.y == 0) + bias[threadIdx.x] = shift != NULL ? 
shift[i] : __half(0); +#else + __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; + __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + + if(threadIdx.y == 0) + bias[threadIdx.x] = shift != NULL ? __half(shift[i]) : __half(0); +#endif + + __syncthreads(); + + int k = i / stride; + int iOffset = i % stride; + bool isValid = (i < stride * blockNum && j < strideNum); + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + __half value = isValid ? __hsub(input[blockSize * k + stride * j + iOffset], bias[threadIdx.x]) : __half(0); + DTYPE power2 = __half2float(power); + + if(power2 != (DTYPE)1.0){ + if(power2 == (DTYPE)2.0) + value = __hmul(value, value); + else if(power2 == (DTYPE)0.5) + value = hsqrt(value); + } + + if(isExp && isValid) + value = hexp(value); +#else + DTYPE value = isValid ? __half2float(input[blockSize * k + stride * j + iOffset]) - __half2float(bias[threadIdx.x]) : 0; + DTYPE power2 = __half2float(power); + + if(power2 != (DTYPE)1.0){ + if(power2 == (DTYPE)2.0) + value = value * value; + else if(power2 == (DTYPE)0.5) + value = sqrt(value); + else + value = pow(value, power2); + } + + if(isExp && isValid) + value = exp(value); +#endif + + /* load data into the shared mem */ + iData[threadIdx.x * blockDim.y + threadIdx.y] = value; + + __syncthreads(); + + /* do reduction in shared mem */ + for (unsigned int s = blockDim.y/2; s > 0; s >>= 1){ + if (threadIdx.y < s) + iData[idx] += iData[idx + s]; + + __syncthreads(); + } + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + /* write result for this block to the output array */ + if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = iData[threadIdx.x * blockDim.y]; +#else + /* write result for this block to the output array */ + if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = __half(iData[threadIdx.x * blockDim.y]); +#endif + +} + +/* +reduce a tensor to another that keeps the sum along a dimension - fast version +>> input - the input array (representing a tensor) +>> output - the sum over each block. NOTE: output is also an array +>> stride - stride that we need to move to the next item +>> strideNum - how many strides we need to finish the reduce +>> reducedStrideNum - the number of strides after reducation +>> blockSize - size of the block (i.e., stride * strideNum) +>> blockNum - how many blocks +>> shift - the bias imposed on the input +>> power - power of the item in the array +>> isExp - specify if we perform exp() on the input +*/ +template __global__ +void KernelReduceSumFast(DTYPE * input, DTYPE * output, + int stride, int strideNum, int reducedStrideNum, + int blockSize, int blockNum, + DTYPE * shift, DTYPE power, bool isExp) +{ + __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + + unsigned int tid = threadIdx.x; + unsigned int j = blockIdx.x * (blockDim.x * 2) + threadIdx.x; + unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; + + if(i >= stride * blockNum) + return; + + if (threadIdx.x == 0) + bias[threadIdx.y] = shift != NULL ? shift[i] : 0; + + __syncthreads(); + + /* first level reduction */ + int k = i / stride; + int iOffset = i % stride; + + bool isValid = j < strideNum; + bool isValid2 = j + blockDim.x < strideNum; + + DTYPE * data = iData + threadIdx.y * blockDim.x; + DTYPE * inputData = input + k * blockSize; + DTYPE value = isValid ? 
inputData[j * stride + iOffset] - bias[threadIdx.y]: 0; + DTYPE value2 = isValid2 ? inputData[(j + blockDim.x) * stride + iOffset] - bias[threadIdx.y]: 0; + + if(power != (DTYPE)1.0){ + if(power == (DTYPE)2.0){ + value = value * value; + value2 = value2 * value2; + } + else if(power == (DTYPE)0.5){ + value = sqrt(value); + value2 = sqrt(value2); + } + else{ + value = pow(value, power); + value2 = pow(value2, power); + } + } + + if(isExp){ + if(isValid) + value = exp(value); + if(isValid2) + value2 = exp(value2); + } + + value = value + value2; + + __syncthreads(); + + value = shflDownReduceSum(value); + if ((tid & 0x1f) == 0) + data[tid / 32] = value; + + __syncthreads(); + + if (tid < 32){ + if (tid < blockDim.x / 32) + value = data[tid]; + else + value = 0; + value = shflDownReduceSum(value); + + if (tid == 0 && blockIdx.x < reducedStrideNum) { + output[(k * reducedStrideNum + blockIdx.x) * stride + iOffset] = value; + } + } +} + +/* +reduce a tensor to another that keeps the sum along a dimension - fast version +This is for float16 reduction +>> input - the input array (representing a tensor) +>> output - the sum over each block. NOTE: output is also an array +>> stride - stride that we need to move to the next item +>> strideNum - how many strides we need to finish the reduce +>> reducedStrideNum - the number of strides after reducation +>> blockSize - size of the block (i.e., stride * strideNum) +>> blockNum - how many blocks +>> shift - the bias imposed on the input +>> power - power of the item in the array +>> isExp - specify if we perform exp() on the input +*/ +template __global__ +void KernelReduceSumFast(__half * input, __half * output, + int stride, int strideNum, int reducedStrideNum, + int blockSize, int blockNum, + __half * shift, __half power, bool isExp) +{ + unsigned int tid = threadIdx.y; + unsigned int j = blockIdx.y * (blockDim.y * 2) + threadIdx.y; + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i >= stride * blockNum) + return; + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + __shared__ __half iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + __shared__ __half bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + + if(threadIdx.y == 0) + bias[threadIdx.x] = shift != NULL ? shift[i] : __float2half(0); +#else + __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + + if(threadIdx.y == 0) + bias[threadIdx.x] = shift != NULL ? __half2float(shift[i]) : 0; +#endif + + __syncthreads(); + + /* first level reduction */ + int k = i / stride; + int iOffset = i % stride; + bool isValid = j < strideNum; + bool isValid2 = j + blockDim.y < strideNum; + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + __half * data = iData + threadIdx.x * blockDim.y; + __half * inputData = input + k * blockSize; + __half value = isValid ? __hsub(inputData[j * stride + iOffset], bias[threadIdx.x]) : __float2half(0); + __half value2 = isValid2 ? __hsub(inputData[(j + blockDim.y) * stride + iOffset], bias[threadIdx.x]) : __float2half(0); + + DTYPE powerf = __half2float(power); + + if(powerf != (DTYPE)1.0){ + if(powerf == (DTYPE)2.0){ + value = __hmul(value, value); + value2 = __hmul(value2, value2); + } + else if(powerf == (DTYPE)0.5){ + value = hsqrt(value); + value2 = hsqrt(value2); + } + } + + if(isExp){ + if(isValid) + value = hexp(value); + if(isValid2) + value2 = hexp(value2); + } + +#else + DTYPE * data = iData + threadIdx.x * blockDim.y; + __half * inputData = input + k * blockSize; + DTYPE value = isValid ? 
__half2float(inputData[j * stride + iOffset]) - __half2float(bias[threadIdx.x]): 0; + DTYPE value2 = isValid2 ? __half2float(inputData[(j + blockDim.y) * stride + iOffset]) - __half2float(bias[threadIdx.x]): 0; + + DTYPE powerf = __half2float(power); + + if(powerf != (DTYPE)1.0){ + if(powerf == (DTYPE)2.0){ + value = value * value; + value2 = value2 *value2; + } + else if(powerf == (DTYPE)0.5){ + value = sqrt(value); + value2 = sqrt(value2); + } + else{ + value = pow(value, powerf); + value2 = pow(value2, powerf); + } + } + + if(isExp){ + if(isValid) + value = exp(value); + if(isValid2) + value2 = exp(value2); + } +#endif + + /* load data into the shared mem */ + data[tid] = value + value2; + + __syncthreads(); + + /* unroll the warp */ + if(goodSize >= 512) {if(tid < 256) {data[tid] += data[tid + 256];} __syncthreads();} + if(goodSize >= 256) {if(tid < 128) {data[tid] += data[tid + 128];} __syncthreads();} + if(goodSize >= 128) {if(tid < 64) {data[tid] += data[tid + 64];} __syncthreads();} + if(goodSize >= 64) {if(tid < 32) {data[tid] += data[tid + 32];} __syncthreads();} + if(goodSize >= 32) {if(tid < 16) {data[tid] += data[tid + 16];} __syncthreads();} + if(goodSize >= 16) {if(tid < 8) {data[tid] += data[tid + 8];} __syncthreads();} + if(goodSize >= 8) {if(tid < 4) {data[tid] += data[tid + 4];} __syncthreads();} + if(goodSize >= 4) {if(tid < 2) {data[tid] += data[tid + 2];} __syncthreads();} + if(goodSize >= 2) {if(tid < 1) {data[tid] += data[tid + 1];} __syncthreads();} + +#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) + /* write result for this block to the output array */ + if(threadIdx.y == 0 && blockIdx.y < reducedStrideNum) + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = data[0]; +#else + /* write result for this block to the output array */ + if(threadIdx.y == 0 && blockIdx.y < reducedStrideNum) + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = __float2half(data[0]); +#endif +} + +/* +if data storage is discontinuius ,use this way to reduce +*/ +__global__ +void KernelReduceSumDiscontinuousStorage(DTYPE * input, DTYPE * output, int stride, int strideNum, + int blockNum, DTYPE * shift, DTYPE power, bool isExp) +{ + __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int blockIndex = idx / stride; + int offsetInBlock = idx % stride; + if (idx >= stride * blockNum) + return; + bias[idx % blockDim.x] = shift != NULL ? 
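// In the unrolled loop above, goodSize is a compile-time power of two bounding
// the number of reducing threads, so the untaken "if (goodSize >= ...)" steps
// disappear at compile time; the host code later instantiates <64>, <128>,
// <256> or <512> depending on strideNum. A minimal standalone sketch of the
// same unrolled tree reduction (kernel name and data layout are illustrative
// only; it assumes blockDim.x == SIZE and SIZE is a power of two <= 512):
template <unsigned int SIZE>
__global__ void blockSumSketch(const float* in, float* out) {
    __shared__ float s[SIZE];
    unsigned int tid = threadIdx.x;
    s[tid] = in[blockIdx.x * SIZE + tid];
    __syncthreads();
    // halve the active range each step; the tests on SIZE are resolved at compile time
    if (SIZE >= 512) { if (tid < 256) s[tid] += s[tid + 256]; __syncthreads(); }
    if (SIZE >= 256) { if (tid < 128) s[tid] += s[tid + 128]; __syncthreads(); }
    if (SIZE >= 128) { if (tid <  64) s[tid] += s[tid +  64]; __syncthreads(); }
    if (SIZE >=  64) { if (tid <  32) s[tid] += s[tid +  32]; __syncthreads(); }
    if (SIZE >=  32) { if (tid <  16) s[tid] += s[tid +  16]; __syncthreads(); }
    if (SIZE >=  16) { if (tid <   8) s[tid] += s[tid +   8]; __syncthreads(); }
    if (SIZE >=   8) { if (tid <   4) s[tid] += s[tid +   4]; __syncthreads(); }
    if (SIZE >=   4) { if (tid <   2) s[tid] += s[tid +   2]; __syncthreads(); }
    if (SIZE >=   2) { if (tid <   1) s[tid] += s[tid +   1]; __syncthreads(); }
    if (tid == 0) out[blockIdx.x] = s[0];
}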
shift[idx] : 0; + DTYPE ans = 0; + +#pragma unroll + for (int i = stride * strideNum * blockIndex + offsetInBlock; + i < stride * strideNum * blockIndex + offsetInBlock + stride * strideNum; + i += stride){ + DTYPE value = input[i]; + value = value - bias[idx % blockDim.x]; + if (power != (DTYPE)1.0) { + if (power == (DTYPE)2.0) { + value = value * value; + } + else if (power == (DTYPE)0.5) { + value = sqrt(value); + } + else { + value = pow(value, power); + } + } + if (isExp) { + value = exp(value); + } + ans += value; + } + output[idx] = ans; +} + +__global__ +void KernelReduceSumOp(DTYPE * input, DTYPE * output, + int stride, int strideNum, int reducedStrideNum, + int blockSize, int blockNum, + DTYPE * shift, DTYPE power, bool isExp) +{ + __shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK / 32]; + __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + + unsigned int tid = threadIdx.y; + unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= stride * blockNum) + return; + + if (threadIdx.y == 0) + bias[threadIdx.x] = shift != NULL ? shift[i] : 0; + + __syncthreads(); + + /* first level reduction */ + int k = i / stride; + int iOffset = i % stride; + + DTYPE threadSum = 0; + + DTYPE * data = iData + threadIdx.x * blockDim.y; + DTYPE * inputData = input + k * blockSize; + for (int it = j; it < strideNum; it += blockDim.y){ + DTYPE value = inputData[it * stride + iOffset] - bias[threadIdx.x]; + if (power != (DTYPE)1.0) { + if (power == (DTYPE)2.0) { + value = value * value; + } + else if (power == (DTYPE)0.5) { + value = sqrt(value); + } + else { + value = pow(value, power); + } + } + if (isExp) value = exp(value); + threadSum += value; + } + __syncthreads(); + threadSum = shflDownReduceSum(threadSum); + if ((tid & 0x1f) == 0) { data[tid / 32] = threadSum; } + __syncthreads(); + if (tid < 32){ + if (tid < blockDim.y / 32) + threadSum = data[tid]; + else + threadSum = 0; + threadSum = shflDownReduceSum(threadSum); + if (tid == 0 && blockIdx.y < reducedStrideNum) + output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = threadSum; + } + +} + +__global__ +void KernelReduceSumOpLessBlocks(DTYPE * input, DTYPE * output, + int strideNum, int blockNum, + DTYPE * shift, DTYPE power, bool isExp) +{ + __shared__ DTYPE bias[MAX_CUDA_THREAD_NUM_PER_BLOCK]; + int idx = threadIdx.x % 32; + int idy = (blockIdx.x * blockDim.x + threadIdx.x) / 32; + + if (idx == 0) + bias[threadIdx.x / 32] = shift != NULL ? 
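// KernelReduceSumFast and KernelReduceSumOp above fold each warp with
// shflDownReduceSum (defined elsewhere in this code base) before touching
// shared memory, so only one partial sum per warp is stored there. A warp sum
// of that shape typically looks like the sketch below; warpSumSketch is an
// illustration, not the project's actual helper:
__device__ __forceinline__ float warpSumSketch(float v) {
    // each step folds the upper half of the active lanes onto the lower half
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffff, v, offset);
    return v;   // lane 0 ends up holding the warp total
}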
shift[idy] : 0; + + int startIndex = idy * strideNum; + DTYPE threadSum = 0; + for (int i = idx; i < strideNum; i += 32) { + DTYPE value = input[startIndex + i] - bias[threadIdx.x / 32]; + if (power != (DTYPE)1.0) { + if (power == (DTYPE)2.0) { + value = value * value; + } + else if (power == (DTYPE)0.5) { + value = sqrt(value); + } + else { + value = pow(value, power); + } + } + if (isExp) value = exp(value); + threadSum += value; + } + threadSum = shflDownReduceSum(threadSum); + if (idx == 0) + output[idy] = threadSum; +} + +/* +according the GPU's sm number allocation warp num +*/ +inline void continuousStorageThreadAllocation(dim3& grid, dim3& block, long long vectorNum, int vectorSize) +{ + int warpNum = 4; + if (vectorNum < 20 * 8) { + warpNum = 8; + if (vectorNum < 20 * 4) { + warpNum = 16; + if (warpNum < 20 * 2) + warpNum = 32; + } + } + int minWarpNum = vectorSize / 32; + if (vectorSize % 32 != 0) minWarpNum++; + warpNum = min(warpNum, minWarpNum); + + grid.x = (unsigned int)vectorNum; + grid.y = 1; + grid.z = 1; + block.x = 1; + block.y = warpNum * 32; + block.z = 1; +} + +/* +this situation we use block.x * grid.x deal one vector for continuous read +*/ +void discontinuousStorageNoShareMemThreadAllocation(dim3* grid, dim3* block, int stride, int blockNum) +{ + block->x = 512; + block->y = 1; + if ((stride * blockNum) % 512 == 0) + grid->x = (stride * blockNum) / 512; + else + grid->x = (stride * blockNum) / 512 + 1; + grid->y = 1; +} + +/* +adjust threads.x number then we can use warp optimization +*/ +void adjustThreadForUseWarpOptimization(dim3* blocks, dim3* threads) +{ + if (threads->y > 1){ + blocks->y *= threads->y; + threads->y = 1; + } + if (threads->x < 32) + threads->x = 32; +} + +/* +sum the items along a dimension of the tensor (cuda version). +For a 1-dimensional data array a, +sum = \sum_i (a_i - shift)^power if isExp == false +sum = \sum_i exp((a_i - shift)^power) if isExp == true +>> input - the input tensor +>> output - the output tensor +>> dim - which dimension to reduce +>> shift - the bias on the input +>> power - we perform pow(item_i, power) on each item +>> ieExp - specify if the exp() is performed +*/ +void _CudaReduceSum(const XTensor * input, XTensor * output, int dim, const XTensor * shift, DTYPE power, bool isExp) +{ + CheckNTErrors(input && output, "Empty input or output tensors!"); + CheckNTErrors(input->order == output->order + 1, "Incorrect tensor sizes!"); + CheckNTErrors(input->order > dim && dim >= 0, "Illegal dimension to reduce!"); + CheckNTErrors(input->dataType == output->dataType, "Unmatched data types!"); + CheckNTErrors(shift == NULL || output->unitNum == shift->unitNum, "Incorrect shift tensor size!"); + + for(int i = 0; i < input->order; i++){ + if(i < dim){ + CheckNTErrors(input->dimSize[i] == output->dimSize[i], "Unmatched tensors!"); + } + else if(i > dim){ + CheckNTErrors(input->dimSize[i] == output->dimSize[i - 1], "Unmatched tensors!"); + } + } + + if(input->dataType == X_FLOAT16) + CheckNTErrors(power == 0 || power == 0.5 || power == 1.0 || power == 2.0, "TODO!"); + + int cudaGridSize[3]; + int cudaBlockSize[3]; + int iter = 0; + int stride = 1; + int strideNum = input->dimSize[dim]; + int blockSize = 1; + int blockNum = 1; + + for (int i = 0; i < input->order; i++) { + if (i < dim) + blockNum *= input->dimSize[i]; + else if (i > dim) + stride *= input->dimSize[i]; + } + blockSize = stride * strideNum; + + int devID = input->devID; + int devIDBackup; + ProtectCudaDev(devID, devIDBackup); + + DTYPE * sp = shift != NULL ? 
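// The comment above states what _CudaReduceSum computes per output element:
// sum_i (a_i - shift)^power, optionally wrapped in exp(). A plain CPU
// reference of that formula, useful as a mental model or for checking results
// (referenceReduceSum is a hypothetical helper, not part of this code base):
#include <cmath>

static float referenceReduceSum(const float* a, int n, float shift,
                                float power, bool isExp) {
    float sum = 0.0f;
    for (int i = 0; i < n; ++i) {
        float v = a[i] - shift;                 // apply the bias
        if (power != 1.0f)                      // same special cases as the kernels
            v = (power == 2.0f) ? v * v
              : (power == 0.5f) ? std::sqrt(v)
              : std::pow(v, power);
        sum += isExp ? std::exp(v) : v;         // exp() is applied per item
    }
    return sum;
}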
(DTYPE*)shift->data : NULL; + + if (stride == 1 && blockNum >= 10) { + dim3 grids; + dim3 blocks; + continuousStorageThreadAllocation(grids, blocks, (long long)blockNum, strideNum); + if (blocks.y >= 128) + KernelReduceSumOp <<>> ((DTYPE *)input->data, (DTYPE*)output->data, stride, + strideNum, grids.y, blockSize, blockNum, sp, power, isExp); + else { + if (blockNum % 4 != 0) + blockNum = (int)(blockNum / 4) + 1; + else + blockNum = blockNum / 4; + KernelReduceSumOpLessBlocks <<>> ((DTYPE *)input->data, (DTYPE*)output->data, + strideNum, blockNum, sp, power, isExp); + } + } + else if (stride != 1 && stride * blockNum > 4096) { + //GDevs->GetGridAndBlockSize2D(devID, stride * blockNum, strideNum,MAX_INT, cudaGridSize, cudaBlockSize); + //unsigned int* goutput = (unsigned int *)input->data; + //convert2uintV2 << > > ((float*)input->data, goutput, stride, strideNum, blockNum, strideNum*blockNum*stride); + dim3 grid, block; + discontinuousStorageNoShareMemThreadAllocation(&grid, &block, stride, blockNum); + KernelReduceSumDiscontinuousStorage <<>> ((DTYPE *)input->data, (DTYPE*)output->data, stride, + strideNum, blockNum,sp, power, isExp); + } + else { + XMem * mem = input->mem; + + GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + + int bufSize = input->unitSize * cudaGridSize[0] * stride * blockNum * 2; + DTYPE * buf = mem != NULL ? (DTYPE*)mem->AllocBuf(mem->devID, bufSize) : (DTYPE*)XMemAlloc(devID, bufSize); + DTYPE * buf1 = buf; + DTYPE * buf2 = buf + cudaGridSize[0] * stride * blockNum; + do { + if (input->dataType == DEFAULT_DTYPE) { + DTYPE * iData = NULL; + DTYPE * oData = NULL; + if (iter == 0) { + iData = (DTYPE*)input->data; + oData = buf1; + } + else if (iter % 2 == 1) { + iData = buf1; + oData = buf2; + } + else { + iData = buf2; + oData = buf1; + } + /* unroll the reduction procedure. The code is messy but it is faster. 
*/ + if (strideNum <= 32) { + GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); + if (cudaGridSize[0] == 1) + oData = (DTYPE*)output->data; + KernelReduceSum <<>> (iData, oData, stride, strideNum, blocks.x, + blockSize, blockNum, sp, power, isExp); + } + else if (strideNum < 128) { + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 64), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); + if (cudaGridSize[0] == 1) + oData = (DTYPE*)output->data; + CheckNTErrors((cudaBlockSize[0] >= 64), "Incorrect thread number when calling the cuda kernel!"); + adjustThreadForUseWarpOptimization(&blocks, &threads); + KernelReduceSumFast<64> <<>> (iData, oData, stride, strideNum, blocks.x, + blockSize, blockNum, sp, power, isExp); + } + else if (strideNum < 256) { + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 128), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); + if (cudaGridSize[0] == 1) + oData = (DTYPE*)output->data; + CheckNTErrors((cudaBlockSize[0] >= 128), "Incorrect thread number when calling the cuda kernel!"); + adjustThreadForUseWarpOptimization(&blocks, &threads); + KernelReduceSumFast<128> <<>> (iData, oData, stride, strideNum, blocks.x, + blockSize, blockNum, sp, power, isExp); + } + else if (strideNum < 512) { + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 256), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); + if (cudaGridSize[0] == 1) + oData = (DTYPE*)output->data; + CheckNTErrors((cudaBlockSize[0] >= 256), "Incorrect thread number when calling the cuda kernel!"); + adjustThreadForUseWarpOptimization(&blocks, &threads); + KernelReduceSumFast<256> <<>> (iData, oData, stride, strideNum, blocks.x, + blockSize, blockNum, sp, power, isExp); + } + else { + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 512), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + dim3 blocks(cudaGridSize[0], cudaGridSize[1]), threads(cudaBlockSize[0], cudaBlockSize[1]); + if (cudaGridSize[0] == 1) + oData = (DTYPE*)output->data; + CheckNTErrors((cudaBlockSize[0] >= 512), "Incorrect thread number when calling the cuda kernel!"); + adjustThreadForUseWarpOptimization(&blocks, &threads); + KernelReduceSumFast<512> <<>> (iData, oData, stride, strideNum, blocks.x, + blockSize, blockNum, sp, power, isExp); + } + } + else if (input->dataType == X_FLOAT16) { + __half * buf1ft16 = (__half *)buf1; + __half * buf2ft16 = (__half *)buf2; + __half * spft16 = (__half *)sp; + unsigned short power2 = FloatToFloat16(power); + __half * powerft16p = (__half*)&power2; + __half * iData = NULL; + __half * oData = NULL; + if (iter == 0) { + iData = (__half*)input->data; + oData = buf1ft16; + } + else if (iter % 2 == 1) { + iData = buf1ft16; + oData = buf2ft16; + } + else { + iData = buf2ft16; + oData = buf1ft16; + } + + /* unroll the reduction procedure. The code is messy but it is faster. 
*/ + if (strideNum < 32) { + GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); + if (cudaGridSize[0] == 1) + oData = (__half*)output->data; + KernelReduceSum <<>> (iData, oData, stride, strideNum, blocks.y, + blockSize, blockNum, spft16, *powerft16p, isExp); + } + else if (strideNum < 128) { + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 64), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); + if (cudaGridSize[0] == 1) + oData = (__half*)output->data; + CheckNTErrors((cudaBlockSize[0] >= 64), "Incorrect thread number when calling the cuda kernel!"); + KernelReduceSumFast<64> <<>> (iData, oData, stride, strideNum, blocks.y, + blockSize, blockNum, spft16, *powerft16p, isExp); + } + else if (strideNum < 256) { + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 128), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); + if (cudaGridSize[0] == 1) + oData = (__half*)output->data; + CheckNTErrors((cudaBlockSize[0] >= 128), "Incorrect thread number when calling the cuda kernel!"); + KernelReduceSumFast<128> <<>> (iData, oData, stride, strideNum, blocks.y, + blockSize, blockNum, spft16, *powerft16p, isExp); + } + else if (strideNum < 512) { + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 256), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); + if (cudaGridSize[0] == 1) + oData = (__half*)output->data; + CheckNTErrors((cudaBlockSize[0] >= 256), "Incorrect thread number when calling the cuda kernel!"); + KernelReduceSumFast<256> <<>> (iData, oData, stride, strideNum, blocks.y, + blockSize, blockNum, spft16, *powerft16p, isExp); + } + else { + GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 512), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); + dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); + if (cudaGridSize[0] == 1) + oData = (__half*)output->data; + CheckNTErrors((cudaBlockSize[0] >= 512), "Incorrect thread number when calling the cuda kernel!"); + KernelReduceSumFast<512> <<>> (iData, oData, stride, strideNum, blocks.y, + blockSize, blockNum, spft16, *powerft16p, isExp); + } + } + + strideNum = cudaGridSize[0]; + blockSize = cudaGridSize[0]; + sp = NULL; + power = (DTYPE)1.0; + isExp = false; + + iter++; + + } while (strideNum > 1); + + + if (mem != NULL) + mem->ReleaseBuf(mem->devID, bufSize); + else + XMemFree(devID, buf); + } + + BacktoCudaDev(devID, devIDBackup); +} + +#endif // USE_CUDA + +} // namespace nts(NiuTrans.Tensor) \ No newline at end of file diff --git a/cuda_code/Renderer_2.cu b/cuda_code/Renderer_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..ec36d0b4d91b59338f13ca82f342440993589143 --- /dev/null +++ b/cuda_code/Renderer_2.cu @@ -0,0 +1,545 @@ +#include "Renderer.hpp" +#include "kernels.hpp" +#include "shading.hpp" +#include "../util/util.hpp" +#include +#include "math.h" +#include +#include +#include + +const int queueSize = 200000; +const int blockDimensionX = 512; +const float gamma22 = 2.2; + +Renderer::Renderer(int width, int height) +: width(width), + height(height), + r_width(width), + r_height(height), + iterationsDone(0), + 
destIterationsDone(0), + numBounces(30), + r_numBounces(30), + image(nullptr), + destImage(nullptr), + randState(nullptr), + pathstates(nullptr), + rays(nullptr), + shadowRays(nullptr), + pathRequests(nullptr), + raycastRequests(nullptr), + shadowRaycastRequests(nullptr), + diffuseMaterialRequests(nullptr), + specularReflectionRequests(nullptr), + specularTransmissionRequests(nullptr), + filmIndex(nullptr), + numPathRequests(nullptr), + numRaycastRequests(nullptr), + numShadowRaycastRequests(nullptr), + numDiffuseMaterialRequests(nullptr), + numSpecularReflectionRequests(nullptr), + numSpecularTransmissionRequests(nullptr), + shapes(nullptr), + lights(nullptr), + shapeLen(0), + lightLen(0), + camera(width, height, glm::vec3(0.0, 0.0, 10.0), glm::vec3(0.0)), + stopRendering(false), + sceneChanged(false), + resolutionChanged(false), + imageUpdated(false), + cameraChanged(false), + renderThread(nullptr), + useLightSampling(true), + renderCaustics(true), + r_useLightSampling(true), + r_renderCaustics(true) +{ + setup(); +} + +void Renderer::start() { + stopRendering = false; + renderThread = new std::thread(&Renderer::renderLoop, this); +} + +Renderer::~Renderer() { + stop(); + freeArrays(); +} + +int Renderer::getWidth() { + return width; +} + +int Renderer::getHeight() { + return height; +} + +void Renderer::copyShapes() { + cudaMallocManaged(&shapes, shapeVec.size() * sizeof(Shape)); + waitAndCheckError("setup::cudaMallocManaged(shapes)"); + + std::memcpy(shapes, shapeVec.data(), shapeVec.size() * sizeof(Shape)); +} + +void Renderer::copyLights() { + cudaMallocManaged(&lights, lightVec.size() * sizeof(Shape)); + waitAndCheckError("setup::cudaMallocManaged(lights)"); + + std::memcpy(lights, lightVec.data(), lightVec.size() * sizeof(Shape)); +} + +void Renderer::setup() { + freeArrays(); + + r_width = width; + r_height = height; + r_useLightSampling = useLightSampling; + r_renderCaustics = renderCaustics; + r_numBounces = numBounces; + camera.update(r_width, r_height); + + int imgLen = r_width * r_height; + int imgLenStratified = imgLen * camera.getStratificationLevel() * camera.getStratificationLevel(); + + + cudaMallocManaged(&destImage, imgLen * sizeof(float)); + waitAndCheckError("setup::cudaMallocManaged(destImage)"); + cudaMallocManaged(&image, imgLenStratified * sizeof(glm::vec3)); + waitAndCheckError("setup::cudaMallocManaged(image)"); + cudaMallocManaged(&randState, queueSize * sizeof(curandState)); + waitAndCheckError("setup::cudaMallocManaged(randState)"); + if(shapeVec.size() > 0) { + copyShapes(); + } + if(lightVec.size() > 0) { + copyLights(); + } + shapeLen = shapeVec.size(); + lightLen = lightVec.size(); + cudaMallocManaged(&pathstates, queueSize * sizeof(Pathstate)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&rays, queueSize * sizeof(Ray)); + waitAndCheckError("setup::cudaMallocManaged(rays)"); + cudaMallocManaged(&shadowRays, queueSize * sizeof(Ray)); + waitAndCheckError("setup::cudaMallocManaged(shadowRays)"); + cudaMallocManaged(&pathRequests, queueSize * sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(pathRequests)"); + cudaMallocManaged(&raycastRequests, queueSize * sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(raycastRequests)"); + cudaMallocManaged(&shadowRaycastRequests, queueSize * sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(shadowRaycastRequests)"); + cudaMallocManaged(&diffuseMaterialRequests, queueSize * sizeof(int)); + 
waitAndCheckError("setup::cudaMallocManaged(diffuseMaterialRequests)"); + cudaMallocManaged(&specularReflectionRequests, queueSize * sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(specularReflectionRequests)"); + cudaMallocManaged(&specularTransmissionRequests, queueSize * sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(specularTransmissionRequests)"); + cudaMallocManaged(&filmIndex, sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(filmIndex)"); + cudaMallocManaged(&numRaycastRequests, sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(numRaycastRequests)"); + cudaMallocManaged(&numShadowRaycastRequests, sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(numShadowRaycastRequests)"); + cudaMallocManaged(&numPathRequests, sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(numPathRequests)"); + cudaMallocManaged(&numDiffuseMaterialRequests, sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(numDiffuseMaterialRequests)"); + cudaMallocManaged(&numSpecularReflectionRequests, sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(numSpecularReflectionRequests)"); + cudaMallocManaged(&numSpecularTransmissionRequests, sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(numSpecularTransmissionRequests)"); + cudaMemset(image, 0, imgLenStratified * sizeof(glm::vec3)); + waitAndCheckError("setup::cudaMallocManaged(image)"); + cudaMemset(destImage, 0, imgLen * sizeof(unsigned int)); + waitAndCheckError("setup::cudaMallocManaged(destImage)"); + + + cudaMallocManaged(&pathstateSoA.currentIsect, queueSize * sizeof(Interaction)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.extensionIsect, queueSize * sizeof(Interaction)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.foundExtensionIsect, queueSize * sizeof(bool)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.isActive, queueSize * sizeof(bool)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.pdf, queueSize * sizeof(float)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.lightPdf, queueSize * sizeof(float)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.bounces, queueSize * sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.beta, queueSize * sizeof(glm::vec3)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.L, queueSize * sizeof(glm::vec3)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.wi_L, queueSize * sizeof(glm::vec3)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.Lemit, queueSize * sizeof(glm::vec3)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.filmIndex, queueSize * sizeof(int)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.material, queueSize * sizeof(Material)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.f, queueSize * sizeof(glm::vec3)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.light_f, queueSize * sizeof(glm::vec3)); + 
waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.Lsample, queueSize * sizeof(glm::vec3)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.LsampleOccluded, queueSize * sizeof(bool)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.isLightSample, queueSize * sizeof(bool)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + cudaMallocManaged(&pathstateSoA.diffuseBounce, queueSize * sizeof(bool)); + waitAndCheckError("setup::cudaMallocManaged(pathstates)"); + + + iterationsDone = 0; + destIterationsDone = 0; + *filmIndex = 0; + *numPathRequests = 0; + *numRaycastRequests = 0; + *numShadowRaycastRequests = 0; + *numDiffuseMaterialRequests = 0; + *numSpecularReflectionRequests = 0; + *numSpecularTransmissionRequests = 0; + camera.setImage(image); + camera.setFilmIndexPointer(filmIndex); + + dim3 blockDim(blockDimensionX); + dim3 gridDim(queueSize / blockDim.x + (queueSize % blockDim.x ? 1 : 0)); + + initRand<<>>(randState, queueSize); + waitAndCheckError("setup::initRand<<<>>>"); + + resolutionChanged = false; + sceneChanged = false; +} + +void Renderer::clearImage() { + cudaMemset(image, 0, r_width * r_height * camera.getStratificationLevel() * camera.getStratificationLevel() * sizeof(glm::vec3)); + waitAndCheckError("clearImage::cudaMemset"); + iterationsDone = 0; + destIterationsDone = 0; +} + +void Renderer::renderLoop() { + while(!stopRendering) { + // if resolutionChanged bool lock mutex and copy resolution -> setup + cameraMutex.lock(); + resolutionMutex.lock(); + if(resolutionChanged) { + destImageMutex.lock(); + sceneMutex.lock(); + setup(); + destImageMutex.unlock(); + sceneMutex.unlock(); + } + // if cameraChanged bool lock mutex and copy camera + Camera renderCamera = camera; + renderCamera.resetFilmIndex(); + if(cameraChanged) { + clearImage(); + cameraChanged = false; + } + cameraMutex.unlock(); + resolutionMutex.unlock(); + + // if sceneChanged bool lock mutex and copy scene -> setup + if(sceneChanged) { + std::lock_guard sceneLock(sceneMutex); + if(shapes) { + cudaFree(shapes); + waitAndCheckError("start::cudaFree(shapes)"); + shapes = nullptr; + } + if(shapeVec.size() > 0) { + copyShapes(); + } + if(lights) { + cudaFree(lights); + waitAndCheckError("start::cudaFree(lights)"); + lights = nullptr; + } + if(lightVec.size() > 0) { + copyLights(); + } + shapeLen = shapeVec.size(); + lightLen = lightVec.size(); + clearImage(); + r_useLightSampling = useLightSampling; + r_renderCaustics = renderCaustics; + r_numBounces = numBounces; + sceneChanged = false; + } + + dim3 blockDim(blockDimensionX); + dim3 gridDim(queueSize / blockDim.x + (queueSize % blockDim.x ? 
1 : 0)); + + init<<>>(pathstateSoA, queueSize, pathRequests, raycastRequests, shadowRaycastRequests, diffuseMaterialRequests, specularReflectionRequests, specularTransmissionRequests); + waitAndCheckError("renderLoop::init<<<>>>"); + do { + *numPathRequests = 0; + *numRaycastRequests = 0; + *numShadowRaycastRequests = 0; + *numDiffuseMaterialRequests = 0; + *numSpecularReflectionRequests = 0; + *numSpecularTransmissionRequests = 0; + render<<>>(renderCamera, pathstateSoA, queueSize, pathRequests, numPathRequests, diffuseMaterialRequests, numDiffuseMaterialRequests, specularReflectionRequests, numSpecularReflectionRequests, specularTransmissionRequests, numSpecularTransmissionRequests, randState, r_useLightSampling, r_renderCaustics, r_numBounces); + waitAndCheckError("renderLoop::render<<<>>>(loop)"); + newPath<<>>(pathRequests, pathstateSoA, numPathRequests, raycastRequests, rays, numRaycastRequests, renderCamera, randState); + waitAndCheckError("renderLoop::newPath<<<>>>"); + diffuseKernel<<>>(diffuseMaterialRequests, numDiffuseMaterialRequests, pathstateSoA, queueSize, raycastRequests, rays, numRaycastRequests, shadowRaycastRequests, shadowRays, numShadowRaycastRequests, lights, lightLen, randState, r_useLightSampling); + waitAndCheckError("renderLoop::diffuseKernel<<<>>>"); + specularReflectionKernel<<>>(specularReflectionRequests, numSpecularReflectionRequests, pathstateSoA, queueSize, raycastRequests, rays, numRaycastRequests, randState); + waitAndCheckError("renderLoop::specularReflectionKernel<<<>>>"); + specularTransmissionKernel<<>>(specularTransmissionRequests, numSpecularTransmissionRequests, pathstateSoA, queueSize, raycastRequests, rays, numRaycastRequests, randState); + waitAndCheckError("renderLoop::specularTransmissionKernel<<<>>>"); + raycast<<>>(raycastRequests, rays, numRaycastRequests, pathstateSoA, queueSize, shapes, shapeLen); + waitAndCheckError("renderLoop::raycast<<<>>>"); + shadowRaycast<<>>(shadowRaycastRequests, shadowRays, numShadowRaycastRequests, pathstateSoA, queueSize, shapes, shapeLen); + waitAndCheckError("renderLoop::shadowRaycast<<<>>>"); + + cudaDeviceSynchronize(); + } while(!(*numRaycastRequests == 0 && *numShadowRaycastRequests == 0 && *numDiffuseMaterialRequests == 0 && *numSpecularReflectionRequests == 0 && *numSpecularTransmissionRequests == 0)); + + ++iterationsDone; + if(destImageMutex.try_lock()) { + dim3 filterBlockDim(32, 32, 1); + dim3 filterGridDim(r_width / filterBlockDim.x + (r_width % filterBlockDim.x ? 1 : 0), r_height / filterBlockDim.y + (r_height % filterBlockDim.y ? 
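// Note: the do/while above is the wavefront core of the renderer. Each pass
// clears the per-stage counters, lets the stage kernels (path generation,
// materials, ray casts) refill the request queues, and the loop exits only once
// every queue is empty after cudaDeviceSynchronize(), i.e. all paths of this
// iteration have terminated.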
1 : 0)); + filterImage<<>>(image, destImage, r_width, r_height, renderCamera.getStratificationLevel(), iterationsDone, 1 / gamma22); + waitAndCheckError("renderLoop::filterImage<<<>>>"); + imageUpdated = true; + destImageWidth = r_width; + destImageHeight = r_height; + destIterationsDone = iterationsDone; + cudaDeviceSynchronize(); // need to wait for kernel before unlocking mutex + destImageMutex.unlock(); + } + } +} + +int Renderer::getData(unsigned int *outImage, int width, int height) { + if(!imageUpdated) { + return -1; + } + + std::lock_guard destImageLock(destImageMutex); + imageUpdated = false; + + if(width != destImageWidth || height != destImageHeight || destImage == nullptr) { + return -1; + } + std::memcpy(outImage, destImage, width * height * sizeof(unsigned int)); + return destIterationsDone; +} + +void Renderer::stop() { + if(!stopRendering) { + stopRendering = true; + renderThread->join(); + delete renderThread; + renderThread = nullptr; + } +} + +void Renderer::addObject(int id, int materialId, int sceneObjectType, float arr[16], float lightIntensity, float lightColor[3], float area, bool isVisible) { + std::lock_guard sceneLock(sceneMutex); + glm::mat4 objectToWorld = glm::make_mat4(arr); + glm::vec3 color = glm::make_vec3(lightColor); + bool isLight = !isZeroVec(color) && lightIntensity != 0; + switch(sceneObjectType) { + case SceneObjectType::sphere: + shapeVec.push_back(Shape(ShapeType::sphere, materialMap[materialId], area, objectToWorld, lightIntensity * pow(color, 1.0 / gamma22), isVisible)); + if(isLight) + lightVec.push_back(Shape(ShapeType::sphere, materialMap[materialId], area, objectToWorld, lightIntensity * pow(color, 1.0 / gamma22),isVisible)); + break; + case SceneObjectType::cube: + shapeVec.push_back(Shape(ShapeType::cube, materialMap[materialId], area, objectToWorld, lightIntensity * pow(color, 1.0 / gamma22), isVisible)); + if(isLight) + lightVec.push_back(Shape(ShapeType::cube, materialMap[materialId], area, objectToWorld, lightIntensity * pow(color, 1.0 / gamma22),isVisible)); + break; + case SceneObjectType::plane: + shapeVec.push_back(Shape(ShapeType::plane, materialMap[materialId], area, objectToWorld, lightIntensity * pow(color, 1.0 / gamma22), isVisible)); + if(isLight) + lightVec.push_back(Shape(ShapeType::plane, materialMap[materialId], area, objectToWorld, lightIntensity * pow(color, 1.0 / gamma22),isVisible)); + break; + case SceneObjectType::disc: + shapeVec.push_back(Shape(ShapeType::disc, materialMap[materialId], area, objectToWorld, lightIntensity * pow(color, 1.0 / gamma22), isVisible)); + if(isLight) + lightVec.push_back(Shape(ShapeType::disc, materialMap[materialId], area, objectToWorld, lightIntensity * pow(color, 1.0 / gamma22),isVisible)); + break; + default: + return; + } + sceneChanged = true; +} + +void Renderer::addDiffuseMaterial(int id, float colorArray[3]) { + std::lock_guard sceneLock(sceneMutex); + glm::vec3 color = glm::make_vec3(colorArray); + color = pow(color, gamma22); + Material m; + m.materialType = Materialtype::diffuse; + m.diffuse = DiffuseMaterial{color}; + materialMap.insert(std::pair(id, m)); +} + +void Renderer::addSpecularMaterial(int id, float reflectionColorArray[3], float transmissionColorArray[3], float IOR) { + std::lock_guard sceneLock(sceneMutex); + glm::vec3 reflectionColor = glm::make_vec3(reflectionColorArray); + glm::vec3 transmissionColor = glm::make_vec3(transmissionColorArray); + reflectionColor = pow(reflectionColor, gamma22); + transmissionColor = pow(transmissionColor, gamma22); + Material m; 
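// Colors entering through the API are linearized with pow(color, gamma22)
// (gamma22 = 2.2) before being stored in materials, and filterImage is handed
// 1 / gamma22, presumably to re-encode the accumulated linear image for
// display. A minimal sketch of that round trip (toLinear/toDisplay are
// illustrative names, not functions from this code base):
#include <cmath>

static float toLinear(float displayValue) { return std::pow(displayValue, 2.2f); }
static float toDisplay(float linearValue) { return std::pow(linearValue, 1.0f / 2.2f); }
// toDisplay(toLinear(0.5f)) == 0.5f up to rounding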
+ m.materialType = Materialtype::specular; + m.specular = SpecularMaterial{reflectionColor, transmissionColor, 1.0, IOR}; + materialMap.insert(std::pair(id, m)); +} + +void Renderer::clearObjects() { + std::lock_guard sceneLock(sceneMutex); + shapeVec.clear(); + lightVec.clear(); + sceneChanged = true; +} + +void Renderer::clearMaterials() { + std::lock_guard sceneLock(sceneMutex); + materialMap.clear(); + sceneChanged = true; +} + +void Renderer::setResolution(int width, int height) { + std::lock_guard resolutionLock(resolutionMutex); + this->width = width; + this->height = height; + resolutionChanged = true; +} + +void Renderer::updateCamera(float worldPos[3], float target[3], float fov_y, float fStop, float focusDistance, int stratificationLevel) { + std::lock_guard cameraLock(cameraMutex); + std::lock_guard resolutionLock(resolutionMutex); + glm::vec3 worldVec = glm::make_vec3(worldPos); + glm::vec3 targetVec = glm::make_vec3(target); + if(stratificationLevel != camera.getStratificationLevel()) + resolutionChanged = true; + camera.update(worldVec, targetVec, fov_y, fStop, focusDistance, stratificationLevel); + cameraChanged = true; +} + +void Renderer::freeArrays() { + if(image) { + cudaFree(image); + waitAndCheckError("setup::cudaFree(image)"); + image = nullptr; + } + if(destImage) { + cudaFree(destImage); + waitAndCheckError("setup::cudaFree(destImage)"); + destImage = nullptr; + } + if(randState) { + cudaFree(randState); + waitAndCheckError("setup::cudaFree(randstate)"); + randState = nullptr; + } + if(shapes) { + cudaFree(shapes); + waitAndCheckError("setup::cudaFree(shapes)"); + shapes = nullptr; + } + if(pathstates) { + cudaFree(pathstates); + waitAndCheckError("setup::cudaFree(pathstates)"); + pathstates = nullptr; + } + if(rays) { + cudaFree(rays); + waitAndCheckError("setup::cudaFree(rays)"); + rays = nullptr; + } + if(shadowRays) { + cudaFree(shadowRays); + waitAndCheckError("setup::cudaFree(shadowRays)"); + shadowRays = nullptr; + } + if(pathRequests) { + cudaFree(pathRequests); + waitAndCheckError("setup::cudaFree(pathRequests)"); + pathRequests = nullptr; + } + if(filmIndex) { + cudaFree(filmIndex); + waitAndCheckError("setup::cudaFree(filmIndex)"); + filmIndex = nullptr; + } + if(raycastRequests) { + cudaFree(raycastRequests); + waitAndCheckError("setup::cudaFree(raycastRequests)"); + raycastRequests = nullptr; + } + if(numRaycastRequests) { + cudaFree(numRaycastRequests); + waitAndCheckError("setup::cudaFree(numRaycastRequests)"); + numRaycastRequests = nullptr; + } + if(shadowRaycastRequests) { + cudaFree(shadowRaycastRequests); + waitAndCheckError("setup::cudaFree(shadowRaycastRequests)"); + shadowRaycastRequests = nullptr; + } + if(numShadowRaycastRequests) { + cudaFree(numShadowRaycastRequests); + waitAndCheckError("setup::cudaFree(numShadowRaycastRequests)"); + numShadowRaycastRequests = nullptr; + } + if(numPathRequests) { + cudaFree(numPathRequests); + waitAndCheckError("setup::cudaFree(numPathRequests)"); + numPathRequests = nullptr; + } + if(diffuseMaterialRequests) { + cudaFree(diffuseMaterialRequests); + waitAndCheckError("setup::cudaFree(diffuseMaterialRequests)"); + diffuseMaterialRequests = nullptr; + } + if(numDiffuseMaterialRequests) { + cudaFree(numDiffuseMaterialRequests); + waitAndCheckError("setup::cudaFree(numDiffuseMaterialRequests)"); + numDiffuseMaterialRequests = nullptr; + } + if(specularReflectionRequests) { + cudaFree(specularReflectionRequests); + waitAndCheckError("setup::cudaFree(specularReflectionRequests)"); + specularReflectionRequests 
= nullptr; + } + if(specularTransmissionRequests) { + cudaFree(specularTransmissionRequests); + waitAndCheckError("setup::cudaFree(specularTransmissionRequests)"); + specularTransmissionRequests = nullptr; + } + if(numSpecularReflectionRequests) { + cudaFree(numSpecularReflectionRequests); + waitAndCheckError("setup::cudaFree(numSpecularReflectionRequests)"); + numSpecularReflectionRequests = nullptr; + } + if(numSpecularTransmissionRequests) { + cudaFree(numSpecularTransmissionRequests); + waitAndCheckError("setup::cudaFree(numSpecularTransmissionRequests)"); + numSpecularTransmissionRequests = nullptr; + } + + pathstateSoA.freeArrays(); +} + +void Renderer::setRenderSettings(bool useLightSampling, bool renderCaustics, int numBounces) { + std::lock_guard sceneLock(sceneMutex); + this->useLightSampling = useLightSampling; + this->renderCaustics = renderCaustics; + this->numBounces = numBounces; + sceneChanged = true; +} \ No newline at end of file diff --git a/cuda_code/ReplicationPadding_10.cu b/cuda_code/ReplicationPadding_10.cu new file mode 100644 index 0000000000000000000000000000000000000000..71e21c3ea251baf0675301007ffd59aa2a75d155 --- /dev/null +++ b/cuda_code/ReplicationPadding_10.cu @@ -0,0 +1,739 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + + +namespace at { +namespace native { +__host__ __device__ __forceinline__ int imin(int a, int b) { + return a > b ? b : a; +} + +__host__ __device__ __forceinline__ int imax(int a, int b) { + return a > b ? a : b; +} + +namespace { +template +__global__ void replication_pad_forward_kernel1d( + PackedTensorAccessor64 input, + PackedTensorAccessor64 output, + int padL, int padR, int y_shift, int z_shift) { + + int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; + int plane = blockIdx.y + y_shift; + int batch = blockIdx.z + z_shift; + if (outputPointId >= output.size(2)) { + return; + } + int outputPointX = outputPointId % output.size(2); + + int iStartX = imax(0, -padL); + int oStartX = imax(0, padL); + + int inputPointX = imin(imax(padL, outputPointX), input.size(2) + padL - 1) - oStartX + iStartX; + + scalar_t valueToCopy = input[batch][plane][inputPointX]; + output[batch][plane][outputPointX] = valueToCopy; +} + +template +__global__ void replication_pad_backward_kernel( + PackedTensorAccessor64 gradInput, + PackedTensorAccessor64 gradOutput, + int padL, int padR, int y_shift, int z_shift) { + + int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; + int plane = blockIdx.y + y_shift; + int batch = blockIdx.z + z_shift; + if (outputPointId >= gradOutput.size(2)) { + return; + } + int outputPointX = outputPointId % gradOutput.size(2); + + int iStartX = imax(0, -padL); + int oStartX = imax(0, padL); + + int inputPointX = imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) - oStartX + iStartX; + + scalar_t valueToCopy = gradOutput[batch][plane][outputPointX]; + gpuAtomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy); +} + +template +__global__ void replication_pad_forward_kernel2d( + PackedTensorAccessor64 input, + PackedTensorAccessor64 output, + int padT, int padB, int padL, int padR, int y_shift, int z_shift) { + + int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; + int plane = blockIdx.y + y_shift; + int batch = blockIdx.z + z_shift; + if (outputPointId >= output.size(2) * output.size(3)) { + return; + } + int outputPointX = outputPointId % output.size(3); + int outputPointY = outputPointId / 
output.size(3); + + int iStartX = imax(0, -padL); + int iStartY = imax(0, -padT); + int oStartX = imax(0, padL); + int oStartY = imax(0, padT); + + int inputPointX = imin(imax(padL, outputPointX), input.size(3) + padL - 1) - oStartX + iStartX; + int inputPointY = imin(imax(padT, outputPointY), input.size(2) + padT - 1) - oStartY + iStartY; + + scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX]; + output[batch][plane][outputPointY][outputPointX] = valueToCopy; +} + +template +__global__ void replication_pad_backward_kernel( + PackedTensorAccessor64 gradInput, + PackedTensorAccessor64 gradOutput, + int padT, int padB, int padL, int padR, int y_shift, int z_shift) { + + int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; + int plane = blockIdx.y + y_shift; + int batch = blockIdx.z + z_shift; + if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) { + return; + } + int outputPointX = outputPointId % gradOutput.size(3); + int outputPointY = outputPointId / gradOutput.size(3); + + int iStartX = imax(0, -padL); + int iStartY = imax(0, -padT); + int oStartX = imax(0, padL); + int oStartY = imax(0, padT); + + int inputPointX = imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) - oStartX + iStartX; + int inputPointY = imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) - oStartY + iStartY; + + scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX]; + gpuAtomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); +} + +template +__global__ void replication_pad_forward_kernel3d( + PackedTensorAccessor64 input, + PackedTensorAccessor64 output, + int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { + + int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; + int plane = blockIdx.y + y_shift; + int batch = blockIdx.z + z_shift; + if (outputPointId >= (output.size(2) * output.size(3) * + output.size(4))) { + return; + } + int outputPointX = outputPointId % output.size(4); + int outputPointY = (outputPointId / output.size(4)) % output.size(3); + int outputPointZ = outputPointId / (output.size(3) * output.size(4)); + + int iStartX = imax(0, -pleft); + int iStartY = imax(0, -ptop); + int iStartZ = imax(0, -pfront); + int oStartX = imax(0, pleft); + int oStartY = imax(0, ptop); + int oStartZ = imax(0, pfront); + + int inputPointX = imin(imax(pleft, outputPointX), + input.size(4) + pleft - 1) - oStartX + iStartX; + int inputPointY = imin(imax(ptop, outputPointY), + input.size(3) + ptop - 1) - oStartY + iStartY; + int inputPointZ = imin(imax(pfront, outputPointZ), + input.size(2) + pfront - 1) - oStartZ + iStartZ; + + scalar_t valueToCopy = + input[batch][plane][inputPointZ][inputPointY][inputPointX]; + output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; +} + +template +__global__ void replication_pad_backward_kernel( + PackedTensorAccessor64 gradInput, + PackedTensorAccessor64 gradOutput, + int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { + int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; + int plane = blockIdx.y + y_shift; + int batch = blockIdx.z + z_shift; + + if (outputPointId >= (gradOutput.size(2) * gradOutput.size(3) * + gradOutput.size(4))) { + return; + } + int outputPointX = outputPointId % gradOutput.size(4); + int outputPointY = (outputPointId / gradOutput.size(4)) % + gradOutput.size(3); + int outputPointZ = outputPointId / (gradOutput.size(3) * + gradOutput.size(4)); + + int 
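// The index arithmetic in these padding kernels clamps each output coordinate
// back into the valid input range, which is what replicates the border.
// Worked 1-D example with iwidth = 5 and padL = 2 (so iStartX = 0, oStartX = 2):
//   outputPointX = 0 -> imin(imax(2, 0), 5 + 2 - 1) - 2 + 0 = 0   (left edge repeated)
//   outputPointX = 4 -> imin(imax(2, 4), 6) - 2 + 0        = 2   (interior: x - padL)
//   outputPointX = 8 -> imin(imax(2, 8), 6) - 2 + 0        = 4   (right edge repeated)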
iStartX = imax(0, -pleft); + int iStartY = imax(0, -ptop); + int iStartZ = imax(0, -pfront); + int oStartX = imax(0, pleft); + int oStartY = imax(0, ptop); + int oStartZ = imax(0, pfront); + + int inputPointX = imin(imax(pleft, outputPointX), + gradInput.size(4) + pleft - 1) - oStartX + iStartX; + int inputPointY = imin(imax(ptop, outputPointY), + gradInput.size(3) + ptop - 1) - oStartY + iStartY; + int inputPointZ = imin(imax(pfront, outputPointZ), + gradInput.size(2) + pfront - 1) - oStartZ + iStartZ; + + scalar_t valueToCopy = + gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX]; + gpuAtomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX], + valueToCopy); +} + +void replication_pad2d_backward_out_cuda_template( + Tensor& gradInput, + const Tensor& gradOutput, + const Tensor& input, + IntArrayRef paddingSize) +{ + + TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), + "input tensor must fit into 32-bit index math"); + TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), + "output gradient tensor must fit into 32-bit index math"); + TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); + + int padL = paddingSize[0]; + int padR = paddingSize[1]; + int padT = paddingSize[2]; + int padB = paddingSize[3]; + int planeDim = 0; + int dimh = 1; + int dimw = 2; + + int numInputDims = input.dim(); + if (numInputDims == 4) { + planeDim++; + dimh++; + dimw++; + } + int iheight = input.size(dimh); + int iwidth = input.size(dimw); + int oheight = iheight + padT + padB; + int owidth = iwidth + padL + padR; + + TORCH_CHECK(owidth == gradOutput.size(dimw), + "gradOutput width unexpected. Expected: ", owidth, ", Got: ", + gradOutput.size(dimw)); + TORCH_CHECK(oheight == gradOutput.size(dimh), + "gradOutput height unexpected. Expected: ", oheight, ", Got: ", + gradOutput.size(dimh)); + + gradInput.resize_as_(input); + if (gradInput.numel() == 0) { + return; + } + gradInput.zero_(); + + AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, + input.scalar_type(), "replication_pad2d_backward_cuda", [&] { + + auto gradInput_ = gradInput; + auto gradOutput_ = gradOutput; + if (numInputDims == 3) { + gradInput_ = gradInput.unsqueeze(0); + gradOutput_ = gradOutput.unsqueeze(0); + } + auto devGradInput = gradInput_.packed_accessor64(); + auto devGradOutput = gradOutput_.packed_accessor64(); + + int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3); + int64_t size1 = devGradOutput.size(1); + int64_t size0 = devGradOutput.size(0); + + for (int64_t block_y = 0; block_y < size1; block_y += 65535) { + int64_t block_y_size = std::min(size1 - block_y, static_cast(65535)); + for (int64_t block_z = 0; block_z < size0; block_z += 65535) { + int64_t block_z_size = std::min(size0 - block_z, static_cast(65535)); + + dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast(256)), block_y_size, block_z_size); + dim3 blockSize(outputPlaneSize > 256 ? 
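// Note: the surrounding block_y / block_z loops exist because gridDim.y and
// gridDim.z are capped at 65535. Larger plane/batch counts are therefore
// launched in chunks, and the kernels add the chunk offsets back in through
// their y_shift / z_shift arguments.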
256 : outputPlaneSize); + + replication_pad_backward_kernel <<>>( + devGradInput, devGradOutput, padT, padB, padL, padR, block_y, block_z); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + } + } + ); +} + +static inline void shapeCheck3d( + const Tensor& input, + int pleft, int pright, + int ptop, int pbottom, + int pfront, int pback) { + TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), + "input tensor must fit into 32-bit index math"); + int numInputDims = input.dim(); + + bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; + TORCH_CHECK( + (numInputDims == 4 && input.size(0) != 0 && valid_dims) || + (numInputDims == 5 && valid_dims && input.size(4) != 0), + "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", + input.sizes()); + + int planeDim = 0; + int dimd = 1; + int dimh = 2; + int dimw = 3; + if (numInputDims == 5) { + planeDim++; + dimd++; + dimh++; + dimw++; + } + + int numPlanes = input.size(planeDim); + int idepth = input.size(dimd); + int iheight = input.size(dimh); + int iwidth = input.size(dimw); + int odepth = idepth + pfront + pback; + int oheight = iheight + ptop + pbottom; + int owidth = iwidth + pleft + pright; + TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, + "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, + ") is too small." + " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); + +} + +static inline void shapeAndGradOutputCheck3d( + const Tensor& input, + const Tensor& gradOutput, + int pleft, int pright, + int ptop, int pbottom, + int pfront, int pback) { + TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), + "input tensor must fit into 32-bit index math"); + int numInputDims = input.dim(); + + bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; + TORCH_CHECK( + (numInputDims == 4 && valid_dims) || + (numInputDims == 5 && valid_dims && input.size(4) != 0), + "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", + input.sizes()); + + int planeDim = 0; + int dimd = 1; + int dimh = 2; + int dimw = 3; + if (numInputDims == 5) { + planeDim++; + dimd++; + dimh++; + dimw++; + } + + int numPlanes = input.size(planeDim); + int idepth = input.size(dimd); + int iheight = input.size(dimh); + int iwidth = input.size(dimw); + int odepth = idepth + pfront + pback; + int oheight = iheight + ptop + pbottom; + int owidth = iwidth + pleft + pright; + TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, + "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, + ") is too small." + " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); + + TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), + "output gradient tensor must fit into 32-bit index math"); + + TORCH_CHECK(numPlanes == gradOutput.size(planeDim), + "gradOutput width unexpected. Expected: ", numPlanes, ", Got: ", + gradOutput.size(planeDim)); + TORCH_CHECK(owidth == gradOutput.size(dimw), + "gradOutput width unexpected. Expected: ", owidth, ", Got: ", + gradOutput.size(dimw)); + TORCH_CHECK(oheight == gradOutput.size(dimh), + "gradOutput height unexpected. Expected: ", oheight, ", Got: ", + gradOutput.size(dimh)); + TORCH_CHECK(odepth == gradOutput.size(dimd), + "gradOutput depth unexpected. 
Expected: ", odepth, ", Got: ", + gradOutput.size(dimd)); +} + +void replication_pad3d_backward_out_cuda_template( + Tensor& gradInput, + const Tensor& gradOutput, + const Tensor& input, + IntArrayRef paddingSize) +{ + TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); + int pleft = paddingSize[0]; + int pright = paddingSize[1]; + int ptop = paddingSize[2]; + int pbottom = paddingSize[3]; + int pfront = paddingSize[4]; + int pback = paddingSize[5]; + shapeAndGradOutputCheck3d(input, gradOutput, pleft, pright, ptop, + pbottom, pfront, pback); + + int planeDim = 0; + int dimd = 1; + int dimh = 2; + int dimw = 3; + + int numInputDims = input.dim(); + if (numInputDims == 5) { + planeDim++; + dimd++; + dimh++; + dimw++; + } + + gradInput.resize_as_(input); + if (gradInput.numel() == 0) { + return; + } + gradInput.zero_(); + + AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, + input.scalar_type(), "replication_pad3d_backward_cuda", [&] { + auto gradInput_ = gradInput; + auto gradOutput_ = gradOutput; + if (numInputDims == 4) { + gradInput_ = gradInput.unsqueeze(0); + gradOutput_ = gradOutput.unsqueeze(0); + } + auto devGradInput = gradInput_.packed_accessor64(); + auto devGradOutput = gradOutput_.packed_accessor64(); + + int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3) * devGradOutput.size(4); + int64_t size1 = devGradOutput.size(1); + int64_t size0 = devGradOutput.size(0); + + for (int64_t block_y = 0; block_y < size1; block_y += 65535) { + int64_t block_y_size = std::min(size1 - block_y, static_cast(65535)); + for (int64_t block_z = 0; block_z < size0; block_z += 65535) { + int64_t block_z_size = std::min(size0 - block_z, static_cast(65535)); + + dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast(256)), block_y_size, block_z_size); + dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); + + replication_pad_backward_kernel <<>>( + devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + } + } + ); +} +} // namespace + +TORCH_IMPL_FUNC(replication_pad1d_out_cuda) ( + const Tensor& input, IntArrayRef paddingSize, const Tensor& output +) { + TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), + "input tensor must fit into 32-bit index math"); + + int64_t padL = paddingSize[0]; + int64_t padR = paddingSize[1]; + constexpr int64_t planeDim = -2; + constexpr int64_t dimw = -1; + int64_t numBatch = 1; + + int numInputDims = input.ndimension(); + + if (numInputDims == 3) { + numBatch = input.size(0); + } + + int64_t numPlanes = input.size(planeDim); + int64_t inputW = input.size(dimw); + int64_t outputW = output.size(dimw); + + if (input.numel() == 0) { + return; + } + + AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, + input.scalar_type(), "replication_pad1d_cuda", [&] { + at::Tensor input_ = input; + at::Tensor output_ = output; + if (numInputDims == 2) { + input_ = input.unsqueeze(0); + output_ = output.unsqueeze(0); + } + + auto devInput = input_.packed_accessor64(); + auto devOutput = output_.packed_accessor64(); + + int64_t outputPlaneSize = devOutput.size(2); + int64_t size1 = devOutput.size(1); + int64_t size0 = devOutput.size(0); + + for (int64_t block_y = 0; block_y < size1; block_y += 65535) { + int64_t block_y_size = std::min(size1 - block_y, static_cast(65535)); + for (int64_t block_z = 0; block_z < size0; block_z += 65535) { + int64_t block_z_size = std::min(size0 - block_z, static_cast(65535)); + + dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast(256)), block_y_size, block_z_size); + dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); + + replication_pad_forward_kernel1d <<>>(devInput, devOutput, padL, padR, block_y, block_z); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + } + } + ); +} + +TORCH_IMPL_FUNC(replication_pad1d_backward_out_cuda) ( + const Tensor& gradOutput, + const Tensor& input, + IntArrayRef paddingSize, + const Tensor& gradInput +) { + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("replication_pad1d_backward_cuda"); + + TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), + "input tensor must fit into 32-bit index math"); + TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), + "output gradient tensor must fit into 32-bit index math"); + + int padL = paddingSize[0]; + int padR = paddingSize[1]; + int planeDim = 0; + int dimw = 1; + + int numInputDims = input.ndimension(); + if (numInputDims == 3) { + planeDim++; + dimw++; + } + int iwidth = input.size(dimw); + int owidth = iwidth + padL + padR; + + if (gradInput.numel() == 0) { + return; + } + gradInput.zero_(); + + AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, + input.scalar_type(), "replication_pad1d_backward_cuda", [&] { + + auto gradInput_ = gradInput; + auto gradOutput_ = gradOutput; + if (numInputDims == 2) { + gradInput_ = gradInput.unsqueeze(0); + gradOutput_ = gradOutput.unsqueeze(0); + } + auto devGradInput = gradInput_.packed_accessor64(); + auto devGradOutput = gradOutput_.packed_accessor64(); + + int64_t outputPlaneSize = devGradOutput.size(2); + int64_t size1 = devGradOutput.size(1); + int64_t size0 = devGradOutput.size(0); + + for (int64_t block_y = 0; block_y < size1; block_y += 65535) { + int64_t block_y_size = std::min(size1 - block_y, static_cast(65535)); + for (int64_t block_z = 0; block_z < size0; block_z += 65535) { + int64_t block_z_size = std::min(size0 - block_z, static_cast(65535)); + + dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast(256)), block_y_size, block_z_size); + dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); + + replication_pad_backward_kernel <<>>( + devGradInput, devGradOutput, padL, padR, block_y, block_z); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + } + }); +} + +TORCH_IMPL_FUNC(replication_pad2d_out_cuda) ( + const Tensor& input, IntArrayRef paddingSize, const Tensor& output +) { + TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), + "input tensor must fit into 32-bit index math"); + if (input.numel() == 0) { + return; + } + int64_t padL = paddingSize[0]; + int64_t padR = paddingSize[1]; + int64_t padT = paddingSize[2]; + int64_t padB = paddingSize[3]; + AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, + input.scalar_type(), "replication_pad2d_cuda", [&] { + at::Tensor input_ = input; + at::Tensor output_ = output; + if (input.dim() == 3) { + input_ = input.unsqueeze(0); + output_ = output.unsqueeze(0); + } + auto devInput = input_.packed_accessor64(); + auto devOutput = output_.packed_accessor64(); + int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3); + int64_t size1 = devOutput.size(1); + int64_t size0 = devOutput.size(0); + for (int64_t block_y = 0; block_y < size1; block_y += 65535) { + int64_t block_y_size = std::min(size1 - block_y, static_cast(65535)); + for (int64_t block_z = 0; block_z < size0; block_z += 65535) { + int64_t block_z_size = std::min(size0 - block_z, static_cast(65535)); + dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast(256)), block_y_size, block_z_size); + dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); + replication_pad_forward_kernel2d <<>>( + devInput, devOutput, padT, padB, padL, padR, block_y, block_z); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + } + } + ); +} + +Tensor& replication_pad2d_backward_out_cuda(const Tensor& gradOutput, + const Tensor& input, + IntArrayRef paddingSize, + Tensor& gradInput) +{ + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("replication_pad2d_backward_out_cuda"); + replication_pad2d_backward_out_cuda_template( + gradInput, gradOutput, input, paddingSize); + return gradInput; +} + +Tensor replication_pad2d_backward_cuda( + const Tensor& gradOutput, + const Tensor& input, + IntArrayRef paddingSize) +{ + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("replication_pad2d_backward_cuda"); + auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + replication_pad2d_backward_out_cuda_template( + gradInput, gradOutput, input, paddingSize); + return gradInput; +} + + +TORCH_IMPL_FUNC(replication_pad3d_out_cuda) ( + const Tensor& input, IntArrayRef paddingSize, const Tensor& output +) { + int pleft = paddingSize[0]; + int pright = paddingSize[1]; + int ptop = paddingSize[2]; + int pbottom = paddingSize[3]; + int pfront = paddingSize[4]; + int pback = paddingSize[5]; + + int planeDim = 0; + int dimd = 1; + int dimh = 2; + int dimw = 3; + int numBatch = 1; + + int numInputDims = input.dim(); + + if (numInputDims == 5) { + numBatch = input.size(0); + planeDim++; + dimd++; + dimh++; + dimw++; + } + + int numPlanes = input.size(planeDim); + int inputD = input.size(dimd); + int inputH = input.size(dimh); + int inputW = input.size(dimw); + int outputD = output.size(dimd); + int outputH = output.size(dimh); + int outputW = output.size(dimw); + + if (input.numel() == 0) { + return; + } + + AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, + input.scalar_type(), "replication_pad3d_cuda", [&] { + at::Tensor input_ = input; + at::Tensor output_ = output; + if (numInputDims == 4) { + auto input_ = input.unsqueeze(0); + auto output_ = output.unsqueeze(0); + } + + auto devInput = input_.packed_accessor64(); + auto devOutput = output_.packed_accessor64(); + + int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3) * devOutput.size(4); + int64_t size1 = devOutput.size(1); + int64_t size0 = devOutput.size(0); + + for (int64_t block_y = 0; block_y < size1; block_y += 65535) { + int64_t block_y_size = std::min(size1 - block_y, static_cast(65535)); + for (int64_t block_z = 0; block_z < size0; block_z += 65535) { + int64_t block_z_size = std::min(size0 - block_z, static_cast(65535)); + + dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast(256)), block_y_size, block_z_size); + dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); + + replication_pad_forward_kernel3d <<>>( + devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + } + } + ); +} + +Tensor& replication_pad3d_backward_out_cuda(const Tensor& gradOutput, + const Tensor& input, + IntArrayRef paddingSize, + Tensor& gradInput) +{ + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("replication_pad3d_backward_out_cuda"); + replication_pad3d_backward_out_cuda_template( + gradInput, gradOutput, input, paddingSize); + return gradInput; +} + +Tensor replication_pad3d_backward_cuda( + const Tensor& gradOutput, + const Tensor& input, + IntArrayRef paddingSize) +{ + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("replication_pad3d_backward_cuda"); + auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); + replication_pad3d_backward_out_cuda_template( + gradInput, gradOutput, input, paddingSize); + return gradInput; +} + +} // at::native +} // at diff --git a/cuda_code/Resample2d_kernel_2.cu b/cuda_code/Resample2d_kernel_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..095893d93da34f2fd78b9661cbf7e6faa260860d --- /dev/null +++ b/cuda_code/Resample2d_kernel_2.cu @@ -0,0 +1,242 @@ +#include +#include +#include +#include + +#define CUDA_NUM_THREADS 512 +#define THREADS_PER_BLOCK 64 + +#define DIM0(TENSOR) ((TENSOR).x) +#define DIM1(TENSOR) ((TENSOR).y) +#define DIM2(TENSOR) ((TENSOR).z) +#define DIM3(TENSOR) ((TENSOR).w) + +#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))]) + +#ifdef __cplusplus + extern "C" { +#endif + +__global__ void kernel_Resample2d_updateOutput(const int n, const float* input1, const long4 input1_size, const long4 input1_stride, + const float* input2, const long4 input2_size, const long4 input2_stride, float* output, const long4 output_size, const long4 output_stride, int kernel_size) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + + if (index >= n) { + return; + } + + float val = 0.0; + + int dim_b = DIM0(output_size); + int dim_c = DIM1(output_size); + int dim_h = DIM2(output_size); + int dim_w = DIM3(output_size); + int dim_chw = dim_c * dim_h * dim_w; + int dim_hw = dim_h * dim_w; + + int b = ( index / dim_chw ) % dim_b; + int c = ( index / dim_hw ) % dim_c; + int y = ( index / dim_w ) % dim_h; + int x = ( index ) % dim_w; + + float dx = DIM3_INDEX(input2, b, 0, y, x); + float dy = DIM3_INDEX(input2, b, 1, y, x); + + float xf = float(x) + dx; + float yf = float(y) + dy; + float alpha = xf - floor(xf); // alpha + float beta = yf - floor(yf); // beta + + int xL = max(min( int (floor(xf)), dim_w-1), 0); + int xR = max(min( int (floor(xf)+1), dim_w -1), 0); + int yT = max(min( int (floor(yf)), dim_h-1), 0); + int yB = max(min( int (floor(yf)+1), dim_h-1), 0); + + for (int fy = 0; fy < kernel_size; fy += 1) { + for (int fx = 0; fx < kernel_size; fx += 1) { + val += (1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx); + val += (alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx); + val += (1. 
- alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx); + val += (alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx); + } + } + + output[index] = val; + +} + + +__global__ void kernel_Resample2d_backward_input1( + const int n, const float* input1, const long4 input1_size, const long4 input1_stride, const float* input2, const long4 input2_size, const long4 input2_stride, + const float* gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, float* gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) { + + int index = blockIdx.x * blockDim.x + threadIdx.x; + + if (index >= n) { + return; + } + + int dim_b = DIM0(gradOutput_size); + int dim_c = DIM1(gradOutput_size); + int dim_h = DIM2(gradOutput_size); + int dim_w = DIM3(gradOutput_size); + int dim_chw = dim_c * dim_h * dim_w; + int dim_hw = dim_h * dim_w; + + int b = ( index / dim_chw ) % dim_b; + int c = ( index / dim_hw ) % dim_c; + int y = ( index / dim_w ) % dim_h; + int x = ( index ) % dim_w; + + float dx = DIM3_INDEX(input2, b, 0, y, x); + float dy = DIM3_INDEX(input2, b, 1, y, x); + + float xf = float(x) + dx; + float yf = float(y) + dy; + float alpha = xf - int(xf); // alpha + float beta = yf - int(yf); // beta + + int idim_h = DIM2(input1_size); + int idim_w = DIM3(input1_size); + + int xL = max(min( int (floor(xf)), idim_w-1), 0); + int xR = max(min( int (floor(xf)+1), idim_w -1), 0); + int yT = max(min( int (floor(yf)), idim_h-1), 0); + int yB = max(min( int (floor(yf)+1), idim_h-1), 0); + + for (int fy = 0; fy < kernel_size; fy += 1) { + for (int fx = 0; fx < kernel_size; fx += 1) { + atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x)); + atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x)); + atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x)); + atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x)); + } + } + +} + +__global__ void kernel_Resample2d_backward_input2( + const int n, const float* input1, const long4 input1_size, const long4 input1_stride, const float* input2, const long4 input2_size, const long4 input2_stride, + const float* gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, float* gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) { + + int index = blockIdx.x * blockDim.x + threadIdx.x; + + if (index >= n) { + return; + } + + float output = 0.0; + int kernel_rad = (kernel_size - 1)/2; + + int dim_b = DIM0(gradInput_size); + int dim_c = DIM1(gradInput_size); + int dim_h = DIM2(gradInput_size); + int dim_w = DIM3(gradInput_size); + int dim_chw = dim_c * dim_h * dim_w; + int dim_hw = dim_h * dim_w; + + int b = ( index / dim_chw ) % dim_b; + int c = ( index / dim_hw ) % dim_c; + int y = ( index / dim_w ) % dim_h; + int x = ( index ) % dim_w; + + int odim_c = DIM1(gradOutput_size); + + float dx = DIM3_INDEX(input2, b, 0, y, x); + float dy = DIM3_INDEX(input2, b, 1, y, x); + + float xf = float(x) + dx; + float yf = float(y) + dy; + + int xL = max(min( int (floor(xf)), dim_w-1), 0); + int xR = max(min( int (floor(xf)+1), dim_w -1), 0); + int yT = max(min( int (floor(yf)), dim_h-1), 0); + int yB = max(min( int (floor(yf)+1), dim_h-1), 0); + + if (c % 2) { + float gamma = 1 - (xf - floor(xf)); // alpha + for (int i = 0; i <= 2*kernel_rad; ++i) { + for 
(int j = 0; j <= 2*kernel_rad; ++j) { + for (int ch = 0; ch < odim_c; ++ch) { + output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i)); + output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i)); + output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i)); + output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i)); + } + } + } + } + else { + float gamma = 1 - (yf - floor(yf)); // alpha + for (int i = 0; i <= 2*kernel_rad; ++i) { + for (int j = 0; j <= 2*kernel_rad; ++j) { + for (int ch = 0; ch < odim_c; ++ch) { + output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i)); + output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i)); + output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i)); + output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i)); + } + } + } + + } + + gradInput[index] = output; + +} + +void Resample2d_kernel_forward(THCState* state, THCudaTensor* input1, THCudaTensor* input2, THCudaTensor* output, int kernel_size) { + int n = 0; + + const long4 input1_size = make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]); + const long4 input1_stride = make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]); + + const long4 input2_size = make_long4(input2->size[0], input2->size[1], input2->size[2], input2->size[3]); + const long4 input2_stride = make_long4(input2->stride[0], input2->stride[1], input2->stride[2], input2->stride[3]); + + const long4 output_size = make_long4(output->size[0], output->size[1], output->size[2], output->size[3]); + const long4 output_stride = make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]); + + n = THCudaTensor_nElement(state, output); + kernel_Resample2d_updateOutput<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>( + n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, input2), input2_size, input2_stride, + THCudaTensor_data(state, output), output_size, output_stride, kernel_size); + + THCudaCheck(cudaGetLastError()); +} + +void Resample2d_kernel_backward(THCState* state, THCudaTensor* input1, THCudaTensor* input2, THCudaTensor* gradOutput, THCudaTensor* gradInput1, THCudaTensor* gradInput2, int kernel_size) { + int n = 0; + + const long4 input1_size = make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]); + const long4 input1_stride = make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]); + + const long4 input2_size = make_long4(input2->size[0], input2->size[1], input2->size[2], input2->size[3]); + const long4 input2_stride = make_long4(input2->stride[0], input2->stride[1], input2->stride[2], input2->stride[3]); + + const long4 gradOutput_size = make_long4(gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); + const long4 gradOutput_stride = make_long4(gradOutput->stride[0], gradOutput->stride[1], gradOutput->stride[2], gradOutput->stride[3]); + + const long4 gradInput1_size = make_long4(gradInput1->size[0], gradInput1->size[1], gradInput1->size[2], gradInput1->size[3]); + const long4 gradInput1_stride 
= make_long4(gradInput1->stride[0], gradInput1->stride[1], gradInput1->stride[2], gradInput1->stride[3]); + + n = THCudaTensor_nElement(state, gradOutput); + kernel_Resample2d_backward_input1<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>( + n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, input2), input2_size, input2_stride, + THCudaTensor_data(state, gradOutput), gradOutput_size, gradOutput_stride, THCudaTensor_data(state, gradInput1), gradInput1_size, gradInput1_stride, kernel_size + ); + + const long4 gradInput2_size = make_long4(gradInput2->size[0], gradInput2->size[1], gradInput2->size[2], gradInput2->size[3]); + const long4 gradInput2_stride = make_long4(gradInput2->stride[0], gradInput2->stride[1], gradInput2->stride[2], gradInput2->stride[3]); + + n = THCudaTensor_nElement(state, gradInput2); + kernel_Resample2d_backward_input2<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>( + n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, input2), input2_size, input2_stride, + THCudaTensor_data(state, gradOutput), gradOutput_size, gradOutput_stride, THCudaTensor_data(state, gradInput2), gradInput2_size, gradInput2_stride, kernel_size + ); + THCudaCheck(cudaGetLastError()); +} + +#ifdef __cplusplus + } +#endif \ No newline at end of file diff --git a/cuda_code/ResizeNearest_1.cu b/cuda_code/ResizeNearest_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..aae180b1042d27b761af3b6f93001ba1adf93371 --- /dev/null +++ b/cuda_code/ResizeNearest_1.cu @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "ResizeNearest.hpp" +#include +#include + +// TODO: Move this to a common header +inline bool is_CHW(nvinfer1::Dims const& dims) { + return (dims.nbDims == 3 && + dims.type[0] == nvinfer1::DimensionType::kCHANNEL && + dims.type[1] == nvinfer1::DimensionType::kSPATIAL && + dims.type[2] == nvinfer1::DimensionType::kSPATIAL); +} + +nvinfer1::Dims ResizeNearestPlugin::getOutputDimensions(int index, + const nvinfer1::Dims *inputDims, + int nbInputs) { + assert(nbInputs == 1); + nvinfer1::Dims const& input = inputDims[0]; + assert(is_CHW(input)); + assert(_ndims == 2); + assert(index == 0); + nvinfer1::Dims output; + output.nbDims = input.nbDims; + int s = 0; + for( int d=0; d=abs(tmp-floor(tmp))){ + output.d[d] = floor(tmp); + } + else{ + output.d[d] = ceil(tmp); + } + s++; + } else { + output.d[d] = input.d[d]; + } + } + + return output; +} + +int ResizeNearestPlugin::initialize() { + _output_dims = this->getOutputDimensions(0, &this->getInputDims(0), 1); + assert(is_CHW(this->getInputDims(0))); + assert(is_CHW(_output_dims)); + assert(_ndims == 2); + return 0; +} + +template +__global__ +void resize_nearest_kernel_2d(int nbatch, + float2 scale, + int2 osize, + Data const* idata, int istride, int ibatchstride, + Data* odata, int ostride, int obatchstride) { + int x0 = threadIdx.x + blockIdx.x * blockDim.x; + int y0 = threadIdx.y + blockIdx.y * blockDim.y; + int z0 = blockIdx.z; + for( int batch=z0; batchgetInputDims(0); + int nchan = input_dims.d[0]; + switch( _ndims ) { + case 2: { + float2 scale = {_scale[1], _scale[0]}; + int2 osize = {_output_dims.d[2], _output_dims.d[1]}; + int istride = input_dims.d[2]; + int ostride = _output_dims.d[2]; + int ibatchstride = input_dims.d[1] * istride; + int obatchstride = _output_dims.d[1] * ostride; + dim3 block(32, 16); + dim3 grid((osize.x - 1) / block.x + 1, + (osize.y - 1) / block.y + 1, + std::min(batchSize * nchan, 65535)); + if (getDataType()==nvinfer1::DataType::kFLOAT) { + resize_nearest_kernel_2d<<>> + (batchSize * nchan, scale, osize, + static_cast( inputs[0]), istride, ibatchstride, + static_cast(outputs[0]), ostride, obatchstride); + } else { + resize_nearest_kernel_2d<<>> + (batchSize * nchan, scale, osize, + static_cast<__half const*>( inputs[0]), istride, ibatchstride, + static_cast<__half* >(outputs[0]), ostride, obatchstride); + } + return cudaGetLastError() != cudaSuccess; + } + default: return -1; + } +} diff --git a/cuda_code/ResourcePool.cu b/cuda_code/ResourcePool.cu new file mode 100644 index 0000000000000000000000000000000000000000..bd5384eebbcc00b8166cb8bee88fe6a1053d9ce9 --- /dev/null +++ b/cuda_code/ResourcePool.cu @@ -0,0 +1,316 @@ +/* Copyright (C) 2010 Ion Torrent Systems, Inc. 
All Rights Reserved */ + +#include + +#include "ResourcePool.h" + + +using namespace std; + + + + +streamResources::streamResources(size_t hostSize, size_t deviceSize, int id):_HMem(hostSize,HostPageLocked),_DMem(deviceSize, DeviceGlobal) +{ + + _inUse = false; + _stream = NULL; + _streamId = id; + cudaGetDevice(&_devId); + + cout << getLogHeader() << " acquiring cudaStream" << endl; + + if(_HMem.memoryAvailable() < hostSize || _DMem.memoryAvailable() < deviceSize ){ + throw cudaException(cudaErrorMemoryAllocation); + } + + cudaStreamCreate(&_stream); + + + cudaError_t err = cudaGetLastError(); + if(_stream ==NULL || err != cudaSuccess){ + _stream=NULL; + throw cudaStreamCreationError(__FILE__,__LINE__); + } + + +} + +streamResources::~streamResources() +{ + if(_stream != NULL) cudaStreamDestroy(_stream); //CUDA_ERROR_CHECK(); +} + +cudaStream_t streamResources::getStream() +{ + return _stream; +} + + +MemoryResource * streamResources::getHostMem() +{ + return &_HMem; +} + +MemoryResource * streamResources::getDevMem() +{ + return &_DMem; +} + + +MemSegment streamResources::getHostSegment(size_t size) +{ + return _HMem.getMemSegment(size); +} +MemSegment streamResources::getDevSegment(size_t size) +{ + return _DMem.getMemSegment(size); +} +MemSegPair streamResources::GetHostDevPair(size_t size) +{ + return MemSegPair(_HMem.getMemSegment(size),_DMem.getMemSegment(size)); +} + +MemSegPair streamResources::GetCurrentPairGroup() +{ + if(_HMem.getCurrentSegGroupSize() == _DMem.getCurrentSegGroupSize()) + return MemSegPair(_HMem.getCurrentSegGroup(),_DMem.getCurrentSegGroup()); + + cout << getLogHeader() << " Memory Manager Warning: No valid Memory Host Device Pair available, returning NULL Segment!" << endl; + return MemSegPair(); +} + +MemSegment streamResources::GetCurrentHostGroup() +{ + return _HMem.getCurrentSegGroup(); +} +MemSegment streamResources::GetCurrentDeviceGroup() +{ + return _DMem.getCurrentSegGroup(); +} + +size_t streamResources::StartNewSegGroup() +{ + size_t hs = StartNewHostSegGroup(); + size_t ds = StartNewDeviceSegGroup(); + return (hs == ds)?(hs):(0); +} + + +size_t streamResources::StartNewHostSegGroup() +{ + return _HMem.startNewSegGroup(); +} +size_t streamResources::StartNewDeviceSegGroup() +{ + return _DMem.startNewSegGroup(); +} + + +int streamResources::getStreamId() +{ + return _streamId; +} + + +bool streamResources::aquire() +{ + if(_inUse) return false; + _inUse = true; + return _inUse; +} + +void streamResources::release() +{ + _inUse = false; + _HMem.releaseAll(); + _DMem.releaseAll(); +} + +bool streamResources::isSet() +{ + if(_stream == NULL) return false; + if(!_HMem.isSet()) return false; + if(!_DMem.isSet()) return false; + if(_inUse) return true; + return true; +} + + +int streamResources::getDevId() +{ + return _devId; +} + +string streamResources::Status() +{ + ostringstream output; + size_t divMB = 1024*1024; + output << getLogHeader() << " Host Memory allocated(used): " << _HMem.getSize()/divMB <<"(" << _HMem.memoryUsed()/divMB << ")MB Device Memory: " << _DMem.getSize()/divMB <<"(" << _DMem.memoryUsed()/divMB << ")MB"; + + return output.str(); +} + + +string streamResources::getLogHeader() +{ + ostringstream headerinfo; + + headerinfo << "CUDA " << _devId << " StreamResource " << _streamId << ":"; + + return headerinfo.str(); +} +//////////////////////////////////////////////////////////// + + +size_t cudaResourcePool::_SrequestedDeviceSize = 0; +size_t cudaResourcePool::_SrequestedHostSize = 0; + +cudaResourcePool::cudaResourcePool(int 
numStreams) +{ + _HostSize = _SrequestedHostSize; + _DeviceSize = _SrequestedDeviceSize; + cudaGetDevice(&_devId); + + + tryAddResource(numStreams); + + if(_sRes.empty()) throw cudaStreamCreationError( __FILE__,__LINE__); + +} +/* +cudaResourcePool::cudaResourcePool(size_t hostsize, size_t devicesize, int numStreams) +{ + _HostSize = hostsize; + _DeviceSize = devicesize; + cudaGetDevice(&_devId); + + + tryAddResource(numStreams); + + if(_sRes.empty()) throw cudaStreamCreationError( __FILE__,__LINE__); + +}*/ + +void cudaResourcePool::tryAddResource(unsigned int numStreams) +{ + + streamResources * srestmp; + + while(_sRes.size() < numStreams){ + int i = _sRes.size(); + srestmp = NULL; + try{ + // remove print memory since we observed a segfault in the libcuda api call, see TS-7922 + //printMemoryUsage(); + cout << getLogHeader() << " trying to create stream resources" << i <<" with "<< _HostSize/(1024.0*1024.0) << "MB Host and " << _DeviceSize/(1024.0*1024.0) << "MB Device memory" << endl; + + srestmp = new streamResources(_HostSize, _DeviceSize, i); + _sRes.push_back (srestmp); + } + catch(cudaException &e){ + cout << getLogHeader() << " creation of stream resource " << i << " failed! " << endl; + // getNumStreams(); + break; + } + } + +} + + + +cudaResourcePool::~cudaResourcePool() +{ + cout << getLogHeader() << " destroying ResourcePool" << endl; + while (!_sRes.empty()) + { + delete _sRes.back(); + _sRes.pop_back(); + } +} + +streamResources * cudaResourcePool::getResource() +{ + + vector::iterator it; + + for ( it=_sRes.begin() ; it < _sRes.end(); it++ ){ + if ((*it)->aquire()){ + return (*it); + } + } + return NULL; +} + +void cudaResourcePool::releaseResource(streamResources *& res) +{ + vector::iterator it; + for ( it=_sRes.begin() ; it < _sRes.end(); it++ ){ + if ((*it) == res){ + res->release(); + break; + } + } + res = NULL; +} + +// cleans out streamResources where reallocation of resources might have failed +// during execution or for whatever reason could not be allocated. 
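// Illustrative usage sketch (not part of the original ResourcePool.cu diff): one way a
// caller might drive the pool defined in this file. It assumes requestHostMemory(),
// requestDeviceMemory() and the constructor are callable as shown (they operate on the
// static _Srequested* members above); the buffer size and the work enqueued on the
// stream are hypothetical placeholders.
void exampleStreamPoolUsage()
{
  // Register the per-stream memory footprint before the pool constructs its resources.
  cudaResourcePool::requestHostMemory(64u * 1024u * 1024u);    // 64 MB page-locked host
  cudaResourcePool::requestDeviceMemory(128u * 1024u * 1024u); // 128 MB device global

  cudaResourcePool pool(2);                    // try to create two streamResources
  streamResources *res = pool.getResource();   // NULL when every resource is in use
  if (res != NULL) {
    cudaStream_t stream = res->getStream();
    MemSegPair buffers = res->GetHostDevPair(1024 * sizeof(float)); // paired host/device segment
    // ... enqueue async copies and kernels on `stream` using `buffers` here ...
    cudaStreamSynchronize(stream);
    pool.releaseResource(res);                 // releaseAll() on both memory resources; res is set to NULL
  }
}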
+void cudaResourcePool::poolCleaning() +{ + for(int i=(_sRes.size()-1); i >= 0 ; i--){ // iterate backwards for easy delete + if(!_sRes[i]->isSet()){ + cout << getLogHeader() << " removing SR " << _sRes[i]->getStreamId() << " from StreamPool" << endl; + delete _sRes[i]; // destroy resource object + _sRes.erase(_sRes.begin()+i); //delete resource from list + } + } +} + + + +int cudaResourcePool::getNumStreams() +{ + poolCleaning(); // clean out resources where reallocation of resources might have failed + return _sRes.size(); +} + +string cudaResourcePool::getLogHeader() +{ + ostringstream headerinfo; + + headerinfo << "CUDA " << _devId << " ResourcePool:"; + + return headerinfo.str(); +} + +void cudaResourcePool::printMemoryUsage() +{ + size_t free_byte ; + size_t total_byte ; + cudaMemGetInfo( &free_byte, &total_byte ) ; + + double free_db = (double)free_byte ; + double total_db = (double)total_byte ; + double used_db = total_db - free_db ; + double divMB = 1024*1024; + cout << getLogHeader() << " GPU memory usage: used = " << used_db/divMB<< ", free = " << free_db/divMB<< " MB, total = "<< total_db/divMB<<" MB" << endl; +} + +size_t cudaResourcePool::requestDeviceMemory(size_t size) +{ + _SrequestedDeviceSize= (_SrequestedDeviceSize> size)?(_SrequestedDeviceSize):(size); + return _SrequestedDeviceSize; +} +void cudaResourcePool::setDeviceMemory(size_t size) +{ + _SrequestedDeviceSize=size; +} + +size_t cudaResourcePool::requestHostMemory(size_t size) +{ + _SrequestedHostSize = (_SrequestedHostSize > size)?(_SrequestedHostSize):(size); + return _SrequestedHostSize; +} + + diff --git a/cuda_code/ResultSetSortImpl_4.cu b/cuda_code/ResultSetSortImpl_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..925805d8ac45219e55db5c41394f043bb018a2c3 --- /dev/null +++ b/cuda_code/ResultSetSortImpl_4.cu @@ -0,0 +1,411 @@ +#include "ResultSetSortImpl.h" +#include "BufferCompaction.h" +#include "GpuMemUtils.h" +#include "GpuRtConstants.h" +#include "ResultSetBufferAccessors.h" +#include "ThrustAllocator.h" + +#include +#include +#include +#include +#include + +namespace { + +template +bool is_empty_entry(const size_t entry_idx, const int8_t* groupby_buffer, const GroupByBufferLayoutInfo& layout); + +template <> +bool is_empty_entry(const size_t entry_idx, + const int8_t* groupby_buffer, + const GroupByBufferLayoutInfo& layout) { + const auto key_ptr = groupby_buffer + entry_idx * layout.row_bytes; + return (*reinterpret_cast(key_ptr) == EMPTY_KEY_32); +} + +template <> +bool is_empty_entry(const size_t entry_idx, + const int8_t* groupby_buffer, + const GroupByBufferLayoutInfo& layout) { + const auto key_ptr = groupby_buffer + entry_idx * layout.row_bytes; + return (*reinterpret_cast(key_ptr) == EMPTY_KEY_64); +} + +template +std::vector do_radix_sort(const ExecutorDeviceType device_type, + ThrustAllocator& thrust_allocator, + const int8_t* groupby_buffer, + V dev_oe_col_buffer_begin, + V dev_oe_col_buffer_end, + I dev_idx_buff_begin, + const size_t dev_idx_buff_size, + const PodOrderEntry& oe, + const GroupByBufferLayoutInfo& layout, + const size_t top_n) { + if (dev_idx_buff_size == 0) { + return {}; + } + if (oe.is_desc) { + if (device_type == ExecutorDeviceType::GPU) { + thrust::sort_by_key(thrust::device(thrust_allocator), + dev_oe_col_buffer_begin, + dev_oe_col_buffer_end, + dev_idx_buff_begin, + thrust::greater()); + } else { + thrust::sort_by_key( + dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater()); + } + } else { + if (device_type == 
ExecutorDeviceType::GPU) { + thrust::sort_by_key( + thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); + } else { + thrust::sort_by_key(dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); + } + } + // Speculatively transfer only the top_n first, most of the time it'll be enough. + thrust::host_vector host_vector_result(dev_idx_buff_begin, + dev_idx_buff_begin + std::min(top_n, dev_idx_buff_size)); + // Sometimes, radix sort can bring to the front entries which are empty. + // For example, ascending sort on COUNT(*) will bring non-existent groups + // to the front of dev_idx_buff since they're 0 in our system. Re-do the + // transfer in that case to bring the entire dev_idx_buff; existing logic + // in row iteration will take care of skipping the empty rows. + for (size_t i = 0; i < host_vector_result.size(); ++i) { + const auto entry_idx = host_vector_result[i]; + if (is_empty_entry(entry_idx, groupby_buffer, layout)) { + host_vector_result = thrust::host_vector(dev_idx_buff_begin, dev_idx_buff_begin + dev_idx_buff_size); + break; + } + } + std::vector result; + result.reserve(std::min(top_n, host_vector_result.size())); + for (size_t i = 0; i < host_vector_result.size(); ++i) { + const auto entry_idx = host_vector_result[i]; + if (!is_empty_entry(entry_idx, groupby_buffer, layout)) { + result.push_back(entry_idx); + if (result.size() >= top_n) { + break; + } + } + } + return result; +} + +void add_nulls(std::vector& idx_buff, const std::vector& null_idx_buff, const PodOrderEntry& oe) { + if (null_idx_buff.empty()) { + return; + } + const auto insertion_point = oe.nulls_first ? idx_buff.begin() : idx_buff.end(); + idx_buff.insert(insertion_point, null_idx_buff.begin(), null_idx_buff.end()); +} + +template +thrust::device_ptr get_device_copy_ptr(const thrust::host_vector& host_vec, ThrustAllocator& thrust_allocator) { + if (host_vec.empty()) { + return thrust::device_ptr(static_cast(nullptr)); + } + const auto host_vec_bytes = host_vec.size() * sizeof(T); + T* dev_ptr = reinterpret_cast(thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes))); + copy_to_gpu(thrust_allocator.getDataMgr(), + reinterpret_cast(dev_ptr), + &host_vec[0], + host_vec_bytes, + thrust_allocator.getDeviceId()); + return thrust::device_ptr(dev_ptr); +} + +template +std::vector baseline_sort_fp(const ExecutorDeviceType device_type, + const int device_id, + Data_Namespace::DataMgr* data_mgr, + const int8_t* groupby_buffer, + const thrust::host_vector& oe_col_buffer, + const PodOrderEntry& oe, + const GroupByBufferLayoutInfo& layout, + const size_t top_n, + const size_t start, + const size_t step) { + thrust::host_vector neg_idx_buff; + thrust::host_vector pos_idx_buff; + std::vector null_idx_buff; + thrust::host_vector neg_oe_col_buffer; + thrust::host_vector pos_oe_col_buffer; + const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 1 : 0); + neg_idx_buff.reserve(slice_entry_count); + pos_idx_buff.reserve(slice_entry_count); + null_idx_buff.reserve(slice_entry_count); + neg_oe_col_buffer.reserve(slice_entry_count); + pos_oe_col_buffer.reserve(slice_entry_count); + size_t oe_col_buffer_idx = 0; + const auto& oe_info = layout.oe_target_info; + const auto col_ti = oe_info.agg_kind == kAVG ? 
SQLTypeInfo(kDOUBLE, false) : oe_info.sql_type; + // Execlude AVG b/c collect_order_entry_column already makes its pair collapse into a double + const bool float_argument_input = takes_float_argument(oe_info) && oe_info.agg_kind != kAVG; + + auto is_negtive = float_argument_input ? [](const int64_t v) -> bool { return (v & (1 << 31)) != 0; } + : [](const int64_t v) -> bool { return v < 0; }; + + for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { + if (!is_empty_entry(i, groupby_buffer, layout) && + oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(col_ti, float_argument_input)) { + null_idx_buff.push_back(i); + continue; + } + if (is_negtive(oe_col_buffer[oe_col_buffer_idx])) { // sign bit works the same for integer and floating point + neg_idx_buff.push_back(i); + neg_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); + } else { + pos_idx_buff.push_back(i); + pos_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); + } + } + std::vector pos_result; + ThrustAllocator thrust_allocator(data_mgr, device_id); + if (device_type == ExecutorDeviceType::GPU) { + const auto dev_pos_idx_buff = get_device_copy_ptr(pos_idx_buff, thrust_allocator); + const auto dev_pos_oe_col_buffer = get_device_copy_ptr(pos_oe_col_buffer, thrust_allocator); + pos_result = do_radix_sort(device_type, + thrust_allocator, + groupby_buffer, + dev_pos_oe_col_buffer, + dev_pos_oe_col_buffer + pos_oe_col_buffer.size(), + dev_pos_idx_buff, + pos_idx_buff.size(), + oe, + layout, + top_n); + } else { + CHECK(device_type == ExecutorDeviceType::CPU); + pos_result = do_radix_sort(device_type, + thrust_allocator, + groupby_buffer, + pos_oe_col_buffer.begin(), + pos_oe_col_buffer.end(), + pos_idx_buff.begin(), + pos_idx_buff.size(), + oe, + layout, + top_n); + } + std::vector neg_result; + PodOrderEntry reverse_oe{oe.tle_no, !oe.is_desc, oe.nulls_first}; + if (device_type == ExecutorDeviceType::GPU) { + const auto dev_neg_idx_buff = get_device_copy_ptr(neg_idx_buff, thrust_allocator); + const auto dev_neg_oe_col_buffer = get_device_copy_ptr(neg_oe_col_buffer, thrust_allocator); + neg_result = do_radix_sort(device_type, + thrust_allocator, + groupby_buffer, + dev_neg_oe_col_buffer, + dev_neg_oe_col_buffer + neg_oe_col_buffer.size(), + dev_neg_idx_buff, + neg_idx_buff.size(), + reverse_oe, + layout, + top_n); + } else { + CHECK(device_type == ExecutorDeviceType::CPU); + neg_result = do_radix_sort(device_type, + thrust_allocator, + groupby_buffer, + neg_oe_col_buffer.begin(), + neg_oe_col_buffer.end(), + neg_idx_buff.begin(), + neg_idx_buff.size(), + reverse_oe, + layout, + top_n); + } + if (oe.is_desc) { + pos_result.insert(pos_result.end(), neg_result.begin(), neg_result.end()); + add_nulls(pos_result, null_idx_buff, oe); + return pos_result; + } + neg_result.insert(neg_result.end(), pos_result.begin(), pos_result.end()); + add_nulls(neg_result, null_idx_buff, oe); + return neg_result; +} + +template +std::vector baseline_sort_int(const ExecutorDeviceType device_type, + const int device_id, + Data_Namespace::DataMgr* data_mgr, + const int8_t* groupby_buffer, + const thrust::host_vector& oe_col_buffer, + const PodOrderEntry& oe, + const GroupByBufferLayoutInfo& layout, + const size_t top_n, + const size_t start, + const size_t step) { + const auto& entry_ti = get_compact_type(layout.oe_target_info); + std::vector null_idx_buff; + thrust::host_vector notnull_idx_buff; + const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 
1 : 0); + null_idx_buff.reserve(slice_entry_count); + notnull_idx_buff.reserve(slice_entry_count); + thrust::host_vector notnull_oe_col_buffer; + notnull_oe_col_buffer.reserve(slice_entry_count); + size_t oe_col_buffer_idx = 0; + for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { + if (!is_empty_entry(i, groupby_buffer, layout) && + oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(entry_ti, false)) { + null_idx_buff.push_back(i); + } else { + notnull_idx_buff.push_back(i); + notnull_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); + } + } + std::vector notnull_result; + ThrustAllocator thrust_allocator(data_mgr, device_id); + if (device_type == ExecutorDeviceType::GPU) { + const auto dev_notnull_idx_buff = get_device_copy_ptr(notnull_idx_buff, thrust_allocator); + const auto dev_notnull_oe_col_buffer = get_device_copy_ptr(notnull_oe_col_buffer, thrust_allocator); + notnull_result = do_radix_sort(device_type, + thrust_allocator, + groupby_buffer, + dev_notnull_oe_col_buffer, + dev_notnull_oe_col_buffer + notnull_oe_col_buffer.size(), + dev_notnull_idx_buff, + notnull_idx_buff.size(), + oe, + layout, + top_n); + } else { + CHECK(device_type == ExecutorDeviceType::CPU); + notnull_result = do_radix_sort(device_type, + thrust_allocator, + groupby_buffer, + notnull_oe_col_buffer.begin(), + notnull_oe_col_buffer.end(), + notnull_idx_buff.begin(), + notnull_idx_buff.size(), + oe, + layout, + top_n); + } + add_nulls(notnull_result, null_idx_buff, oe); + return notnull_result; +} + +template +thrust::host_vector collect_order_entry_column(const int8_t* groupby_buffer, + const GroupByBufferLayoutInfo& layout, + const size_t start, + const size_t step) { + thrust::host_vector oe_col_buffer; + const auto row_ptr = groupby_buffer + start * layout.row_bytes; + auto crt_group_ptr1 = + layout.target_groupby_index >= 0 ? row_ptr + layout.target_groupby_index * sizeof(K) : row_ptr + layout.col_off; + const int8_t* crt_group_ptr2{nullptr}; + if (layout.oe_target_info.agg_kind == kAVG) { + crt_group_ptr2 = crt_group_ptr1 + layout.col_bytes; + } + const auto& entry_ti = get_compact_type(layout.oe_target_info); + const bool float_argument_input = takes_float_argument(layout.oe_target_info); + const auto step_bytes = layout.row_bytes * step; + for (size_t i = start; i < layout.entry_count; i += step) { + auto val1 = read_int_from_buff(crt_group_ptr1, layout.col_bytes > 0 ? 
layout.col_bytes : sizeof(K)); + if (crt_group_ptr2) { + const auto val2 = read_int_from_buff(crt_group_ptr2, 8); + const auto avg_val = pair_to_double({val1, val2}, entry_ti, float_argument_input); + val1 = *reinterpret_cast(&avg_val); + } + oe_col_buffer.push_back(val1); + crt_group_ptr1 += step_bytes; + if (crt_group_ptr2) { + crt_group_ptr2 += step_bytes; + } + } + return oe_col_buffer; +} + +template +thrust::device_ptr get_device_ptr(const size_t host_vec_size, ThrustAllocator& thrust_allocator) { + CHECK_GT(host_vec_size, size_t(0)); + const auto host_vec_bytes = host_vec_size * sizeof(T); + T* dev_ptr = reinterpret_cast(thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes))); + return thrust::device_ptr(dev_ptr); +} + +} // namespace + +template +std::vector baseline_sort(const ExecutorDeviceType device_type, + const int device_id, + Data_Namespace::DataMgr* data_mgr, + const int8_t* groupby_buffer, + const PodOrderEntry& oe, + const GroupByBufferLayoutInfo& layout, + const size_t top_n, + const size_t start, + const size_t step) { + auto oe_col_buffer = collect_order_entry_column(groupby_buffer, layout, start, step); + const auto& entry_ti = get_compact_type(layout.oe_target_info); + CHECK(entry_ti.is_number()); + if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) { + return baseline_sort_fp( + device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); + } + // Because of how we represent nulls for integral types, they'd be at the + // wrong position in these two cases. Separate them into a different buffer. + if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) { + return baseline_sort_int( + device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); + } + ThrustAllocator thrust_allocator(data_mgr, device_id); + // Fastest path, no need to separate nulls away since they'll end up at the + // right place as a side effect of how we're representing nulls. 
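// Standalone sketch (not from the original file) of the core pattern do_radix_sort() relies
// on for this fast path: pair each row's order-by value with its index, sort the values with
// thrust::sort_by_key on the device, and copy back only the leading top_n indices. The
// function name and buffer types below are illustrative, not part of the original sources.
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<uint32_t> top_n_row_indices(const std::vector<int64_t>& keys,
                                        size_t top_n,
                                        bool is_desc)
{
  thrust::device_vector<int64_t> dev_keys(keys.begin(), keys.end()); // order-by column on device
  thrust::device_vector<uint32_t> dev_idx(keys.size());
  thrust::sequence(dev_idx.begin(), dev_idx.end());                  // 0, 1, 2, ... row indices
  if (is_desc) {
    thrust::sort_by_key(dev_keys.begin(), dev_keys.end(), dev_idx.begin(),
                        thrust::greater<int64_t>());
  } else {
    thrust::sort_by_key(dev_keys.begin(), dev_keys.end(), dev_idx.begin());
  }
  const size_t n = std::min(top_n, keys.size());
  std::vector<uint32_t> result(n);
  thrust::copy(dev_idx.begin(), dev_idx.begin() + n, result.begin()); // transfer only the rows we need
  return result;
}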
+ if (device_type == ExecutorDeviceType::GPU) { + if (oe_col_buffer.empty()) { + return {}; + } + const auto dev_idx_buff = get_device_ptr(oe_col_buffer.size(), thrust_allocator); + thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step); + const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator); + return do_radix_sort(device_type, + thrust_allocator, + groupby_buffer, + dev_oe_col_buffer, + dev_oe_col_buffer + oe_col_buffer.size(), + dev_idx_buff, + oe_col_buffer.size(), + oe, + layout, + top_n); + } + CHECK(device_type == ExecutorDeviceType::CPU); + thrust::host_vector host_idx_buff(oe_col_buffer.size()); + thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step); + return do_radix_sort(device_type, + thrust_allocator, + groupby_buffer, + oe_col_buffer.begin(), + oe_col_buffer.end(), + host_idx_buff.begin(), + host_idx_buff.size(), + oe, + layout, + top_n); +} + +template std::vector baseline_sort(const ExecutorDeviceType device_type, + const int device_id, + Data_Namespace::DataMgr* data_mgr, + const int8_t* groupby_buffer, + const PodOrderEntry& oe, + const GroupByBufferLayoutInfo& layout, + const size_t top_n, + const size_t start, + const size_t step); + +template std::vector baseline_sort(const ExecutorDeviceType device_type, + const int device_id, + Data_Namespace::DataMgr* data_mgr, + const int8_t* groupby_buffer, + const PodOrderEntry& oe, + const GroupByBufferLayoutInfo& layout, + const size_t top_n, + const size_t start, + const size_t step); diff --git a/cuda_code/RigidSolver.cu b/cuda_code/RigidSolver.cu new file mode 100644 index 0000000000000000000000000000000000000000..c8a0271e3c460b4b7eb77e9a2003a88f2a142ff1 --- /dev/null +++ b/cuda_code/RigidSolver.cu @@ -0,0 +1,222 @@ +#include "common/device_intrinsics.h" +#include "common/ConfigParser.h" +#include "math/device_mat.h" +#include "core/warp_solver/RigidSolver.h" +#include "RigidSolver.h" + +#include + +namespace surfelwarp { namespace device { + + struct RigidSolverDevice { + + //The constants for matrix size and blk size + enum + { + //The stoge layout + lhs_matrix_size = 21, + rhs_vector_size = 6, + total_shared_size = lhs_matrix_size + rhs_vector_size, + + //The block size + block_size = 256, + num_warps = block_size / 32, + }; + + //The map from the renderer + struct { + cudaTextureObject_t vertex_map; + cudaTextureObject_t normal_map; + } model_maps; + + //The map from the depth image + struct { + cudaTextureObject_t vertex_map; + cudaTextureObject_t normal_map; + } observation_maps; + + //The camera information + mat34 init_world2camera; + Intrinsic intrinsic; + + //The image information + unsigned image_rows; + unsigned image_cols; + + //The processing interface + __device__ __forceinline__ void solverIteration( + PtrStep reduce_buffer + ) const { + const auto flatten_pixel_idx = threadIdx.x + blockDim.x * blockIdx.x; + const auto x = flatten_pixel_idx % image_cols; + const auto y = flatten_pixel_idx / image_cols; + + //Prepare the jacobian and err + float jacobian[6] = {0}; + float err = 0.0f; + + //The pixel in range, cannot return as reduction is required + if(x < image_cols && y < image_rows) + { + //Load from the rendered maps + const float4 model_v4 = tex2D(model_maps.vertex_map, x, y); + const float4 model_n4 = tex2D(model_maps.normal_map, x, y); + + //Transform into camera view + const auto model_v = init_world2camera.rot * model_v4 + init_world2camera.trans; + const auto model_n = init_world2camera.rot * model_n4; + + //Project to depth 
image + const ushort2 img_coord = { + __float2uint_rn(((model_v.x / (model_v.z + 1e-10)) * intrinsic.focal_x) + intrinsic.principal_x), + __float2uint_rn(((model_v.y / (model_v.z + 1e-10)) * intrinsic.focal_y) + intrinsic.principal_y) + }; + + //The projected point is in range + if(img_coord.x < image_cols && img_coord.y < image_rows) + { + //Load the depth map + const float4 depth_v4 = tex2D(observation_maps.vertex_map, img_coord.x, img_coord.y); + const float4 depth_n4 = tex2D(observation_maps.normal_map, img_coord.x, img_coord.y); + + //Check correspondence + if(dotxyz(model_n, depth_n4) < 0.8f || squared_distance(model_v, depth_v4) > (0.01f * 0.01f) || is_zero_vertex(depth_v4)) { + //Pass + } + else { + err = dotxyz(depth_n4, make_float4(model_v.x - depth_v4.x, model_v.y - depth_v4.y, model_v.z - depth_v4.z, 0.0f)); + *(float3*)jacobian = cross_xyz(model_v, depth_n4); + *(float3*)(jacobian + 3) = make_float3(depth_n4.x, depth_n4.y, depth_n4.z); + } + } + } + + //Time to do reduction + __shared__ float reduce_mem[total_shared_size][num_warps]; + unsigned shift = 0; + const auto warp_id = threadIdx.x >> 5; + const auto lane_id = threadIdx.x & 31; + + //Reduce on matrix + for (int i = 0; i < 6; i++) { //Row index + for (int j = i; j < 6; j++) { //Column index, the matrix is symmetry + float data = (jacobian[i] * jacobian[j]); + data = warp_scan(data); + if (lane_id == 31) { + reduce_mem[shift++][warp_id] = data; + } + //Another sync here for reduced mem + __syncthreads(); + } + } + + //Reduce on vector + for (int i = 0; i < 6; i++) { + float data = (-err * jacobian[i]); + data = warp_scan(data); + if (lane_id == 31) { + reduce_mem[shift++][warp_id] = data; + } + //Another sync here for reduced mem + __syncthreads(); + } + + //Store the result to global memory + const auto flatten_blk = blockIdx.x + gridDim.x * blockIdx.y; + for (int i = threadIdx.x; i < total_shared_size; i += 32) { + if (warp_id == 0) { + const auto warp_sum = reduce_mem[i][0] + reduce_mem[i][1] + reduce_mem[i][2] + reduce_mem[i][3] + + reduce_mem[i][4] + reduce_mem[i][5] + reduce_mem[i][6] + reduce_mem[i][7]; + reduce_buffer.ptr(i)[flatten_blk] = warp_sum; + } + } + } + }; + + __global__ void rigidSolveIterationKernel( + const RigidSolverDevice solver, + PtrStep reduce_buffer + ) { + solver.solverIteration(reduce_buffer); + } + + + __global__ void columnReduceKernel( + const PtrStepSz global_buffer, + float* target + ) { + const auto idx = threadIdx.x; //There are 32 threads on x direction + const auto y = threadIdx.y + blockIdx.y * blockDim.y; //There are total memory size on y direction + float sum = 0.0f; + for (auto i = threadIdx.x; i < global_buffer.cols; i += 32) { + sum += global_buffer.ptr(y)[i]; + } + + //__syncthreads(); + + // Warp reduction + sum = warp_scan(sum); + if (idx == 31) { + target[y] = sum; + } + } + + + +} // device +} // surfelwarp + +void surfelwarp::RigidSolver::allocateReduceBuffer() { + //Allcate the memory for the reduced matrix and vector + m_reduced_matrix_vector.AllocateBuffer(device::RigidSolverDevice::total_shared_size); + m_reduced_matrix_vector.ResizeArrayOrException(device::RigidSolverDevice::total_shared_size); + + //Allocate the memory for the reduction buffer + const auto& config = ConfigParser::Instance(); + const auto pixel_size = config.clip_image_rows() * config.clip_image_cols(); + m_reduce_buffer.create(device::RigidSolverDevice::total_shared_size, divUp(pixel_size, device::RigidSolverDevice::block_size)); +} + +void 
surfelwarp::RigidSolver::rigidSolveDeviceIteration(cudaStream_t stream) { + //Construct the device solver + device::RigidSolverDevice solver; + + //The camera info + solver.intrinsic = m_project_intrinsic; + solver.init_world2camera = m_curr_world2camera; + solver.image_rows = m_image_rows; + solver.image_cols = m_image_cols; + + //The map from renderer + solver.model_maps.vertex_map = m_solver_maps.live_vertex_map; + solver.model_maps.normal_map = m_solver_maps.live_normal_map; + + //The map from observation + solver.observation_maps.vertex_map = m_observation.vertex_map; + solver.observation_maps.normal_map = m_observation.normal_map; + + dim3 blk(device::RigidSolverDevice::block_size); + dim3 grid(divUp(m_image_cols * m_image_rows, blk.x)); + device::rigidSolveIterationKernel<<>>(solver, m_reduce_buffer); + + //Sync and check error +#if defined(CUDA_DEBUG_SYNC_CHECK) + cudaSafeCall(cudaStreamSynchronize(stream)); + cudaSafeCall(cudaGetLastError()); +#endif + + //Do reduction on the buffer + device::columnReduceKernel<<>>( + m_reduce_buffer, + m_reduced_matrix_vector.DevicePtr() + ); + + //Sync and check error +#if defined(CUDA_DEBUG_SYNC_CHECK) + cudaSafeCall(cudaStreamSynchronize(stream)); + cudaSafeCall(cudaGetLastError()); +#endif + + //Sync to host + m_reduced_matrix_vector.SynchronizeToHost(stream, false); +} diff --git a/cuda_code/Runtime+Sentinel.cu b/cuda_code/Runtime+Sentinel.cu new file mode 100644 index 0000000000000000000000000000000000000000..a435896ee6ebbde5083d952469dd3757edb7cddf --- /dev/null +++ b/cuda_code/Runtime+Sentinel.cu @@ -0,0 +1,384 @@ +#include +#include +#include +#include +#ifdef __device__ +#undef __device__ +#define __device__ +#endif +#ifdef __constant__ +#undef __constant__ +#define __constant__ +#endif +#define SENTINEL +#define RUNTIME_NAME RuntimeS +#include "RuntimeHost.h" +#include "Runtime.h" + +#if OS_MAP +#pragma region OS_MAP +#include "Runtime+Alloc.cu" +#include "Runtime+BenignAlloc.cu" +#include "Runtime+Mem0.cu" +#include "Runtime+Mem1.cu" +#include "Runtime+TagBase.cu" + +static RuntimeSentinelContext _ctx; + +static bool Executor(void *tag, RuntimeSentinelMessage *data, int length) +{ + switch (data->OP) + { + case 0: { + Messages::Stdio_fprintf *msg = (Messages::Stdio_fprintf *)data; + msg->RC = fprintf(msg->File, msg->Format); + return true; } + case 1: { + Messages::Stdio_setvbuf *msg = (Messages::Stdio_setvbuf *)data; + msg->RC = setvbuf(msg->File, msg->Buffer, msg->Mode, msg->Size); + return true; } + case 2: { + Messages::Stdio_fopen *msg = (Messages::Stdio_fopen *)data; + msg->RC = fopen(msg->Filename, msg->Mode); + return true; } + case 3: { + Messages::Stdio_fflush *msg = (Messages::Stdio_fflush *)data; + msg->RC = fflush(msg->File); + return true; } + case 4: { + Messages::Stdio_fclose *msg = (Messages::Stdio_fclose *)data; + msg->RC = fclose(msg->File); + return true; } + case 5: { + Messages::Stdio_fgetc *msg = (Messages::Stdio_fgetc *)data; + msg->RC = fgetc(msg->File); + return true; } + case 6: { + Messages::Stdio_fgets *msg = (Messages::Stdio_fgets *)data; + msg->RC = fgets(msg->Str, msg->Num, msg->File); + return true; } + case 7: { + Messages::Stdio_fputc *msg = (Messages::Stdio_fputc *)data; + msg->RC = fputc(msg->Ch, msg->File); + return true; } + case 8: { + Messages::Stdio_fputs *msg = (Messages::Stdio_fputs *)data; + msg->RC = fputs(msg->Str, msg->File); + return true; } + case 9: { + Messages::Stdio_fread *msg = (Messages::Stdio_fread *)data; + msg->RC = fread(msg->Ptr, msg->Size, msg->Num, msg->File); + return 
true; } + case 10: { + Messages::Stdio_fwrite *msg = (Messages::Stdio_fwrite *)data; + msg->RC = fwrite(msg->Ptr, msg->Size, msg->Num, msg->File); + return true; } + case 11: { + Messages::Stdio_fseek *msg = (Messages::Stdio_fseek *)data; + msg->RC = fseek(msg->File, msg->Offset, msg->Origin); + return true; } + case 12: { + Messages::Stdio_ftell *msg = (Messages::Stdio_ftell *)data; + msg->RC = ftell(msg->File); + return true; } + case 13: { + Messages::Stdio_feof *msg = (Messages::Stdio_feof *)data; + msg->RC = feof(msg->File); + return true; } + case 14: { + Messages::Stdio_ferror *msg = (Messages::Stdio_ferror *)data; + msg->RC = ferror(msg->File); + return true; } + case 15: { + Messages::Stdio_clearerr *msg = (Messages::Stdio_clearerr *)data; + clearerr(msg->File); + return true; } + case 16: { + Messages::Stdio_rename *msg = (Messages::Stdio_rename *)data; + msg->RC = rename(msg->Oldname, msg->Newname); + return true; } + case 17: { + Messages::Stdio_unlink *msg = (Messages::Stdio_unlink *)data; + msg->RC = unlink(msg->Str); + return true; } + case 18: { + Messages::Stdio_close *msg = (Messages::Stdio_close *)data; + msg->RC = close(msg->Handle); + return true; } + case 19: { + Messages::Stdio_system *msg = (Messages::Stdio_system *)data; + msg->RC = system(msg->Str); + return true; } + } + return false; +} + +#if HAS_HOSTSENTINEL +static HANDLE _threadHostHandle = NULL; +static unsigned int __stdcall SentinelHostThread(void *data) +{ + RuntimeSentinelContext *ctx = &_ctx; + RuntimeSentinelMap *map = ctx->HostMap; + while (map) + { + long id = map->GetId; + RuntimeSentinelCommand *cmd = (RuntimeSentinelCommand *)&map->Data[id%sizeof(map->Data)]; + volatile long *status = (volatile long *)&cmd->Status; + unsigned int s_; + while (_threadHostHandle && (s_ = InterlockedCompareExchange((long *)status, 3, 2)) != 2) { /*printf("[%d ]", s_);*/ Sleep(50); } // + if (!_threadHostHandle) return 0; + if (cmd->Magic != SENTINEL_MAGIC) + { + printf("Bad Sentinel Magic"); + exit(1); + } + //map->Dump(); + //cmd->Dump(); + RuntimeSentinelMessage *msg = (RuntimeSentinelMessage *)cmd->Data; + for (RuntimeSentinelExecutor *exec = _ctx.List; exec && exec->Executor && !exec->Executor(exec->Tag, msg, cmd->Length); exec = exec->Next) { } + //printf("."); + *status = (!msg->Async ? 4 : 0); + map->GetId += SENTINEL_MSGSIZE; + } + return 0; +} +#endif + +static HANDLE _threadDeviceHandle[SENTINEL_DEVICEMAPS]; +static unsigned int __stdcall SentinelDeviceThread(void *data) +{ + int threadId = (int)data; + RuntimeSentinelContext *ctx = &_ctx; + RuntimeSentinelMap *map = ctx->DeviceMap[threadId]; + while (map) + { + long id = map->GetId; + RuntimeSentinelCommand *cmd = (RuntimeSentinelCommand *)&map->Data[id%sizeof(map->Data)]; + volatile long *status = (volatile long *)&cmd->Status; + unsigned int s_; + while (_threadDeviceHandle[threadId] && (s_ = InterlockedCompareExchange((long *)status, 3, 2)) != 2) { /*printf("[%d ]", s_);*/ Sleep(50); } // + if (!_threadDeviceHandle[threadId]) return 0; + if (cmd->Magic != SENTINEL_MAGIC) + { + printf("Bad Sentinel Magic"); + exit(1); + } + //map->Dump(); + //cmd->Dump(); + RuntimeSentinelMessage *msg = (RuntimeSentinelMessage *)cmd->Data; + for (RuntimeSentinelExecutor *exec = _ctx.List; exec && exec->Executor && !exec->Executor(exec->Tag, msg, cmd->Length); exec = exec->Next) { } + //printf("."); + *status = (!msg->Async ? 
4 : 0); + map->GetId += SENTINEL_MSGSIZE; + } + return 0; +} + +static RuntimeSentinelExecutor _baseExecutor; +#if HAS_HOSTSENTINEL +static RuntimeSentinelMap *_runtimeSentinelHostMap = nullptr; +static HANDLE _hostMapHandle = NULL; +static int *_hostMap = nullptr; +#endif +static int *_deviceMap[SENTINEL_DEVICEMAPS]; + +#if 0 +// https://msdn.microsoft.com/en-us/library/windows/hardware/ff569918(v=vs.85).aspx +// http://www.dreamincode.net/forums/topic/171917-how-to-setread-registry-key-in-c/ +DWORD _savedTdrDelay = -2; +void TdrInitialize() +{ + HKEY key; + if (RegOpenKeyExW(HKEY_LOCAL_MACHINE, L"System\\CurrentControlSet\\Control\\GraphicsDrivers", 0, KEY_QUERY_VALUE, &key) != ERROR_SUCCESS) + return; + DWORD dwBufSize = sizeof(DWORD); + if (RegQueryValueExW(key, L"TdrDelay", 0, 0, (LPBYTE)&_savedTdrDelay, &dwBufSize) != ERROR_SUCCESS) + _savedTdrDelay = -1; + else + printf("Key value is: %d\n", _savedTdrDelay); + DWORD newTdrDelay = 10; + if (RegSetValueExW(key, L"TdrDelay", 0, REG_DWORD, (const BYTE *)&newTdrDelay, sizeof(newTdrDelay)) != ERROR_SUCCESS) + _savedTdrDelay = -1; + RegCloseKey(key); +} + +void TdrShutdown() +{ + HKEY key; + if (RegOpenKeyExW(HKEY_LOCAL_MACHINE, L"System\\CurrentControlSet\\Control\\GraphicsDrivers", 0, KEY_ALL_ACCESS, &key) != ERROR_SUCCESS) + return; + if (_savedTdrDelay < 0) + { + if (RegDeleteValueW(key, L"TdrDelay") != ERROR_SUCCESS) + _savedTdrDelay = -1; + } + else + { + if (RegSetValueExW(key, L"TdrDelay", 0, REG_DWORD, (const BYTE *)&_savedTdrDelay, sizeof(_savedTdrDelay)) != ERROR_SUCCESS) + _savedTdrDelay = -1; + } + RegCloseKey(key); +} +#endif + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366551(v=vs.85).aspx +// https://github.com/pathscale/nvidia_sdk_samples/blob/master/simpleStreams/0_Simple/simpleStreams/simpleStreams.cu +void RuntimeSentinel::ServerInitialize(RuntimeSentinelExecutor *executor, char *mapHostName) +{ + // create host map +#if HAS_HOSTSENTINEL + _hostMapHandle = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0, sizeof(RuntimeSentinelMap) + MEMORY_ALIGNMENT, mapHostName); + if (!_hostMapHandle) + { + printf("Could not create file mapping object (%d).\n", GetLastError()); + exit(1); + } + _hostMap = (int *)MapViewOfFile(_hostMapHandle, FILE_MAP_ALL_ACCESS, 0, 0, sizeof(RuntimeSentinelMap) + MEMORY_ALIGNMENT); + if (!_hostMap) + { + printf("Could not map view of file (%d).\n", GetLastError()); + CloseHandle(_hostMapHandle); + exit(1); + } + _runtimeSentinelHostMap = _ctx.HostMap = (RuntimeSentinelMap *)_ROUNDN(_hostMap, MEMORY_ALIGNMENT); +#endif + + // create device maps +#ifdef _GPU + RuntimeSentinelMap *d_deviceMap[SENTINEL_DEVICEMAPS]; + for (int i = 0; i < SENTINEL_DEVICEMAPS; i++) + { + cudaErrorCheckF(cudaHostAlloc(&_deviceMap[i], sizeof(RuntimeSentinelMap), cudaHostAllocPortable | cudaHostAllocMapped), goto initialize_error); + d_deviceMap[i] = _ctx.DeviceMap[i] = (RuntimeSentinelMap *)_deviceMap[i]; + cudaErrorCheckF(cudaHostGetDevicePointer(&d_deviceMap[i], _ctx.DeviceMap[i], 0), goto initialize_error); + } + cudaErrorCheckF(cudaMemcpyToSymbol(_runtimeSentinelDeviceMap, &d_deviceMap, sizeof(d_deviceMap)), goto initialize_error); +#else + for (int i = 0; i < SENTINEL_DEVICEMAPS; i++) + { + //_deviceMap[i] = (int *)VirtualAlloc(NULL, (sizeof(RuntimeSentinelMap) + MEMORY_ALIGNMENT), MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); + _deviceMap[i] = (int *)malloc(sizeof(RuntimeSentinelMap) + MEMORY_ALIGNMENT); + _runtimeSentinelDeviceMap[i] = _ctx.DeviceMap[i] = (RuntimeSentinelMap 
*)_ROUNDN(_deviceMap[i], MEMORY_ALIGNMENT); + if (!_runtimeSentinelDeviceMap[i]) + { + printf("Could not create map.\n"); + goto initialize_error; + } + memset(_runtimeSentinelDeviceMap[i], 0, sizeof(RuntimeSentinelMap)); + } +#endif + + // register executor + _baseExecutor.Name = "base"; + _baseExecutor.Executor = Executor; + _baseExecutor.Tag = nullptr; + RegisterExecutor(&_baseExecutor, true); + if (executor) + RegisterExecutor(executor, true); + + // launch threads +#if HAS_HOSTSENTINEL + _threadHostHandle = (HANDLE)_beginthreadex(0, 0, SentinelHostThread, nullptr, 0, 0); +#endif + memset(_threadDeviceHandle, 0, sizeof(_threadDeviceHandle)); + for (int i = 0; i < SENTINEL_DEVICEMAPS; i++) + _threadDeviceHandle[i] = (HANDLE)_beginthreadex(0, 0, SentinelDeviceThread, (void *)i, 0, 0); + return; +initialize_error: + ServerShutdown(); + exit(1); +} + +void RuntimeSentinel::ServerShutdown() +{ + // close host map +#if HAS_HOSTSENTINEL + if (_threadHostHandle) { CloseHandle(_threadHostHandle); _threadHostHandle = NULL; } + if (_hostMap) { UnmapViewOfFile(_hostMap); _hostMap = nullptr; } + if (_hostMapHandle) { CloseHandle(_hostMapHandle); _hostMapHandle = NULL; } +#endif + // close device maps + for (int i = 0; i < SENTINEL_DEVICEMAPS; i++) + { + if (_threadDeviceHandle[i]) { CloseHandle(_threadDeviceHandle[i]); _threadDeviceHandle[i] = NULL; } +#ifdef _GPU + if (_deviceMap[i]) { cudaErrorCheckA(cudaFreeHost(_deviceMap[i])); _deviceMap[i] = nullptr; } +#else + if (_deviceMap[i]) { free(_deviceMap[i]); /*VirtualFree(_deviceMap[i], 0, MEM_RELEASE);*/ _deviceMap[i] = nullptr; } +#endif + } +} + +void RuntimeSentinel::ClientInitialize(char *mapHostName) +{ +#if HAS_HOSTSENTINEL + _hostMapHandle = OpenFileMapping(FILE_MAP_ALL_ACCESS, FALSE, mapHostName); + if (!_hostMapHandle) + { + printf("Could not open file mapping object (%d).\n", GetLastError()); + exit(1); + } + _hostMap = (int *)MapViewOfFile(_hostMapHandle, FILE_MAP_ALL_ACCESS, 0, 0, sizeof(RuntimeSentinelMap) + MEMORY_ALIGNMENT); + if (!_hostMap) + { + printf("Could not map view of file (%d).\n", GetLastError()); + CloseHandle(_hostMapHandle); + exit(1); + } + _runtimeSentinelHostMap = _ctx.HostMap = (RuntimeSentinelMap *)_ROUNDN(_hostMap, MEMORY_ALIGNMENT); +#endif +} + +void RuntimeSentinel::ClientShutdown() +{ +#if HAS_HOSTSENTINEL + if (_hostMap) { UnmapViewOfFile(_hostMap); _hostMap = nullptr; } + if (_hostMapHandle) { CloseHandle(_hostMapHandle); _hostMapHandle = NULL; } +#endif +} + +RuntimeSentinelExecutor *RuntimeSentinel::FindExecutor(const char *name) +{ + RuntimeSentinelExecutor *exec = nullptr; + for (exec = _ctx.List; exec && name && strcmp(name, exec->Name); exec = exec->Next) { } + return exec; +} + +static void UnlinkExecutor(RuntimeSentinelExecutor *exec) +{ + if (!exec) { } + else if (_ctx.List == exec) + _ctx.List = exec->Next; + else if (_ctx.List) + { + RuntimeSentinelExecutor *p = _ctx.List; + while (p->Next && p->Next != exec) + p = p->Next; + if (p->Next == exec) + p->Next = exec->Next; + } +} + +void RuntimeSentinel::RegisterExecutor(RuntimeSentinelExecutor *exec, bool default_) +{ + UnlinkExecutor(exec); + if (default_ || !_ctx.List) + { + exec->Next = _ctx.List; + _ctx.List = exec; + } + else + { + exec->Next = _ctx.List->Next; + _ctx.List->Next = exec; + } + assert(_ctx.List != nullptr); +} + +void RuntimeSentinel::UnregisterExecutor(RuntimeSentinelExecutor *exec) +{ + UnlinkExecutor(exec); +} + +#pragma endregion +#endif \ No newline at end of file diff --git a/cuda_code/ScalePlugin.cu 
b/cuda_code/ScalePlugin.cu new file mode 100644 index 0000000000000000000000000000000000000000..0271a13371a75a035e9ce087c8e1c1d5e983bafb --- /dev/null +++ b/cuda_code/ScalePlugin.cu @@ -0,0 +1,38 @@ +#include "ScalePlugin.hpp" +namespace MNN { + +template +__global__ void SCALE(const int n, const int channels, const int dim, const T* in, T* out, + const float* scaleData, const float* biasData); + +template <> +__global__ void SCALE(const int n, const int channels, const int dim, const float* in, float* out, + const float* scaleData, const float* biasData) { + CUDA_KERNEL_LOOP(index, n) { + int c = (index / dim) % channels; + out[index] = in[index] * scaleData[c] + biasData[c]; + } +} + +template <> +__global__ void SCALE<__half>(const int n, const int channels, const int dim, const __half* in, __half* out, + const float* scaleData, const float* biasData) { + CUDA_KERNEL_LOOP(index, n) { + int c = (index / dim) % channels; + out[index] = in[index] * __float2half(scaleData[c]) + __float2half(biasData[c]); + } +} + +cudaError_t ScalePlugin::ScaleExecute(nvinfer1::DataType dataType, const int count, const int channels, const int dim, const float* bottom_data, + float* top_data, const float* scale, const float* bias, cudaStream_t stream) { + if (dataType == nvinfer1::DataType::kFLOAT){ + SCALE<<>>(count, channels, dim, bottom_data, top_data, + scale, bias); + }else{ + SCALE<__half><<>>(count, channels, dim, (const __half*)bottom_data, (__half*)top_data, + scale, bias); + } + + return cudaPeekAtLastError(); +} +}; // namespace MNN \ No newline at end of file diff --git a/cuda_code/ScatterGatherKernel_8.cu b/cuda_code/ScatterGatherKernel_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..42f889382b4ef9e9387b68cd9e0e5b701450be1d --- /dev/null +++ b/cuda_code/ScatterGatherKernel_8.cu @@ -0,0 +1,486 @@ +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +namespace at { namespace native { + +// Implement as functors since lambdas don't get optimized. +class ReduceMultiply { +public: + template + constexpr C10_DEVICE void operator() (scalar_t * self_data, const scalar_t * src_data) const { + gpuAtomicMul(self_data, *src_data); + } +}; +static ReduceMultiply reduce_multiply; + +class ReduceAdd { +public: + template + constexpr C10_DEVICE void operator() (scalar_t * self_data, const scalar_t * src_data) const { + gpuAtomicAdd(self_data, *src_data); + } +}; +static ReduceAdd reduce_add; + +class TensorAssign { +public: + template + constexpr C10_DEVICE void operator() (scalar_t * self_data, const scalar_t * src_data) const { + *self_data = *src_data; + } +}; +static TensorAssign tensor_assign; + +// The kernels are implemented on an opaque, +// self-aligned type of the correct size, +// to avoid redundant kernels for different types +// of the same size. 
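+// For example, float, int32_t and uint32_t all reduce to the same 4-byte OpaqueType
+// instantiation, so a single copy kernel is compiled and reused for every 4-byte dtype.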
+template struct alignas(N) OpaqueType { char data[N]; }; + +// essentialy rewritten related to legacy::launch_kernel parts +template +C10_LAUNCH_BOUNDS_2(nt, vt) +__global__ void _scatter_gather_elementwise_kernel(int N, func_t f) { + constexpr int nv = nt * vt; + int idx = nv * blockIdx.x + threadIdx.x; + + #pragma unroll + for (int i = 0; i < vt; ++i) { + if (idx < N) { + f(idx); + idx += nt; + } + } +} + +template +static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) { + TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits::max()); + if (N == 0) { + return; + } + + const dim3 block(nt); + const dim3 grid((N + block.x * vt - 1) / (block.x * vt)); + const auto stream = at::cuda::getCurrentCUDAStream(); + _scatter_gather_elementwise_kernel<<>>(N, f); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + + +template +struct _cuda_scatter_gather_internal_kernel { + template + void operator() ( + TensorIterator& iter, + int64_t index_size, + int64_t index_stride, + const func_t& f + ) { + if (iter.numel() == 0) { + return; + } + + if (!iter.can_use_32bit_indexing()) { + for (auto& sub_iter : iter.with_32bit_indexing()) { + _cuda_scatter_gather_internal_kernel()( + sub_iter, index_size, index_stride, f + ); + } + return; + } + + char* self_ptr = (char*)iter.data_ptr(0); + char* src_ptr = (char*)iter.data_ptr(1); + char* index_ptr = (char*)iter.data_ptr(2); + + auto offset_calc = make_offset_calculator<3>(iter); + auto loop = [=]C10_DEVICE(int i) { + auto offsets = offset_calc.get(i); + + int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]); + CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size + && "index out of bounds"); + + char* self_data = self_ptr + offsets[0]; + char* src_data = src_ptr + offsets[1]; + + f( + (scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0), + (scalar_t*)src_data + (is_scatter_like ? 0 : idx_dim * index_stride) + ); + + }; + + _launch_scatter_gather_kernel(iter.numel(), loop); + } +}; // struct _cuda_scatter_fill_internal_kernel + +template +struct cuda_scatter_gather_base_kernel { + template + void operator()( + Tensor& self, int64_t dim, + const Tensor& index, const Tensor& src, + const std::string& method_name, + const func_t& f + ) { + // no-op if index is empty + if (index.numel() == 0) { + return; + } + at::assert_no_internal_overlap(self); + + dim = maybe_wrap_dim(dim, self.dim()); + + scatter_gather_dtype_check(method_name, self, index, src); + if (!is_scatter_like) { + gather_shape_check(self, dim, index, src); + } + + auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); + auto self_strides = ensure_nonempty_vec(self.strides().vec()); + auto src_strides = ensure_nonempty_vec(src.strides().vec()); + + // restride self and src such that + // self.shape = src.shape = index.shape + // + // restride stride[dim] such that + // if (is_scatter_like) self.stride[dim] = 0 + // else src.stride[dim] = 0 + auto self_restrided = is_scatter_like ? + restride_dim(self, dim, index_sizes) + : self.as_strided(index_sizes, self_strides); + auto src_restrided = is_scatter_like ? 
+ src.as_strided(index_sizes, src_strides) + : restride_dim(src, dim, index_sizes); + + auto iter = TensorIteratorConfig() + .set_check_mem_overlap(false) + .check_all_same_dtype(false) + .resize_outputs(false) + .add_output(self_restrided) + .add_input(src_restrided) + .add_input(index) + .build(); + + auto self_dim_stride = ensure_nonempty_stride(self, dim); + auto self_dim_size = ensure_nonempty_size(self, dim); + + auto src_dim_stride = ensure_nonempty_stride(src, dim); + auto src_dim_size = ensure_nonempty_size(src, dim); + + auto index_size = is_scatter_like ? self_dim_size : src_dim_size; + auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride; + + + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( + at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, + iter.dtype(), + "cuda_scatter_gather_base_kernel_func", [&] { + using dtype = typename std::conditional, scalar_t>::type; + + _cuda_scatter_gather_internal_kernel()( + iter, index_size, index_stride, f + ); + } + ); + } + + void operator()( + Tensor& self, int64_t dim, + const Tensor& index, const Tensor& src, + const std::string& method_name, + const ReduceMultiply& f + ) { + // no-op if index is empty + if (index.numel() == 0) { + return; + } + at::assert_no_internal_overlap(self); + + dim = maybe_wrap_dim(dim, self.dim()); + + scatter_gather_dtype_check(method_name, self, index, src); + if (!is_scatter_like) { + gather_shape_check(self, dim, index, src); + } + + auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); + auto self_strides = ensure_nonempty_vec(self.strides().vec()); + auto src_strides = ensure_nonempty_vec(src.strides().vec()); + + // restride self and src such that + // self.shape = src.shape = index.shape + // + // restride stride[dim] such that + // if (is_scatter_like) self.stride[dim] = 0 + // else src.stride[dim] = 0 + auto self_restrided = is_scatter_like ? + restride_dim(self, dim, index_sizes) + : self.as_strided(index_sizes, self_strides); + auto src_restrided = is_scatter_like ? + src.as_strided(index_sizes, src_strides) + : restride_dim(src, dim, index_sizes); + + auto iter = TensorIteratorConfig() + .set_check_mem_overlap(false) + .check_all_same_dtype(false) + .resize_outputs(false) + .add_output(self_restrided) + .add_input(src_restrided) + .add_input(index) + .build(); + + auto self_dim_stride = ensure_nonempty_stride(self, dim); + auto self_dim_size = ensure_nonempty_size(self, dim); + + auto src_dim_stride = ensure_nonempty_stride(src, dim); + auto src_dim_size = ensure_nonempty_size(src, dim); + + auto index_size = is_scatter_like ? self_dim_size : src_dim_size; + auto index_stride = is_scatter_like ? 
self_dim_stride : src_dim_stride; + + + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, at::ScalarType::BFloat16, + iter.dtype(), + "cuda_scatter_gather_base_kernel_reduce_multiply", [&] { + using dtype = typename std::conditional, scalar_t>::type; + + _cuda_scatter_gather_internal_kernel()( + iter, index_size, index_stride, f + ); + } + ); + } +}; // struct cuda_scatter_gather_base_kernel + +template +struct _cuda_scatter_fill_internal_kernel { + template + void operator()( + TensorIterator& iter, + scalar_t src_val, + int64_t index_size, + int64_t index_stride, + const func_t& f + ) { + if (iter.numel() == 0) { + return; + } + + if (!iter.can_use_32bit_indexing()) { + for (auto& sub_iter : iter.with_32bit_indexing()) { + _cuda_scatter_fill_internal_kernel()( + sub_iter, src_val, index_size, index_stride, f + ); + } + return; + } + + char* self_ptr = (char*)iter.data_ptr(0); + char* index_ptr = (char*)iter.data_ptr(1); + + auto offset_calc = make_offset_calculator<2>(iter); + auto loop = [=]C10_DEVICE(int i) { + auto offsets = offset_calc.get(i); + + int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]); + CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size + && "index out of bounds" + ); + + char* self_data = self_ptr + offsets[0]; + + f( + (scalar_t*)self_data + idx_dim * index_stride, + (scalar_t*)&src_val + ); + + }; + + _launch_scatter_gather_kernel(iter.numel(), loop); + } +}; // struct _cuda_scatter_fill_internal_kernel + +template +struct cuda_scatter_fill_base_kernel { + template + void operator()( + Tensor& self, int64_t dim, + const Tensor& index, Scalar src, + const std::string& method_name, + const func_t& f + ) { + // no-op if index is empty + if (index.numel() == 0) { + return; + } + at::assert_no_internal_overlap(self); + + dim = maybe_wrap_dim(dim, self.dim()); + + auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); + + // restride self such that + // self.shape = index.shape and + // self.stride[dim] = 0 + auto self_restrided = restride_dim(self, dim, index_sizes); + + auto iter = TensorIteratorConfig() + .set_check_mem_overlap(false) + .check_all_same_dtype(false) + .resize_outputs(false) + .add_output(self_restrided) + .add_input(index) + .build(); + + auto index_size = ensure_nonempty_size(self, dim); + auto index_stride = ensure_nonempty_stride(self, dim); + + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( + at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, + iter.dtype(), + "cuda_scatter_fill_base_kernel_func", [&] { + using dtype = typename std::conditional, scalar_t>::type; + + auto src_scalar_val = src.to(); + auto src_val = *(dtype*)&src_scalar_val; + + _cuda_scatter_fill_internal_kernel()( + iter, src_val, index_size, index_stride, f + ); + } + ); + } + + void operator()( + Tensor& self, int64_t dim, + const Tensor& index, Scalar src, + const std::string& method_name, + const ReduceMultiply& f + ) { + // no-op if index is empty + if (index.numel() == 0) { + return; + } + at::assert_no_internal_overlap(self); + + dim = maybe_wrap_dim(dim, self.dim()); + + auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); + + // restride self such that + // self.shape = index.shape and + // self.stride[dim] = 0 + auto self_restrided = restride_dim(self, dim, index_sizes); + + auto iter = TensorIteratorConfig() + .set_check_mem_overlap(false) + .check_all_same_dtype(false) + .resize_outputs(false) + .add_output(self_restrided) + .add_input(index) + .build(); + + auto index_size = ensure_nonempty_size(self, dim); + auto index_stride = 
ensure_nonempty_stride(self, dim); + + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, at::ScalarType::BFloat16, + iter.dtype(), + "cuda_scatter_fill_base_kernel_reduce_multiply", [&] { + using dtype = typename std::conditional, scalar_t>::type; + + auto src_scalar_val = src.to(); + auto src_val = *(dtype*)&src_scalar_val; + + _cuda_scatter_fill_internal_kernel()( + iter, src_val, index_size, index_stride, f + ); + } + ); + } +}; // struct cuda_scatter_fill_base_kernel + +void gather_cuda_kernel(Tensor& result, const Tensor& self, int64_t dim, const Tensor& index) { + cuda_scatter_gather_base_kernel()( + result, dim, index, self, + "gather_out_cuda", tensor_assign); +} + +void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { + cuda_scatter_gather_base_kernel<>()( + self, dim, index, src, + "scatter_cuda_", tensor_assign); +} + +void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Scalar& src) { + cuda_scatter_fill_base_kernel<>()( + self, dim, index, src, + "scatter_fill_cuda_", tensor_assign); +} + +void scatter_add_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("scatter_add_cuda_kernel"); + cuda_scatter_gather_base_kernel()( + self, dim, index, src, + "scatter_add_cuda_", reduce_add); +} + +void scatter_reduce_cuda_kernel(Tensor& self, const int64_t dim, const Tensor& index, + const Tensor& src, const SCATTER_GATHER_OP& reduce) { + switch (reduce) { + case SCATTER_GATHER_OP::REDUCE_ADD : + cuda_scatter_gather_base_kernel()(self, dim, index, src, + "scatter_reduce_cuda_add_", reduce_add); + break; + case SCATTER_GATHER_OP::REDUCE_MULTIPLY : + cuda_scatter_gather_base_kernel()(self, dim, index, src, + "scatter_reduce_cuda_multiply_", reduce_multiply); + break; + } +} + +void scatter_scalar_reduce_cuda_kernel(Tensor& self, const int64_t dim, const Tensor& index, + const Scalar& value, const SCATTER_GATHER_OP& reduce) { + switch (reduce) { + case SCATTER_GATHER_OP::REDUCE_ADD : + cuda_scatter_fill_base_kernel()(self, dim, index, value, + "scatter_fill_cuda_add_", reduce_add); + break; + case SCATTER_GATHER_OP::REDUCE_MULTIPLY : + cuda_scatter_fill_base_kernel()(self, dim, index, value, + "scatter_fill_cuda_multiply_", reduce_multiply); + break; + } +} + + +REGISTER_DISPATCH(gather_stub, &gather_cuda_kernel); +REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel); +REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel); +REGISTER_DISPATCH(scatter_add_stub, &scatter_add_cuda_kernel); +REGISTER_DISPATCH(scatter_reduce_stub, &scatter_reduce_cuda_kernel); +REGISTER_DISPATCH(scatter_scalar_reduce_stub, &scatter_scalar_reduce_cuda_kernel); + +}} // namespace at::native diff --git a/cuda_code/ScatterNdPlugin.cu b/cuda_code/ScatterNdPlugin.cu new file mode 100644 index 0000000000000000000000000000000000000000..cd841d566af82b9e80e707570ada04a31820be3b --- /dev/null +++ b/cuda_code/ScatterNdPlugin.cu @@ -0,0 +1,87 @@ +#include "ScatterNdPlugin.hpp" +namespace MNN { + +template +__global__ void SetZero(const int n, T* outputPtr) { + CUDA_KERNEL_LOOP(index, n) { + outputPtr[index] = (T)0; + } +} + +struct Lock{ + int *mutex; + Lock(void){ + int state = 0; + cudaMalloc((void**) &mutex, sizeof(int)); + cudaMemcpy(mutex, &state, sizeof(int), cudaMemcpyHostToDevice); + } + ~Lock(void){ + cudaFree(mutex); + } + __device__ void lock(void){ + 
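+        // Spin until atomicCAS atomically flips *mutex from 0 (unlocked) to 1 (locked);
+        // unlock() below releases it by atomically writing 0 back.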
while(atomicCAS(mutex, 0, 1) != 0); + } + __device__ void unlock(void){ + atomicExch(mutex, 0); + } +}; + +template +__global__ void ScatterNd(const int n, const int indicesLastDim, const int accNumber, const T* indicesPtr, + const T* updatesPtr, T* outputPtr, const int* dimsToCount, Lock cuLock); + + +template <> +__global__ void ScatterNd(const int n, const int indicesLastDim, const int accNumber, const float* indicesPtr, + const float* updatesPtr, float* outputPtr, const int* dimsToCount, Lock cuLock) { + CUDA_KERNEL_LOOP(index, n) { + int pos = 0; + for (int j = 0; j < indicesLastDim; ++j) { + auto curIndex = (int)indicesPtr[index * indicesLastDim + j]; + // MNN_ASSERT(curIndex >= 0 && curIndex < output->length(j)); + pos += curIndex * dimsToCount[j]; + } + for (int k = 0; k < accNumber; ++k) { + float updateValue = updatesPtr[index * accNumber + k]; + atomicAdd(outputPtr + pos + k, updateValue); + } + } +} + +template <> +__global__ void ScatterNd<__half>(const int n, const int indicesLastDim, const int accNumber, const __half* indicesPtr, + const __half* updatesPtr, __half* outputPtr, const int* dimsToCount, Lock cuLock) { + CUDA_KERNEL_LOOP(index, n) { + int pos = 0; + for (int j = 0; j < indicesLastDim; ++j) { + auto curIndex = (int)indicesPtr[index * indicesLastDim + j]; + // MNN_ASSERT(curIndex >= 0 && curIndex < output->length(j)); + pos += curIndex * dimsToCount[j]; + } + for (int k = 0; k < accNumber; ++k) { + float updateValue = updatesPtr[index * accNumber + k]; + cuLock.lock(); + outputPtr[pos + k] += updateValue; + cuLock.unlock(); + } + } +} + +cudaError_t ScatterNdPlugin::ScatterNdExecute(nvinfer1::DataType dataType, const int count, const int outElementSize, const int indicesLastDim, + const int accNumber, const float* indice, const void* update, void* top_data, + const int32_t* dimsToCount, cudaStream_t stream) { + Lock cuLock; + if (dataType == nvinfer1::DataType::kFLOAT){ + SetZero<<>>(outElementSize, (float*)top_data); + ScatterNd<<>>( + count, indicesLastDim, accNumber, (const float*)indice, (const float*)update, (float*)top_data, dimsToCount, cuLock); + }else{ + SetZero<__half><<>>(outElementSize, (__half*)top_data); + ScatterNd<__half><<>>( + count, indicesLastDim, accNumber, (const __half*)indice, (const __half*)update, (__half*)top_data, dimsToCount, cuLock); + } + + return cudaPeekAtLastError(); +} + +}; // namespace MNN \ No newline at end of file diff --git a/cuda_code/SegmentReduce_10.cu b/cuda_code/SegmentReduce_10.cu new file mode 100644 index 0000000000000000000000000000000000000000..1728961960573e1d6b856221a45ed0d11e9d15ad --- /dev/null +++ b/cuda_code/SegmentReduce_10.cu @@ -0,0 +1,87 @@ + +#include + +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +struct CustomMax { + template + __host__ __device__ __forceinline__ OutputT + operator()(const OutputT& a, const OutputT& b) const { + if (at::_isnan(a)) { + return a; + } else if (at::_isnan(b)) { + return b; + } + return std::max(a, b); + } +}; + +Tensor _get_complete_sum(const Tensor& lengths) { + int64_t segment_count = lengths.numel(); + TORCH_CHECK(segment_count < INT_MAX); + auto offsets = at::empty({segment_count + 1}, lengths.options()); + offsets[0].zero_(); + auto* lengths_data_ptr = lengths.data_ptr(); + auto* offsets_data_ptr = offsets.data_ptr(); + + CUB_WRAPPER( + cub::DeviceScan::InclusiveSum, + lengths_data_ptr, + offsets_data_ptr + 1, + segment_count, + at::cuda::getCurrentCUDAStream()); + + return offsets; +} + +Tensor _segment_reduce_cuda_kernel( 
+ SegmentReductionType reduction, + const Tensor& data, + const Tensor& lengths, + int64_t axis, + const c10::optional& initial) { + int64_t segment_count = lengths.numel(); + auto output = at::empty({segment_count}, data.options()); + + auto offsets = _get_complete_sum(lengths); + auto* offsets_data_ptr = offsets.data_ptr(); + + AT_DISPATCH_ALL_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + data.scalar_type(), + "segment_reduce_cuda", + [&]() { + auto* data_data_ptr = data.data_ptr(); + auto* output_data_ptr = output.data_ptr(); + + CustomMax max_op{}; + scalar_t initial_value = initial.has_value() + ? initial.value().to() + : std::numeric_limits::lowest(); + CUB_WRAPPER( + cub::DeviceSegmentedReduce::Reduce, + data_data_ptr, + output_data_ptr, + segment_count, + offsets_data_ptr, + offsets_data_ptr + 1, + max_op, + initial_value, + at::cuda::getCurrentCUDAStream()); + }); + + return output; +} + +REGISTER_DISPATCH(_segment_reduce_stub, &_segment_reduce_cuda_kernel); + +} // namespace native +} // namespace at diff --git a/cuda_code/Shaders_1.cu b/cuda_code/Shaders_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..8197ee94d69eae942727be8db85d048ad6c5091d --- /dev/null +++ b/cuda_code/Shaders_1.cu @@ -0,0 +1,211 @@ +/* + * Modified version, originally from Samuli Laine's and Tero Karras' CudaRaster. + * (http://code.google.com/p/cudaraster/) + * + * 04-2012 - Thibault Coppex + * + * --------------------------------------------------------------------------- + * + * Copyright 2009-2010 NVIDIA Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// EMIT_NVCC_OPTIONS -use_fast_math + +#include "Shaders.hpp" + +// CudaRaster +#include + +using namespace FW; + +//------------------------------------------------------------------------ +// Lighting. +//------------------------------------------------------------------------ + +__device__ +Vec3f FW::evaluateLighting(Vec3f cameraPos, Vec3f cameraNormal, + const Material& material, Vec3f diffuseColor) +{ + Vec3f I = normalize(cameraPos); + Vec3f N = normalize(cameraNormal); + F32 dotIN = dot(I, N); + Vec3f R = I - N * (dotIN * 2.0f); + + F32 diffuseCoef = fmaxf(-dotIN, 0.0f) * 0.75f + 0.25f; + F32 specularCoef = powf(fmaxf(-dot(I, R), 0.0f), material.glossiness); + return diffuseCoef * diffuseColor + specularCoef * material.specularColor; +} + +//------------------------------------------------------------------------ +// Vertex shaders. +//------------------------------------------------------------------------ + +extern "C" __global__ +void FW::vertexShader_gouraud(const InputVertex* inPtr, + ShadedVertex_gouraud* outPtr, + int numVertices) +{ + // Pick a vertex. + + int vidx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * + (blockIdx.x + gridDim.x * blockIdx.y)); + + if (vidx >= numVertices) + return; + + const InputVertex& in = inPtr[vidx]; + ShadedVertex_gouraud& out = outPtr[vidx]; + + // Shade. 
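+    // Gouraud path: lighting is evaluated once per vertex in camera space and written to
+    // out.color; the rasterizer interpolates that color across the triangle, whereas the
+    // texPhong pipeline below defers lighting to the fragment shader.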
+ + Vec3f cameraPos = (c_constants.posToCamera * Vec4f(in.modelPos, 1.0f)).getXYZ(); + Vec3f cameraNormal = c_constants.normalToCamera * in.modelNormal; + int materialIdx = ((const S32*)c_constants.vertexMaterialIdx)[vidx]; + const Material& material = ((const Material*)c_constants.materials)[materialIdx]; + Vec4f diffuseColor = material.diffuseColor; + Vec3f color = evaluateLighting(cameraPos, cameraNormal, material, diffuseColor.getXYZ()); + + out.clipPos = c_constants.posToClip * Vec4f(in.modelPos, 1.0f); + out.color = Vec4f(color, diffuseColor.w); +} + +//------------------------------------------------------------------------ + +extern "C" __global__ +void FW::vertexShader_texPhong(const InputVertex* inPtr, + ShadedVertex_texPhong* outPtr, + int numVertices) +{ + // Pick a vertex. + + int vidx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * + (blockIdx.x + gridDim.x * blockIdx.y)); + + if (vidx >= numVertices) + return; + + const InputVertex& in = inPtr[vidx]; + ShadedVertex_texPhong& out = outPtr[vidx]; + + // Shade. + + out.clipPos = c_constants.posToClip * Vec4f(in.modelPos, 1.0f); + out.cameraPos = c_constants.posToCamera * Vec4f(in.modelPos, 1.0f); + out.cameraNormal = Vec4f(c_constants.normalToCamera * in.modelNormal, 0.0f); + out.texCoord = Vec4f(in.texCoord, 0.0f, 1.0f); +} + + +//============================================================================== + + +//------------------------------------------------------------------------ +// Fragment shaders. +//------------------------------------------------------------------------ + +typedef GouraudShader FragmentShader_gouraud; + +//------------------------------------------------------------------------ + +class FragmentShader_texPhong : public FragmentShaderBase +{ + public: + __device__ __inline__ + Vec4f texture2D(const TextureSpec& spec, const Vec2f& tex, + const Vec2f& texDX, const Vec2f& texDY) + { + // Choose LOD. + F32 dxlen = sqr(texDX.x * spec.size.x) + sqr(texDX.y * spec.size.y); + F32 dylen = sqr(texDY.x * spec.size.x) + sqr(texDY.y * spec.size.y); + F32 lod = fminf(fmaxf(log2f(fmaxf(dxlen, dylen)) * 0.5f, 0.0f), + (F32)(FW_ARRAY_SIZE(spec.mipLevels) - 2)); + int levelIdx = (int)lod; + Vec4f m0 = spec.mipLevels[levelIdx + 0]; + Vec4f m1 = spec.mipLevels[levelIdx + 1]; + + // Perform two bilinear lookups and interpolate. + F32 tx = tex.x - floorf(tex.x); + F32 ty = tex.y - floorf(tex.y); + float4 v0 = tex2D(t_textureAtlas, tx * m0.x + m0.z, ty * m0.y + m0.w); + float4 v1 = tex2D(t_textureAtlas, tx * m1.x + m1.z, ty * m1.y + m1.w); + + return lerp( Vec4f(v0.x, v0.y, v0.z, v0.w), + Vec4f(v1.x, v1.y, v1.z, v1.w), + lod - (F32)levelIdx); + } + + __device__ __inline__ + void run(void) + { + // Interpolate attributes. + + Vec3f cameraPos = interpolateVarying(0, m_centroid).getXYZ(); + Vec3f cameraNormal = interpolateVarying(1, m_centroid).getXYZ(); + Vec2f tex, texDX, texDY; + + if ((RENDER_MODE_FLAGS & RenderModeFlag_EnableQuads) == 0) + { + // Sample at pixel centroid, use analytical derivatives. + tex = interpolateVarying(2, m_centroid).getXY(); + texDX = interpolateVarying(2, m_centroidDX).getXY(); + texDY = interpolateVarying(2, m_centroidDY).getXY(); + } + else + { + // Sample at pixel center, use numerical derivatices. + tex = interpolateVarying(2, m_center).getXY(); + texDX = dFdx(tex); + texDY = dFdy(tex); + } + + // Fetch material and perform texture lookups. 
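+        // The material is looked up per triangle (m_triIdx). A diffuse texture, when present,
+        // overrides the constant diffuse color, and an optional alpha texture feeds the
+        // 0.5-threshold alpha test below, which discards the fragment.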
+ int materialIdx = ((const S32*)c_constants.triangleMaterialIdx)[m_triIdx]; + const Material& material = ((const Material*)c_constants.materials)[materialIdx]; + Vec4f diffuseColor = material.diffuseColor; + + if (material.diffuseTexture.size.x != 0.0f) + { + diffuseColor = Vec4f(texture2D(material.diffuseTexture, tex, texDX, texDY).getXYZ(), + diffuseColor.w); + } + + if (material.alphaTexture.size.x != 0.0f) { + diffuseColor.w = texture2D(material.alphaTexture, tex, texDX, texDY).y; + } + + // Alpha test. + if (diffuseColor.w < 0.5f) + { + m_discard = true; + return; + } + + // Shading. + Vec3f color = evaluateLighting(cameraPos, cameraNormal, material, diffuseColor.getXYZ()); + m_color = toABGR(Vec4f(color, diffuseColor.w)); + } +}; + +//------------------------------------------------------------------------ +// Pixel pipes. +//------------------------------------------------------------------------ + +CR_DEFINE_PIXEL_PIPE( PixelPipe_gouraud, ShadedVertex_gouraud, FragmentShader_gouraud, + BLEND_SHADER, SAMPLES_LOG2, RENDER_MODE_FLAGS) + +CR_DEFINE_PIXEL_PIPE( PixelPipe_texPhong, ShadedVertex_texPhong, FragmentShader_texPhong, + BLEND_SHADER, SAMPLES_LOG2, RENDER_MODE_FLAGS) + +//------------------------------------------------------------------------ diff --git a/cuda_code/Shape_14.cu b/cuda_code/Shape_14.cu new file mode 100644 index 0000000000000000000000000000000000000000..05fa4c6e165c4681d601476f744d42bb8599e9d8 --- /dev/null +++ b/cuda_code/Shape_14.cu @@ -0,0 +1,624 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace at { +namespace native { + +#ifdef __HIP_PLATFORM_HCC__ +constexpr int CAT_ARRAY_BATCH_SIZE = 1024; +#else +constexpr int CAT_ARRAY_BATCH_SIZE = 128; +#endif +constexpr int CAT_ARRAY_MAX_INPUT_DIMS = 4; + +namespace { + +inline bool getCatGrid(ptrdiff_t nTensors, dim3& grid) { + const int numSM = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; + + //X dim of grid for cat array cooperates on a single tensor in the cat. + //Given half of the GPU, full utilization will always occur. + grid = dim3( 2LL * numSM, (long long) nTensors ); + + return true; +} + +// Similar to any other IndexToOffset calculation for copying along a given +// dimension. +template +struct CatArrIndexToOffset { + static inline __device__ IndexType compute( + const IndexType tensorSize[Dims], + const IndexType tensorStride[Dims], + const IndexType dimSize, + const unsigned int concatDim, + IndexType linearIndex) { + // linearIndex is not really linear index, but instead the offset in + // input tensor. If the input tensor is contiguous, then this offset + // is the linear index, but if the input tensor is channels last, then + // it is the linear index of the permuted contiguous tensor + IndexType offset = 0; + +#pragma unroll + for (int i = Dims - 1; i >= 1; --i) { + IndexType curDimSize = i == concatDim ? dimSize : tensorSize[i]; + IndexType nextDimIndex = linearIndex / curDimSize; + IndexType curDimIndex = linearIndex - curDimSize * nextDimIndex; + IndexType curDimOffset = curDimIndex * tensorStride[i]; + offset += curDimOffset; + linearIndex = nextDimIndex; + } + + return offset + linearIndex * tensorStride[0]; + } +}; + +template +struct TensorSizeStride { + IndexType tensorSize[MaxDims]; + IndexType tensorStride[MaxDims]; +}; + +/** + * Kernel used to concatenated grimDim.y tensors into an output tensor. 
Uses a + * grid-stride loop based off of the blockIdx.x, threadIdx.x for each input to + * copy each element from each input tensor into the output. + * + * output: base pointer to the storage associated with the output tensor + * inputs: GPU-allocated array of input metadata for each input to concatenate + * in the kernel + * os: the size/stride vectors for the output tensor + * concatDim: dimension along which we are concatenating + * dimStride: the stride of the output tensor at the concatDim + * + * The most important assumption made is that the input tensors are contiguous. + */ + + +// Use pinned memory and and pass the struct by pointer on ROCm +template +struct CatArrInputTensor { + T* input; + IndexType offset; + IndexType dimSize; + IndexType nElements; +}; + +template +C10_LAUNCH_BOUNDS_1(512) +__global__ void HIP_CatArrayBatchedCopy( + T* output, + CatArrInputTensor* inputs, + TensorSizeStride os, + const int concatDim, + IndexType dimStride) { + + IndexType tid = blockIdx.x * blockDim.x + threadIdx.x; + IndexType nElements = inputs[blockIdx.y].nElements; + + if(tid >= nElements) return; + + T* data = inputs[blockIdx.y].input; + IndexType offset = inputs[blockIdx.y].offset; + IndexType dimSize = inputs[blockIdx.y].dimSize; + IndexType dataOffset = offset * dimStride; + + IndexType stride = gridDim.x * blockDim.x; + + while( tid < nElements){ + IndexType elementOffset = CatArrIndexToOffset::compute( + os.tensorSize, os.tensorStride, dimSize, concatDim, tid); + output[dataOffset + elementOffset] = data[tid]; + + tid += stride; + } +} + +// pass meta data directly through kernel argument instead of pin memory +// In contiguous case, we will not need stride_size, setting it as 1 as placeholder +// to pass compile. +template +struct CatArrInputTensorMetadata { + T* input[n]; + IndexType offset[n]; + IndexType dimSize[n]; + IndexType nElements[n]; + bool isContiguous[n]; + TensorSizeStride tensorStride[stride_size]; +}; + +template +__global__ void CatArrayBatchedCopy( + T* output, + CatArrInputTensorMetadata inputs, + TensorSizeStride os, + const int concatDim, + IndexType dimStride) { + + IndexType tid = blockIdx.x * blockDim.x + threadIdx.x; + IndexType nElements = inputs.nElements[blockIdx.y]; + TensorSizeStride ins = stride_size > 1 ? 
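+    // stride_size > 1 means every input carries its own size/stride entry (non-contiguous case);
+    // otherwise the single placeholder entry at index 0 is shared by all inputs.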
inputs.tensorStride[blockIdx.y] : inputs.tensorStride[0]; + bool isContig = inputs.isContiguous[blockIdx.y]; + + if(tid >= nElements) return; + + T* data = inputs.input[blockIdx.y]; + IndexType offset = inputs.offset[blockIdx.y]; + IndexType dimSize = inputs.dimSize[blockIdx.y]; + IndexType dataOffset = offset * dimStride; + + IndexType stride = gridDim.x * blockDim.x; + + while( tid < nElements){ + IndexType elementOffset = CatArrIndexToOffset::compute( + os.tensorSize, os.tensorStride, dimSize, concatDim, tid); + if (isContig) { + output[dataOffset + elementOffset] = data[tid]; + } else { + IndexType inElementOffset = CatArrIndexToOffset::compute( + ins.tensorSize, ins.tensorStride, dimSize, concatDim, tid); + output[dataOffset + elementOffset] = data[inElementOffset]; + } + tid += stride; + } +} + +void check_shape_except_dim(const Tensor &first, const Tensor &second, + int dimension, int index) +{ + int first_dims = first.dim(); + int second_dims = second.dim(); + TORCH_CHECK(first_dims == second_dims, + "Tensors must have same number of dimensions: got ", first_dims, + " and ", second_dims); + for (int dim = 0; dim < first_dims; dim++) { + if (dim == dimension) { + continue; + } + int64_t first_dim_size = at::native::size(first, dim); + int64_t second_dim_size = at::native::size(second, dim); + TORCH_CHECK(first_dim_size == second_dim_size, + "Sizes of tensors must match except in dimension ", dim, ". Got ", + static_cast(first_dim_size), " and ", + static_cast(second_dim_size), " (The offending index is ", + index, ")"); + } +} + +template +void hip_parallel_cat(Tensor &out, const TensorList &inputs, int64_t dimension, + int nDims, c10::MemoryFormat memory_format) { + // First, let's set up our kernel parameters. We start with a raw pointer to + // the storage for the output Tensor. + scalar_t *data = out.data_ptr(); + + // Kernel Parameter + long tensorMetadataSize = + sizeof(CatArrInputTensor) * CAT_ARRAY_BATCH_SIZE; + auto d_inputs_storage = at::empty( + {tensorMetadataSize}, out.options().dtype(at::kByte)); + auto d_inputs = static_cast *>( + d_inputs_storage.data_ptr()); + + TensorSizeStride outputParam; + + // Next, let's initialize the size, stride arrays for the output Tensor. 
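+  // For channels-last outputs the dims are logically rotated from NCHW to NHWC below (dim 1
+  // moves to the last slot), so that CatArrIndexToOffset walks the output in its actual memory order.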
+ if (memory_format == c10::MemoryFormat::Contiguous) { + for (int i = 0; i < nDims; ++i) { + outputParam.tensorSize[i] = at::native::size(out, i); + outputParam.tensorStride[i] = out.stride(i); + } + } else if (memory_format == c10::MemoryFormat::ChannelsLast || memory_format == c10::MemoryFormat::ChannelsLast3d) { + // permute the semantics of dims from NCHW to NHWC so that the input + // tensor is now contiguous + outputParam.tensorSize[0] = at::native::size(out, 0); + outputParam.tensorStride[0] = out.stride(0); + for (int i = 1; i < nDims - 1; ++i) { + outputParam.tensorSize[i] = at::native::size(out, i + 1); + outputParam.tensorStride[i] = out.stride(i + 1); + } + outputParam.tensorSize[nDims - 1] = at::native::size(out, 1); + outputParam.tensorStride[nDims - 1] = out.stride(1); + } else { + TORCH_CHECK(false, "unsupported memory format"); + } + + at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); + + // Now we loop + int batchCounter = 0; + int64_t offset = 0; + for (int i = 0; i < inputs.size() ; i += CAT_ARRAY_BATCH_SIZE) { + // Re-allocate stackInputs every iteration to avoid read-after-write hazard + { + auto stackInputs_storage = at::empty({tensorMetadataSize}, + out.options().dtype(at::kByte).device(at::kCPU).pinned_memory(true)); + auto stackInputs = + static_cast *>( + stackInputs_storage.data_ptr()); + for (batchCounter = 0; + batchCounter < CAT_ARRAY_BATCH_SIZE && + (i+batchCounter) < inputs.size(); + ++batchCounter) { + int64_t dimSize = 0; + // There is a legacy case where a 1-D empty tensor can be concat with + // high-dimensional tensor + if (inputs[i+batchCounter].numel() > 0) { + dimSize = at::native::size(inputs[i+batchCounter], dimension); + } + + stackInputs[batchCounter].input = + inputs[i+batchCounter].data_ptr(); + stackInputs[batchCounter].offset = offset; + stackInputs[batchCounter].dimSize = dimSize; + stackInputs[batchCounter].nElements = inputs[i+batchCounter].numel(); + + // update offset + offset += dimSize; + } + at::native::copy_(d_inputs_storage, stackInputs_storage, + /* non_blocking= */ true); + } + + // Next, let's consider how we set our kernel launch parameters. + // We borrow from THCApply, which the kernel's internal indexing + // is based on. + dim3 applyBlock = dim3(32*16); + + //Get grid where x dim fills half gpu and y dim is number of tensors. + //This will have cating two tensors fill the entire grid, but prevent + //many threads from needlessly load meta data if their sizes is small. + dim3 catGrid; + getCatGrid(batchCounter, catGrid); + + if (memory_format != c10::MemoryFormat::Contiguous) { + switch (dimension) { + case 0: + break; + case 1: + dimension = nDims - dimension; + break; + default: + dimension--; + } + } + // Template Declarations for dim = 1, 2, 3, 4 +#define HANDLE_CASE(DIMS) \ + HIP_CatArrayBatchedCopy<<<\ + catGrid, applyBlock, 0, stream.stream()>>>(\ + data, d_inputs, outputParam, dimension, outputParam.tensorStride[dimension]); \ + C10_CUDA_KERNEL_LAUNCH_CHECK(); + switch (nDims) { + case 1: + HANDLE_CASE(1); + break; + case 2: + HANDLE_CASE(2); + break; + case 3: + HANDLE_CASE(3); + break; + case 4: + HANDLE_CASE(4); + break; + } +#undef HANDLE_CASE + } +} + +template +void parallel_cat(Tensor &out, const TensorList &inputs, int64_t dimension, + int nDims, c10::MemoryFormat memory_format) { + // First, let's set up our kernel parameters. We start with a raw pointer to + // the storage for the output Tensor. 
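+  // Unlike hip_parallel_cat above, this path passes CatArrInputTensorMetadata by value as a
+  // kernel argument, so no pinned staging buffer or extra host-to-device copy is needed per batch.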
+ scalar_t *data = out.data_ptr(); + CatArrInputTensorMetadata catMetaData; + TensorSizeStride outputParam; + + // Next, let's initialize the size, stride arrays for the output Tensor. + if (memory_format == c10::MemoryFormat::Contiguous) { + for (int i = 0; i < nDims; ++i) { + outputParam.tensorSize[i] = at::native::size(out, i); + outputParam.tensorStride[i] = out.stride(i); + } + } else if (memory_format == c10::MemoryFormat::ChannelsLast || memory_format == c10::MemoryFormat::ChannelsLast3d) { + // permute the semantics of dims from NCHW to NHWC so that the input + // tensor is now contiguous + outputParam.tensorSize[0] = at::native::size(out, 0); + outputParam.tensorStride[0] = out.stride(0); + for (int i = 1; i < nDims - 1; ++i) { + outputParam.tensorSize[i] = at::native::size(out, i + 1); + outputParam.tensorStride[i] = out.stride(i + 1); + } + outputParam.tensorSize[nDims - 1] = at::native::size(out, 1); + outputParam.tensorStride[nDims - 1] = out.stride(1); + } else { + TORCH_CHECK(false, "unsupported memory format"); + } + + at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); + + // Now we loop + int batchCounter = 0; + int64_t offset = 0; + for (int i = 0; i < inputs.size() ; i += batch_size) { + for (batchCounter = 0; + batchCounter < batch_size && + (i+batchCounter) < inputs.size(); + ++batchCounter) { + int64_t dimSize = 0; + // There is a legacy case where a 1-D empty tensor can be concat with + // high-dimensional tensor + if (inputs[i+batchCounter].numel() > 0) { + dimSize = at::native::size(inputs[i+batchCounter], dimension); + } + catMetaData.input[batchCounter] = inputs[i+batchCounter].data_ptr(); + catMetaData.offset[batchCounter] = offset; + catMetaData.dimSize[batchCounter] = dimSize; + catMetaData.nElements[batchCounter] = inputs[i+batchCounter].numel(); + if (stride_size > 1) { + auto strides = inputs[i+batchCounter].strides(); + auto sizes = inputs[i+batchCounter].sizes(); + for(int j = 0; j < nDims; j++){ + catMetaData.tensorStride[batchCounter].tensorSize[j] = sizes[j]; + catMetaData.tensorStride[batchCounter].tensorStride[j] = strides[j]; + } + catMetaData.isContiguous[batchCounter] = false; + } else { + catMetaData.isContiguous[batchCounter] = true; + } + // update offset + offset += dimSize; + } + // Next, let's consider how we set our kernel launch parameters. + // We borrow from THCApply, which the kernel's internal indexing + // is based on. + dim3 applyBlock = dim3(32*16); + + //Get grid where x dim fills half gpu and y dim is number of tensors. + //This will have cating two tensors fill the entire grid, but prevent + //many threads from needlessly load meta data if their sizes is small. 
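+    // getCatGrid (defined above) returns dim3(2 * numSMs, batchCounter): blockIdx.x grid-strides
+    // over one input's elements while blockIdx.y selects which input of the batch is being copied.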
+ dim3 catGrid; + getCatGrid(batchCounter, catGrid); + + if (memory_format != c10::MemoryFormat::Contiguous) { + switch (dimension) { + case 0: + break; + case 1: + dimension = nDims - dimension; + break; + default: + dimension--; + } + } + // Template Declarations for dim = 1, 2, 3, 4 +#define HANDLE_CASE(DIMS) \ + CatArrayBatchedCopy<<<\ + catGrid, applyBlock, 0, stream.stream()>>>(\ + data, catMetaData, outputParam, dimension, outputParam.tensorStride[dimension]); \ + C10_CUDA_KERNEL_LAUNCH_CHECK(); + switch (nDims) { + case 1: + HANDLE_CASE(1); + break; + case 2: + HANDLE_CASE(2); + break; + case 3: + HANDLE_CASE(3); + break; + case 4: + HANDLE_CASE(4); + break; + } +#undef HANDLE_CASE + } +} +} // namespace + +Tensor cat_cuda(TensorList inputs, int64_t dimension) { + ScalarType high_type = result_type(inputs); + Tensor out = at::empty({0}, inputs.front().options().dtype(high_type)); + at::native::cat_out_cuda(inputs, dimension, out); + return out; +} + +inline c10::MemoryFormat compute_output_memory_format(const TensorList &inputs) { + c10::optional format = c10::nullopt; + for (auto &t : inputs) { + auto f = t.suggest_memory_format(); + if (!format.has_value()) { + format = f; + continue; + } + if (format.value() == f) { + continue; + } + bool contiguous = (format.value() == c10::MemoryFormat::Contiguous || f == c10::MemoryFormat::Contiguous || format.value() != f); + if (contiguous) { + return c10::MemoryFormat::Contiguous; + } + } + return format.value(); +} + +Tensor& cat_out_cuda(TensorList inputs, int64_t dimension, Tensor& out) { + + // previously, size [0] tensors were the only possible empty tensors; thus, it + // wasn't possible to cat empty tensors unless all the other tensors were + // 1-dimensional, so we allowed these tensors to be "skipped". We maintain + // this behavior for backwards compatibility, but only for this specific size + // (i.e. other empty sizes are not skipped). + // FIXME: warn if this is the case + auto should_skip = [](const Tensor &t) { + return t.dim() == 1 && at::native::size(t, 0) == 0; + }; + + const Tensor *notSkippedTensor = NULL; // non-owning reference + int nDims = 0; + + // Check for type promotion + TORCH_CHECK(canCast(result_type(inputs), out.scalar_type()), "torch.cat(): input types ", + " can't be cast to the desired output type ", + out.scalar_type()); + + // Inputs cannot alias the output tensor + for (int i = 0; i < inputs.size(); i++) { + auto lap = at::get_overlap_status(out, inputs[i]); + TORCH_CHECK(lap != at::MemOverlapStatus::PARTIAL && + lap != at::MemOverlapStatus::FULL, + "torch.cat(): unsupported operation: the input tensors cannot refer to any " + "of the output memory locations. Found overlap in input " + "tensor ", i); + } + at::assert_no_internal_overlap(out); + + for (int i = 0; i < inputs.size(); i++) { + if (should_skip(inputs[i])) { + continue; + } + nDims = inputs[i].dim(); + notSkippedTensor = &inputs[i]; + } + + // If all inputs are empty tensors, return an empty tensor + if (notSkippedTensor == NULL) { + return out; + } + + TORCH_CHECK(inputs.size() > 0, "torch.cat(): invalid number of inputs ", inputs.size()); + TORCH_CHECK(dimension >= 0, "torch.cat(): invalid dimension ", dimension); + + for (const Tensor& t: inputs) { + TORCH_CHECK(t.device() == notSkippedTensor->device(), + "torch.cat(): all input tensors must be on the same device. 
Received ", + t.device(), " and ", notSkippedTensor->device()); + } + + TORCH_CHECK( + out.device() == notSkippedTensor->device(), + "torch.cat(): all input tensors and out must be on the same device, but inputs are on ", + notSkippedTensor->device(), " and out is on ", out.device()); + + c10::MemoryFormat memory_format = compute_output_memory_format(inputs); + + std::vector size(notSkippedTensor->sizes().vec()); + + // Compute size of the result in the cat dimension + int64_t cat_dim_size = 0; + for (int i = 0; i < inputs.size(); i++) { + const Tensor &tensor = inputs[i]; + if (should_skip(tensor)) { + continue; + } + check_shape_except_dim(*notSkippedTensor, tensor, dimension, i); + cat_dim_size += at::native::size(tensor, dimension); + } + + // Compute the size of the result + size[dimension] = cat_dim_size; + + // skip resizing if size of result is same as expected + // raise a warning while resizing if output has one or more elements + // See https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362 + // for understanding why at::native::resize_output is not called directly. + if (at::native::resize_output_check(out, size)) { + out.resize_(size, memory_format); + } + + if (out.numel() == 0) { + return out; + } + + // We parallelize the copy if all 6 conditions pass: + // + // 1. There is more than one input tensor + // 2. The out tensor is 32-bit indexable + // 3. The number of dimensions is <= 4 + // 4. All input tensors are contiguous (output tensor may be non-contig) + // 5. All input tensors can use 32-bit indexing + + const bool all32BitIndexable = std::all_of(inputs.begin(), inputs.end(), + [] (const Tensor& t) { + return at::cuda::detail::canUse32BitIndexMath(t); + }); + const bool allContiguous = std::all_of(inputs.begin(), inputs.end(), + [=](const Tensor& t) { + return !t.defined() || t.is_contiguous(memory_format); + }); + ScalarType firstType = inputs[0].scalar_type(); + bool allSameType = std::all_of(inputs.begin(), inputs.end(), + [firstType](const Tensor& t) { + return t.scalar_type() == firstType; + }); + allSameType = allSameType && (out.scalar_type() == firstType); + +#ifdef __HIP_PLATFORM_HCC__ + if (inputs.size() > 1 && + out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS && + at::cuda::detail::canUse32BitIndexMath(out) && + allContiguous && + all32BitIndexable && + allSameType) { + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( + at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, + out.scalar_type(), "cat_cuda", [&]() { + hip_parallel_cat(out, inputs, dimension, nDims, memory_format); + }); +#else + // We support the contiguous inputs and non-contiguous input (<=4 dims) in different ways + // For contiguous input, we don't need to pass stride meta data to cuda kernel through constant + // memory. Therefore, we could pass more inputs to cuda threads. + // For non-contiguous, we reduce the number of inputs passed to cuda kernel due to the limitation + // of constant memory. 
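+  // Kernel arguments are delivered through constant/parameter memory, which has a small fixed
+  // size limit (traditionally 4 KB of parameter space per launch), so carrying per-input
+  // size/stride metadata means fewer inputs can be batched into one kernel call on this path.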
+ if (inputs.size() > 1 && + out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS && + at::cuda::detail::canUse32BitIndexMath(out) && + allContiguous && + all32BitIndexable && + allSameType) { + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( + at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, + out.scalar_type(), "cat_cuda", [&]() { + parallel_cat(out, inputs, dimension, nDims, memory_format); + }); + } else if (inputs.size() > 1 && + out.dim() <= CAT_ARRAY_MAX_INPUT_DIMS && + at::cuda::detail::canUse32BitIndexMath(out) && + nDims <= CAT_ARRAY_MAX_INPUT_DIMS && + all32BitIndexable && + allSameType && + memory_format == c10::MemoryFormat::Contiguous) { + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( + at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, + out.scalar_type(), "cat_cuda", [&]() { + parallel_cat(out, inputs, dimension, nDims, memory_format); + }); +#endif + } else { + int64_t offset = 0; + for (int j = 0; j < inputs.size(); j++) + { + if (should_skip(inputs[j])) continue; + int64_t dimSize = at::native::size(inputs[j], dimension); + Tensor nt = at::narrow(out, dimension, offset, dimSize); + copy_(nt, inputs[j]); + offset += dimSize; + } + } + + return out; +} + +} // namespace native +} // namespace at diff --git a/cuda_code/SineWaveSimulation_3.cu b/cuda_code/SineWaveSimulation_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..68e63d7a5ca9b968aa5bd6dfd12400966e5fb3e0 --- /dev/null +++ b/cuda_code/SineWaveSimulation_3.cu @@ -0,0 +1,138 @@ +/* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of NVIDIA CORPORATION nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "SineWaveSimulation.h" +#include +#include + +__global__ void sinewave(float *heightMap, unsigned int width, unsigned int height, float time) +{ + const float freq = 4.0f; + const size_t stride = gridDim.x * blockDim.x; + + // Iterate through the entire array in a way that is + // independent of the grid configuration + for (size_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < width * height; tid += stride) { + // Calculate the x, y coordinates + const size_t y = tid / width; + const size_t x = tid - y * width; + // Normalize x, y to [0,1] + const float u = ((2.0f * x) / width) - 1.0f; + const float v = ((2.0f * y) / height) - 1.0f; + // Calculate the new height value + const float w = 0.5f * sinf(u * freq + time) * cosf(v * freq + time); + // Store this new height value + heightMap[tid] = w; + } +} + +SineWaveSimulation::SineWaveSimulation(size_t width, size_t height) + : m_heightMap(nullptr), m_width(width), m_height(height) +{ +} + +void SineWaveSimulation::initCudaLaunchConfig(int device) +{ + cudaDeviceProp prop = {}; + checkCudaErrors(cudaSetDevice(device)); + checkCudaErrors(cudaGetDeviceProperties(&prop, device)); + + // We don't need large block sizes, since there's not much inter-thread communication + m_threads = prop.warpSize; + + // Use the occupancy calculator and fill the gpu as best as we can + checkCudaErrors(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&m_blocks, sinewave, prop.warpSize, 0)); + m_blocks *= prop.multiProcessorCount; + + // Go ahead and the clamp the blocks to the minimum needed for this height/width + m_blocks = std::min(m_blocks, (int)((m_width * m_height + m_threads - 1) / m_threads)); +} + +int SineWaveSimulation::initCuda(uint8_t *vkDeviceUUID, size_t UUID_SIZE) +{ + int current_device = 0; + int device_count = 0; + int devices_prohibited = 0; + + cudaDeviceProp deviceProp; + checkCudaErrors(cudaGetDeviceCount(&device_count)); + + if (device_count == 0) { + fprintf(stderr, "CUDA error: no devices supporting CUDA.\n"); + exit(EXIT_FAILURE); + } + + // Find the GPU which is selected by Vulkan + while (current_device < device_count) { + cudaGetDeviceProperties(&deviceProp, current_device); + + if ((deviceProp.computeMode != cudaComputeModeProhibited)) { + // Compare the cuda device UUID with vulkan UUID + int ret = memcmp((void*)&deviceProp.uuid, vkDeviceUUID, UUID_SIZE); + if (ret == 0) + { + checkCudaErrors(cudaSetDevice(current_device)); + checkCudaErrors(cudaGetDeviceProperties(&deviceProp, current_device)); + printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", + current_device, deviceProp.name, deviceProp.major, + deviceProp.minor); + + return current_device; + } + + } else { + devices_prohibited++; + } + + current_device++; + } + + if (devices_prohibited == device_count) { + fprintf(stderr, + "CUDA error:" + " No Vulkan-CUDA Interop capable GPU found.\n"); + exit(EXIT_FAILURE); + } + + return -1; +} + +SineWaveSimulation::~SineWaveSimulation() +{ + m_heightMap = NULL; +} + +void SineWaveSimulation::initSimulation(float *heights) +{ + m_heightMap = heights; +} + +void SineWaveSimulation::stepSimulation(float time, cudaStream_t stream) +{ + sinewave <<< m_blocks, m_threads, 0, stream >>> (m_heightMap, m_width, m_height, time); + getLastCudaError("Failed to launch CUDA simulation"); +} diff --git a/cuda_code/Slab.cu b/cuda_code/Slab.cu new file mode 100644 index 0000000000000000000000000000000000000000..90314e6c2ee95326b5b4778e7b3aa1b95e74bc59 --- /dev/null +++ b/cuda_code/Slab.cu @@ -0,0 +1,73 @@ +#include 
+#include + +#define BLOCKS 134 +#define THREADS_PER_BLOCK 512 + +SlabUnified::SlabUnified(int size) : SlabUnified(size, 0, nullptr) {} + +SlabUnified::SlabUnified(int size, int gpu) + : SlabUnified(size, gpu, nullptr) {} + +SlabUnified::SlabUnified(int size, cudaStream_t *stream) + : SlabUnified(size, 0, stream) {} + +SlabUnified::SlabUnified(int size, int gpu, cudaStream_t *stream) { + gpuErrchk(cudaSetDevice(gpu)); + + if(stream == nullptr){ + _stream = new cudaStream_t(); + *_stream = cudaStreamDefault; + } else { + _stream = stream; + } + + slabGAlloc = new groupallocator::GroupAllocator(0, 4096); + allocGAlloc = new groupallocator::GroupAllocator(1, 4096); + bufferGAlloc = new groupallocator::GroupAllocator(2, 4096); + this->slab = setUpGroup(*slabGAlloc, size, 1, gpu, *_stream); + bufferGAlloc->allocate(&batchKeys, + BLOCKS * THREADS_PER_BLOCK * sizeof(unsigned), false); + bufferGAlloc->allocate(&batchValues, + BLOCKS * THREADS_PER_BLOCK * sizeof(unsigned), false); + bufferGAlloc->allocate(&batchRequests, + BLOCKS * THREADS_PER_BLOCK * sizeof(int), false); + this->ctx = setupWarpAllocCtxGroup(*allocGAlloc, THREADS_PER_BLOCK, BLOCKS, + gpu, *_stream); + + _gpu = gpu; + mapSize = size; +} + +SlabUnified::~SlabUnified() { + delete slabGAlloc; + delete allocGAlloc; + delete bufferGAlloc; +} + +void SlabUnified::batch(unsigned *keys, unsigned *values, unsigned *requests) { + + gpuErrchk(cudaSetDevice(_gpu)); + + for (int i = 0; i < THREADS_PER_BLOCK * BLOCKS; i++) { + batchKeys[i] = keys[i]; + batchValues[i] = values[i]; + batchRequests[i] = requests[i]; + } + + bufferGAlloc->moveToDevice(_gpu, *_stream); + gpuErrchk(cudaStreamSynchronize(*_stream)); + + requestHandler<<>>( + slab->slabs, slab->num_of_buckets, batchKeys, batchValues, batchRequests, + ctx); + gpuErrchk(cudaStreamSynchronize(*_stream)); + bufferGAlloc->moveToDevice(cudaCpuDeviceId, *_stream); + gpuErrchk(cudaStreamSynchronize(*_stream)); + + for (int i = 0; i < THREADS_PER_BLOCK * BLOCKS; i++) { + keys[i] = batchKeys[i]; + values[i] = batchValues[i]; + requests[i] = batchRequests[i]; + } +} diff --git a/cuda_code/SmallSortTestBindings.cu b/cuda_code/SmallSortTestBindings.cu new file mode 100644 index 0000000000000000000000000000000000000000..ef8edba00315a220c29e3ff30d9519a788c0f44a --- /dev/null +++ b/cuda_code/SmallSortTestBindings.cu @@ -0,0 +1,101 @@ +// Copyright 2004-present Facebook. All Rights Reserved. 
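+// Host-side test bindings: each entry point copies the input vector to the GPU, launches
+// warpSort from a single 32-thread block (grid(1), block(32)), and copies the sorted values
+// (and, for sortWithIndices, their original indices) back to the host.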
+ +#include "cuda/SmallSortTestBindings.cuh" +#include "cuda/DeviceTensor.cuh" +#include "cuda/SmallSort.cuh" + +using namespace std; + +namespace facebook { namespace cuda { + +__global__ void +sortDevice(DeviceTensor data, DeviceTensor out) { + warpSort >(data, out); +} + +__global__ void +sortDevice(DeviceTensor data, + DeviceTensor out, + DeviceTensor indices) { + warpSort > >(data, out, indices); +} + +vector +sort(const vector& data) { + const size_t sizeBytes = data.size() * sizeof(float); + + float* devFloat = NULL; + cudaMalloc(&devFloat, sizeBytes); + cudaMemcpy(devFloat, data.data(), sizeBytes, cudaMemcpyHostToDevice); + + float* devResult = NULL; + cudaMalloc(&devResult, sizeBytes); + cudaMemset(devResult, 0, sizeBytes); + + dim3 grid(1); + dim3 block(32); + + int dataSizes[] = { (int) data.size() }; + int outSizes[] = { (int) data.size() }; + + sortDevice<<>>( + DeviceTensor(devFloat, dataSizes), + DeviceTensor(devResult, outSizes)); + + vector vals(data.size()); + cudaMemcpy(vals.data(), devResult, sizeBytes, cudaMemcpyDeviceToHost); + + cudaFree(devFloat); + cudaFree(devResult); + + return vals; +} + +vector > +sortWithIndices(const std::vector& data) { + const size_t sizeBytes = data.size() * sizeof(float); + const size_t sizeIndicesBytes = data.size() * sizeof(int); + + float* devFloat = NULL; + cudaMalloc(&devFloat, sizeBytes); + cudaMemcpy(devFloat, data.data(), sizeBytes, cudaMemcpyHostToDevice); + + float* devResult = NULL; + cudaMalloc(&devResult, sizeBytes); + cudaMemset(devResult, 0, sizeBytes); + int* devIndices = NULL; + cudaMalloc(&devIndices, sizeIndicesBytes); + cudaMemset(devIndices, 0, sizeIndicesBytes); + + dim3 grid(1); + dim3 block(32); + + int dataSizes[] = { (int) data.size() }; + int outSizes[] = { (int) data.size() }; + + sortDevice<<>>( + DeviceTensor(devFloat, dataSizes), + DeviceTensor(devResult, outSizes), + DeviceTensor(devIndices, outSizes)); + + vector vals(data.size()); + cudaMemcpy(vals.data(), + devResult, sizeBytes, cudaMemcpyDeviceToHost); + + vector indices(data.size()); + cudaMemcpy(indices.data(), + devIndices, sizeIndicesBytes, cudaMemcpyDeviceToHost); + + cudaFree(devFloat); + cudaFree(devResult); + cudaFree(devIndices); + + vector > result; + for (int i = 0; i < data.size(); ++i) { + result.push_back(make_pair(vals[i], indices[i])); + } + + return result; +} + +} } // namespace diff --git a/cuda_code/SobelFilter_kernels_3.cu b/cuda_code/SobelFilter_kernels_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..c29773bfd540f0e5003c5cbe239e47b2dd932d13 --- /dev/null +++ b/cuda_code/SobelFilter_kernels_3.cu @@ -0,0 +1,244 @@ +/* + * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. + * + * Please refer to the NVIDIA end user license agreement (EULA) associated + * with this source code for terms and conditions that govern your use of + * this software. Any use, reproduction, disclosure, or distribution of + * this software and related documentation outside the terms of the EULA + * is strictly prohibited. 
+ * + */ + +#include +#include +#include // includes cuda.h and cuda_runtime_api.h + +#include "SobelFilter_kernels.h" + +// Texture reference for reading image +texture tex; +extern __shared__ unsigned char LocalBlock[]; +static cudaArray *array = NULL; + +#define RADIUS 1 + +#ifdef FIXED_BLOCKWIDTH +#define BlockWidth 80 +#define SharedPitch 384 +#endif + +// This will output the proper CUDA error strings in the event that a CUDA host call returns an error +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + +inline void __checkCudaErrors( cudaError err, const char *file, const int line ) +{ + if( cudaSuccess != err) { + fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", + file, line, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + + +__device__ unsigned char +ComputeSobel(unsigned char ul, // upper left + unsigned char um, // upper middle + unsigned char ur, // upper right + unsigned char ml, // middle left + unsigned char mm, // middle (unused) + unsigned char mr, // middle right + unsigned char ll, // lower left + unsigned char lm, // lower middle + unsigned char lr, // lower right + float fScale ) +{ + short Horz = ur + 2*mr + lr - ul - 2*ml - ll; + short Vert = ul + 2*um + ur - ll - 2*lm - lr; + short Sum = (short) (fScale*(abs((int)Horz)+abs((int)Vert))); + if ( Sum < 0 ) return 0; else if ( Sum > 0xff ) return 0xff; + return (unsigned char) Sum; +} + +__global__ void +SobelShared( uchar4 *pSobelOriginal, unsigned short SobelPitch, +#ifndef FIXED_BLOCKWIDTH + short BlockWidth, short SharedPitch, +#endif + short w, short h, float fScale ) +{ + short u = 4*blockIdx.x*BlockWidth; + short v = blockIdx.y*blockDim.y + threadIdx.y; + short ib; + + int SharedIdx = threadIdx.y * SharedPitch; + + for ( ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x ) { + LocalBlock[SharedIdx+4*ib+0] = tex2D( tex, + (float) (u+4*ib-RADIUS+0), (float) (v-RADIUS) ); + LocalBlock[SharedIdx+4*ib+1] = tex2D( tex, + (float) (u+4*ib-RADIUS+1), (float) (v-RADIUS) ); + LocalBlock[SharedIdx+4*ib+2] = tex2D( tex, + (float) (u+4*ib-RADIUS+2), (float) (v-RADIUS) ); + LocalBlock[SharedIdx+4*ib+3] = tex2D( tex, + (float) (u+4*ib-RADIUS+3), (float) (v-RADIUS) ); + } + if ( threadIdx.y < RADIUS*2 ) { + // + // copy trailing RADIUS*2 rows of pixels into shared + // + SharedIdx = (blockDim.y+threadIdx.y) * SharedPitch; + for ( ib = threadIdx.x; ib < BlockWidth+2*RADIUS; ib += blockDim.x ) { + LocalBlock[SharedIdx+4*ib+0] = tex2D( tex, + (float) (u+4*ib-RADIUS+0), (float) (v+blockDim.y-RADIUS) ); + LocalBlock[SharedIdx+4*ib+1] = tex2D( tex, + (float) (u+4*ib-RADIUS+1), (float) (v+blockDim.y-RADIUS) ); + LocalBlock[SharedIdx+4*ib+2] = tex2D( tex, + (float) (u+4*ib-RADIUS+2), (float) (v+blockDim.y-RADIUS) ); + LocalBlock[SharedIdx+4*ib+3] = tex2D( tex, + (float) (u+4*ib-RADIUS+3), (float) (v+blockDim.y-RADIUS) ); + } + } + + __syncthreads(); + + u >>= 2; // index as uchar4 from here + uchar4 *pSobel = (uchar4 *) (((char *) pSobelOriginal)+v*SobelPitch); + SharedIdx = threadIdx.y * SharedPitch; + + for ( ib = threadIdx.x; ib < BlockWidth; ib += blockDim.x ) { + + unsigned char pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+0]; + unsigned char pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+1]; + unsigned char pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+2]; + unsigned char pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+0]; + unsigned char pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+1]; + unsigned char pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+2]; + unsigned 
char pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+0]; + unsigned char pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+1]; + unsigned char pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+2]; + + uchar4 out; + + out.x = ComputeSobel(pix00, pix01, pix02, + pix10, pix11, pix12, + pix20, pix21, pix22, fScale ); + + pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+3]; + pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+3]; + pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+3]; + out.y = ComputeSobel(pix01, pix02, pix00, + pix11, pix12, pix10, + pix21, pix22, pix20, fScale ); + + pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+4]; + pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+4]; + pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+4]; + out.z = ComputeSobel( pix02, pix00, pix01, + pix12, pix10, pix11, + pix22, pix20, pix21, fScale ); + + pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+5]; + pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+5]; + pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+5]; + out.w = ComputeSobel( pix00, pix01, pix02, + pix10, pix11, pix12, + pix20, pix21, pix22, fScale ); + if ( u+ib < w/4 && v < h ) { + pSobel[u+ib] = out; + } + } + + __syncthreads(); +} + +__global__ void +SobelCopyImage( Pixel *pSobelOriginal, unsigned int Pitch, + int w, int h, float fscale ) +{ + unsigned char *pSobel = + (unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch); + for ( int i = threadIdx.x; i < w; i += blockDim.x ) { + pSobel[i] = min( max((tex2D( tex, (float) i, (float) blockIdx.x ) * fscale), 0.f), 255.f); + } +} + +__global__ void +SobelTex( Pixel *pSobelOriginal, unsigned int Pitch, + int w, int h, float fScale ) +{ + unsigned char *pSobel = + (unsigned char *) (((char *) pSobelOriginal)+blockIdx.x*Pitch); + for ( int i = threadIdx.x; i < w; i += blockDim.x ) { + unsigned char pix00 = tex2D( tex, (float) i-1, (float) blockIdx.x-1 ); + unsigned char pix01 = tex2D( tex, (float) i+0, (float) blockIdx.x-1 ); + unsigned char pix02 = tex2D( tex, (float) i+1, (float) blockIdx.x-1 ); + unsigned char pix10 = tex2D( tex, (float) i-1, (float) blockIdx.x+0 ); + unsigned char pix11 = tex2D( tex, (float) i+0, (float) blockIdx.x+0 ); + unsigned char pix12 = tex2D( tex, (float) i+1, (float) blockIdx.x+0 ); + unsigned char pix20 = tex2D( tex, (float) i-1, (float) blockIdx.x+1 ); + unsigned char pix21 = tex2D( tex, (float) i+0, (float) blockIdx.x+1 ); + unsigned char pix22 = tex2D( tex, (float) i+1, (float) blockIdx.x+1 ); + pSobel[i] = ComputeSobel(pix00, pix01, pix02, + pix10, pix11, pix12, + pix20, pix21, pix22, fScale ); + } +} + +extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp) +{ + cudaChannelFormatDesc desc; + + if (Bpp == 1) { + desc = cudaCreateChannelDesc(); + } else { + desc = cudaCreateChannelDesc(); + } + + checkCudaErrors(cudaMallocArray(&array, &desc, iw, ih)); + checkCudaErrors(cudaMemcpyToArray(array, 0, 0, data, Bpp*sizeof(Pixel)*iw*ih, cudaMemcpyHostToDevice)); +} + +extern "C" void deleteTexture(void) +{ + checkCudaErrors(cudaFreeArray(array)); +} + + +// Wrapper for the __global__ call that sets up the texture and threads +extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale) +{ + checkCudaErrors(cudaBindTextureToArray(tex, array)); + + switch ( mode ) { + case SOBELDISPLAY_IMAGE: + SobelCopyImage<<>>(odata, iw, iw, ih, fScale ); + break; + case SOBELDISPLAY_SOBELTEX: + SobelTex<<>>(odata, iw, iw, ih, fScale ); + break; + case SOBELDISPLAY_SOBELSHARED: + { + dim3 threads(16,4); +#ifndef FIXED_BLOCKWIDTH + int 
BlockWidth = 80; // must be divisible by 16 for coalescing +#endif + dim3 blocks = dim3(iw/(4*BlockWidth)+(0!=iw%(4*BlockWidth)), + ih/threads.y+(0!=ih%threads.y)); + int SharedPitch = ~0x3f&(4*(BlockWidth+2*RADIUS)+0x3f); + int sharedMem = SharedPitch*(threads.y+2*RADIUS); + + // for the shared kernel, width must be divisible by 4 + iw &= ~3; + + SobelShared<<>>((uchar4 *) odata, + iw, +#ifndef FIXED_BLOCKWIDTH + BlockWidth, SharedPitch, +#endif + iw, ih, fScale ); + } + break; + } + + checkCudaErrors(cudaUnbindTexture(tex)); +} diff --git a/cuda_code/Solver2D_t.cu b/cuda_code/Solver2D_t.cu new file mode 100644 index 0000000000000000000000000000000000000000..bcbfa8810dd339716fa8f07d6c31b912100c3fcb --- /dev/null +++ b/cuda_code/Solver2D_t.cu @@ -0,0 +1,322 @@ +#include "solvers/Solver2D_t.cuh" +#include "polynomials/ChebyshevPolynomial_t.cuh" +#include "polynomials/LegendrePolynomial_t.cuh" +#include "helpers/ProgressBar_t.h" +#include "helpers/constants.h" +#include +#include +#include + +using SEM::Entities::Vec2; +using SEM::Entities::Element2D_t; +using SEM::Entities::Face2D_t; + +SEM::Solvers::Solver2D_t::Solver2D_t(deviceFloat CFL, std::vector output_times, deviceFloat viscosity) : + CFL_{CFL}, + output_times_{output_times}, + viscosity_{viscosity} {} + +template auto SEM::Solvers::Solver2D_t::solve(const SEM::Entities::NDG_t &NDG, SEM::Meshes::Mesh2D_t& mesh, const SEM::Helpers::DataWriter_t& data_writer) const -> void; // Get with the times c++, it's crazy I have to do this +template auto SEM::Solvers::Solver2D_t::solve(const SEM::Entities::NDG_t &NDG, SEM::Meshes::Mesh2D_t& mesh, const SEM::Helpers::DataWriter_t& data_writer) const -> void; + +template +auto SEM::Solvers::Solver2D_t::solve(const SEM::Entities::NDG_t &NDG, SEM::Meshes::Mesh2D_t& mesh, const SEM::Helpers::DataWriter_t& data_writer) const -> void { + int global_rank; + MPI_Comm_rank(MPI_COMM_WORLD, &global_rank); + deviceFloat time = 0.0; + const deviceFloat t_end = output_times_.back(); + SEM::Helpers::ProgressBar_t bar; + size_t timestep = 0; + constexpr std::array am {0, -5.0/9.0, -153.0/128.0}; + constexpr std::array bm {0, 1.0/3.0, 0.75}; + constexpr std::array gm {1.0/3.0, 15.0/16.0, 8.0/15.0}; + + deviceFloat delta_t = get_delta_t(mesh); + + for (auto const& e : std::as_const(output_times_)) { + if ((time >= e) && (time < e + delta_t)) { + if (global_rank == 0) { + bar.set_status_text("Writing solution"); + bar.update(0.0); + } + mesh.write_complete_data(time, NDG.nodes_, NDG.interpolation_matrices_, data_writer); + } + } + + if (global_rank == 0) { + bar.set_status_text("Iteration 0"); + bar.update(0.0); + } + + while (time < t_end) { + ++timestep; + delta_t = get_delta_t(mesh); + if (time + delta_t > t_end) { + delta_t = t_end - time; + } + + // Kinda algorithm 62 + deviceFloat t = time + bm[0] * delta_t; + mesh.interpolate_to_boundaries(NDG.lagrange_interpolant_left_, NDG.lagrange_interpolant_right_); + mesh.boundary_conditions(t, NDG.nodes_, NDG.weights_, NDG.barycentric_weights_); + mesh.project_to_faces(NDG.nodes_, NDG.barycentric_weights_); + SEM::Solvers::calculate_wave_fluxes<<>>(mesh.faces_.size(), mesh.faces_.data(), mesh.elements_.data()); + mesh.project_to_elements(NDG.nodes_, NDG.weights_, NDG.barycentric_weights_); + SEM::Solvers::compute_dg_wave_derivative<<>>(mesh.n_elements_, mesh.elements_.data(), mesh.faces_.data(), NDG.weights_.data(), NDG.derivative_matrices_hat_.data(), NDG.lagrange_interpolant_left_.data(), NDG.lagrange_interpolant_right_.data()); + 
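+            // Editor's note: each time step applies a low-storage third-order Runge-Kutta
+            // scheme in three stages (the launches above and below; cf. "Kinda algorithm 62").
+            // For stage m:
+            //     t_m          = time + bm[m] * delta_t
+            //     intermediate = am[m] * intermediate + G(u, t_m)   (am[0] == 0, handled by rk3_first_step)
+            //     u           += gm[m] * delta_t * intermediate
+            // so only one extra state array per unknown is carried across the stages.
+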
SEM::Solvers::rk3_first_step<<>>(mesh.n_elements_, mesh.elements_.data(), delta_t, gm[0]); + + t = time + bm[1] * delta_t; + mesh.interpolate_to_boundaries(NDG.lagrange_interpolant_left_, NDG.lagrange_interpolant_right_); + mesh.boundary_conditions(t, NDG.nodes_, NDG.weights_, NDG.barycentric_weights_); + mesh.project_to_faces(NDG.nodes_, NDG.barycentric_weights_); + SEM::Solvers::calculate_wave_fluxes<<>>(mesh.faces_.size(), mesh.faces_.data(), mesh.elements_.data()); + mesh.project_to_elements(NDG.nodes_, NDG.weights_, NDG.barycentric_weights_); + SEM::Solvers::compute_dg_wave_derivative<<>>(mesh.n_elements_, mesh.elements_.data(), mesh.faces_.data(), NDG.weights_.data(), NDG.derivative_matrices_hat_.data(), NDG.lagrange_interpolant_left_.data(), NDG.lagrange_interpolant_right_.data()); + SEM::Solvers::rk3_step<<>>(mesh.n_elements_, mesh.elements_.data(), delta_t, am[1], gm[1]); + + t = time + bm[2] * delta_t; + mesh.interpolate_to_boundaries(NDG.lagrange_interpolant_left_, NDG.lagrange_interpolant_right_); + mesh.boundary_conditions(t, NDG.nodes_, NDG.weights_, NDG.barycentric_weights_); + mesh.project_to_faces(NDG.nodes_, NDG.barycentric_weights_); + SEM::Solvers::calculate_wave_fluxes<<>>(mesh.faces_.size(), mesh.faces_.data(), mesh.elements_.data()); + mesh.project_to_elements(NDG.nodes_, NDG.weights_, NDG.barycentric_weights_); + SEM::Solvers::compute_dg_wave_derivative<<>>(mesh.n_elements_, mesh.elements_.data(), mesh.faces_.data(), NDG.weights_.data(), NDG.derivative_matrices_hat_.data(), NDG.lagrange_interpolant_left_.data(), NDG.lagrange_interpolant_right_.data()); + SEM::Solvers::rk3_step<<>>(mesh.n_elements_, mesh.elements_.data(), delta_t, am[2], gm[2]); + + time += delta_t; + for (auto const& e : std::as_const(output_times_)) { + if ((time >= e) && (time < e + delta_t)) { + if (global_rank == 0) { + bar.set_status_text("Writing solution"); + bar.update(time/t_end); + } + mesh.estimate_error(NDG.nodes_, NDG.weights_); + mesh.write_complete_data(time, NDG.nodes_, NDG.interpolation_matrices_, data_writer); + break; + } + } + + if (timestep % mesh.adaptivity_interval_ == 0) { + if (global_rank == 0) { + bar.set_status_text("Adapting"); + bar.update(time/t_end); + } + + mesh.estimate_error(NDG.nodes_, NDG.weights_); + mesh.adapt(NDG.N_max_, NDG.nodes_, NDG.barycentric_weights_); + } + + if (global_rank == 0) { + std::stringstream ss; + ss << "Iteration " << timestep; + bar.set_status_text(ss.str()); + bar.update(time/t_end); + } + } + + bool did_write = false; + for (auto const& e : std::as_const(output_times_)) { + if ((time >= e) && (time < e + delta_t)) { + did_write = true; + break; + } + } + + if (!did_write) { + mesh.estimate_error(NDG.nodes_, NDG.weights_); + if (global_rank == 0) { + bar.set_status_text("Writing solution"); + bar.update(1.0); + } + mesh.write_complete_data(time, NDG.nodes_, NDG.interpolation_matrices_, data_writer); + } + if (global_rank == 0) { + bar.set_status_text("Done"); + bar.update(1.0); + } + if (global_rank == 0) { + std::cout << std::endl; + } +} + +auto SEM::Solvers::Solver2D_t::get_delta_t(SEM::Meshes::Mesh2D_t& mesh) const -> deviceFloat { + SEM::Solvers::reduce_wave_delta_t<<>>(CFL_, mesh.n_elements_, mesh.elements_.data(), mesh.device_delta_t_array_.data()); + mesh.device_delta_t_array_.copy_to(mesh.host_delta_t_array_, mesh.stream_); + cudaStreamSynchronize(mesh.stream_); + + deviceFloat delta_t_min_local = std::numeric_limits::infinity(); + for (int i = 0; i < mesh.elements_numBlocks_; ++i) { + delta_t_min_local = min(delta_t_min_local, 
mesh.host_delta_t_array_[i]);
+    }
+
+    deviceFloat delta_t_min;
+    constexpr MPI_Datatype data_type = (sizeof(deviceFloat) == sizeof(float)) ? MPI_FLOAT : MPI_DOUBLE; // CHECK this is a bad way of doing this
+    MPI_Allreduce(&delta_t_min_local, &delta_t_min, 1, data_type, MPI_MIN, MPI_COMM_WORLD);
+    return delta_t_min;
+}
+
+__host__ __device__
+auto SEM::Solvers::Solver2D_t::x_flux(deviceFloat p, deviceFloat u, deviceFloat v) -> std::array<deviceFloat, 3> {
+    return {SEM::Constants::c * u, p, 0};
+}
+
+__host__ __device__
+auto SEM::Solvers::Solver2D_t::y_flux(deviceFloat p, deviceFloat u, deviceFloat v) -> std::array<deviceFloat, 3> {
+    return {SEM::Constants::c * v, 0, p};
+}
+
+__device__
+void SEM::Solvers::Solver2D_t::matrix_vector_multiply(int N, const deviceFloat* matrix, const deviceFloat* vector, deviceFloat* result) {
+    for (int i = 0; i <= N; ++i) {
+        result[i] = 0.0;
+        for (int j = 0; j <= N; ++j) {
+            result[i] += matrix[i * (N + 1) + j] * vector[j];
+        }
+    }
+}
+
+__global__
+auto SEM::Solvers::calculate_wave_fluxes(size_t N_faces, Face2D_t* faces, const Element2D_t* elements) -> void {
+    const int index = blockIdx.x * blockDim.x + threadIdx.x;
+    const int stride = blockDim.x * gridDim.x;
+
+    for (size_t face_index = index; face_index < N_faces; face_index += stride) {
+        Face2D_t& face = faces[face_index];
+
+        // Computing fluxes
+        for (int i = 0; i <= face.N_; ++i) {
+            const Vec2<deviceFloat> u_L {face.u_[0][i], face.v_[0][i]};
+            const Vec2<deviceFloat> u_R {face.u_[1][i], face.v_[1][i]};
+
+            const deviceFloat w_L = face.p_[0][i] + SEM::Constants::c * u_L.dot(face.normal_);
+            const deviceFloat w_R = face.p_[1][i] - SEM::Constants::c * u_R.dot(face.normal_);
+
+            face.p_flux_[i] = SEM::Constants::c * (w_L - w_R) / 2;
+            face.u_flux_[i] = face.normal_.x() * (w_L + w_R) / 2;
+            face.v_flux_[i] = face.normal_.y() * (w_L + w_R) / 2;
+        }
+    }
+}
+
+// Algorithm 114
+__global__
+auto SEM::Solvers::compute_dg_wave_derivative(size_t N_elements, Element2D_t* elements, const Face2D_t* faces, const deviceFloat* weights, const deviceFloat* derivative_matrices_hat, const deviceFloat* lagrange_interpolant_left, const deviceFloat* lagrange_interpolant_right) -> void {
+    const int index = blockIdx.x * blockDim.x + threadIdx.x;
+    const int stride = blockDim.x * gridDim.x;
+
+    for (size_t element_index = index; element_index < N_elements; element_index += stride) {
+        Element2D_t& element = elements[element_index];
+        const size_t offset_1D = element.N_ * (element.N_ + 1) /2; // CHECK cache?
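+        // Editor's note: data for all polynomial orders is packed contiguously, so for
+        // order N the 1D arrays (weights, Lagrange interpolants) start at
+        //     offset_1D = 1 + 2 + ... + N       = N (N + 1) / 2
+        // and the (N+1) x (N+1) matrices (derivative_matrices_hat) start at
+        //     offset_2D = 1^2 + 2^2 + ... + N^2 = N (N + 1) (2N + 1) / 6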
+ const size_t offset_2D = element.N_ * (element.N_ + 1) * (2 * element.N_ + 1) /6; + + // Horizontal direction + for (int j = 0; j <= element.N_; ++j) { + for (int i = 0; i <= element.N_; ++i) { + const std::array flux_x = SEM::Solvers::Solver2D_t::x_flux(element.p_[i * (element.N_ + 1) + j], element.u_[i * (element.N_ + 1) + j], element.v_[i * (element.N_ + 1) + j]); + const std::array flux_y = SEM::Solvers::Solver2D_t::y_flux(element.p_[i * (element.N_ + 1) + j], element.u_[i * (element.N_ + 1) + j], element.v_[i * (element.N_ + 1) + j]); + + element.p_flux_[i] = element.deta_dy_[i * (element.N_ + 1) + j] * flux_x[0] - element.deta_dx_[i * (element.N_ + 1) + j] * flux_y[0]; + element.u_flux_[i] = element.deta_dy_[i * (element.N_ + 1) + j] * flux_x[1] - element.deta_dx_[i * (element.N_ + 1) + j] * flux_y[1]; + element.v_flux_[i] = element.deta_dy_[i * (element.N_ + 1) + j] * flux_x[2] - element.deta_dx_[i * (element.N_ + 1) + j] * flux_y[2]; + } + + SEM::Solvers::Solver2D_t::matrix_vector_multiply(element.N_, derivative_matrices_hat + offset_2D, element.p_flux_.data(), element.p_flux_derivative_.data()); + SEM::Solvers::Solver2D_t::matrix_vector_multiply(element.N_, derivative_matrices_hat + offset_2D, element.u_flux_.data(), element.u_flux_derivative_.data()); + SEM::Solvers::Solver2D_t::matrix_vector_multiply(element.N_, derivative_matrices_hat + offset_2D, element.v_flux_.data(), element.v_flux_derivative_.data()); + + // For the boundaries, the numbering increases from the first node to the second. + // Inside the element, the ksi and eta coordinates increase from left to right, bottom to top. + // This means that there is an inconsistency on the top and left edges, and the numbering has to be reversed. + // This way, the projection from the element edge to the face(s) can always be done in the same way. + // The same process has to be done when interpolating to the boundaries. 
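+            // Editor's note: the loop below lifts the numerical boundary fluxes into the
+            // interior derivative, i.e. for each node i
+            //     F'_i = sum_j D_hat[i][j] * F_j + ( F*(+1) * l_i(+1) + F*(-1) * l_i(-1) ) / w_i
+            // where F* are the fluxes extrapolated to the element edges, l_i are the Lagrange
+            // interpolants evaluated at the endpoints, and w_i are the quadrature weights.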
+ for (int i = 0; i <= element.N_; ++i) { + element.p_flux_derivative_[i] += (element.p_flux_extrapolated_[1][j] * lagrange_interpolant_right[offset_1D + i] + element.p_flux_extrapolated_[3][element.N_ - j] * lagrange_interpolant_left[offset_1D + i]) / weights[offset_1D + i]; + element.u_flux_derivative_[i] += (element.u_flux_extrapolated_[1][j] * lagrange_interpolant_right[offset_1D + i] + element.u_flux_extrapolated_[3][element.N_ - j] * lagrange_interpolant_left[offset_1D + i]) / weights[offset_1D + i]; + element.v_flux_derivative_[i] += (element.v_flux_extrapolated_[1][j] * lagrange_interpolant_right[offset_1D + i] + element.v_flux_extrapolated_[3][element.N_ - j] * lagrange_interpolant_left[offset_1D + i]) / weights[offset_1D + i]; + } + + for (int i = 0; i <= element.N_; ++i) { + element.G_p_[i * (element.N_ + 1) + j] = -element.p_flux_derivative_[i]; + element.G_u_[i * (element.N_ + 1) + j] = -element.u_flux_derivative_[i]; + element.G_v_[i * (element.N_ + 1) + j] = -element.v_flux_derivative_[i]; + } + } + + // Vertical direction + for (int i = 0; i <= element.N_; ++i) { + for (int j = 0; j <= element.N_; ++j) { + const std::array flux_x = SEM::Solvers::Solver2D_t::x_flux(element.p_[i * (element.N_ + 1) + j], element.u_[i * (element.N_ + 1) + j], element.v_[i * (element.N_ + 1) + j]); + const std::array flux_y = SEM::Solvers::Solver2D_t::y_flux(element.p_[i * (element.N_ + 1) + j], element.u_[i * (element.N_ + 1) + j], element.v_[i * (element.N_ + 1) + j]); + + element.p_flux_[j] = -element.dxi_dy_[i * (element.N_ + 1) + j] * flux_x[0] + element.dxi_dx_[i * (element.N_ + 1) + j] * flux_y[0]; + element.u_flux_[j] = -element.dxi_dy_[i * (element.N_ + 1) + j] * flux_x[1] + element.dxi_dx_[i * (element.N_ + 1) + j] * flux_y[1]; + element.v_flux_[j] = -element.dxi_dy_[i * (element.N_ + 1) + j] * flux_x[2] + element.dxi_dx_[i * (element.N_ + 1) + j] * flux_y[2]; + } + + SEM::Solvers::Solver2D_t::matrix_vector_multiply(element.N_, derivative_matrices_hat + offset_2D, element.p_flux_.data(), element.p_flux_derivative_.data()); + SEM::Solvers::Solver2D_t::matrix_vector_multiply(element.N_, derivative_matrices_hat + offset_2D, element.u_flux_.data(), element.u_flux_derivative_.data()); + SEM::Solvers::Solver2D_t::matrix_vector_multiply(element.N_, derivative_matrices_hat + offset_2D, element.v_flux_.data(), element.v_flux_derivative_.data()); + + // For the boundaries, the numbering increases from the first node to the second. + // Inside the element, the ksi and eta coordinates increase from left to right, bottom to top. + // This means that there is an inconsistency on the top and left edges, and the numbering has to be reversed. + // This way, the projection from the element edge to the face(s) can always be done in the same way. + // The same process has to be done when interpolating to the boundaries. 
+ for (int j = 0; j <= element.N_; ++j) { + element.p_flux_derivative_[j] += (element.p_flux_extrapolated_[2][element.N_ - i] * lagrange_interpolant_right[offset_1D + j] + element.p_flux_extrapolated_[0][i] * lagrange_interpolant_left[offset_1D + j]) / weights[offset_1D + j]; + element.u_flux_derivative_[j] += (element.u_flux_extrapolated_[2][element.N_ - i] * lagrange_interpolant_right[offset_1D + j] + element.u_flux_extrapolated_[0][i] * lagrange_interpolant_left[offset_1D + j]) / weights[offset_1D + j]; + element.v_flux_derivative_[j] += (element.v_flux_extrapolated_[2][element.N_ - i] * lagrange_interpolant_right[offset_1D + j] + element.v_flux_extrapolated_[0][i] * lagrange_interpolant_left[offset_1D + j]) / weights[offset_1D + j]; + } + + for (int j = 0; j <= element.N_; ++j) { + element.G_p_[i * (element.N_ + 1) + j] = (element.G_p_[i * (element.N_ + 1) + j] - element.p_flux_derivative_[j]) / element.jacobian_[i * (element.N_ + 1) + j]; + element.G_u_[i * (element.N_ + 1) + j] = (element.G_u_[i * (element.N_ + 1) + j] - element.u_flux_derivative_[j]) / element.jacobian_[i * (element.N_ + 1) + j]; + element.G_v_[i * (element.N_ + 1) + j] = (element.G_v_[i * (element.N_ + 1) + j] - element.v_flux_derivative_[j]) / element.jacobian_[i * (element.N_ + 1) + j]; + } + } + } +} + +__global__ +auto SEM::Solvers::rk3_first_step(size_t N_elements, SEM::Entities::Element2D_t* elements, deviceFloat delta_t, deviceFloat g) -> void { + const int index = blockIdx.x * blockDim.x + threadIdx.x; + const int stride = blockDim.x * gridDim.x; + + for (size_t element_index = index; element_index < N_elements; element_index += stride) { + Element2D_t& element = elements[element_index]; + + for (int i = 0; i <= element.N_; ++i){ + for (int j = 0; j <= element.N_; ++j){ + element.p_intermediate_[i * (element.N_ + 1) + j] = element.G_p_[i * (element.N_ + 1) + j]; + element.u_intermediate_[i * (element.N_ + 1) + j] = element.G_u_[i * (element.N_ + 1) + j]; + element.v_intermediate_[i * (element.N_ + 1) + j] = element.G_v_[i * (element.N_ + 1) + j]; + + element.p_[i * (element.N_ + 1) + j] += g * delta_t * element.p_intermediate_[i * (element.N_ + 1) + j]; + element.u_[i * (element.N_ + 1) + j] += g * delta_t * element.u_intermediate_[i * (element.N_ + 1) + j]; + element.v_[i * (element.N_ + 1) + j] += g * delta_t * element.v_intermediate_[i * (element.N_ + 1) + j]; + } + } + } +} + +__global__ +auto SEM::Solvers::rk3_step(size_t N_elements, SEM::Entities::Element2D_t* elements, deviceFloat delta_t, deviceFloat a, deviceFloat g) -> void { + const int index = blockIdx.x * blockDim.x + threadIdx.x; + const int stride = blockDim.x * gridDim.x; + + for (size_t element_index = index; element_index < N_elements; element_index += stride) { + Element2D_t& element = elements[element_index]; + + for (int i = 0; i <= element.N_; ++i){ + for (int j = 0; j <= element.N_; ++j){ + element.p_intermediate_[i * (element.N_ + 1) + j] = a * element.p_intermediate_[i * (element.N_ + 1) + j] + element.G_p_[i * (element.N_ + 1) + j]; + element.u_intermediate_[i * (element.N_ + 1) + j] = a * element.u_intermediate_[i * (element.N_ + 1) + j] + element.G_u_[i * (element.N_ + 1) + j]; + element.v_intermediate_[i * (element.N_ + 1) + j] = a * element.v_intermediate_[i * (element.N_ + 1) + j] + element.G_v_[i * (element.N_ + 1) + j]; + + element.p_[i * (element.N_ + 1) + j] += g * delta_t * element.p_intermediate_[i * (element.N_ + 1) + j]; + element.u_[i * (element.N_ + 1) + j] += g * delta_t * element.u_intermediate_[i * (element.N_ + 
1) + j]; + element.v_[i * (element.N_ + 1) + j] += g * delta_t * element.v_intermediate_[i * (element.N_ + 1) + j]; + } + } + } +} diff --git a/cuda_code/SolverBundling.cu b/cuda_code/SolverBundling.cu new file mode 100644 index 0000000000000000000000000000000000000000..0f89b39ef2f0c7796eafd8be0221b351f5d15bbc --- /dev/null +++ b/cuda_code/SolverBundling.cu @@ -0,0 +1,1264 @@ +#include + +////for debug purposes +//#define PRINT_RESIDUALS_SPARSE +//#define PRINT_RESIDUALS_DENSE + +#define ENABLE_EARLY_OUT + +#include "GlobalDefines.h" +#include "SolverBundlingParameters.h" +#include "SolverBundlingState.h" +#include "SolverBundlingUtil.h" +#include "SolverBundlingEquations.h" +#include "SolverBundlingEquationsLie.h" +#include "SolverBundlingDenseUtil.h" +#include "../../SiftGPU/CUDATimer.h" + +#include + +#define THREADS_PER_BLOCK_DENSE_DEPTH 128 +#define THREADS_PER_BLOCK_DENSE_DEPTH_FLIP 64 + +#define THREADS_PER_BLOCK_DENSE_OVERLAP 512 + + +///////////////////////////////////////////////////////////////////////// +// Dense Depth Term +///////////////////////////////////////////////////////////////////////// +template +__global__ void FindImageImageCorr_Kernel(SolverInput input, SolverState state, SolverParameters parameters) +{ + // image indices + unsigned int i, j; // project from j to i + if (usePairwise) { + i = blockIdx.x; j = blockIdx.y; // all pairwise + if (i >= j) return; + } + else { + i = blockIdx.x; j = i + 1; // frame-to-frame + } + if (input.d_validImages[i] == 0 || input.d_validImages[j] == 0) return; + + const unsigned int tidx = threadIdx.x; + const unsigned int subWidth = input.denseDepthWidth / parameters.denseOverlapCheckSubsampleFactor; + const unsigned int x = (tidx % subWidth) * parameters.denseOverlapCheckSubsampleFactor; + const unsigned int y = (tidx / subWidth) * parameters.denseOverlapCheckSubsampleFactor; + const unsigned int idx = y * input.denseDepthWidth + x; + + if (idx < (input.denseDepthWidth * input.denseDepthHeight)) { +#ifdef USE_LIE_SPACE + float4x4 transform = state.d_xTransformInverses[i] * state.d_xTransforms[j]; +#else + float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]); + float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]); + float4x4 invTransform_i = transform_i.getInverse(); //TODO PRECOMPUTE? 
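+		// Editor's note: transform = T_i^{-1} * T_j maps points from camera frame j into
+		// camera frame i, so the angle and overlap tests below operate on the relative
+		// pose between the two frames.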
+ float4x4 transform = invTransform_i * transform_j; +#endif + //if (!computeAngleDiff(transform, 1.0f)) return; //~60 degrees //TODO HERE ANGIE + //if (!computeAngleDiff(transform, 0.8f)) return; //~45 degrees + if (!computeAngleDiff(transform, 0.52f)) return; //~30 degrees + + // find correspondence + __shared__ int foundCorr[1]; foundCorr[0] = 0; + __syncthreads(); + if (findDenseCorr(idx, input.denseDepthWidth, input.denseDepthHeight, + parameters.denseDistThresh, transform, input.intrinsics, + input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[j].d_depthDownsampled, + parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src //TODO PARAMS + atomicAdd(foundCorr, 1); + } // found correspondence + __syncthreads(); + if (tidx == 0) { + if (foundCorr[0] > 10) { //TODO PARAMS + int addr = atomicAdd(state.d_numDenseOverlappingImages, 1); + state.d_denseOverlappingImages[addr] = make_uint2(i, j); + } + } + } // valid image pixel +} + +__global__ void FlipJtJ_Kernel(unsigned int total, unsigned int dim, float* d_JtJ) +{ + const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx < total) { + const unsigned int x = idx % dim; + const unsigned int y = idx / dim; + if (x > y) { + d_JtJ[y * dim + x] = d_JtJ[x * dim + y]; + } + } +} +__global__ void FindDenseCorrespondences_Kernel(SolverInput input, SolverState state, SolverParameters parameters) +{ + const int imPairIdx = blockIdx.x; //should not go out of bounds, no need to check + uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx]; + unsigned int i = imageIndices.x; unsigned int j = imageIndices.y; + + const unsigned int tidx = threadIdx.x; + const unsigned int gidx = tidx * gridDim.y + blockIdx.y; + + if (gidx < (input.denseDepthWidth * input.denseDepthHeight)) { +#ifdef USE_LIE_SPACE + float4x4 transform = state.d_xTransformInverses[i] * state.d_xTransforms[j]; //invTransform_i * transform_j +#else + float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]); + float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]); + float4x4 invTransform_i = transform_i.getInverse(); + float4x4 transform = invTransform_i * transform_j; +#endif + // find correspondence + const int numWarps = THREADS_PER_BLOCK_DENSE_DEPTH / WARP_SIZE; + __shared__ int s_count[numWarps]; + s_count[0] = 0; + int count = 0.0f; + //TODO HERE ANGIE +#ifdef CUDACACHE_UCHAR_NORMALS + if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight, + parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics, + input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4, + input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4, + parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src +#elif defined(CUDACACHE_FLOAT_NORMALS) + if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight, + parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics, + input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled, + input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled, + parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src +#endif +//#ifdef CUDACACHE_UCHAR_NORMALS +// if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight, +// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics, +// input.d_cacheFrames[i].d_depthDownsampled, 
input.d_cacheFrames[i].d_normalsDownsampledUCHAR4, +// input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4, +// parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src +//#else +// if (findDenseCorr(gidx, input.denseDepthWidth, input.denseDepthHeight, +// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics, +// input.d_cacheFrames[i].d_depthDownsampled, input.d_cacheFrames[i].d_normalsDownsampled, +// input.d_cacheFrames[j].d_depthDownsampled, input.d_cacheFrames[j].d_normalsDownsampled, +// parameters.denseDepthMin, parameters.denseDepthMax)) { //i tgt, j src +//#endif + //atomicAdd(&state.d_denseCorrCounts[imPairIdx], 1.0f); + count++; + } // found correspondence + count = warpReduce(count); + __syncthreads(); + if (tidx % WARP_SIZE == 0) { + s_count[tidx / WARP_SIZE] = count; + //atomicAdd(&state.d_denseCorrCounts[imPairIdx], count); + } + __syncthreads(); + for (unsigned int stride = numWarps / 2; stride > 0; stride /= 2) { + if (tidx < stride) s_count[tidx] = s_count[tidx] + s_count[tidx + stride]; + __syncthreads(); + } + if (tidx == 0) { + atomicAdd(&state.d_denseCorrCounts[imPairIdx], s_count[0]); + } + } // valid image pixel +} + +__global__ void WeightDenseCorrespondences_Kernel(unsigned int N, SolverState state) +{ + const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx < N) { + // apply ln to weights + float x = state.d_denseCorrCounts[idx]; + if (x > 0) { + //if (x < 3200) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS + if (x < 800) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS + //if (x < 400) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS + //if (x < 200) state.d_denseCorrCounts[idx] = 0; //don't consider too small #corr //TODO PARAMS //TODO EVAL DEBUG + else { + state.d_denseCorrCounts[idx] = 1.0f / min(logf(x), 9.0f); // natural log //TODO PARAMS + } + + //state.d_denseCorrCounts[idx] = 1.0f / clamp(logf(x), 2.0f, 9.0f); // natural log //TODO PARAMS + } + } +} + +template +__global__ void BuildDenseSystem_Kernel(SolverInput input, SolverState state, SolverParameters parameters) +{ + const int imPairIdx = blockIdx.x; + uint2 imageIndices = state.d_denseOverlappingImages[imPairIdx]; + unsigned int i = imageIndices.x; unsigned int j = imageIndices.y; + + float imPairWeight = state.d_denseCorrCounts[imPairIdx]; + if (imPairWeight == 0.0f) return; + + const unsigned int idx = threadIdx.x; + const unsigned int srcIdx = idx * gridDim.y + blockIdx.y; + + if (srcIdx < (input.denseDepthWidth * input.denseDepthHeight)) { +#ifdef USE_LIE_SPACE + float4x4 transform_i = state.d_xTransforms[i]; + float4x4 transform_j = state.d_xTransforms[j]; + float4x4 invTransform_i = state.d_xTransformInverses[i]; + float4x4 invTransform_j = state.d_xTransformInverses[j]; + float4x4 transform = invTransform_i * transform_j; +#else + float4x4 transform_i = evalRtMat(state.d_xRot[i], state.d_xTrans[i]); + float4x4 transform_j = evalRtMat(state.d_xRot[j], state.d_xTrans[j]); + float4x4 invTransform_i = transform_i.getInverse(); //TODO PRECOMPUTE? 
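+		// Editor's note: for each valid correspondence the code below builds a 1x6 Jacobian
+		// row per camera pose (depthJacBlockRow_i/j, colorJacBlockRow_i/j), a scalar residual
+		// and a weight; addToLocalSystem() then accumulates them into the dense normal
+		// equations J^T J (state.d_denseJtJ) and J^T r (state.d_denseJtr).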
+ float4x4 transform = invTransform_i * transform_j; +#endif + // point-to-plane term + matNxM<1, 6> depthJacBlockRow_i, depthJacBlockRow_j; depthJacBlockRow_i.setZero(); depthJacBlockRow_j.setZero(); + float depthRes = 0.0f; float depthWeight = 0.0f; + // color term + matNxM<1, 6> colorJacBlockRow_i, colorJacBlockRow_j; colorJacBlockRow_i.setZero(); colorJacBlockRow_j.setZero(); + float colorRes = 0.0f; float colorWeight = 0.0f; + + // find correspondence + float3 camPosSrc; float3 camPosSrcToTgt; float3 camPosTgt; float3 normalTgt; float2 tgtScreenPos; + //TODO HERE ANGIE +#ifdef CUDACACHE_FLOAT_NORMALS + bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight, + parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics, + input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampled, + input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampled, + parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src +#elif defined(CUDACACHE_UCHAR_NORMALS) + bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight, + parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics, + input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4, + input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4, + parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src +#endif +//#ifdef CUDACACHE_UCHAR_NORMALS +// bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight, +// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics, +// input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampledUCHAR4, +// input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampledUCHAR4, +// parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src +//#else +// bool foundCorr = findDenseCorr(srcIdx, input.denseDepthWidth, input.denseDepthHeight, +// parameters.denseDistThresh, parameters.denseNormalThresh, transform, input.intrinsics, +// input.d_cacheFrames[i].d_cameraposDownsampled, input.d_cacheFrames[i].d_normalsDownsampled, +// input.d_cacheFrames[j].d_cameraposDownsampled, input.d_cacheFrames[j].d_normalsDownsampled, +// parameters.denseDepthMin, parameters.denseDepthMax, camPosSrc, camPosSrcToTgt, tgtScreenPos, camPosTgt, normalTgt); //i tgt, j src +//#endif + if (useDepth) { + if (foundCorr) { + // point-to-plane residual + float3 diff = camPosTgt - camPosSrcToTgt; + depthRes = dot(diff, normalTgt); + //depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, 0.5f*((1.0f - length(diff) / parameters.denseDistThresh) + (1.0f - camPosTgt.z / parameters.denseDepthMax))); + //depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 2.0f)); //fr1_desk + //depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 2.5f)); //fr3_office, fr2_xyz_half // livingroom1 + //depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 3.0f)); //fr3_nstn + //depthWeight = parameters.weightDenseDepth * imPairWeight * max(0.0f, (1.0f - camPosTgt.z / 1.8f)); 
+ //depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.5f), 1.8f)); + //depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.0f), 1.8f)); //fr3_office, fr1_desk_f20 + depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 2.0f), 2.5f)); //fr2_xyz_half + //depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / 3.5f), 1.8f)); //fr3_nstn + + //depthWeight = parameters.weightDenseDepth * imPairWeight * (pow(max(0.0f, 1.0f - camPosTgt.z / parameters.denseDepthMax), 1.8f)); //TODO EVAL DEBUGGING + + //float wtgt = (pow(max(0.0f, 1.0f - camPosTgt.z / 2.5f), 1.8f)); + //float wsrc = (pow(max(0.0f, 1.0f - camPosSrc.z / 2.5f), 1.8f)); + //depthWeight = parameters.weightDenseDepth * imPairWeight * wtgt * wsrc; +#ifdef USE_LIE_SPACE + if (i > 0) computeJacobianBlockRow_i(depthJacBlockRow_i, transform_i, invTransform_j, camPosSrc, normalTgt); + if (j > 0) computeJacobianBlockRow_j(depthJacBlockRow_j, invTransform_i, transform_j, camPosSrc, normalTgt); +#else + if (i > 0) computeJacobianBlockRow_i(depthJacBlockRow_i, state.d_xRot[i], state.d_xTrans[i], transform_j, camPosSrc, normalTgt); + if (j > 0) computeJacobianBlockRow_j(depthJacBlockRow_j, state.d_xRot[j], state.d_xTrans[j], invTransform_i, camPosSrc, normalTgt); +#endif + } + addToLocalSystem(foundCorr, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6, + depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx + , state.d_sumResidual, state.d_corrCount); + //addToLocalSystemBrute(foundCorr, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6, + // depthJacBlockRow_i, depthJacBlockRow_j, i, j, depthRes, depthWeight, idx); + } + if (useColor) { + bool foundCorrColor = false; + if (foundCorr) { + const float2 intensityDerivTgt = bilinearInterpolationFloat2(tgtScreenPos.x, tgtScreenPos.y, input.d_cacheFrames[i].d_intensityDerivsDownsampled, input.denseDepthWidth, input.denseDepthHeight); + const float intensityTgt = bilinearInterpolationFloat(tgtScreenPos.x, tgtScreenPos.y, input.d_cacheFrames[i].d_intensityDownsampled, input.denseDepthWidth, input.denseDepthHeight); + colorRes = intensityTgt - input.d_cacheFrames[j].d_intensityDownsampled[srcIdx]; + foundCorrColor = (intensityDerivTgt.x != MINF && abs(colorRes) < parameters.denseColorThresh && length(intensityDerivTgt) > parameters.denseColorGradientMin); + if (foundCorrColor) { + const float2 focalLength = make_float2(input.intrinsics.x, input.intrinsics.y); +#ifdef USE_LIE_SPACE + if (i > 0) computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, transform_i, invTransform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt); + if (j > 0) computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, invTransform_i, transform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt); +#else + if (i > 0) computeJacobianBlockIntensityRow_i(colorJacBlockRow_i, focalLength, state.d_xRot[i], state.d_xTrans[i], transform_j, camPosSrc, camPosSrcToTgt, intensityDerivTgt); + if (j > 0) computeJacobianBlockIntensityRow_j(colorJacBlockRow_j, focalLength, state.d_xRot[j], state.d_xTrans[j], invTransform_i, camPosSrc, camPosSrcToTgt, intensityDerivTgt); +#endif + colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 1.0f - abs(colorRes) / (1.15f*parameters.denseColorThresh)); + //colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 1.0f - abs(colorRes) / 
parameters.denseColorThresh) * max(0.0f, (1.0f - camPosTgt.z / 1.0f)); + //colorWeight = parameters.weightDenseColor * imPairWeight * max(0.0f, 0.5f*(1.0f - abs(colorRes) / parameters.denseColorThresh) + 0.5f*max(0.0f, (1.0f - camPosTgt.z / 1.0f))); + } + } + addToLocalSystem(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6, + colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx + , state.d_sumResidualColor, state.d_corrCountColor); + //addToLocalSystemBrute(foundCorrColor, state.d_denseJtJ, state.d_denseJtr, input.numberOfImages * 6, + // colorJacBlockRow_i, colorJacBlockRow_j, i, j, colorRes, colorWeight, idx); + } + } // valid image pixel +} + +bool BuildDenseSystem(const SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer) +{ + const unsigned int N = input.numberOfImages; + const int sizeJtr = 6 * N; + const int sizeJtJ = sizeJtr * sizeJtr; + +#ifdef PRINT_RESIDUALS_DENSE + cutilSafeCall(cudaMemset(state.d_corrCount, 0, sizeof(int))); + cutilSafeCall(cudaMemset(state.d_sumResidual, 0, sizeof(float))); + cutilSafeCall(cudaMemset(state.d_corrCountColor, 0, sizeof(int))); + cutilSafeCall(cudaMemset(state.d_sumResidualColor, 0, sizeof(float))); +#endif + + const unsigned int maxDenseImPairs = input.numberOfImages * (input.numberOfImages - 1) / 2; + cutilSafeCall(cudaMemset(state.d_denseCorrCounts, 0, sizeof(float) * maxDenseImPairs)); + cutilSafeCall(cudaMemset(state.d_denseJtJ, 0, sizeof(float) * sizeJtJ)); + cutilSafeCall(cudaMemset(state.d_denseJtr, 0, sizeof(float) * sizeJtr)); + cutilSafeCall(cudaMemset(state.d_numDenseOverlappingImages, 0, sizeof(int))); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + + dim3 gridImImOverlap; + if (parameters.useDenseDepthAllPairwise) gridImImOverlap = dim3(N, N, 1); // pairwise + else gridImImOverlap = dim3(N - 1, 1, 1); // for frame-to-frame + + if (timer) timer->startEvent("BuildDenseDepthSystem - find image corr"); + if (parameters.useDenseDepthAllPairwise) FindImageImageCorr_Kernel << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> >(input, state, parameters); + else FindImageImageCorr_Kernel << < gridImImOverlap, THREADS_PER_BLOCK_DENSE_OVERLAP >> >(input, state, parameters); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + if (timer) timer->endEvent(); + + int numOverlapImagePairs; + cutilSafeCall(cudaMemcpy(&numOverlapImagePairs, state.d_numDenseOverlappingImages, sizeof(int), cudaMemcpyDeviceToHost)); + if (numOverlapImagePairs == 0) { + printf("warning: no overlapping images for dense solve\n"); + return false; + } + const int reductionGlobal = (input.denseDepthWidth*input.denseDepthHeight + THREADS_PER_BLOCK_DENSE_DEPTH - 1) / THREADS_PER_BLOCK_DENSE_DEPTH; + dim3 grid(numOverlapImagePairs, reductionGlobal); + //if (N > 11) printf("num overlap image pairs = %d\n", numOverlapImagePairs); //debugging only + + if (timer) timer->startEvent("BuildDenseDepthSystem - compute im-im weights"); + + FindDenseCorrespondences_Kernel << > >(input, state, parameters); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + ////debugging //remember the delete! 
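+	// Editor's note: kernel launches are asynchronous, so a launch failure only surfaces on a
+	// later CUDA call. The _DEBUG blocks synchronize and call cutilCheckMsg right after each
+	// launch so that errors are attributed to the kernel that caused them; release builds skip
+	// the synchronization to avoid serializing the pipeline.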
+ //float* denseCorrCounts = new float[numOverlapImagePairs]; + //cutilSafeCall(cudaMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*numOverlapImagePairs, cudaMemcpyDeviceToHost)); + //unsigned int totalCount = 0; + //for (unsigned int i = 0; i < numOverlapImagePairs; i++) { totalCount += (unsigned int)denseCorrCounts[i]; } + //printf("total count = %d\n", totalCount); + + //uint2* imageIndices = new uint2[numOverlapImagePairs]; + //cutilSafeCall(cudaMemcpy(imageIndices, state.d_denseOverlappingImages, sizeof(uint2)*numOverlapImagePairs, cudaMemcpyDeviceToHost)); + //if (imageIndices) delete[] imageIndices; + ////debugging + + //debugging - compute some overlap stats + //if (true || input.numberOfImages > 11) { + // float4x4* transforms = new float4x4[input.numberOfImages]; + // float* denseCorrCounts = new float[numOverlapImagePairs]; + // uint2* imageIndices = new uint2[numOverlapImagePairs]; + // cutilSafeCall(cudaMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*numOverlapImagePairs, cudaMemcpyDeviceToHost)); + // cutilSafeCall(cudaMemcpy(imageIndices, state.d_denseOverlappingImages, sizeof(uint2)*numOverlapImagePairs, cudaMemcpyDeviceToHost)); + // cutilSafeCall(cudaMemcpy(transforms, state.d_xTransforms, sizeof(float4x4)*input.numberOfImages, cudaMemcpyDeviceToHost)); + // FILE* fp = fopen("debug/overlaps.csv", "w"); + // char buffer[128]; + // for (int i = 0; i < numOverlapImagePairs; i++) { + // if (denseCorrCounts[i] > 0) { + // float3 d = transforms[imageIndices[i].x].getTranslation() - transforms[imageIndices[i].y].getTranslation(); + // sprintf(buffer, "%d,%d,%d,%f\n", imageIndices[i].x, imageIndices[i].y, (int)denseCorrCounts[i], length(d)); + // fwrite(buffer, sizeof(char), strlen(buffer), fp); + // } + // } + // fclose(fp); + // if (transforms) delete[] transforms; + // if (denseCorrCounts) delete[] denseCorrCounts; + // if (imageIndices) delete[] imageIndices; + // int a = 5; + //} + + int wgrid = (numOverlapImagePairs + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP; + WeightDenseCorrespondences_Kernel << < wgrid, THREADS_PER_BLOCK_DENSE_DEPTH_FLIP >> >(maxDenseImPairs, state); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + ////debugging + //cutilSafeCall(cudaMemcpy(denseCorrCounts, state.d_denseCorrCounts, sizeof(float)*maxDenseImPairs, cudaMemcpyDeviceToHost)); + //totalCount = 0; + //for (unsigned int i = 0; i < maxDenseImPairs; i++) { if (denseCorrCounts[i] > 0.0f) totalCount++; } + //printf("total count = %d\n", totalCount); + //if (denseCorrCounts) delete[] denseCorrCounts; + ////debugging + if (timer) timer->endEvent(); + if (timer) timer->startEvent("BuildDenseDepthSystem - build jtj/jtr"); + + if (parameters.weightDenseDepth > 0.0f) { + if (parameters.weightDenseColor > 0.0f) BuildDenseSystem_Kernel << > >(input, state, parameters); + else BuildDenseSystem_Kernel << > >(input, state, parameters); + } + else { + BuildDenseSystem_Kernel << > >(input, state, parameters); + } +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + + ////debugging + //bool debugPrint = true; + //float* h_JtJ = NULL; + //float* h_Jtr = NULL; + //if (debugPrint) { + // h_JtJ = new float[sizeJtJ]; + // h_Jtr = new float[sizeJtr]; + // cutilSafeCall(cudaMemcpy(h_JtJ, state.d_denseJtJ, sizeof(float) * sizeJtJ, cudaMemcpyDeviceToHost)); + // cutilSafeCall(cudaMemcpy(h_Jtr, state.d_denseJtr, sizeof(float) * sizeJtr, cudaMemcpyDeviceToHost)); + 
// printf("JtJ:\n"); + // //for (unsigned int i = 0; i < 6 * N; i++) { + // // for (unsigned int j = 0; j < 6 * N; j++) + // for (unsigned int i = 6 * 1; i < 6 * 2; i++) { + // for (unsigned int j = 6 * 1; j < 6 * 2; j++) + // printf(" %f,", h_JtJ[j * 6 * N + i]); + // printf("\n"); + // } + // printf("Jtr:\n"); + // for (unsigned int i = 0; i < 6 * N; i++) { + // printf(" %f,", h_Jtr[i]); + // } + // printf("\n"); + //} + ////debugging +#ifdef PRINT_RESIDUALS_DENSE + if (parameters.weightDenseDepth > 0) { + float sumResidual; int corrCount; + cutilSafeCall(cudaMemcpy(&sumResidual, state.d_sumResidual, sizeof(float), cudaMemcpyDeviceToHost)); + cutilSafeCall(cudaMemcpy(&corrCount, state.d_corrCount, sizeof(int), cudaMemcpyDeviceToHost)); + printf("\tdense depth: weights * residual = %f * %f = %f\t[#corr = %d]\n", parameters.weightDenseDepth, sumResidual / parameters.weightDenseDepth, sumResidual, corrCount); + } + if (parameters.weightDenseColor > 0) { + float sumResidual; int corrCount; + cutilSafeCall(cudaMemcpy(&sumResidual, state.d_sumResidualColor, sizeof(float), cudaMemcpyDeviceToHost)); + cutilSafeCall(cudaMemcpy(&corrCount, state.d_corrCountColor, sizeof(int), cudaMemcpyDeviceToHost)); + printf("\tdense color: weights * residual = %f * %f = %f\t[#corr = %d]\n", parameters.weightDenseColor, sumResidual / parameters.weightDenseColor, sumResidual, corrCount); + } +#endif + const unsigned int flipgrid = (sizeJtJ + THREADS_PER_BLOCK_DENSE_DEPTH_FLIP - 1) / THREADS_PER_BLOCK_DENSE_DEPTH_FLIP; + FlipJtJ_Kernel << > >(sizeJtJ, sizeJtr, state.d_denseJtJ); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + if (timer) timer->endEvent(); + return true; +} + +//todo more efficient?? (there are multiple per image-image...) 
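collectHighResidualsDevice below compacts the indices of correspondences whose residual exceeds a threshold by reserving output slots with atomicAdd on a global counter. The standalone sketch that follows is editorial, not part of the patch, and its names are illustrative; it isolates that pattern. Note that the order of the collected indices is non-deterministic, which is acceptable when the list is only used as a work set.

```cuda
// Editorial sketch only: threshold-and-compact via an atomic counter.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void collectAboveThreshold(const float *values, int n, float thresh,
                                      int *outIndices, int *outCount, int maxOut)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && values[i] > thresh) {
        const int slot = atomicAdd(outCount, 1);   // reserve a slot in the output list
        if (slot < maxOut) outIndices[slot] = i;   // drop entries past the capacity
    }
}

int main()
{
    const int n = 1 << 16;
    float *d_vals = nullptr;
    int *d_idx = nullptr, *d_count = nullptr;
    cudaMalloc(&d_vals, n * sizeof(float));
    cudaMalloc(&d_idx, n * sizeof(int));
    cudaMalloc(&d_count, sizeof(int));
    cudaMemset(d_vals, 0, n * sizeof(float));  // all zeros, so nothing should pass
    cudaMemset(d_count, 0, sizeof(int));

    collectAboveThreshold<<<(n + 255) / 256, 256>>>(d_vals, n, 0.5f, d_idx, d_count, n);

    int count = 0;
    cudaMemcpy(&count, d_count, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d values above threshold\n", count);  // expected: 0

    cudaFree(d_vals);
    cudaFree(d_idx);
    cudaFree(d_count);
    return 0;
}
```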
+//get high residuals +__global__ void collectHighResidualsDevice(SolverInput input, SolverState state, SolverStateAnalysis analysis, SolverParameters parameters, unsigned int maxNumHighResiduals) +{ + const unsigned int N = input.numberOfCorrespondences; // Number of block variables + const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x; + + if (corrIdx < N) { + float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters); + if (residual > parameters.highResidualThresh) { + int idx = atomicAdd(state.d_countHighResidual, 1); + if (idx < maxNumHighResiduals) { + analysis.d_maxResidual[idx] = residual; + analysis.d_maxResidualIndex[idx] = corrIdx; + } + } + } +} +extern "C" void collectHighResiduals(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, SolverParameters& parameters, CUDATimer* timer) +{ + if (timer) timer->startEvent(__FUNCTION__); + cutilSafeCall(cudaMemset(state.d_countHighResidual, 0, sizeof(int))); + + const unsigned int N = input.numberOfCorrespondences; // Number of correspondences + unsigned int maxNumHighResiduals = (input.maxCorrPerImage*input.maxNumberOfImages + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; + collectHighResidualsDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, analysis, parameters, maxNumHighResiduals); + +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + if (timer) timer->endEvent(); +} + +///////////////////////////////////////////////////////////////////////// +// Eval Max Residual +///////////////////////////////////////////////////////////////////////// + +__global__ void EvalMaxResidualDevice(SolverInput input, SolverState state, SolverStateAnalysis analysis, SolverParameters parameters) +{ + __shared__ int maxResIndex[THREADS_PER_BLOCK]; + __shared__ float maxRes[THREADS_PER_BLOCK]; + + const unsigned int N = input.numberOfCorrespondences; // Number of block variables + const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x; + + maxResIndex[threadIdx.x] = 0; + maxRes[threadIdx.x] = 0.0f; + + if (corrIdx < N) { + float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters); + + maxRes[threadIdx.x] = residual; + maxResIndex[threadIdx.x] = corrIdx; + + __syncthreads(); + + for (int stride = THREADS_PER_BLOCK / 2; stride > 0; stride /= 2) { + + if (threadIdx.x < stride) { + int first = threadIdx.x; + int second = threadIdx.x + stride; + if (maxRes[first] < maxRes[second]) { + maxRes[first] = maxRes[second]; + maxResIndex[first] = maxResIndex[second]; + } + } + + __syncthreads(); + } + + if (threadIdx.x == 0) { + //printf("d_maxResidual[%d] = %f (index %d)\n", blockIdx.x, maxRes[0], maxResIndex[0]); + analysis.d_maxResidual[blockIdx.x] = maxRes[0]; + analysis.d_maxResidualIndex[blockIdx.x] = maxResIndex[0]; + } + } +} + +extern "C" void evalMaxResidual(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, SolverParameters& parameters, CUDATimer* timer) +{ + if (timer) timer->startEvent(__FUNCTION__); + + const unsigned int N = input.numberOfCorrespondences; // Number of correspondences + EvalMaxResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, analysis, parameters); + +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + if (timer) timer->endEvent(); +} + +///////////////////////////////////////////////////////////////////////// +// Eval Cost 
+///////////////////////////////////////////////////////////////////////// + +__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + if (x == 0) state.d_sumResidual[0] = 0.0f; +} + +__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.numberOfCorrespondences; // Number of block variables + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + float residual = 0.0f; + if (x < N) { + residual = evalFDevice(x, input, state, parameters); + //float out = warpReduce(residual); + //unsigned int laneid; + ////This command gets the lane ID within the current warp + //asm("mov.u32 %0, %%laneid;" : "=r"(laneid)); + //if (laneid == 0) { + // atomicAdd(&state.d_sumResidual[0], out); + //} + atomicAdd(&state.d_sumResidual[0], residual); + } +} + +extern "C" float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer) +{ + if (timer) timer->startEvent(__FUNCTION__); + + float residual = 0.0f; + + const unsigned int N = input.numberOfCorrespondences; // Number of block variables + ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters); + EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters); + + residual = state.getSumResidual(); + +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + if (timer) timer->endEvent(); + + return residual; +} + +///////////////////////////////////////////////////////////////////////// +// Eval Linear Residual +///////////////////////////////////////////////////////////////////////// + +//__global__ void SumLinearResDevice(SolverInput input, SolverState state, SolverParameters parameters) +//{ +// const unsigned int N = input.numberOfImages; // Number of block variables +// const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; +// +// float residual = 0.0f; +// if (x > 0 && x < N) { +// residual = dot(state.d_rRot[x], state.d_rRot[x]) + dot(state.d_rTrans[x], state.d_rTrans[x]); +// atomicAdd(state.d_sumLinResidual, residual); +// } +//} +//float EvalLinearRes(SolverInput& input, SolverState& state, SolverParameters& parameters) +//{ +// float residual = 0.0f; +// +// const unsigned int N = input.numberOfImages; // Number of block variables +// +// // Do PCG step +// const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; +// +// float init = 0.0f; +// cutilSafeCall(cudaMemcpy(state.d_sumLinResidual, &init, sizeof(float), cudaMemcpyHostToDevice)); +// +// SumLinearResDevice << > >(input, state, parameters); +//#ifdef _DEBUG +// cutilSafeCall(cudaDeviceSynchronize()); +// cutilCheckMsg(__FUNCTION__); +//#endif +// +// cutilSafeCall(cudaMemcpy(&residual, state.d_sumLinResidual, sizeof(float), cudaMemcpyDeviceToHost)); +// return residual; +//} + +///////////////////////////////////////////////////////////////////////// +// Count High Residuals +///////////////////////////////////////////////////////////////////////// + +__global__ void CountHighResidualsDevice(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.numberOfCorrespondences; // Number of block variables + const unsigned int corrIdx = blockIdx.x * blockDim.x + threadIdx.x; + + if (corrIdx < N) { + float residual = evalAbsMaxResidualDevice(corrIdx, input, state, parameters); 
+ + if (residual > parameters.verifyOptDistThresh) + atomicAdd(state.d_countHighResidual, 1); + } +} + +extern "C" int countHighResiduals(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer) +{ + if (timer) timer->startEvent(__FUNCTION__); + + const unsigned int N = input.numberOfCorrespondences; // Number of correspondences + cutilSafeCall(cudaMemset(state.d_countHighResidual, 0, sizeof(int))); + CountHighResidualsDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters); + + int count; + cutilSafeCall(cudaMemcpy(&count, state.d_countHighResidual, sizeof(int), cudaMemcpyDeviceToHost)); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + + if (timer) timer->endEvent(); + return count; +} + +///////////////////////////////////////////////////////////////////////// +// Convergence Analysis +///////////////////////////////////////////////////////////////////////// + +//uses same data store as max residual +__global__ void EvalGNConvergenceDevice(SolverInput input, SolverStateAnalysis analysis, SolverState state) //compute max of delta +{ + __shared__ float maxVal[THREADS_PER_BLOCK]; + + const unsigned int N = input.numberOfImages; + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + maxVal[threadIdx.x] = 0.0f; + + if (x < N) + { + if (x == 0 || input.d_validImages[x] == 0) + maxVal[threadIdx.x] = 0.0f; + else { + float3 r3 = fmaxf(fabs(state.d_deltaRot[x]), fabs(state.d_deltaTrans[x])); + float r = fmaxf(r3.x, fmaxf(r3.y, r3.z)); + maxVal[threadIdx.x] = r; + } + __syncthreads(); + + for (int stride = THREADS_PER_BLOCK / 2; stride > 0; stride /= 2) { + if (threadIdx.x < stride) { + int first = threadIdx.x; + int second = threadIdx.x + stride; + maxVal[first] = fmaxf(maxVal[first], maxVal[second]); + } + __syncthreads(); + } + if (threadIdx.x == 0) { + analysis.d_maxResidual[blockIdx.x] = maxVal[0]; + } + } +} +float EvalGNConvergence(SolverInput& input, SolverState& state, SolverStateAnalysis& analysis, CUDATimer* timer) +{ + if (timer) timer->startEvent(__FUNCTION__); + + const unsigned int N = input.numberOfImages; + const unsigned int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; + EvalGNConvergenceDevice << < blocksPerGrid, THREADS_PER_BLOCK >> >(input, analysis, state); + +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + //copy to host and compute max + cutilSafeCall(cudaMemcpy(analysis.h_maxResidual, analysis.d_maxResidual, sizeof(float) * blocksPerGrid, cudaMemcpyDeviceToHost)); + cutilSafeCall(cudaMemcpy(analysis.h_maxResidualIndex, analysis.d_maxResidualIndex, sizeof(int) * blocksPerGrid, cudaMemcpyDeviceToHost)); + float maxVal = 0.0f; + for (unsigned int i = 0; i < blocksPerGrid; i++) { + if (maxVal < analysis.h_maxResidual[i]) maxVal = analysis.h_maxResidual[i]; + } + if (timer) timer->endEvent(); + + return maxVal; +} + +// For the naming scheme of the variables see: +// http://en.wikipedia.org/wiki/Conjugate_gradient_method +// This code is an implementation of their PCG pseudo code + +template +__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.numberOfImages; + const int x = blockIdx.x * blockDim.x + threadIdx.x; + + float d = 0.0f; + if (x > 0 && x < N) + { + float3 resRot, resTrans; + evalMinusJTFDevice(x, input, state, parameters, resRot, resTrans); // residuum = J^T x -F - A x 
// delta_0 => J^T x -F, since A x x_0 == 0
+
+		state.d_rRot[x] = resRot;										// store for next iteration
+		state.d_rTrans[x] = resTrans;									// store for next iteration
+
+		const float3 pRot = state.d_precondionerRot[x] * resRot;		// apply preconditioner M^-1
+		state.d_pRot[x] = pRot;
+
+		const float3 pTrans = state.d_precondionerTrans[x] * resTrans;	// apply preconditioner M^-1
+		state.d_pTrans[x] = pTrans;
+
+		d = dot(resRot, pRot) + dot(resTrans, pTrans);					// x-th term of numerator for computing alpha and denominator for computing beta
+
+		state.d_Ap_XRot[x] = make_float3(0.0f, 0.0f, 0.0f);
+		state.d_Ap_XTrans[x] = make_float3(0.0f, 0.0f, 0.0f);
+	}
+
+	d = warpReduce(d);
+	if (threadIdx.x % WARP_SIZE == 0)
+	{
+		atomicAdd(state.d_scanAlpha, d);
+	}
+}
+
+__global__ void PCGInit_Kernel2(unsigned int N, SolverState state)
+{
+	const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
+
+	if (x > 0 && x < N) state.d_rDotzOld[x] = state.d_scanAlpha[0];		// store result for next kernel call
+}
+
+void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer* timer)
+{
+	const unsigned int N = input.numberOfImages;
+
+	const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
+
+	if (blocksPerGrid > THREADS_PER_BLOCK)
+	{
+		std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl;
+		while (1);
+	}
+
+	if (timer) timer->startEvent("Initialization");
+
+	//!!!DEBUGGING //remember to uncomment the delete...
+	//float3* rRot = new float3[input.numberOfImages]; // -jtf
+	//float3* rTrans = new float3[input.numberOfImages];
+	//!!!DEBUGGING
+
+	cutilSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float)));
+#ifdef _DEBUG
+	cutilSafeCall(cudaDeviceSynchronize());
+	cutilCheckMsg(__FUNCTION__);
+#endif
+
+	if (parameters.useDense) PCGInit_Kernel1<true> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
+	else                     PCGInit_Kernel1<false> << <blocksPerGrid, THREADS_PER_BLOCK >> >(input, state, parameters);
+#ifdef _DEBUG
+	cutilSafeCall(cudaDeviceSynchronize());
+	cutilCheckMsg(__FUNCTION__);
+#endif
+
+	//cutilSafeCall(cudaMemcpy(rRot, state.d_rRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
+	//cutilSafeCall(cudaMemcpy(rTrans, state.d_rTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
+	//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rRot[i].x)) { printf("NaN in jtr rRot %d\n", i); getchar(); } }
+	//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rTrans[i].x)) { printf("NaN in jtr rTrans %d\n", i); getchar(); } }
+	//cutilSafeCall(cudaMemcpy(rRot, state.d_pRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
+	//cutilSafeCall(cudaMemcpy(rTrans, state.d_pTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost));
+	//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rRot[i].x)) { printf("NaN in jtr pRot %d\n", i); getchar(); } }
+	//for (unsigned int i = 1; i < input.numberOfImages; i++) { if (isnan(rTrans[i].x)) { printf("NaN in jtr pTrans %d\n", i); getchar(); } }
+
+	PCGInit_Kernel2 << <blocksPerGrid, THREADS_PER_BLOCK >> >(N, state);
+#ifdef _DEBUG
+	cutilSafeCall(cudaDeviceSynchronize());
+	cutilCheckMsg(__FUNCTION__);
+#endif
+
+	if (timer) timer->endEvent();
+
+	//float scanAlpha;
+	//cutilSafeCall(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost));
+	//if (rRot) delete[] rRot;
+	//if (rTrans) delete[] rTrans;
+}
+
+/////////////////////////////////////////////////////////////////////////
+// PCG
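// --- Illustrative sketch (not part of the original file) --------------------------------------
// The PCGInit/PCGStep kernels in this file distribute the textbook PCG recurrence (see the
// Wikipedia reference above) over blocks, warp reductions and atomicAdds. As a plain reference
// for what that recurrence computes, here is the same algorithm for a small dense SPD system
// A x = b with a Jacobi preconditioner M = diag(A); all names are hypothetical.
static void pcgSolveDenseReference(const float* A, const float* b, float* x,
                                   int n, int maxIter, float eps)
{
    float* r  = new float[n]; float* z  = new float[n];
    float* p  = new float[n]; float* Ap = new float[n];
    float rDotzOld = 0.0f;
    for (int i = 0; i < n; ++i) {
        x[i] = 0.0f;                      // x_0 = 0, hence r_0 = b - A x_0 = b
        r[i] = b[i];
        z[i] = r[i] / A[i * n + i];       // apply preconditioner M^-1
        p[i] = z[i];
        rDotzOld += r[i] * z[i];
    }
    for (int it = 0; it < maxIter && rDotzOld > eps; ++it) {
        float pAp = 0.0f;
        for (int i = 0; i < n; ++i) {     // Ap = A * p (the role of the "apply JTJ" step kernels)
            Ap[i] = 0.0f;
            for (int j = 0; j < n; ++j) Ap[i] += A[i * n + j] * p[j];
            pAp += p[i] * Ap[i];
        }
        if (pAp <= 0.0f) break;
        const float alpha = rDotzOld / pAp;        // step size
        float rDotzNew = 0.0f;
        for (int i = 0; i < n; ++i) {
            x[i] += alpha * p[i];                  // descent step
            r[i] -= alpha * Ap[i];                 // update residual
            z[i]  = r[i] / A[i * n + i];           // re-apply preconditioner
            rDotzNew += r[i] * z[i];
        }
        const float beta = rDotzNew / rDotzOld;    // update search direction
        for (int i = 0; i < n; ++i) p[i] = z[i] + beta * p[i];
        rDotzOld = rDotzNew;
    }
    delete[] r; delete[] z; delete[] p; delete[] Ap;
}
// -----------------------------------------------------------------------------------------------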
Iteration Parts +///////////////////////////////////////////////////////////////////////// + +//inefficient +__global__ void PCGStep_Kernel_Dense_Brute(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.numberOfImages; // Number of block variables + const unsigned int x = blockIdx.x; + + if (x > 0 && x < N) + { + float3 rot, trans; + applyJTJDenseBruteDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans); // A x p_k => J^T x J x p_k + + state.d_Ap_XRot[x] += rot; + state.d_Ap_XTrans[x] += trans; + } +} +__global__ void PCGStep_Kernel_Dense(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.numberOfImages; // Number of block variables + const unsigned int x = blockIdx.x; + const unsigned int lane = threadIdx.x % WARP_SIZE; + + if (x > 0 && x < N) + { + float3 rot, trans; + applyJTJDenseDevice(x, state, state.d_denseJtJ, input.numberOfImages, rot, trans, threadIdx.x); // A x p_k => J^T x J x p_k + + if (lane == 0) + { + atomicAdd(&state.d_Ap_XRot[x].x, rot.x); + atomicAdd(&state.d_Ap_XRot[x].y, rot.y); + atomicAdd(&state.d_Ap_XRot[x].z, rot.z); + + atomicAdd(&state.d_Ap_XTrans[x].x, trans.x); + atomicAdd(&state.d_Ap_XTrans[x].y, trans.y); + atomicAdd(&state.d_Ap_XTrans[x].z, trans.z); + } + } +} + +__global__ void PCGStep_Kernel0(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.numberOfCorrespondences; // Number of block variables + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + if (x < N) + { + const float3 tmp = applyJDevice(x, input, state, parameters); // A x p_k => J^T x J x p_k + state.d_Jp[x] = tmp; // store for next kernel call + } +} + +__global__ void PCGStep_Kernel1a(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.numberOfImages; // Number of block variables + const unsigned int x = blockIdx.x; + const unsigned int lane = threadIdx.x % WARP_SIZE; + + if (x > 0 && x < N) + { + float3 rot, trans; + applyJTDevice(x, input, state, parameters, rot, trans, threadIdx.x, lane); // A x p_k => J^T x J x p_k + + if (lane == 0) + { + atomicAdd(&state.d_Ap_XRot[x].x, rot.x); + atomicAdd(&state.d_Ap_XRot[x].y, rot.y); + atomicAdd(&state.d_Ap_XRot[x].z, rot.z); + + atomicAdd(&state.d_Ap_XTrans[x].x, trans.x); + atomicAdd(&state.d_Ap_XTrans[x].y, trans.y); + atomicAdd(&state.d_Ap_XTrans[x].z, trans.z); + } + } +} + +__global__ void PCGStep_Kernel1b(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.numberOfImages; // Number of block variables + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + float d = 0.0f; + if (x > 0 && x < N) + { + d = dot(state.d_pRot[x], state.d_Ap_XRot[x]) + dot(state.d_pTrans[x], state.d_Ap_XTrans[x]); // x-th term of denominator of alpha + } + + d = warpReduce(d); + if (threadIdx.x % WARP_SIZE == 0) + { + atomicAdd(state.d_scanAlpha, d); + } +} + +__global__ void PCGStep_Kernel2(SolverInput input, SolverState state) +{ + const unsigned int N = input.numberOfImages; + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + const float dotProduct = state.d_scanAlpha[0]; + + float b = 0.0f; + if (x > 0 && x < N) + { + float alpha = 0.0f; + if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha + + state.d_deltaRot[x] = state.d_deltaRot[x] + alpha*state.d_pRot[x]; // do a decent step + state.d_deltaTrans[x] = 
state.d_deltaTrans[x] + alpha*state.d_pTrans[x]; // do a decent step + + float3 rRot = state.d_rRot[x] - alpha*state.d_Ap_XRot[x]; // update residuum + state.d_rRot[x] = rRot; // store for next kernel call + + float3 rTrans = state.d_rTrans[x] - alpha*state.d_Ap_XTrans[x]; // update residuum + state.d_rTrans[x] = rTrans; // store for next kernel call + + float3 zRot = state.d_precondionerRot[x] * rRot; // apply preconditioner M^-1 + state.d_zRot[x] = zRot; // save for next kernel call + + float3 zTrans = state.d_precondionerTrans[x] * rTrans; // apply preconditioner M^-1 + state.d_zTrans[x] = zTrans; // save for next kernel call + + b = dot(zRot, rRot) + dot(zTrans, rTrans); // compute x-th term of the nominator of beta + } + b = warpReduce(b); + if (threadIdx.x % WARP_SIZE == 0) + { + atomicAdd(&state.d_scanAlpha[1], b); + } +} + +template +__global__ void PCGStep_Kernel3(SolverInput input, SolverState state) +{ + const unsigned int N = input.numberOfImages; + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + if (x > 0 && x < N) + { + const float rDotzNew = state.d_scanAlpha[1]; // get new nominator + const float rDotzOld = state.d_rDotzOld[x]; // get old denominator + + float beta = 0.0f; + if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta + + state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration + state.d_pRot[x] = state.d_zRot[x] + beta*state.d_pRot[x]; // update decent direction + state.d_pTrans[x] = state.d_zTrans[x] + beta*state.d_pTrans[x]; // update decent direction + + + state.d_Ap_XRot[x] = make_float3(0.0f, 0.0f, 0.0f); + state.d_Ap_XTrans[x] = make_float3(0.0f, 0.0f, 0.0f); + + if (lastIteration) + { + //if (input.d_validImages[x]) { //not really necessary +#ifdef USE_LIE_SPACE //TODO just keep that matrix transforms around + float3 rot, trans; + computeLieUpdate(state.d_deltaRot[x], state.d_deltaTrans[x], state.d_xRot[x], state.d_xTrans[x], rot, trans); + state.d_xRot[x] = rot; + state.d_xTrans[x] = trans; +#else + state.d_xRot[x] = state.d_xRot[x] + state.d_deltaRot[x]; + state.d_xTrans[x] = state.d_xTrans[x] + state.d_deltaTrans[x]; +#endif + //} + } + } +} + +template +bool PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverStateAnalysis& analysis, bool lastIteration, CUDATimer *timer) +{ + const unsigned int N = input.numberOfImages; // Number of block variables + + // Do PCG step + const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; + + if (blocksPerGrid > THREADS_PER_BLOCK) + { + std::cout << "Too many variables for this block size. 
Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl; + while (1); + } + if (timer) timer->startEvent("PCGIteration"); + + cutilSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float) * 2)); + + // sparse part + if (useSparse) { + const unsigned int Ncorr = input.numberOfCorrespondences; + const int blocksPerGridCorr = (Ncorr + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; + PCGStep_Kernel0 << > >(input, state, parameters); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + PCGStep_Kernel1a << < N, THREADS_PER_BLOCK_JT >> >(input, state, parameters); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + } + if (useDense) { + //if (timer) timer->startEvent("apply JTJ dense"); + PCGStep_Kernel_Dense << < N, THREADS_PER_BLOCK_JT_DENSE >> >(input, state, parameters); + //PCGStep_Kernel_Dense_Brute << < N, 1 >> >(input, state, parameters); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + //if (timer) timer->endEvent(); + } + //!!!debugging + //float3* Ap_Rot = new float3[input.numberOfImages]; + //float3* Ap_Trans = new float3[input.numberOfImages]; + //cutilSafeCall(cudaMemcpy(Ap_Rot, state.d_Ap_XRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost)); + //cutilSafeCall(cudaMemcpy(Ap_Trans, state.d_Ap_XTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost)); + //for (unsigned int i = 1; i < input.maxNumberOfImages; i++) { if (isnan(Ap_Rot[i].x)) { printf("NaN at Ap rot %d\n", i); getchar(); } } + //for (unsigned int i = 1; i < input.maxNumberOfImages; i++) { if (isnan(Ap_Trans[i].x)) { printf("NaN at Ap trans %d\n", i); getchar(); } } + //if (Ap_Rot) delete[] Ap_Rot; + //if (Ap_Trans) delete[] Ap_Trans; + //!!!debugging + + PCGStep_Kernel1b << > >(input, state, parameters); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + + PCGStep_Kernel2 << > >(input, state); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif +#ifdef ENABLE_EARLY_OUT //for convergence + float scanAlpha; cutilSafeCall(cudaMemcpy(&scanAlpha, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost)); + //if (fabs(scanAlpha) < 0.00005f) lastIteration = true; //todo check this part + //if (fabs(scanAlpha) < 1e-6) lastIteration = true; //todo check this part + if (fabs(scanAlpha) < 5e-7) { lastIteration = true; } //todo check this part +#endif + if (lastIteration) { + PCGStep_Kernel3 << > >(input, state); + } + else { + PCGStep_Kernel3 << > >(input, state); + } + +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + if (timer) timer->endEvent(); + + return lastIteration; +} + +#ifdef USE_LIE_SPACE //TODO +//////////////////////////////////////////////////////////////////// +// matrix <-> pose +//////////////////////////////////////////////////////////////////// +__global__ void convertLiePosesToMatricesCU_Kernel(const float3* d_rot, const float3* d_trans, unsigned int numTransforms, float4x4* d_transforms, float4x4* d_transformInvs) +{ + const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx < numTransforms) { + poseToMatrix(d_rot[idx], d_trans[idx], d_transforms[idx]); + d_transformInvs[idx] = d_transforms[idx].getInverse(); + } +} +extern "C" +void convertLiePosesToMatricesCU(const float3* d_rot, const float3* d_trans, unsigned int numTransforms, float4x4* 
d_transforms, float4x4* d_transformInvs) +{ + convertLiePosesToMatricesCU_Kernel << <(numTransforms + 8 - 1) / 8, 8 >> >(d_rot, d_trans, numTransforms, d_transforms, d_transformInvs); +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif +} +#endif + +//////////////////////////////////////////////////////////////////// +// Main GN Solver Loop +//////////////////////////////////////////////////////////////////// + +extern "C" void solveBundlingStub(SolverInput& input, SolverState& state, SolverParameters& parameters, SolverStateAnalysis& analysis, float* convergenceAnalysis, CUDATimer *timer) +{ + if (convergenceAnalysis) { + float initialResidual = EvalResidual(input, state, parameters, timer); + convergenceAnalysis[0] = initialResidual; // initial residual + } + + //!!!DEBUGGING +#ifdef PRINT_RESIDUALS_SPARSE + if (parameters.weightSparse > 0) { + if (input.numberOfCorrespondences == 0) { printf("ERROR: %d correspondences\n", input.numberOfCorrespondences); getchar(); } + float initialResidual = EvalResidual(input, state, parameters, timer); + printf("initial sparse = %f*%f = %f\n", parameters.weightSparse, initialResidual / parameters.weightSparse, initialResidual); + } +#endif + //float3* xRot = new float3[input.numberOfImages]; //remember the delete! + //float3* xTrans = new float3[input.numberOfImages]; + //timer = new CUDATimer(); + //static unsigned int totalLinIters = 0, numLin = 0, totalNonLinIters = 0, numNonLin = 0; + //!!!DEBUGGING + + for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++) + { + parameters.weightSparse = input.weightsSparse[nIter]; + parameters.weightDenseDepth = input.weightsDenseDepth[nIter]; + parameters.weightDenseColor = input.weightsDenseColor[nIter]; + parameters.useDense = (parameters.weightDenseDepth > 0 || parameters.weightDenseColor > 0); +#ifdef USE_LIE_SPACE + convertLiePosesToMatricesCU(state.d_xRot, state.d_xTrans, input.numberOfImages, state.d_xTransforms, state.d_xTransformInverses); +#endif + if (parameters.useDense) parameters.useDense = BuildDenseSystem(input, state, parameters, timer); //don't solve dense if no overlapping frames found + Initialization(input, state, parameters, timer); + + if (parameters.weightSparse > 0.0f) { + if (parameters.useDense) { + for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) + if (PCGIteration(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) { break; } + } + else { + for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) + if (PCGIteration(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) { + //totalLinIters += (linIter+1); numLin++; + break; + } + } + } + else { + for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) + if (PCGIteration(input, state, parameters, analysis, linIter == parameters.nLinIterations - 1, timer)) break; + } + //!!!debugging + //cutilSafeCall(cudaMemcpy(xRot, state.d_xRot, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost)); + //cutilSafeCall(cudaMemcpy(xTrans, state.d_xTrans, sizeof(float3)*input.numberOfImages, cudaMemcpyDeviceToHost)); + //!!!debugging +#ifdef PRINT_RESIDUALS_SPARSE + if (parameters.weightSparse > 0) { + float residual = EvalResidual(input, state, parameters, timer); + printf("[niter %d] weight * sparse = %f*%f = %f\t[#corr = %d]\n", nIter, parameters.weightSparse, residual / parameters.weightSparse, residual, input.numberOfCorrespondences); + 
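		// --- Illustrative sketch (not part of the original file) ----------------------------
		// The timer->startEvent()/endEvent() calls used throughout this solver presumably wrap
		// CUDA events. A minimal event-based timer with the same shape could look like the
		// struct below; SimpleGpuTimer is a hypothetical name, not the project's CUDATimer.
		struct SimpleGpuTimer {
			cudaEvent_t startEvt, stopEvt;
			SimpleGpuTimer()  { cudaEventCreate(&startEvt); cudaEventCreate(&stopEvt); }
			~SimpleGpuTimer() { cudaEventDestroy(startEvt); cudaEventDestroy(stopEvt); }
			void start() { cudaEventRecord(startEvt, 0); }
			float stopMs() {                          // elapsed milliseconds since start()
				cudaEventRecord(stopEvt, 0);
				cudaEventSynchronize(stopEvt);        // block until the stop event is reached
				float ms = 0.0f;
				cudaEventElapsedTime(&ms, startEvt, stopEvt);
				return ms;
			}
		};
		// usage: SimpleGpuTimer t; t.start(); /* launch kernels */ float ms = t.stopMs();
		// -------------------------------------------------------------------------------------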
} +#endif + if (convergenceAnalysis) { + float residual = EvalResidual(input, state, parameters, timer); + convergenceAnalysis[nIter + 1] = residual; + } + + //if (timer) timer->evaluate(true); + +#ifdef ENABLE_EARLY_OUT //convergence + //if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.01f) { //!!! TODO CHECK HOW THESE GENERALIZE + if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.005f) { //0.001? + //if (nIter < parameters.nNonLinearIterations - 1 && EvalGNConvergence(input, state, analysis, timer) < 0.001f) { + //if (!parameters.useDense) { totalNonLinIters += (nIter+1); numNonLin++; } + break; + } + //else if (!parameters.useDense && nIter == parameters.nNonLinearIterations - 1) { totalNonLinIters += (nIter+1); numNonLin++; } +#endif + } + //!!!debugging + //if (xRot) delete[] xRot; + //if (xTrans) delete[] xTrans; + //if (timer) { timer->evaluate(true, false); delete timer; } + //if (!parameters.useDense) { printf("mean #pcg its = %f\tmean #gn its = %f\n", (float)totalLinIters / (float)numLin, (float)totalNonLinIters / (float)numNonLin); } //just stats for global solve + //!!!debugging + } + +//////////////////////////////////////////////////////////////////// +// build variables to correspondences lookup +//////////////////////////////////////////////////////////////////// + +__global__ void BuildVariablesToCorrespondencesTableDevice(EntryJ* d_correspondences, unsigned int numberOfCorrespondences, + unsigned int maxNumCorrespondencesPerImage, int* d_variablesToCorrespondences, int* d_numEntriesPerRow) +{ + const unsigned int N = numberOfCorrespondences; // Number of block variables + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + if (x < N) { + EntryJ& corr = d_correspondences[x]; + if (corr.isValid()) { + int offset0 = atomicAdd(&d_numEntriesPerRow[corr.imgIdx_i], 1); // may overflow - need to check when read + int offset1 = atomicAdd(&d_numEntriesPerRow[corr.imgIdx_j], 1); // may overflow - need to check when read + if (offset0 < maxNumCorrespondencesPerImage && offset1 < maxNumCorrespondencesPerImage) { + d_variablesToCorrespondences[corr.imgIdx_i * maxNumCorrespondencesPerImage + offset0] = x; + d_variablesToCorrespondences[corr.imgIdx_j * maxNumCorrespondencesPerImage + offset1] = x; + } + else { //invalidate + printf("EXCEEDED MAX NUM CORR PER IMAGE IN SOLVER, INVALIDATING %d(%d,%d) [%d,%d | %d]\n", + x, corr.imgIdx_i, corr.imgIdx_j, offset0, offset1, maxNumCorrespondencesPerImage); //debugging + corr.setInvalid(); //make sure j corresponds to jt + } + } + } +} + +extern "C" void buildVariablesToCorrespondencesTableCUDA(EntryJ* d_correspondences, unsigned int numberOfCorrespondences, unsigned int maxNumCorrespondencesPerImage, int* d_variablesToCorrespondences, int* d_numEntriesPerRow, CUDATimer* timer) +{ + const unsigned int N = numberOfCorrespondences; + + if (timer) timer->startEvent(__FUNCTION__); + + BuildVariablesToCorrespondencesTableDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_correspondences, numberOfCorrespondences, maxNumCorrespondencesPerImage, d_variablesToCorrespondences, d_numEntriesPerRow); + +#ifdef _DEBUG + cutilSafeCall(cudaDeviceSynchronize()); + cutilCheckMsg(__FUNCTION__); +#endif + + if (timer) timer->endEvent(); +} diff --git a/cuda_code/SpMVTest.cu b/cuda_code/SpMVTest.cu new file mode 100644 index 0000000000000000000000000000000000000000..3f015f10aea5c1a2a112ac826080f3fd92731f13 --- 
/dev/null +++ b/cuda_code/SpMVTest.cu @@ -0,0 +1,57 @@ +/** + * @brief Sparse Matrix-Vector multiplication + * @file + */ +#include "Static/SpMV/SpMV.cuh" +#include +#include +#include +//#include //--profile-from-start off +#include + +int exec(int argc, char* argv[]) { + using namespace timer; + using namespace hornets_nest; + + graph::GraphStd graph; + CommandLineParam cmd(graph, argc, argv); + auto h_vector = new int[graph.nV()]; + auto h_value = new int[graph.nE()]; + std::fill(h_vector, h_vector + graph.nV(), 1); + std::fill(h_value, h_value + graph.nE(), 1); + + HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), + graph.csr_out_edges()); + hornet_init.insertEdgeData(h_value); + + HornetGraph hornet_matrix(hornet_init); + SpMV spmv(hornet_matrix, h_vector); + + Timer TM; + TM.start(); + + spmv.run(); + + TM.stop(); + TM.print("SpMV"); + + auto is_correct = spmv.validate(); + std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n"); + + TM.start(); + + delete[] h_vector; + delete[] h_value; + return is_correct; +} + +int main(int argc, char* argv[]) { + int ret = 0; + { + + ret = exec(argc, argv); + + } + + return ret; +} diff --git a/cuda_code/SpatialClassNLLCriterion_17.cu b/cuda_code/SpatialClassNLLCriterion_17.cu new file mode 100644 index 0000000000000000000000000000000000000000..29eba1be55ad8dda1853a7965767722586b0122f --- /dev/null +++ b/cuda_code/SpatialClassNLLCriterion_17.cu @@ -0,0 +1,247 @@ +#ifndef THC_GENERIC_FILE +#define THC_GENERIC_FILE "THCUNN/generic/SpatialClassNLLCriterion.cu" +#else + +void THNN_(SpatialClassNLLCriterion_shapeCheck)( + THCState *state, + THCTensor *input, + THCIndexTensor *target, + THCTensor *weights) +{ + TORCH_CHECK(target->dim() == 3, 1, + "only batches of spatial targets supported (3D tensors)" \ + " but got targets of size: : ", target->sizes()); + TORCH_CHECK(input->dim() == 4, 2, + "only batches of spatial inputs supported (4D tensors), " \ + "but got input of size: ", input->sizes()); + if (THCTensor_(size)(state, input, 0) != THCIndexTensor_(size)(state, target, 0) || + THCTensor_(size)(state, input, 2) != THCIndexTensor_(size)(state, target, 1) || + THCTensor_(size)(state, input, 3) != THCIndexTensor_(size)(state, target, 2)) { + THCDescBuff input_size = THCTensor_(sizeDesc)(state, input); + THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target); + THError("input and target batch or spatial sizes don't match: target %s, input %s", + target_size.str, input_size.str); + } + + if (weights && THCTensor_(nElement)(state, weights) != THCTensor_(size)(state, input, 1)) { + THError("weight tensor should be defined either for all or no classes"); + } +} + +static void THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)( + THCState *state, + THCTensor *gradOutput, + THCIndexTensor *target) +{ + TORCH_CHECK(THCTensor_(nDimensionLegacyNoScalars)(state, gradOutput) == 3, 2, + "gradOutput must have same dimension as target (3) but got dimension: ", gradOutput->sizes()); + if (THCTensor_(size)(state, gradOutput, 0) != THCIndexTensor_(size)(state, target, 0) || + THCTensor_(size)(state, gradOutput, 1) != THCIndexTensor_(size)(state, target, 1) || + THCTensor_(size)(state, gradOutput, 2) != THCIndexTensor_(size)(state, target, 2)) { + THCDescBuff gradOutput_size = THCTensor_(sizeDesc)(state, gradOutput); + THCDescBuff target_size = THCIndexTensor_(sizeDesc)(state, target); + THError("gradOutput sizes don't match target sizes: target %s, gradOutput %s", + target_size.str, gradOutput_size.str); + 
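    // --- Illustrative sketch (not part of the original file) ------------------------------------
    // The update kernels below compute the spatial (2D) negative log-likelihood loss. As a plain
    // reference for what a single reduced value means, here is a CPU version over log-probability
    // input [N, C, H, W], targets [N, H, W] and optional per-class weights [C]; the function name
    // and layout assumptions are hypothetical.
    static float spatialNLLReference(const float* logProb, const int64_t* target, const float* weight,
                                     int64_t N, int64_t C, int64_t H, int64_t W,
                                     int64_t ignore_index, bool meanReduction)
    {
        double sum = 0.0, totalWeight = 0.0;
        for (int64_t n = 0; n < N; ++n)
            for (int64_t h = 0; h < H; ++h)
                for (int64_t w = 0; w < W; ++w) {
                    const int64_t t = target[(n * H + h) * W + w];
                    if (t == ignore_index) continue;              // ignored pixels contribute nothing
                    const float wc = weight ? weight[t] : 1.0f;
                    sum += -wc * logProb[((n * C + t) * H + h) * W + w];
                    totalWeight += wc;
                }
        if (meanReduction && totalWeight > 0.0) return (float)(sum / totalWeight);
        return (float)sum;                                        // 'sum' reduction
    }
    // ---------------------------------------------------------------------------------------------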
} +} + +void THNN_(SpatialClassNLLCriterion_updateOutput)( + THCState *state, + THCTensor *input, + THCIndexTensor *target, + THCTensor *output, + int64_t reduction, + THCTensor *weights, + THCTensor *total_weight, + int64_t ignore_index) +{ + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + at::globalContext().alertNotDeterministic("SpatialClassNLLCriterion_updateOutput"); + THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights); + THCTensor_(resize0d)(state, output); + THCTensor_(resize0d)(state, total_weight); + + if (weights) + THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); + else + THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); + + if (reduction == at::Reduction::None) { + int64_t batch_size = THCTensor_(size)(state, input, 0); + int64_t H = THCTensor_(size)(state, input, 2); + int64_t W = THCTensor_(size)(state, input, 3); + int64_t count = batch_size * H * W; + + THCTensor_(resize3d)(state, output, batch_size, H, W); + + if (count == 0) { + // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. + return; + } + if (weights) { + weights = THCTensor_(newContiguous)(state, weights); + } + + SpatialClassNLLCriterion_updateOutput_no_reduce_kernel + <<>>( + count, + toDeviceTensor(state, input), + toDeviceTensor(state, target), + toDeviceTensor(state, output), + weights ? THCTensor_(data)(state, weights) : NULL, + ignore_index); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (weights) { + THCTensor_(free)(state, weights); + } + return; + } + + input = THCTensor_(newContiguous)(state, input); + weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; + target = THCIndexTensor_(newContiguous)(state, target); + + scalar_t *input_data = THCTensor_(data)(state, input); + scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; + THCIndex_t *target_data = THCIndexTensor_(data)(state, target); + scalar_t *output_data = THCTensor_(data)(state, output); + scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); + THCTensor_(fill)(state, output, ScalarConvert::to(0)); + THCTensor_(fill)(state, total_weight, ScalarConvert::to(0)); + + THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0); + if (batch_size != 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. + THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size; + int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; + blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; + int total_blocks = blocks_per_sample * batch_size; + + cunn_SpatialClassNLLCriterion_updateOutput_kernel + <<>>( + output_data, + total_weight_data, + input_data, + target_data, + weights_data, + reduction == at::Reduction::Mean, + THCTensor_(size)(state, input, 0), + THCTensor_(size)(state, input, 1), + THCTensor_(size)(state, input, 2) * THCTensor_(size)(state, input, 3), + blocks_per_sample, + ignore_index + ); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + if (reduction == at::Reduction::Mean) { + cunn_SpatialClassNLLCriterion_sizeAverage_kernel<<<1, 1, 0, c10::cuda::getCurrentCUDAStream()>>>( + output_data, total_weight_data, THCTensor_(nElement)(state, input) + ); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + + if (weights) + THCTensor_(free)(state, weights); + THCIndexTensor_(free)(state, target); + THCTensor_(free)(state, input); +} + +void THNN_(SpatialClassNLLCriterion_updateGradInput)( + THCState *state, + THCTensor *input, + THCIndexTensor *target, + THCTensor *gradOutput, + THCTensor *gradInput, + int64_t reduction, + THCTensor *weights, + THCTensor *total_weight, + int64_t ignore_index) +{ + THNN_(SpatialClassNLLCriterion_shapeCheck)(state, input, target, weights); + THCTensor_(resizeAs)(state, gradInput, input); + THCTensor_(zero)(state, gradInput); + THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, + "gradInput must be contiguous"); + + if (weights) + THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); + else + THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); + + if (reduction == at::Reduction::None) { + THNN_(SpatialClassNLLCriterion_gradOutput_no_reduce_shapeCheck)( + state, + gradOutput, + target); + + int64_t batch_size = THCTensor_(size)(state, input, 0); + int64_t H = THCTensor_(size)(state, input, 2); + int64_t W = THCTensor_(size)(state, input, 3); + int64_t count = batch_size * H * W; + + if (count == 0) { + // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. + return; + } + if (weights) { + weights = THCTensor_(newContiguous)(state, weights); + } + + SpatialClassNLLCriterion_updateGradInput_no_reduce_kernel + <<>>( + count, + toDeviceTensor(state, target), + toDeviceTensor(state, gradOutput), + toDeviceTensor(state, gradInput), + weights ? THCTensor_(data)(state, weights) : NULL, + ignore_index); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (weights) { + THCTensor_(free)(state, weights); + } + return; + } + + input = THCTensor_(newContiguous)(state, input); + weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; + target = THCIndexTensor_(newContiguous)(state, target); + + scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput); + scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; + scalar_t *gradInput_data = THCTensor_(data)(state, gradInput); + THCIndex_t *target_data = THCIndexTensor_(data)(state, target); + scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); + + THCIndex_t batch_size = THCIndexTensor_(size)(state, target, 0); + if (batch_size != 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. + THCIndex_t map_nelem = THCIndexTensor_(nElement)(state, target) / batch_size; + int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; + blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; + int total_blocks = blocks_per_sample * batch_size; + + cunn_SpatialClassNLLCriterion_updateGradInput_kernel + <<>>( + gradInput_data, + gradOutput_data, + target_data, + weights_data, + total_weight_data, + reduction == at::Reduction::Mean, + THCTensor_(size)(state, input, 0), + THCTensor_(size)(state, input, 1), + THCTensor_(size)(state, input, 2) *THCTensor_(size)(state, input, 3), + blocks_per_sample, + ignore_index + ); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + + if (weights) + THCTensor_(free)(state, weights); + THCIndexTensor_(free)(state, target); + THCTensor_(free)(state, input); +} + +#endif diff --git a/cuda_code/SpatialConvolutionMM_16.cu b/cuda_code/SpatialConvolutionMM_16.cu new file mode 100644 index 0000000000000000000000000000000000000000..af492b3e7da0257bd8aed1436af42a7a02ddd07a --- /dev/null +++ b/cuda_code/SpatialConvolutionMM_16.cu @@ -0,0 +1,499 @@ +#ifndef THC_GENERIC_FILE +#define THC_GENERIC_FILE "THCUNN/generic/SpatialConvolutionMM.cu" +#else + +#include +#include + +static inline void THNN_(SpatialConvolutionMM_shapeCheck)( + THCState *state, + THCTensor *input, THCTensor *gradOutput, + THCTensor *weight, THCTensor *bias, + int kH, int kW, int dH, int dW, int padH, int padW, + int weight_nullable) { + THArgCheck(kW > 0 && kH > 0, 9, + "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); + THArgCheck(dW > 0 && dH > 0, 11, + "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); + + if (weight != NULL) { + THCUNN_argCheck(state, !weight->is_empty() && (weight->dim() == 2 || weight->dim() == 4), 5, weight, + "non-empty 2D or 4D weight tensor expected, but got: %s"); + if (bias != NULL) { + THCUNN_check_dim_size(state, bias, 1, 0, weight->size(0)); + } + } else if (!weight_nullable) { + THError("weight tensor is expected to be non-nullable"); + } + + int ndim = input->dim(); + int dimf = 0; + int dimh = 1; + int dimw = 2; + + if (ndim == 4) { + dimf++; + dimh++; + dimw++; + } + + // Allow for empty batch size but not other dimensions + bool valid_empty = false; + if (ndim == 3) { + valid_empty = input->size(0) == 0 && input->size(1) != 0 && input->size(2) != 0; + } else if (ndim == 4) { + valid_empty = input->size(0) == 0 && input->size(1) != 0 && input->size(2) != 0 && input->size(3) != 0; + } + + + THCUNN_argCheck(state, (!input->is_empty() || valid_empty) && (ndim == 3 || ndim == 4), 2, input, + "non-empty 3D or 4D input tensor expected but got: %s"); + + int64_t inputHeight = input->size(dimh); + int64_t inputWidth = input->size(dimw); + + int64_t exactInputHeight = inputHeight + 2 * padH; + int64_t exactInputWidth = inputWidth + 2 * padW; + + if (exactInputHeight < kH || exactInputWidth < kW) { + THError("Calculated padded input size per channel: (%ld x %ld). " + "Kernel size: (%d x %d). Kernel size can't be greater than actual input size", + exactInputHeight, exactInputWidth, kH, kW); + } + + int64_t outputHeight = div_rtn(exactInputHeight - kH, dH) + 1; + int64_t outputWidth = div_rtn(exactInputWidth - kW, dW) + 1; + + if (outputWidth < 1 || outputHeight < 1) { + THError("Given input size per channel: (%ld x %ld). " + "Calculated output size per channel: (%ld x %ld). 
Output size is too small", + inputHeight, inputWidth, outputHeight, outputWidth); + } + + if (weight != NULL) { + int64_t nInputPlane = weight->size(1); + if (weight->dim() == 2) { + nInputPlane /= (kH * kW); + } + THCUNN_check_dim_size(state, input, ndim, dimf, nInputPlane); + } + + if (gradOutput != NULL) { + if (weight != NULL) { + int64_t nOutputPlane = weight->size(0); + THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane); + } else if (bias != NULL) { + int64_t nOutputPlane = bias->dim() == 0 ? 1 : bias->size(0); + THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane); + } + THCUNN_check_dim_size(state, gradOutput, ndim, dimh, outputHeight); + THCUNN_check_dim_size(state, gradOutput, ndim, dimw, outputWidth); + } +} + +static THCTensor* THNN_(newViewWeightMM2d)(THCState *state, THCTensor *weight) { + weight = THCTensor_(newContiguous)(state, weight); + if (weight->dim() == 4) { + int64_t s1 = weight->size(0); + int64_t s2 = weight->size(1) * weight->size(2) * weight->size(3); + THCTensor *old_weight = weight; + weight = THTensor_wrap(weight).view({s1, s2}).unsafeReleaseTensorImpl(); + THCTensor_(free)(state, old_weight); + } + return weight; +} + +void THNN_(SpatialConvolutionMM_updateOutput)( + THCState *state, + THCTensor *input, + THCTensor *output, + THCTensor *weight, + THCTensor *bias, + THCTensor *columns, + THCTensor *ones, + int kW, int kH, + int dW, int dH, + int padW, int padH) { + THCUNN_assertSameGPU(state, 5, input, output, weight, columns, ones); + if (bias) { + THCUNN_assertSameGPU(state, 2, weight, bias); + } + weight = THNN_(newViewWeightMM2d)(state, weight); + THNN_(SpatialConvolutionMM_shapeCheck) + (state, input, NULL, weight, bias, kH, kW, dH, dW, padH, padW, 0); + THArgCheck(!bias || THCTensor_(isContiguous)(state, bias), 5, + "bias tensor has to be contiguous"); + + int ndim = input->dim(); + int dimf = 0; + int dimh = 1; + int dimw = 2; + + if (ndim == 4) { + dimf++; + dimh++; + dimw++; + } + + int64_t nInputPlane = input->size(dimf); + int64_t inputHeight = input->size(dimh); + int64_t inputWidth = input->size(dimw); + int64_t nOutputPlane = weight->size(0); + int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1; + int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1; + + + input = THCTensor_(newContiguous)(state, input); + int is_batch = 1; + if (input->dim() == 3) { + // Force batch + is_batch = 0; + THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2)); + } + + // Batch size + input planes + int64_t batchSize = input->size(0); + + // Resize output + THCTensor_(resize4d)(state, output, batchSize, nOutputPlane, outputHeight, outputWidth); + + // Resize temporary columns + THCTensor_(resize2d)(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); + + // Define a buffer of ones, for bias accumulation + // Note: this buffer can be shared with other modules, it only ever gets increased, + // and always contains ones. + if (bias) { + if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) { + // Resize plane and fill with ones... 
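      // --- Illustrative sketch (not part of the original file) ---------------------------------
      // The `ones` buffer exists so the bias term can be written with a single GEMM:
      // bias [nOutputPlane x 1] times ones [1 x outputHeight*outputWidth] is an outer product
      // that broadcasts bias[c] over output plane c (the GEMM below passes beta = 0, so it
      // initializes output_n before the convolution GEMM accumulates into it). The plain-loop
      // equivalent of that bias GEMM, with hypothetical names:
      static void addBiasReference(float* output,      // [nOutputPlane, planeSize], row-major
                                   const float* bias,  // [nOutputPlane]
                                   int nOutputPlane, int planeSize)
      {
          for (int c = 0; c < nOutputPlane; ++c)
              for (int i = 0; i < planeSize; ++i)
                  output[c * planeSize + i] = bias[c];  // beta = 0: overwrite, do not accumulate
      }
      // ------------------------------------------------------------------------------------------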
+ THCTensor_(resize2d)(state, ones, outputHeight, outputWidth); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + } + } + + // Helpers + THCTensor *input_n = THCTensor_(new)(state); + THCTensor *output_n = THCTensor_(new)(state); + + // For each elt in batch, do: + for (int elt = 0; elt < batchSize; elt ++) { + // Matrix mulitply per output: + THCTensor_(select)(state, input_n, input, 0, elt); + THCTensor_(select)(state, output_n, output, 0, elt); + + // Do Bias first: + // M,N,K are dims of matrix A and B + // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) + int64_t m_ = nOutputPlane; + int64_t n_ = outputHeight * outputWidth; + int64_t k_ = 1; + + // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) + if (bias) { + at::cuda::blas::gemm( + 't', 'n', + n_, m_, k_, + ScalarConvert::to(1), + THCTensor_(data)(state, ones), k_, + THCTensor_(data)(state, bias), k_, + ScalarConvert::to(0), + THCTensor_(data)(state, output_n), n_ + ); + } else { + THCTensor_(zero)(state, output_n); + } + + if (kW != 1 || kH != 1 || dW != 1 || dH != 1 || padH != 0 || padW != 0) { + // Extract columns: + at::native::im2col( + c10::cuda::getCurrentCUDAStream(), + THCTensor_(data)(state, input_n), + nInputPlane, inputHeight, inputWidth, + outputHeight, outputWidth, + kH, kW, padH, padW, dH, dW, + 1, 1, + columns->data() + ); + } + + // M,N,K are dims of matrix A and B + // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) + int64_t m = nOutputPlane; + int64_t n = columns->size(1); + int64_t k = nInputPlane*kH*kW; + + // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) + auto gemm_in_ptr = + (kW != 1 || kH != 1 || dW != 1 || dH != 1 || padH != 0 || padW != 0) + ? THCTensor_(data)(state, columns) + : THCTensor_(data)(state, input_n); + at::cuda::blas::gemm( + 'n', 'n', + n, m, k, + ScalarConvert::to(1), + gemm_in_ptr, n, + THCTensor_(data)(state, weight), k, + ScalarConvert::to(1), + THCTensor_(data)(state, output_n), n + ); + } + + // Free + THCTensor_(free)(state, input_n); + THCTensor_(free)(state, output_n); + + // Resize output + if (is_batch == 0) { + THCTensor_(resize3d)(state, output, nOutputPlane, outputHeight, outputWidth); + THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth); + } + + THCTensor_(free)(state, input); + THCTensor_(free)(state, weight); +} + +void THNN_(SpatialConvolutionMM_updateGradInput)( + THCState *state, + THCTensor *input, + THCTensor *gradOutput, + THCTensor *gradInput, + THCTensor *weight, + THCTensor *gradColumns, + THCTensor *ones, + int kW, int kH, + int dW, int dH, + int padW, int padH) { + THCUNN_assertSameGPU(state, 5, input, gradOutput, weight, + gradColumns, gradInput); + weight = THNN_(newViewWeightMM2d)(state, weight); + + THNN_(SpatialConvolutionMM_shapeCheck) + (state, input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW, 0); + + // Params + int nInputPlane = weight->dim() == 2 ? 
weight->size(1)/(kW*kH) : weight->size(1); + int nOutputPlane = weight->size(0); + + input = THCTensor_(newContiguous)(state, input); + gradOutput = THCTensor_(newContiguous)(state, gradOutput); + + int is_batch = 1; + if (input->dim() == 3) { + // Force batch + is_batch = 0; + THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2)); + THCTensor_(resize4d)(state, gradOutput, 1, gradOutput->size(0), gradOutput->size(1), gradOutput->size(2)); + } + + int64_t inputWidth = input->size(3); + int64_t inputHeight = input->size(2); + int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1; + int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1; + + // Batch size + input planes + int64_t batchSize = input->size(0); + + // Resize output + THCTensor_(resize4d)(state, gradInput, batchSize, nInputPlane, inputHeight, inputWidth); + + // Resize temporary columns + THCTensor_(resize2d)(state, gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth); + + // Helpers + THCTensor *gradInput_n = THCTensor_(new)(state); + THCTensor *gradOutput_n = THCTensor_(new)(state); + + // For each elt in batch, do: + for (int elt = 0; elt < batchSize; elt ++) { + // Matrix mulitply per sample: + THCTensor_(select)(state, gradInput_n, gradInput, 0, elt); + THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt); + + // M,N,K are dims of matrix A and B + // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) + int64_t m = nInputPlane*kW*kH; + int64_t n = gradColumns->size(1); + int64_t k = nOutputPlane; + + // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) + at::cuda::blas::gemm( + 'n', 't', + n, m, k, + ScalarConvert::to(1), + THCTensor_(data)(state, gradOutput_n), n, + THCTensor_(data)(state, weight), m, + ScalarConvert::to(0), + THCTensor_(data)(state, gradColumns), n + ); + + // Unpack columns back into input: + at::native::col2im( + c10::cuda::getCurrentCUDAStream(), + THCTensor_(data)(state, gradColumns), + nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, + 1, 1, THCTensor_(data)(state, gradInput_n) + ); + } + + // Free + THCTensor_(free)(state, gradInput_n); + THCTensor_(free)(state, gradOutput_n); + THCTensor_(free)(state, weight); + + // Resize output + if (is_batch == 0) { + THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth); + THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth); + THCTensor_(resize3d)(state, gradInput, nInputPlane, inputHeight, inputWidth); + } + + THCTensor_(free)(state, input); + THCTensor_(free)(state, gradOutput); +} + +void THNN_(SpatialConvolutionMM_accGradParameters)( + THCState *state, + THCTensor *input, + THCTensor *gradOutput, + THCTensor *gradWeight, + THCTensor *gradBias, + THCTensor *columns, + THCTensor *ones, + int kW, int kH, + int dW, int dH, + int padW, int padH, + accreal scale_) { + scalar_t scale = ScalarConvert::to(scale_); + THCUNN_assertSameGPU(state, 5, input, gradOutput, gradWeight, gradBias, columns, ones); + if (gradWeight) { + THArgCheck(THCTensor_(isContiguous)(state, gradWeight), 4, "gradWeight needs to be contiguous"); + gradWeight = THNN_(newViewWeightMM2d)(state, gradWeight); + } + if (gradBias) { + THArgCheck(THCTensor_(isContiguous)(state, gradBias), 5, "gradBias needs to be contiguous"); + THArgCheck(THCTensor_(isContiguous)(state, ones), 7, "ones needs to be contiguous"); + } + + THNN_(SpatialConvolutionMM_shapeCheck) + (state, input, gradOutput, gradWeight, gradBias, kH, kW, 
dH, dW, padH, padW, 1); + + // Params + input = THCTensor_(newContiguous)(state, input); + gradOutput = THCTensor_(newContiguous)(state, gradOutput); + + int is_batch = 1; + if (input->dim() == 3) { + // Force batch + is_batch = 0; + THCTensor_(resize4d)(state, input, 1, input->size(0), input->size(1), input->size(2)); + THCTensor_(resize4d)(state, gradOutput, 1, gradOutput->size(0), gradOutput->size(1), gradOutput->size(2)); + } + + int64_t nInputPlane = input->size(1); + int64_t nOutputPlane = gradOutput->size(1); + + int64_t inputWidth = input->size(3); + int64_t inputHeight = input->size(2); + int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1; + int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1; + + // Batch size + input planes + int64_t batchSize = input->size(0); + + // Define a buffer of ones, for bias accumulation + if (ones->dim() != 2 || ones->size(0)*ones->size(1) < outputHeight*outputWidth) { + // Resize plane and fill with ones... + THCTensor_(resize2d)(state, ones, outputHeight, outputWidth); + THCTensor_(fill)(state, ones, ScalarConvert::to(1)); + } + + // Resize temporary columns + THCTensor_(resize2d)(state, columns, nInputPlane*kW*kH, outputHeight*outputWidth); + + // Helpers + THCTensor *input_n = THCTensor_(new)(state); + THCTensor *gradOutput_n = THCTensor_(new)(state); + + // For each elt in batch, do: + for (int elt = 0; elt < batchSize; elt ++) { + // Matrix mulitply per output: + THCTensor_(select)(state, gradOutput_n, gradOutput, 0, elt); + + // Do Weight: + if (gradWeight) { + // Matrix mulitply per output: + THCTensor_(select)(state, input_n, input, 0, elt); + + if (kW != 1 || kH != 1 || dW != 1 || dH != 1 || padH != 0 || padW != 0) { + // Extract columns: + at::native::im2col( + c10::cuda::getCurrentCUDAStream(), + THCTensor_(data)(state, input_n), + nInputPlane, inputHeight, inputWidth, + outputHeight, outputWidth, + kH, kW, padH, padW, dH, dW, + 1, 1, + columns->data() + ); + } + + // M,N,K are dims of matrix A and B + // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) + int64_t m = nOutputPlane; + int64_t n = nInputPlane*kW*kH; + int64_t k = columns->size(1); + + // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) + auto gemm_in_ptr = + (kW != 1 || kH != 1 || dW != 1 || dH != 1 || padH != 0 || padW != 0) + ? 
THCTensor_(data)(state, columns) + : THCTensor_(data)(state, input_n); + at::cuda::blas::gemm( + 't', 'n', + n, m, k, + scale, + gemm_in_ptr, k, + THCTensor_(data)(state, gradOutput_n), k, + ScalarConvert::to(1), + THCTensor_(data)(state, gradWeight), n + ); + } + + // Do Bias: + if (gradBias) { + // M,N,K are dims of matrix A and B + // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) + int64_t m_ = nOutputPlane; + int64_t k_ = outputHeight * outputWidth; + + // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) + //#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_BFLOAT16) + at::cuda::blas::gemv( + 't', + k_, m_, + scale, + THCTensor_(data)(state, gradOutput_n), k_, + THCTensor_(data)(state, ones), 1, + ScalarConvert::to(1), + THCTensor_(data)(state, gradBias), 1 + ); + } + } + + // Free + THCTensor_(free)(state, input_n); + THCTensor_(free)(state, gradOutput_n); + if (gradWeight) + THCTensor_(free)(state, gradWeight); + + // Resize + if (is_batch == 0) { + THCTensor_(resize3d)(state, gradOutput, nOutputPlane, outputHeight, outputWidth); + THCTensor_(resize3d)(state, input, nInputPlane, inputHeight, inputWidth); + } + + THCTensor_(free)(state, input); + THCTensor_(free)(state, gradOutput); +} + +#endif diff --git a/cuda_code/SpatialConvolution_1.cu b/cuda_code/SpatialConvolution_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..a2490988fb5e60792a7c3a98e5b4e03653af4ab2 --- /dev/null +++ b/cuda_code/SpatialConvolution_1.cu @@ -0,0 +1,205 @@ + +static int cunn_SpatialConvolution_updateOutput(lua_State *L) +{ + THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); + int dW = luaT_getfieldcheckint(L, 1, "dW"); + int dH = luaT_getfieldcheckint(L, 1, "dH"); + + THCudaTensor *weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor"); + THCudaTensor *bias = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "bias", "torch.CudaTensor"); + THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); + + luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected"); + + int dimw = 2; + int dimh = 1; + if (input->nDimension == 4) + { + dimw++; + dimh++; + } + + long nOutputPlane = weight->size[0]; + long kW = weight->size[3]; + long kH = weight->size[2]; + long inputWidth = input->size[dimw]; + long inputHeight = input->size[dimh]; + long outputWidth = (inputWidth - kW) / dW + 1; + long outputHeight = (inputHeight - kH) / dH + 1; + + if (input->nDimension == 3) + { + THCudaTensor_resize3d(output, nOutputPlane, outputHeight, outputWidth); + + /* add bias first */ + long k; + THCudaTensor *outputPlane = THCudaTensor_new(); + for(k=0; ksize[0],nOutputPlane, outputHeight, outputWidth); + + /* add bias first */ + long k,p; + THCudaTensor *outputPlane = THCudaTensor_new(); + THCudaTensor *outputBatch = THCudaTensor_new(); + for(p=0; psize[0]; p++) { + THCudaTensor_select(outputBatch, output, 0, p); + for(k=0; knDimension == 3) + { + /* check dims */ + THArgCheck(nOutputPlane == gradOutput->size[0], 1, "Number of output features is not equal to nOutputPlane"); + + /* gradient to input */ + THCudaTensor *tweight = THCudaTensor_newTranspose(weight,0,1); + THCudaTensor_conv2Dmv(gradInput, 0.0, gradOutput, tweight, dH, dW, "fc"); + THCudaTensor_free(tweight); + } + else + { + /* check dims */ + THArgCheck(nOutputPlane == gradOutput->size[1], 1, 
"Number of output features is not equal to nOutputPlane"); + + /* gradient to input */ + THCudaTensor *tweight = THCudaTensor_newTranspose(weight,0,1); + THCudaTensor_conv2Dmm(gradInput, 0.0, gradOutput, tweight, dH, dW, "fc"); + THCudaTensor_free(tweight); + } + + return 1; +} + +__global__ void compute_gradBias(float *gradBias, float *gradOutput, float scale, + int output_n, int output_h, int output_w) +{ + // each block does a plane + int k = blockIdx.x; + float *gradOutput_k = gradOutput + (k + threadIdx.y*output_n)*output_h*output_w; + + // offsets + int i_start = threadIdx.x; + int i_end = output_w*output_h; + int i_step = blockDim.x; + + int tid = threadIdx.x + threadIdx.y * blockDim.x; + int nthreads = blockDim.x * blockDim.y; + + // sum output plane k into partial sum array + __shared__ float sums[512]; + sums[tid] = 0; + for (int i=i_start; inDimension == 3) + { + /* check dims */ + THArgCheck(nOutputPlane == gradOutput->size[0], 1, "Number of output features is not equal to nOutputPlane"); + + /* gradient to bias */ + dim3 blocks(nOutputPlane); + dim3 threads(32); + compute_gradBias <<>> (gradBias_data, gradOutput_data, scale, + gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); + + /* gradient to kernels */ + THCudaTensor_conv2DRevger(gradWeight, 1.0, scale, input, gradOutput, dH, dW); + } + else + { + /* check dims */ + THArgCheck(nOutputPlane == gradOutput->size[1], 1, "Number of output features is not equal to nOutputPlane"); + + /* gradient to bias */ + dim3 blocks(nOutputPlane); + long sl; + for (sl=0; slsize[0]; sl+=16) { + int cst = 16; + if ((cst+sl) > gradOutput->size[0]) cst = gradOutput->size[0] - sl; + dim3 threads(16, cst); + compute_gradBias <<>> (gradBias_data, gradOutput_data + sl*gradOutput->stride[0], scale, + gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); + } + + /* gradient to kernels */ + THCudaTensor_conv2DRevgerm(gradWeight, 1.0, scale, input, gradOutput, dH, dW); + } + + return 0; +} + +static const struct luaL_Reg cunn_SpatialConvolution__ [] = { + {"SpatialConvolution_updateOutput", cunn_SpatialConvolution_updateOutput}, + {"SpatialConvolution_updateGradInput", cunn_SpatialConvolution_updateGradInput}, + {"SpatialConvolution_accGradParameters", cunn_SpatialConvolution_accGradParameters}, + {NULL, NULL} +}; + +static void cunn_SpatialConvolution_init(lua_State *L) +{ + luaT_pushmetatable(L, "torch.CudaTensor"); + luaT_registeratname(L, cunn_SpatialConvolution__, "nn"); + lua_pop(L,1); +} diff --git a/cuda_code/SpectralOps_2.cu b/cuda_code/SpectralOps_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..0dd2d741bc55d55103f82e318bbab9462b39dbaa --- /dev/null +++ b/cuda_code/SpectralOps_2.cu @@ -0,0 +1,635 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + + +namespace at { namespace native { + +using namespace at::native::detail; + +// Offset calculator for indexing in Hermitian mirrored order. 
+// In mirrored dims, maps linear index i to (n - i) % n +template +struct HermitianSymmetryOffsetCalculator { + using offset_type = at::detail::Array; + using dim_type = std::remove_cv_t; + dim_type dims; + IntDivider sizes_[MAX_DIMS]; + index_t strides_[MAX_DIMS]; + uint32_t mirror_dim_; // bit mask + static_assert(MAX_DIMS < 32, "Need a bigger mask type"); + + HermitianSymmetryOffsetCalculator( + IntArrayRef sizes, IntArrayRef strides, IntArrayRef dim, + const int64_t element_size){ + TORCH_INTERNAL_ASSERT(sizes.size() == strides.size()); + TORCH_INTERNAL_ASSERT(sizes.size() <= MAX_DIMS); + dims = sizes.size(); + + for (dim_type i = 0; i < MAX_DIMS; ++i) { + if (i < dims) { + sizes_[i] = IntDivider(sizes[i]); + strides_[i] = strides[i] / element_size; + } else { + sizes_[i] = IntDivider(1); + strides_[i] = 0; + } + } + + mirror_dim_ = 0; + for (int64_t i = 0; i < dim.size(); ++i) { + mirror_dim_ |= (uint32_t{1} << dim[i]); + } + } + + C10_HOST_DEVICE offset_type get(index_t linear_idx) const { + index_t offset = 0; + + for (dim_type dim = 0; dim < dims; ++dim) { + auto divmod = sizes_[dim].divmod(linear_idx); + linear_idx = divmod.div; + + if ((mirror_dim_ & (uint32_t{1} << dim)) == 0) { + offset += divmod.mod * strides_[dim]; + } else if (divmod.mod != 0) { + offset += (sizes_[dim].divisor - divmod.mod) * strides_[dim]; + } + } + offset_type offsets; + offsets[0] = offset; + return offsets; + } +}; + +// out[:] = conj(in[:]) where in and out ordering is generalized by offset calculators +template +C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS) +__global__ void _fft_conjugate_copy_kernel( + int64_t numel, scalar_t * out_data, const scalar_t * in_data, + inp_calc_t ic, out_calc_t oc) { + CUDA_KERNEL_LOOP_TYPE(index, numel, int64_t) { + auto in_offset = ic.get(index)[0]; + auto out_offset = oc.get(index)[0]; + out_data[out_offset] = std::conj(in_data[in_offset]); + } +} + +// In real-to-complex transform, cuFFT only fills half of the values due to +// conjugate symmetry. See native/SpectralUtils.h for more details. +// The following function fills in the other half with symmetry in +// case of real-to-complex transform with onesided=False flag. +// See NOTE [ Fourier Transform Conjugate Symmetry ] in native/SpectralOpsUtils.h. + +// input should be a tensor of same size as full (twosided) +// signals, but only contains half (onesided) of the values. +// This function modifies inplace. +void _fft_fill_with_conjugate_symmetry_cuda_( + ScalarType dtype, IntArrayRef mirror_dims, IntArrayRef signal_half_sizes, + IntArrayRef in_strides, const void * in_data, + IntArrayRef out_strides, void * out_data) { + // Do the actual conjugate mirroring. 
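  // --- Illustrative sketch (not part of the original file) --------------------------------------
  // CUDA_KERNEL_LOOP_TYPE in the copy kernel above expands to a grid-stride loop, which lets a
  // fixed-size launch cover an arbitrary number of elements. Written out by hand for a simple
  // elementwise conjugate copy, treating complex<float> as float2; the kernel name is hypothetical.
  __global__ void conjugateCopyGridStride(const float2* in, float2* out, int64_t numel)
  {
      for (int64_t i = blockIdx.x * (int64_t)blockDim.x + threadIdx.x;
           i < numel;
           i += (int64_t)blockDim.x * gridDim.x) {        // stride by the total number of threads
          out[i] = make_float2(in[i].x, -in[i].y);        // conj(a + bi) = a - bi
      }
  }
  // -----------------------------------------------------------------------------------------------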
+ // TODO: consider adding a 32bit indexed kernel for improved performance + auto* in_strides_ptr = in_strides.data(); + const int ndim = in_strides.size(); + const int64_t element_size = scalarTypeToTypeMeta(dtype).itemsize(); + OffsetCalculator<1, int64_t> input_offset_calculator( + ndim, signal_half_sizes.data(), &in_strides_ptr, &element_size); + HermitianSymmetryOffsetCalculator output_offset_calculator( + signal_half_sizes, out_strides, mirror_dims, element_size); + + const auto numel = c10::multiply_integers(signal_half_sizes); + AT_DISPATCH_COMPLEX_TYPES(dtype, "_fft_fill_with_conjugate_symmetry", [&] { + using namespace cuda::detail; + _fft_conjugate_copy_kernel<<< + GET_BLOCKS(numel), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( + numel, + static_cast(out_data), + static_cast(in_data), + input_offset_calculator, + output_offset_calculator); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); +} + +REGISTER_DISPATCH(fft_fill_with_conjugate_symmetry_stub, &_fft_fill_with_conjugate_symmetry_cuda_); + +// Execute a pre-planned tranform +static void exec_cufft_plan( + const CuFFTConfig &config, void* in_data, void* out_data, bool forward) { + auto& plan = config.plan(); +#ifdef __HIP_PLATFORM_HCC__ + auto value_type = config.data_type(); + if (value_type == kFloat) { + switch (config.transform_type()) { + case CuFFTTransformType::C2C: { + CUFFT_CHECK(hipfftExecC2C(plan, static_cast(in_data), + static_cast(out_data), + forward ? HIPFFT_FORWARD : HIPFFT_BACKWARD)); + return; + } + case CuFFTTransformType::R2C: { + CUFFT_CHECK(hipfftExecR2C(plan, static_cast(in_data), + static_cast(out_data))); + return; + } + case CuFFTTransformType::C2R: { + CUFFT_CHECK(hipfftExecC2R(plan, static_cast(in_data), + static_cast(out_data))); + return; + } + } + } else if (value_type == kDouble) { + switch (config.transform_type()) { + case CuFFTTransformType::C2C: { + CUFFT_CHECK(hipfftExecZ2Z(plan, static_cast(in_data), + static_cast(out_data), + forward ? HIPFFT_FORWARD : HIPFFT_BACKWARD)); + return; + } + case CuFFTTransformType::R2C: { + CUFFT_CHECK(hipfftExecD2Z(plan, static_cast(in_data), + static_cast(out_data))); + return; + } + case CuFFTTransformType::C2R: { + CUFFT_CHECK(hipfftExecZ2D(plan, static_cast(in_data), + static_cast(out_data))); + return; + } + } + } + TORCH_CHECK(false, "hipFFT doesn't support transforms on type: ", value_type); +#else + CUFFT_CHECK(cufftXtExec(plan, in_data, out_data, + forward ? CUFFT_FORWARD : CUFFT_INVERSE)); +#endif +} + + +// NOTE [ cuFFT Embedded Strides ] +// +// cuFFT supports a subset of arbitrary strides via their "advanced data layout" +// option (http://docs.nvidia.com/cuda/cufft/index.html#advanced-data-layout). +// Specifically, these are tensors that can be viewed as subtensors resulted +// from slicing a larger contiguous tensors. For such input tensors, let the +// sizes of the enclosing tensor be `inembed`, and we can have in 3d case: +// +// input[x, y, z] = input[((x * inembed[1] + y) * inembed[2] + z)] +// +// Above is the simplified formula ignoring the batch dimension. In fact, the +// last dimension of the enclosing tensor doesn't have to be contiguous, i.e., +// it can be greater than 1. Then one can set the base stride for the enclosing +// tensor with `istride`. 
Then we have +// +// input[x, y, z] = input[((x * inembed[1] + y) * inembed[2] + z) * istride] +// +// For example, consider +// +// enclosing = torch.zeros(6, 8, 10) # contiguous +// input = enclosing[:4, 2:6, 6:] +// input.size() # [ 4, 4, 4] +// input.stride() # [80, 10, 1] +// # inembed = [6, 8, 10] +// input[2, 1, 3] = input[((2 * 8) + 1) * 10 + 3] # using above formula +// = input[173] +// = input[2 * 80 + 1 * 10 + 1 * 3] # using strides directly +// +// Generally, the embedded strides can be computed as +// +// embed[i] = stride[i - 1] / stride[i]. +// +// Note that the value of embed[0] isn't used to compute indices and doesn't +// matter. +// +// Contrary to advanced data layout, simple layout means that *embeds have +// unit-strides. In particular, unit-stride refers to that the input and output +// tensors being contiguous, and that the strides at the innermost signal +// dimension being unit (1) w.r.t. the corresponding data type. + +static inline Tensor _run_cufft( + const CuFFTConfig &config, Tensor& input, int64_t signal_ndim, + bool complex_input, bool complex_output, bool inverse, + IntArrayRef checked_signal_sizes, fft_norm_mode norm, bool onesided, + IntArrayRef output_sizes, bool input_was_cloned +) { + if (config.should_clone_input() && !input_was_cloned) { + input = input.clone(at::MemoryFormat::Contiguous); + } + + auto& plan = config.plan(); + auto& ctx = at::globalContext(); + + // set output + auto output = at::empty(output_sizes, input.options()); + + // set to current stream + CUFFT_CHECK(cufftSetStream(plan, at::cuda::getCurrentCUDAStream())); + + auto ws = at::empty({ config.workspace_size() }, at::device(at::kCUDA).dtype(at::kByte)); + CUFFT_CHECK(cufftSetWorkArea(plan, ws.data_ptr())); + + // run + exec_cufft_plan(config, input.data_ptr(), output.data_ptr(), !inverse); + + // rescale if requested + auto size_last_signal_dim = checked_signal_sizes[signal_ndim - 1]; + if (norm != fft_norm_mode::none) { + auto signal_numel = c10::multiply_integers(checked_signal_sizes); + double scale_denom; + if (norm == fft_norm_mode::by_root_n) { + scale_denom = std::sqrt(static_cast(signal_numel)); + } else { + scale_denom = static_cast(signal_numel); + } + if (!complex_input && complex_output && !onesided) { + auto end_data_slice = infer_ft_real_to_complex_onesided_size(size_last_signal_dim); + output.narrow(signal_ndim, 0, end_data_slice).div_(scale_denom); + } else { + output.div_(scale_denom); + } + } + + // if needed, fill out the other half using conjugate symmetry + if (!complex_input && complex_output && !onesided) { + DimVector signal_dims(signal_ndim); + std::iota(signal_dims.begin(), signal_dims.end(), 1); + auto out_as_complex = at::view_as_complex(output); + at::native::_fft_fill_with_conjugate_symmetry_(out_as_complex, signal_dims); + } + return output; +} + +// The cuFFT plan cache +// unique_ptr for nullability and to avoid reference invalidation on vector resize +static std::vector> plan_caches; +static std::mutex plan_caches_mutex; + +static inline +CuFFTParamsLRUCache &cufft_get_plan_cache(int64_t device_index) { + std::lock_guard guard(plan_caches_mutex); + + AT_ASSERT(device_index >= 0); + + if (device_index >= plan_caches.size()) { + plan_caches.resize(device_index + 1); + } + + if (!plan_caches[device_index]) { + plan_caches[device_index] = std::make_unique(); + } + + return *plan_caches[device_index]; +} + + +namespace detail { + +int64_t cufft_get_plan_cache_max_size_impl(int64_t device_index) { + TORCH_CHECK(0 <= device_index && device_index < 
at::detail::getCUDAHooks().getNumGPUs(), + "cufft_get_plan_cache_max_size: expected 0 <= device_index < ", + at::detail::getCUDAHooks().getNumGPUs(), "], but got device_index=", + device_index); + return cufft_get_plan_cache(device_index).max_size(); +} + +void cufft_set_plan_cache_max_size_impl(int64_t device_index, int64_t max_size) { + TORCH_CHECK(0 <= device_index && device_index < at::detail::getCUDAHooks().getNumGPUs(), + "cufft_set_plan_cache_max_size: expected 0 <= device_index < ", + at::detail::getCUDAHooks().getNumGPUs(), "], but got device_index=", + device_index); + return cufft_get_plan_cache(device_index).resize(max_size); +} + +int64_t cufft_get_plan_cache_size_impl(int64_t device_index) { + TORCH_CHECK(0 <= device_index && device_index < at::detail::getCUDAHooks().getNumGPUs(), + "cufft_get_plan_cache_size: expected 0 <= device_index < ", + at::detail::getCUDAHooks().getNumGPUs(), "], but got device_index=", + device_index); + return cufft_get_plan_cache(device_index).size(); +} + +void cufft_clear_plan_cache_impl(int64_t device_index) { + TORCH_CHECK(0 <= device_index && device_index < at::detail::getCUDAHooks().getNumGPUs(), + "cufft_clear_plan_cache: expected 0 <= device_index < ", + at::detail::getCUDAHooks().getNumGPUs(), "], but got device_index=", + device_index); + return cufft_get_plan_cache(device_index).clear(); +} + +} // namespace at::native::detail + +namespace { +constexpr int64_t cufft_max_ndim = 3; + +// Execute a general fft operation (can be c2c, onesided r2c or onesided c2r) +static Tensor& _exec_fft(Tensor& out, const Tensor& self, IntArrayRef out_sizes, + IntArrayRef dim, bool forward) { + const auto ndim = self.dim(); + const int64_t signal_ndim = dim.size(); + const auto batch_dims = ndim - signal_ndim; + + // Permute dimensions so batch dimensions come first, and in stride order + // This maximizes data locality when collapsing to a single batch dimension + DimVector dim_permute(ndim); + std::iota(dim_permute.begin(), dim_permute.end(), int64_t{0}); + + c10::SmallVector is_transformed_dim(ndim); + for (const auto& d : dim) { + is_transformed_dim[d] = true; + } + auto batch_end = std::partition(dim_permute.begin(), dim_permute.end(), + [&](int64_t d) {return !is_transformed_dim[d]; }); + auto self_strides = self.strides(); + std::sort(dim_permute.begin(), batch_end, + [&](int64_t a, int64_t b) { return self_strides[a] > self_strides[b]; }); + std::copy(dim.cbegin(), dim.cend(), batch_end); + auto input = self.permute(dim_permute); + + // Collapse batch dimensions into a single dimension + DimVector batched_sizes(signal_ndim + 1); + batched_sizes[0] = -1; + std::copy(input.sizes().cbegin() + batch_dims, input.sizes().cend(), batched_sizes.begin() + 1); + input = input.reshape(batched_sizes); + + const auto batch_size = input.sizes()[0]; + DimVector signal_size(signal_ndim + 1); + signal_size[0] = batch_size; + for (int64_t i = 0; i < signal_ndim; ++i) { + auto in_size = input.sizes()[i + 1]; + auto out_size = out_sizes[dim[i]]; + signal_size[i + 1] = std::max(in_size, out_size); + TORCH_INTERNAL_ASSERT(in_size == signal_size[i + 1] || + in_size == (signal_size[i + 1] / 2) + 1); + TORCH_INTERNAL_ASSERT(out_size == signal_size[i + 1] || + out_size == (signal_size[i + 1] / 2) + 1); + } + + batched_sizes[0] = batch_size; + DimVector batched_out_sizes(batched_sizes.begin(), batched_sizes.end()); + for (size_t i = 0; i < dim.size(); ++i) { + batched_out_sizes[i + 1] = out_sizes[dim[i]]; + } + out.resize_(batched_out_sizes, MemoryFormat::Contiguous); + + // 
Create the transform plan (either from cache or locally) + const auto value_type = c10::toValueType(input.scalar_type()); + auto fft_type = GetCuFFTTransformType(input.is_complex(), out.is_complex()); + CuFFTParams Params(input.strides(), out.strides(), signal_size, fft_type, value_type); + CuFFTParamsLRUCache& plan_cache = cufft_get_plan_cache(input.device().index()); + std::unique_lock guard(plan_cache.mutex, std::defer_lock); + c10::optional uncached_plan; + const CuFFTConfig * config = nullptr; + + if (plan_cache.max_size() > 0) { + guard.lock(); + if (plan_cache.max_size() > 0) { // check again after acquiring the lock + config = &plan_cache.lookup(Params); + } + } + + if (config == nullptr) { + uncached_plan.emplace(Params); + config = &uncached_plan.value(); + } + + auto & plan = config->plan(); + + if (config->should_clone_input()) { + input = input.clone(MemoryFormat::Contiguous); + } + + // prepare cufft for execution + CUFFT_CHECK(cufftSetStream(plan, at::cuda::getCurrentCUDAStream())); + auto workspace = at::empty({ config->workspace_size() }, at::device(at::kCUDA).dtype(at::kByte)); + CUFFT_CHECK(cufftSetWorkArea(plan, workspace.data_ptr())); + + // execute transform plan + exec_cufft_plan(*config, input.data_ptr(), out.data_ptr(), forward); + + // Inplace reshaping to original batch shape and inverting the dimension permutation + DimVector out_strides(ndim); + int64_t batch_numel = 1; + for (int64_t i = batch_dims - 1; i >= 0; --i) { + out_strides[dim_permute[i]] = batch_numel * out.strides()[0]; + batch_numel *= out_sizes[dim_permute[i]]; + } + for (int64_t i = batch_dims; i < ndim; ++i) { + out_strides[dim_permute[i]] = out.strides()[1 + (i - batch_dims)]; + } + return out.as_strided_(out_sizes, out_strides, out.storage_offset()); +} + +// Calculates the normalization constant and applies it in-place to self +// sizes is the sizes of a twosided tensor and dims are all transformed dims +double _fft_normalization_scale(int64_t normalization, IntArrayRef sizes, IntArrayRef dims) { + auto norm = static_cast(normalization); + if (norm == fft_norm_mode::none) { + return 1.0; + } + + int64_t signal_numel = 1; + for (auto dim : dims) { + signal_numel *= sizes[dim]; + } + const double scale_denom = (norm == fft_norm_mode::by_root_n) ? + std::sqrt(signal_numel) : static_cast(signal_numel); + return 1.0 / scale_denom; +} + +const Tensor& _fft_apply_normalization(const Tensor& self, int64_t normalization, IntArrayRef sizes, IntArrayRef dims) { + auto scale = _fft_normalization_scale(normalization, sizes, dims); + return (scale == 1.0) ? self : self.mul_(scale); +} + +Tensor& _fft_apply_normalization_out(Tensor& out, const Tensor& self, int64_t normalization, IntArrayRef sizes, IntArrayRef dims) { + auto scale = _fft_normalization_scale(normalization, sizes, dims); + return at::mul_out(out, self, c10::scalar_to_tensor(scale)); +} + +} // namespace (anonymous) + +// n-dimensional real to complex FFT +Tensor _fft_r2c_cufft(const Tensor& self, IntArrayRef dim, int64_t normalization, bool onesided) { + TORCH_CHECK(self.is_floating_point()); + auto input_sizes = self.sizes(); + DimVector onesided_sizes(input_sizes.begin(), input_sizes.end()); + auto last_dim = dim.back(); + auto last_dim_halfsize = (input_sizes[last_dim]) / 2 + 1; + onesided_sizes[last_dim] = last_dim_halfsize; + IntArrayRef out_sizes = onesided ? 
onesided_sizes : input_sizes; + + const auto out_options = self.options().dtype(c10::toComplexType(self.scalar_type())); + auto output = at::empty(out_sizes, out_options); + + // CuFFT requires real input to be over-aligned, as if it were complex + const auto complex_size = 2 * self.element_size(); + const bool complex_aligned = ( + reinterpret_cast(self.data_ptr()) % complex_size == 0); + auto working_tensor = self; + if (!complex_aligned) { + working_tensor = self.movedim(last_dim, -1) + .clone(MemoryFormat::Contiguous) + .movedim(-1, last_dim); + } + + // First do the R2C transform on the last dimension + { + auto target_sizes = dim.size() == 1 ? out_sizes : onesided_sizes; + _exec_fft(output, working_tensor, target_sizes, last_dim, /*forward=*/true); + if (dim.size() > 1) { + working_tensor = at::empty(out_sizes, out_options); + } + } + + // Then any remaining C2C transforms + DimVector sorted_dims(dim.begin(), dim.end() - 1); + while (!sorted_dims.empty()) { + std::swap(output, working_tensor); + + // Resort dimensions every time as _exec_fft re-strides the output + auto strides = working_tensor.strides(); + std::sort(sorted_dims.begin(), sorted_dims.end(), + [&](int64_t a, int64_t b) { return strides[a] > strides[b]; }); + + const auto max_dims = std::min(static_cast(cufft_max_ndim), sorted_dims.size()); + auto last_dims = IntArrayRef(sorted_dims).slice(sorted_dims.size() - max_dims, max_dims); + + // Intermediate results are always onesided + _exec_fft(output, working_tensor, onesided_sizes, last_dims, /*forward=*/true); + sorted_dims.resize(sorted_dims.size() - max_dims); + } + + // Only need to normalize the onesided slice since data in the other half is overwritten + auto out_slice = output.slice(last_dim, 0, last_dim_halfsize); + _fft_apply_normalization(out_slice, normalization, input_sizes, dim); + + if (!onesided) { + if (output.sizes()[last_dim] != out_sizes[last_dim]) { + working_tensor.resize_(out_sizes, MemoryFormat::Contiguous); + working_tensor.slice(last_dim, 0, last_dim_halfsize).copy_(output); + output = std::move(working_tensor); + } + at::native::_fft_fill_with_conjugate_symmetry_(output, dim); + } + return output; +} + +Tensor& _fft_r2c_cufft_out(const Tensor& self, IntArrayRef dim, + int64_t normalization, bool onesided, Tensor& out) { + auto result = _fft_r2c_cufft(self, dim, static_cast(fft_norm_mode::none), /*onesided=*/true); + if (onesided) { + return _fft_apply_normalization_out(out, result, normalization, self.sizes(), dim); + } + + resize_output(out, self.sizes()); + + auto last_dim = dim.back(); + auto last_dim_halfsize = result.sizes()[last_dim]; + auto out_slice = out.slice(last_dim, 0, last_dim_halfsize); + _fft_apply_normalization_out(out_slice, result, normalization, self.sizes(), dim); + at::native::_fft_fill_with_conjugate_symmetry_(out, dim); + return out; +} + +// n-dimensional complex to real IFFT +Tensor _fft_c2r_cufft(const Tensor& self, IntArrayRef dim, int64_t normalization, int64_t lastdim) { + TORCH_CHECK(self.is_complex()); + auto in_sizes = self.sizes(); + DimVector out_sizes(in_sizes.begin(), in_sizes.end()); + out_sizes[dim.back()] = lastdim; + + // First complete any C2C transforms + Tensor temp; + if (dim.size() > 1) { + temp = _fft_c2c_cufft( + self, dim.slice(0, dim.size() - 1), + static_cast(fft_norm_mode::none), /*forward=*/false); + } else { + // Complex to real FFTs may overwrite the input buffer, so must always clone (gh-34551) + temp = self.clone(MemoryFormat::Contiguous); + } + + // Finally, do a 1D C2R transform + // TODO: 
could transform up to 2 other dims in the same cuFFT operation + auto output = at::empty(out_sizes, self.options().dtype(c10::toValueType(self.scalar_type()))); + _exec_fft(output, temp, out_sizes, dim.back(), /*forward=*/false); + return _fft_apply_normalization(output, normalization, out_sizes, dim); +} + +Tensor& _fft_c2r_cufft_out(const Tensor& self, IntArrayRef dim, + int64_t normalization, int64_t lastdim, Tensor& out) { + auto result = _fft_c2r_cufft(self, dim, static_cast(fft_norm_mode::none), lastdim); + return _fft_apply_normalization_out(out, result, normalization, result.sizes(), dim); +} + +// n-dimensional complex to complex FFT/IFFT +Tensor _fft_c2c_cufft(const Tensor& self, IntArrayRef dim, int64_t normalization, bool forward) { + TORCH_CHECK(self.is_complex()); + if (dim.empty()) { + return self.clone(); + } + + auto out_sizes = self.sizes(); + auto output = at::empty(out_sizes, self.options()); + + // Perform any number of C2C transforms + DimVector sorted_dims(dim.begin(), dim.end()); + auto self_strides = self.strides(); + auto working_tensor = self; + while (true) { + // Sort dimensions every time as _exec_fft re-strides the output + auto strides = working_tensor.strides(); + std::sort(sorted_dims.begin(), sorted_dims.end(), + [&](int64_t a, int64_t b) { return strides[a] > strides[b]; }); + + const auto max_dims = std::min(static_cast(cufft_max_ndim), sorted_dims.size()); + auto first_dims = IntArrayRef(sorted_dims).slice(sorted_dims.size() - max_dims, max_dims); + + _exec_fft(output, working_tensor, out_sizes, first_dims, forward); + sorted_dims.resize(sorted_dims.size() - max_dims); + + if (sorted_dims.empty()) { + break; + } + + if (working_tensor.is_same(self)) { + working_tensor = std::move(output); + output = at::empty(out_sizes, self.options()); + } else { + std::swap(output, working_tensor); + } + } + + return _fft_apply_normalization(output, normalization, out_sizes, dim); +} + +Tensor& _fft_c2c_cufft_out(const Tensor& self, IntArrayRef dim, + int64_t normalization, bool forward, Tensor& out) { + auto result = _fft_c2c_cufft(self, dim, static_cast(fft_norm_mode::none), forward); + return _fft_apply_normalization_out(out, result, normalization, result.sizes(), dim); +} + + +}} // at::native diff --git a/cuda_code/SpectralOps_7.cu b/cuda_code/SpectralOps_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..4a91f58e61ec435425492144d577c68c363ac4e8 --- /dev/null +++ b/cuda_code/SpectralOps_7.cu @@ -0,0 +1,134 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include + + +namespace at { namespace native { + +using namespace at::native::detail; + +// Offset calculator for indexing in Hermitian mirrored order. 
+// In mirrored dims, maps linear index i to (n - i) % n +template +struct HermitianSymmetryOffsetCalculator { + using offset_type = at::detail::Array; + using dim_type = std::remove_cv_t; + dim_type dims; + at::cuda::detail::IntDivider sizes_[MAX_DIMS]; + index_t strides_[MAX_DIMS]; + uint32_t mirror_dim_; // bit mask + static_assert(MAX_DIMS < 32, "Need a bigger mask type"); + + HermitianSymmetryOffsetCalculator( + IntArrayRef sizes, IntArrayRef strides, IntArrayRef dim, + const int64_t element_size){ + TORCH_INTERNAL_ASSERT(sizes.size() == strides.size()); + TORCH_INTERNAL_ASSERT(sizes.size() <= MAX_DIMS); + dims = sizes.size(); + + using at::cuda::detail::IntDivider; + for (dim_type i = 0; i < MAX_DIMS; ++i) { + if (i < dims) { + sizes_[i] = IntDivider(sizes[i]); + strides_[i] = strides[i] / element_size; + } else { + sizes_[i] = IntDivider(1); + strides_[i] = 0; + } + } + + mirror_dim_ = 0; + for (int64_t i = 0; i < dim.size(); ++i) { + mirror_dim_ |= (uint32_t{1} << dim[i]); + } + } + + C10_HOST_DEVICE offset_type get(index_t linear_idx) const { + index_t offset = 0; + + for (dim_type dim = 0; dim < dims; ++dim) { + auto divmod = sizes_[dim].divmod(linear_idx); + linear_idx = divmod.div; + + if ((mirror_dim_ & (uint32_t{1} << dim)) == 0) { + offset += divmod.mod * strides_[dim]; + } else if (divmod.mod != 0) { + offset += (sizes_[dim].divisor - divmod.mod) * strides_[dim]; + } + } + offset_type offsets; + offsets[0] = offset; + return offsets; + } +}; + + +// out[:] = conj(in[:]) where in and out ordering is generalized by offset calculators +template +C10_LAUNCH_BOUNDS_1(cuda::detail::CUDA_NUM_THREADS) +__global__ void _fft_conjugate_copy_kernel( + int64_t numel, scalar_t * out_data, const scalar_t * in_data, + inp_calc_t ic, out_calc_t oc) { + CUDA_KERNEL_LOOP_TYPE(index, numel, int64_t) { + auto in_offset = ic.get(index)[0]; + auto out_offset = oc.get(index)[0]; + out_data[out_offset] = std::conj(in_data[in_offset]); + } +} + +// In real-to-complex transform, cuFFT only fills half of the values due to +// conjugate symmetry. See native/SpectralUtils.h for more details. +// The following function fills in the other half with symmetry in +// case of real-to-complex transform with onesided=False flag. +// See NOTE [ Fourier Transform Conjugate Symmetry ] in native/SpectralOpsUtils.h. + +// input should be a tensor of same size as full (twosided) +// signals, but only contains half (onesided) of the values. +// This function modifies inplace. +void _fft_fill_with_conjugate_symmetry_cuda_( + ScalarType dtype, IntArrayRef mirror_dims, IntArrayRef signal_half_sizes, + IntArrayRef in_strides, const void * in_data, + IntArrayRef out_strides, void * out_data) { + // Do the actual conjugate mirroring. 
+ // TODO: consider adding a 32bit indexed kernel for improved performance + auto* in_strides_ptr = in_strides.data(); + const int ndim = in_strides.size(); + const int64_t element_size = scalarTypeToTypeMeta(dtype).itemsize(); + OffsetCalculator<1, int64_t> input_offset_calculator( + ndim, signal_half_sizes.data(), &in_strides_ptr, &element_size); + HermitianSymmetryOffsetCalculator output_offset_calculator( + signal_half_sizes, out_strides, mirror_dims, element_size); + + const auto numel = c10::multiply_integers(signal_half_sizes); + AT_DISPATCH_COMPLEX_TYPES(dtype, "_fft_fill_with_conjugate_symmetry", [&] { + using namespace cuda::detail; + _fft_conjugate_copy_kernel<<< + GET_BLOCKS(numel), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( + numel, + static_cast(out_data), + static_cast(in_data), + input_offset_calculator, + output_offset_calculator); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); +} + +REGISTER_DISPATCH(fft_fill_with_conjugate_symmetry_stub, &_fft_fill_with_conjugate_symmetry_cuda_); + +}} // at::native diff --git a/cuda_code/Square_6.cu b/cuda_code/Square_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..b671347b7e7dfe124e7ec6c1c09f06930eb21330 --- /dev/null +++ b/cuda_code/Square_6.cu @@ -0,0 +1,31 @@ +#include "THCUNN.h" + +struct squareupdateOutput_functor +{ + __device__ void operator()(float* output, const float* input) const + { + *output = (*input) * (*input); + } +}; + +void THNN_CudaSquare_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output) +{ + THAssert(THCudaTensor_checkGPU(state, 2, input, output)); + THCudaTensor_resizeAs(state, output, input); + THCudaTensor_pointwiseApply2(state, output, input, squareupdateOutput_functor()); +} + +struct squareupdateGradInput_functor +{ + __device__ void operator()(float* gradInput, const float* input, const float* gradOutput) const + { + *gradInput = 2.0 * (*gradOutput) * (*input); + } +}; + +void THNN_CudaSquare_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput) +{ + THAssert(THCudaTensor_checkGPU(state, 3, input, gradOutput, gradInput)); + THCudaTensor_resizeAs(state, gradInput, input); + THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput, squareupdateGradInput_functor()); +} diff --git a/cuda_code/SumArray0CP.cu b/cuda_code/SumArray0CP.cu new file mode 100644 index 0000000000000000000000000000000000000000..8e901703ef7ac475a1dfb28ba35d6c54a38bd6cb --- /dev/null +++ b/cuda_code/SumArray0CP.cu @@ -0,0 +1,164 @@ +#include +#include + +#define CHECK(call) \ +{ \ + const cudaError_t error = call; \ + if (error != cudaSuccess) \ + { \ + printf("Error: %s:%d, ", __FILE__, __LINE__); \ + printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \ + exit(1); \ + } \ +} + +void checkResult(float *hostRef, float *gpuRef, const int N) { + double epsilon = 1.0E-8; + bool match = 1; + for (int i=0; i epsilon) { + match = 0; + printf("Arrays do not match!\n"); + printf("host %5.2f gpu %5.2f at current %d\n",hostRef[i],gpuRef[i],i); + break; + } + } + if (match) printf("Arrays match.\n\n"); +} + +void initialData(float *ip,int size) { + // generate different seed for random number + time_t t; + srand((unsigned) time(&t)); + for (int i=0; i1) ipower = atoi(argv[1]); + int nElem = 1<>>(d_A, d_B, d_C, nElem); + + // copy kernel result back to host side + cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); + + // check device results + checkResult(hostRef, gpuRef, nElem); + + // free device global memory + 
cudaFree(d_A); + cudaFree(d_B); + free(h_A); + free(h_B); + + // part 2: using zerocopy memory for array A and B + // allocate zerocpy memory + unsigned int flags = cudaHostAllocMapped; + cudaHostAlloc((void **)&h_A, nBytes, flags); + cudaHostAlloc((void **)&h_B, nBytes, flags); + + // initialize data at host side + initialData(h_A, nElem); + initialData(h_B, nElem); + memset(hostRef, 0, nBytes); + memset(gpuRef, 0, nBytes); + + // pass the pointer to device + cudaHostGetDevicePointer((void **)&d_A, (void *)h_A, 0); + cudaHostGetDevicePointer((void **)&d_B, (void *)h_B, 0); + + // add at host side for result checks + sumArraysOnHost(h_A, h_B, hostRef, nElem); + + // execute kernel with zero copy memory + sumArraysZeroCopy <<>>(d_A, d_B, d_C, nElem); + + // copy kernel result back to host side + cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); + + // check device results + checkResult(hostRef, gpuRef, nElem); + + // free memory + cudaFree(d_C); + cudaFreeHost(h_A); + cudaFreeHost(h_B); + free(hostRef); + free(gpuRef); + + // reset device + cudaDeviceReset(); + return EXIT_SUCCESS; +} \ No newline at end of file diff --git a/cuda_code/SummaryOps_2.cu b/cuda_code/SummaryOps_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..7ce7cc2801d70864347860a4abdfd6ae51c34b6c --- /dev/null +++ b/cuda_code/SummaryOps_2.cu @@ -0,0 +1,429 @@ +#include +#include +#include + +#include +#include + +namespace at { +namespace cuda { +#define THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM 100 +#define THRESH_NUMBER_BINS_FOR_GLOBAL_MEM 1000 +#define FOR_KERNEL_LOOP(i, lim) \ + for (IndexType i = blockIdx.x * blockDim.x + threadIdx.x; i < lim; \ + i += gridDim.x * blockDim.x) + +/* + Memory types used for the 3 histogram implementations. + See `CUDA_tensor_histogram` below. + */ +enum class CUDAHistogramMemoryType { SHARED, MULTI_BLOCK, GLOBAL }; +namespace { + template + __device__ static IndexType getBin(input_t bVal, input_t minvalue, input_t maxvalue, int64_t nbins) { + IndexType bin = (int)((bVal - minvalue) * nbins / (maxvalue - minvalue)); + // (only applicable for histc) + // while each bin is inclusive at the lower end and exclusive at the higher, i.e. [start, end) + // the last bin is inclusive at both, i.e. [start, end], in order to include maxvalue if exists + // therefore when bin == nbins, adjust bin to the last bin + if (bin == nbins) bin -= 1; + return bin; + } +} + +/* + Kernel for computing the histogram of the input. 
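+  Summary of the kernel body below: each thread walks the input with a
+  grid-stride loop (FOR_KERNEL_LOOP), maps each in-range value to a bin via
+  getBin(), and accumulates with gpuAtomicAdd into shared memory, a per-block
+  slice of global memory, or the output tensor directly, depending on the
+  MemoryType template parameter.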
+ */ +template < + typename output_t, + typename input_t, + typename IndexType, + int ADims, + int PDims, + int BDims, + CUDAHistogramMemoryType MemoryType = CUDAHistogramMemoryType::MULTI_BLOCK, + typename Op> +#ifdef __HIP_PLATFORM_HCC__ +C10_LAUNCH_BOUNDS_1(512) +#endif +__global__ void kernelHistogram1D( + detail::TensorInfo a, /* output */ + detail::TensorInfo p, /* partial output */ + detail::TensorInfo b, /* input */ + int64_t nbins, + input_t minvalue, + input_t maxvalue, + IndexType totalElements, + Op getOp) { + extern __shared__ unsigned char my_smem[]; + output_t* smem = nullptr; + + if (MemoryType == CUDAHistogramMemoryType::SHARED) { + ////////////////////////// Shared memory ////////////////////////// + // atomically add to block specific shared memory + // then atomically add to the global output tensor + smem = reinterpret_cast(my_smem); + for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { + smem[i] = 0; + } + __syncthreads(); + FOR_KERNEL_LOOP(linearIndex, totalElements) { + // Convert `linearIndex` into an offset of `b` + const IndexType bOffset = + detail::IndexToOffset::get(linearIndex, b); + const auto bVal = b.data[bOffset]; + if (bVal >= minvalue && bVal <= maxvalue) { + // Use value at `b` as an offset of `smem` + const IndexType bin = getBin(bVal, minvalue, maxvalue, nbins); + gpuAtomicAdd(&smem[bin], getOp(linearIndex)); + } + } + __syncthreads(); + // NOTE: atomically update output bin count. + // Atomic update is imp since __syncthread() will only synchronize threads + // in a given block, not across blocks. + for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { + const IndexType aOffset = + detail::IndexToOffset::get(i, a); + gpuAtomicAdd(&a.data[aOffset], smem[i]); + } + + } else if (MemoryType == CUDAHistogramMemoryType::MULTI_BLOCK) { + ////////////////////////// Multi Block memory ////////////////////////// + // atomically add to block specific global tensor + // then atomically add to the global output tensor + // compute histogram for the block + FOR_KERNEL_LOOP(linearIndex, totalElements) { + // Convert `linearIndex` into an offset of `b` + const IndexType bOffset = + detail::IndexToOffset::get(linearIndex, b); + const auto bVal = b.data[bOffset]; + if (bVal >= minvalue && bVal <= maxvalue) { + // Use value at `b` as an offset of `p` + const IndexType bin = getBin(bVal, minvalue, maxvalue, nbins); + const IndexType pIdx = p.strides[0] * blockIdx.x + bin; + const IndexType pOffset = + detail::IndexToOffset::get(pIdx, p); + gpuAtomicAdd(&p.data[pOffset], getOp(linearIndex)); + } + } + __syncthreads(); + // NOTE: atomically update output bin count. + // Atomic update is imp since __syncthread() will only synchronize threads + // in a given block, not across blocks. 
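+    // p holds one partial histogram per block; below, each block adds its own
+    // row p[blockIdx.x, :] into the final output a[:].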
+ const IndexType pIdx = p.strides[0] * blockIdx.x; + const IndexType pOffset = + detail::IndexToOffset::get(pIdx, p); + for (IndexType i = threadIdx.x; i < a.sizes[0]; i += blockDim.x) { + const IndexType aOffset = + detail::IndexToOffset::get(i, a); + gpuAtomicAdd(&a.data[aOffset], p.data[pOffset + i]); + } + + } else { + ////////////////////////// Global memory ////////////////////////// + // atomically add to the output tensor + // compute histogram for the block + FOR_KERNEL_LOOP(linearIndex, totalElements) { + // Convert `linearIndex` into an offset of `b` + const IndexType bOffset = + detail::IndexToOffset::get(linearIndex, b); + const auto bVal = b.data[bOffset]; + if (bVal >= minvalue && bVal <= maxvalue) { + // Use value at `b` as an offset of `a` + const IndexType bin = getBin(bVal, minvalue, maxvalue, nbins); + const IndexType aOffset = + detail::IndexToOffset::get(bin, a); + gpuAtomicAdd(&a.data[aOffset], getOp(linearIndex)); + } + } + } +} + +#define HANDLE_CASE(MEMORY_TYPE, WEIGHTS_OP, SHARED_MEM) \ + kernelHistogram1D \ + <<>>( \ + aInfo, pInfo, bInfo, nbins, minvalue, maxvalue, totalElements, WEIGHTS_OP); \ + C10_CUDA_KERNEL_LAUNCH_CHECK(); + +#define HANDLE_SWITCH_CASE(mType, getOp) \ + switch (mType) { \ + case CUDAHistogramMemoryType::SHARED: \ + HANDLE_CASE(CUDAHistogramMemoryType::SHARED, getOp, sharedMem); \ + break; \ + case CUDAHistogramMemoryType::MULTI_BLOCK: \ + HANDLE_CASE(CUDAHistogramMemoryType::MULTI_BLOCK, getOp, 0); \ + break; \ + default: \ + HANDLE_CASE(CUDAHistogramMemoryType::GLOBAL, getOp, 0); \ + } + +inline int64_t getFreeGlobalMemory() { + // no need to use `cudaSetDevice` + size_t free_mem, total_mem; + cudaMemGetInfo(&free_mem, &total_mem); + TORCH_INTERNAL_ASSERT( + cudaGetLastError() == cudaSuccess, + "CUDA_tensor_histogram failed to get free global memory"); + return static_cast(free_mem); +} + +/* + Calculate the frequency of the input values. + + `a` contains the final output or the histogram. + Input `b` is assumed to be 1-D non-negative int array. + `c` optionally contains the weight vector. + See `help torch.bincount` for details on the math. + + 3 implementations based of input size and memory usage: + case: #bins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM and enough shared mem + SHARED: Each block atomically adds to it's own **shared** hist copy, + then atomically updates the global tensor. + case: #bins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM and enough global mem + MULTI_BLOCK: Each block atomically adds to it's own **global** hist + copy, then atomically updates the global tensor. + case: THRESH_NUMBER_BINS_FOR_GLOBAL_MEM <= #bins + GLOBAL: all threads atomically update to a single **global** hist copy. 
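+  Illustrative sizing example (hypothetical numbers, not from the original
+  comment): with output_t = float and nbins = 64, sharedMem = 64 * 4 + 8 = 264
+  bytes, far below the per-block shared-memory limit, so SHARED is chosen;
+  with nbins = 500 and grid.x = 1024, multiBlockMem = 500 * 1024 * 4 bytes
+  (about 2 MB), and MULTI_BLOCK is chosen as long as that stays under half of
+  the free global memory reported by getFreeGlobalMemory().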
+ */ +template +bool CUDA_tensor_histogram( + at::Tensor a, /* output */ + at::Tensor b, /* input */ + at::Tensor c, /* weights(optional) */ + int64_t nbins, + input_t minvalue, + input_t maxvalue, + TensorArgType aType = TensorArgType::ReadWrite, + TensorArgType bType = TensorArgType::ReadOnly, + TensorArgType cType = TensorArgType::ReadOnly) { + checkBackend("CUDA_tensor_histogram", {a, b}, Backend::CUDA); + if (HasWeights) { + checkBackend("CUDA_tensor_histogram", {c}, Backend::CUDA); + } + auto totalElements = b.numel(); + + if (totalElements == 0) { + return false; + } + + const dim3 block = getApplyBlock(); + dim3 grid; + int64_t curDevice = current_device(); + if (curDevice == -1 || !getApplyGrid(totalElements, grid, curDevice)) { + return false; + } + + CUDAHistogramMemoryType memType = CUDAHistogramMemoryType::GLOBAL; + auto maxSharedMem = getCurrentDeviceProperties()->sharedMemPerBlock; + auto sharedMem = nbins * sizeof(output_t) + 8; // 8 guard bytes + auto maxGlobalMem = getFreeGlobalMemory(); + auto multiBlockMem = nbins * grid.x * sizeof(output_t) + 8; // 8 guard bytes + // determine memory type to use in the kernel + if (nbins < THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM && + sharedMem < maxSharedMem) { + memType = CUDAHistogramMemoryType::SHARED; + } else if ( + nbins < THRESH_NUMBER_BINS_FOR_GLOBAL_MEM && + multiBlockMem < (maxGlobalMem / 2)) { + // check against half of free mem to be extra safe + // due to cached allocator, we may anyway have slightly more free mem + memType = CUDAHistogramMemoryType::MULTI_BLOCK; + } + + // alloc memory for MULTI_BLOCK + using IndexType = int64_t; + auto aInfo = detail::getTensorInfo(a); + auto bInfo = detail::getTensorInfo(b); + detail::TensorInfo pInfo(nullptr, 0, {}, {}); + Tensor partial_output; + if (memType == CUDAHistogramMemoryType::MULTI_BLOCK) { + partial_output = native::zeros( + {grid.x, nbins}, + optTypeMetaToScalarType(a.options().dtype_opt()), + a.options().layout_opt(), + a.options().device_opt(), + a.options().pinned_memory_opt()); + pInfo = detail::getTensorInfo(partial_output); + } + + if (HasWeights) { + auto cInfo = detail::getTensorInfo(c); + const auto getWeightsOp = [cInfo] __device__(IndexType cIndex) { + const IndexType cOffset = + detail::IndexToOffset::get(cIndex, cInfo); + return cInfo.data[cOffset]; + }; + HANDLE_SWITCH_CASE(memType, getWeightsOp) + } else { + static const auto getDummyOp = [] __device__(IndexType) { return 1L; }; + HANDLE_SWITCH_CASE(memType, getDummyOp) + } + return true; +} + +#undef HANDLE_CASE +#undef HANDLE_SWITCH_CASE +#undef FOR_KERNEL_LOOP +#undef THRESH_NUMBER_BINS_FOR_GLOBAL_MEM +#undef THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM +} // namespace cuda + +namespace { +///////////////// bincount ///////////////// +template +Tensor _bincount_cuda_template( + const Tensor& self, + const Tensor& weights, + int64_t minlength) { + if (minlength < 0) { + AT_ERROR("minlength should be >= 0"); + } + if (self.dim() == 1 && self.numel() == 0) { + return native::zeros( + {minlength}, + kLong, + c10::nullopt /* layout */, + kCUDA, + c10::nullopt /* pin_memory */); + } + if (self.dim() != 1 || + (!std::is_same::value && + *self.min().cpu().data_ptr() < 0)) { + AT_ERROR("bincount only supports 1-d non-negative integral inputs."); + } + + bool has_weights = weights.defined(); + if (has_weights && weights.size(0) != self.size(0)) { + AT_ERROR("input and weights should have the same length"); + } + + const int64_t nbins = std::max(*self.max().cpu().data_ptr() + (int64_t)1, minlength); + const input_t 
minvalue = 0; + const input_t maxvalue = nbins; + // alloc output counter on GPU + Tensor output; + if (has_weights) { + output = native::zeros( + {nbins}, + optTypeMetaToScalarType(weights.options().dtype_opt()), + weights.options().layout_opt(), + weights.options().device_opt(), + weights.options().pinned_memory_opt()); + auto ret = cuda::CUDA_tensor_histogram( + output, self, weights, nbins, minvalue, maxvalue); + } else { + output = native::zeros( + {nbins}, + kLong, + c10::nullopt /* layout */, + DeviceType::CUDA, + c10::nullopt /* pin_memory */); + auto ret = cuda::CUDA_tensor_histogram( + output, self, weights, nbins, minvalue, maxvalue); + } + return output; +} + +///////////////// histc ///////////////// +template +Tensor _histc_cuda_template( + const Tensor& self, + int64_t nbins, + input_t min, + input_t max) { + if (nbins <= 0) { + AT_ERROR("bins must be > 0"); + } + Tensor output = native::zeros( + {nbins}, + self.scalar_type(), + c10::nullopt /* layout */, + DeviceType::CUDA, + c10::nullopt /* pin_memory */); + input_t minvalue = min; + input_t maxvalue = max; + if (min == max) { + minvalue = *self.min().cpu().data_ptr(); + maxvalue = *self.max().cpu().data_ptr(); + } + if (minvalue == maxvalue) { + minvalue = minvalue - 1; + maxvalue = maxvalue + 1; + } + +#ifndef __HIP_PLATFORM_HCC__ + TORCH_CHECK( + !(THCNumerics::isinf(minvalue) || + THCNumerics::isinf(maxvalue) || + THCNumerics::isnan(minvalue) || + THCNumerics::isnan(maxvalue)), + "range of [", + minvalue, + ", ", + maxvalue, + "] is not finite"); +#else + TORCH_CHECK( + !(std::isinf(minvalue) || std::isinf(maxvalue) || std::isnan(minvalue) || + std::isnan(maxvalue)), + "range of [", + minvalue, + ", ", + maxvalue, + "] is not finite"); +#endif + TORCH_CHECK(minvalue < maxvalue, "max must be larger than min"); + + auto ret = cuda::CUDA_tensor_histogram( + output, self, Tensor(), nbins, minvalue, maxvalue); + return output; +} +} // namespace + +namespace native { +Tensor _bincount_cuda( + const Tensor& self, const c10::optional& weights_opt, + int64_t minlength) { + // See [Note: hacky wrapper removal for optional tensor] + const Tensor& weights = c10::value_or_else(weights_opt, [] {return Tensor();}); + + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("_bincount_cuda"); + return AT_DISPATCH_INTEGRAL_TYPES(self.scalar_type(), "bincount_cuda", [&] { + const auto scalar = weights.scalar_type(); + if (scalar == ScalarType::Undefined || scalar == ScalarType::Float) + return _bincount_cuda_template(self, weights, minlength); + return _bincount_cuda_template( + self, weights.to(kDouble), minlength); + }); +} + +Tensor _histc_cuda( + const Tensor& self, + int64_t nbins, + const Scalar& min, + const Scalar& max) { + if (self.scalar_type() == ScalarType::Half) { + AT_ERROR("HalfTensor is not supported"); + } + // See Note [Writing Nondeterministic Operations] + // Nondeterministic because of atomicAdd usage + globalContext().alertNotDeterministic("_histc_cuda"); + return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "histc", [&] { + return _histc_cuda_template(self, nbins, min.to(), max.to()); + }); +} + +Tensor& _histc_out_cuda(const Tensor& self, int64_t bins, const Scalar& min, const Scalar& max, Tensor& result) { + auto ret = _histc_cuda(self, bins, min, max); + result.resize_as_(ret); + result.copy_(ret); + return result; +} +} // namespace native +} // namespace at diff --git a/cuda_code/TRWP_backward_soft.cu 
b/cuda_code/TRWP_backward_soft.cu new file mode 100644 index 0000000000000000000000000000000000000000..9b199608e3edc618167eaadebc860d3c93e2f572 --- /dev/null +++ b/cuda_code/TRWP_backward_soft.cu @@ -0,0 +1,551 @@ +#include "TRWP.h" +#include "commonCUDA.cuh" +#include "TRWP_soft.cuh" + +#ifdef __cplusplus + extern "C" { +#endif + +__device__ void DynamicProgrammingBack(const Param param, + const uint n_thread_a_tree, + const uint current_node_h, + const uint current_node_w, + const uint front_node_h, + const uint front_node_w, + const float* context, + const float* edge_weights, + const float* msg_edge_label, + const uchar* msg_norm_index, + float* dmsg, + float* dunary_update, + float* dcontext, + float* dedge_weights, + float* dmsg_update_shared, + float* msg_min_value_shared, + float* msg_edge_label_shared, + float* msg_edge_label_exp_shared) { + uint height = param.height, width = param.width; + uint n_disp = param.n_disp, n_trees = param.n_trees; + bool is_pass_l2r = param.is_pass_l2r; + float rho = param.rho; + + uint tid = blockIdx.x * blockDim.x + threadIdx.x; + uint n_disp_with_warp = (n_disp + WARP_SIZE - 1) / WARP_SIZE * WARP_SIZE; + uint max_parallel_disps = min(n_disp, blockDim.x / n_disp_with_warp); + uint n_iters = (n_disp + max_parallel_disps - 1) / max_parallel_disps; + bool enable_seg = (n_disp == 21); + + uint id_base = tid / (n_trees * n_thread_a_tree); + uint unary_base = id_base * height * width * n_disp; + uint edge_base = id_base * height * width; + uint msg_edge_label_base = id_base * height * width * n_disp * n_disp; + uint current_d_base = threadIdx.x / n_disp_with_warp; + uint msg_index_offset = id_base * height * width + current_node_h * width + current_node_w; + uchar norm_index = msg_norm_index[msg_index_offset]; + + if (threadIdx.x < n_disp) { + uint current_d = threadIdx.x; + uint msg_offset = unary_base + current_node_h * width * n_disp + current_node_w * n_disp + current_d; + dmsg_update_shared[threadIdx.x] = dmsg[msg_offset]; + } + __syncthreads(); + + // Back norm + uint current_d_4norm = threadIdx.x % n_disp_with_warp; + float gradient = 0; + + // A patch: current_d_4norm above may exceed MAX_DISPARITY + if (current_d_4norm < MAX_DISPARITY) gradient = dmsg_update_shared[current_d_4norm]; + __syncthreads(); + + float gradient_sum = sumMsg(n_disp, current_d_base, gradient); + if (threadIdx.x == 0) dmsg_update_shared[norm_index] -= gradient_sum; + __syncthreads(); + + uint offset_base = unary_base + front_node_h * width * n_disp + front_node_w * n_disp; + uint front_d = threadIdx.x % n_disp_with_warp; + uint unary_offset = offset_base + front_d; + + for (uint iter = 0; iter < n_iters; ++iter) { + uint current_d = iter * max_parallel_disps + current_d_base; + bool is_valid_thread = (front_d < n_disp) && (current_d < n_disp); + bool enable_valid_assign = is_valid_thread && (threadIdx.x % n_disp_with_warp == 0); + uint lr_id = current_d_base * n_disp_with_warp + front_d; + uint msg_edge_label_loc = is_pass_l2r ? 
(front_d * n_disp + current_d) : (current_d * n_disp + front_d); + uint msg_edge_label_add = msg_edge_label_base + current_node_h * width * n_disp * n_disp + + current_node_w * n_disp * n_disp + msg_edge_label_loc; + + // Calculate p * (1 - msg_edge_label + msg) + if (is_valid_thread) msg_edge_label_shared[lr_id] = msg_edge_label[msg_edge_label_add]; + __syncthreads(); + + // ==== BEGIN: from forward, re-calculate prob and msg_soft_sum + // Find the min value among front_d + float min_value = findMsgMin(n_disp, front_d, current_d_base, msg_edge_label_shared[lr_id]); + + if (enable_valid_assign) msg_min_value_shared[current_d] = min_value; + __syncthreads(); + + // Let msg_edge_label subtracts min_value + if (is_valid_thread) + msg_edge_label_exp_shared[lr_id] = __expf(-msg_edge_label_shared[lr_id] + msg_min_value_shared[current_d]); + __syncthreads(); + + // Soft message + float sum_exp = sumMsg(n_disp, current_d_base, msg_edge_label_exp_shared[lr_id]); + float prob = msg_edge_label_exp_shared[lr_id] / sum_exp; + float msg_soft = prob * msg_edge_label_shared[lr_id]; + + if (is_valid_thread) msg_edge_label_exp_shared[lr_id] = msg_soft; + __syncthreads(); + + // Sum soft message over front_d + float msg_soft_sum = sumMsg(n_disp, current_d_base, msg_edge_label_exp_shared[lr_id]); + if (enable_valid_assign) msg_min_value_shared[current_d] = msg_soft_sum; + __syncthreads(); + // ==== END: From forward + + if (is_valid_thread) { + // Calculate dmsg_edge_label + float dmsg_sum = dmsg_update_shared[current_d]; + float msg_edge_label_one = msg_edge_label_shared[lr_id]; + float dmsg_edge_label = dmsg_sum * prob * (1 - msg_edge_label_one + msg_soft_sum); + + uint context_loc = 0; + if (enable_seg) + context_loc = min(current_d, front_d) * n_disp + max(current_d, front_d); + else + context_loc = std::abs(int(current_d) - int(front_d)); + + uint edge_weight_loc = edge_base + current_node_h * width + current_node_w; + + atomicAdd(&dunary_update[unary_offset], dmsg_edge_label); + atomicAdd(&dmsg[unary_offset], rho * dmsg_edge_label); + atomicAdd(&dedge_weights[edge_weight_loc], context[context_loc] * dmsg_edge_label); + atomicAdd(&dcontext[context_loc], edge_weights[edge_weight_loc] * dmsg_edge_label); + } + __syncthreads(); + } +} + +__global__ void CostAggregateKernelSoftBack(const Param param, + const uint n_thread_required, + float* dcost_final_ptr, + float* dunary, + float* dmsg_ptr) { + // cost_final=unary+sum{msg_update} + uint tid = blockIdx.x * blockDim.x + threadIdx.x; + if (tid >= n_thread_required) return; + + float dcost_final_value = dcost_final_ptr[tid]; + dunary[tid] = dcost_final_value; + + for (uint dir = 0; dir < param.n_dir; ++dir) + dmsg_ptr[dir * n_thread_required + tid] = dcost_final_value; + + __syncthreads(); +} + +__global__ void UpdateUnaryKernelSoftBack(const Param param, + const uint n_thread_required, + float* dunary_update_ptr, + float* dunary_ptr, + float* dmsg_ptr) { + // unary_update=rho*(unary+sum{msg}-msg_dir)-msg_dir_inv + uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*n_cv*h*w*n_disp + if (tid >= n_thread_required) return; + + uint dir = param.dir, dir_inv = param.dir_inv, n_dir = param.n_dir; + float rho = param.rho; + float dunary_update_value = dunary_update_ptr[tid]; + float dunary_update_value_rho = rho * dunary_update_value; + + for (uint dir = 0; dir < n_dir; ++dir) + atomicAdd(&dmsg_ptr[dir * n_thread_required + tid], dunary_update_value_rho); + + atomicAdd(&dunary_ptr[tid], dunary_update_value_rho); + atomicAdd(&dmsg_ptr[dir * n_thread_required + 
tid], -dunary_update_value_rho); + atomicAdd(&dmsg_ptr[dir_inv * n_thread_required + tid], -dunary_update_value); + __syncthreads(); +} + +__global__ void HorizontalKernelSoftBack(const Param param, + const uint n_thread_required, + const uint n_thread_a_tree, + const float* context, + const float* edge_weights, + const float* msg_edge_label, + const uchar* msg_norm_index, + float* dmsg, + float* dunary_update, + float* dcontext, + float* dedge_weights) { + static __shared__ float dmsg_update_shared[MAX_DISPARITY]; + static __shared__ float msg_min_value_shared[MAX_DISPARITY]; + static __shared__ float msg_edge_label_shared[MAX_SHARED_MEM_PER_BLOCK]; + static __shared__ float msg_edge_label_exp_shared[MAX_SHARED_MEM_PER_BLOCK]; + + msg_edge_label_shared[threadIdx.x] = 0; + msg_edge_label_exp_shared[threadIdx.x] = 0; + + if (threadIdx.x < MAX_DISPARITY) { + msg_min_value_shared[threadIdx.x] = 0; + dmsg_update_shared[threadIdx.x] = 0; + } + __syncthreads(); + + uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree + + if (tid >= n_thread_required) return; + + uint width = param.width, n_trees = param.n_trees; + int w_step = param.w_step; + uint tree_id = (tid / n_thread_a_tree) % n_trees; + int h_start = tree_id, w_start = (w_step > 0) ? 0 : (width - 1); + uint roll_step = width - 1; + + // The front node is in accordance with forward pass, use + *_step + // msg_min_index(batch,n_cv,h,w,n_disp) + for (uint i = 0; i <= roll_step; ++i) { + int current_node_h = h_start; + int current_node_w = w_start + i * w_step; + int front_node_h = current_node_h; + int front_node_w = current_node_w + w_step; + + if (0 <= current_node_w && current_node_w < width && + 0 <= front_node_w && front_node_w < width) + DynamicProgrammingBack(param, + n_thread_a_tree, + current_node_h, + current_node_w, + front_node_h, + front_node_w, + context, + edge_weights, + msg_edge_label, + msg_norm_index, + dmsg, + dunary_update, + dcontext, + dedge_weights, + dmsg_update_shared, + msg_min_value_shared, + msg_edge_label_shared, + msg_edge_label_exp_shared); + __syncthreads(); + } +} + +__global__ void DiagonalKernelNarrowSoftBack(const Param param, + const uint n_thread_required, + const uint n_thread_a_tree, + const float* context, + const float* edge_weights, + const float* msg_edge_label, + const uchar* msg_norm_index, + float* dmsg, + float* dunary_update, + float* dcontext, + float* dedge_weights) { + static __shared__ float dmsg_update_shared[MAX_DISPARITY]; + static __shared__ float msg_min_value_shared[MAX_DISPARITY]; + static __shared__ float msg_edge_label_shared[MAX_SHARED_MEM_PER_BLOCK]; + static __shared__ float msg_edge_label_exp_shared[MAX_SHARED_MEM_PER_BLOCK]; + + msg_edge_label_shared[threadIdx.x] = 0; + msg_edge_label_exp_shared[threadIdx.x] = 0; + + if (threadIdx.x < MAX_DISPARITY) { + msg_min_value_shared[threadIdx.x] = 0; + dmsg_update_shared[threadIdx.x] = 0; + } + __syncthreads(); + + uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree + + if (tid >= n_thread_required) return; + + uint height = param.height, width = param.width, n_trees = param.n_trees; + int h_step = param.h_step, w_step = param.w_step; + uint h_step_abs = std::abs(h_step); + uint tree_id = (tid / n_thread_a_tree) % n_trees; + int tree_id_shift = tree_id - (height - 1) * max(w_step, 0); + int common1 = tree_id_shift % h_step_abs; + float common2 = float(tree_id_shift) / float(h_step_abs); // This must be float NOT int, will affect ceilf and floorf + int h_start = 0, w_start = 0; + 
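+  // Note (interpretation, for illustration only): each thread group follows one
+  // diagonal chain identified by tree_id; tree_id_shift re-centres that id so
+  // the chain's first on-grid point can be derived below for either sign of
+  // w_step via common1 (remainder) and common2 (quotient).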
+ // Use a common mode to calculate start points for shortest chains, read my notes for clarity + if (w_step > 0) { + h_start = (h_step_abs - common1) % h_step_abs; + w_start = ceilf(common2); + } else { + h_start = common1; + w_start = floorf(common2); + } + + if (h_step < 0) h_start = height - 1 - h_start; + uint roll_step = (height - 1) / h_step_abs; + + // The front node is in accordance with forward pass, use + *_step + // msg_min_index(batch,n_cv,h,w,n_disp) + for (uint i = 0; i <= roll_step; ++i) { + int current_node_h = h_start + i * h_step; + int current_node_w = w_start + i * w_step; + int front_node_h = current_node_h + h_step; + int front_node_w = current_node_w + w_step; + + if (0 <= current_node_h && current_node_h < height && + 0 <= current_node_w && current_node_w < width && + 0 <= front_node_h && front_node_h < height && + 0 <= front_node_w && front_node_w < width) + DynamicProgrammingBack(param, + n_thread_a_tree, + current_node_h, + current_node_w, + front_node_h, + front_node_w, + context, + edge_weights, + msg_edge_label, + msg_norm_index, + dmsg, + dunary_update, + dcontext, + dedge_weights, + dmsg_update_shared, + msg_min_value_shared, + msg_edge_label_shared, + msg_edge_label_exp_shared); + __syncthreads(); + } +} + +__global__ void DiagonalKernelWideSoftBack(const Param param, + const uint n_thread_required, + const uint n_thread_a_tree, + const float* context, + const float* edge_weights, + const float* msg_edge_label, + const uchar* msg_norm_index, + float* dmsg, + float* dunary_update, + float* dcontext, + float* dedge_weights) { + static __shared__ float dmsg_update_shared[MAX_DISPARITY]; + static __shared__ float msg_min_value_shared[MAX_DISPARITY]; + static __shared__ float msg_edge_label_shared[MAX_SHARED_MEM_PER_BLOCK]; + static __shared__ float msg_edge_label_exp_shared[MAX_SHARED_MEM_PER_BLOCK]; + + msg_edge_label_shared[threadIdx.x] = 0; + msg_edge_label_exp_shared[threadIdx.x] = 0; + + if (threadIdx.x < MAX_DISPARITY) { + msg_min_value_shared[threadIdx.x] = 0; + dmsg_update_shared[threadIdx.x] = 0; + } + __syncthreads(); + + uint tid = blockIdx.x * blockDim.x + threadIdx.x; // batch*cv*h*n_thread_a_tree + + if (tid >= n_thread_required) return; + + uint height = param.height, width = param.width, n_trees = param.n_trees; + int h_step = param.h_step, w_step = param.w_step; + uint tree_id = (tid / n_thread_a_tree) % n_trees; + int tree_id_shift = tree_id - (height - 1) * max(w_step, 0); + uint h_step_abs = std::abs(h_step), roll_step = (height - 1) / h_step_abs; + int h_start = (h_step > 0) ? 
0 : (height - 1), w_start = tree_id_shift; + + // The front node is in accordance with forward pass, use + *_step + // msg_min_index(batch,n_cv,h,w,n_disp) + for (uint i = 0; i <= roll_step; ++i) { + int current_node_h = h_start + i * h_step; + int current_node_w = w_start + i * w_step; + int front_node_h = current_node_h + h_step; + int front_node_w = current_node_w + w_step; + + if (0 <= current_node_h && current_node_h < height && + 0 <= current_node_w && current_node_w < width && + 0 <= front_node_h && front_node_h < height && + 0 <= front_node_w && front_node_w < width) + DynamicProgrammingBack(param, + n_thread_a_tree, + current_node_h, + current_node_w, + front_node_h, + front_node_w, + context, + edge_weights, + msg_edge_label, + msg_norm_index, + dmsg, + dunary_update, + dcontext, + dedge_weights, + dmsg_update_shared, + msg_min_value_shared, + msg_edge_label_shared, + msg_edge_label_exp_shared); + __syncthreads(); + } +} + +void BackwardCUDASoft(const float rho, + const at::Tensor dcost_final, + const at::Tensor context, + const at::Tensor edge_weights, + const at::Tensor msg_edge_label, + const at::Tensor msg_norm_index, + at::Tensor dunary, + at::Tensor dcontext, + at::Tensor dedge_weights, + at::Tensor dmsg, + at::Tensor dunary_update) { + const uint n_iter = msg_edge_label.size(0); + const uint n_dir = msg_edge_label.size(1); + const uint batch = msg_edge_label.size(2); + const uint n_cv = msg_edge_label.size(3); + const uint height = msg_edge_label.size(4); + const uint width = msg_edge_label.size(5); + const uint n_disp = msg_edge_label.size(6); + float* dcost_final_ptr = dcost_final.data(); + float* context_ptr = context.data(); + float* edge_weight_ptr = edge_weights.data(); + float* msg_edge_label_ptr = msg_edge_label.data(); // (n_iter,n_dir,batch,n_cv,h,w,n_disp,n_disp) + uchar* msg_norm_index_ptr = msg_norm_index.data(); // (n_iter,n_dir,batch,n_cv,h,w) + float* dunary_ptr = dunary.data(); // (batch,n_cv,h,w,n_disp) + float* dcontext_ptr = dcontext.data(); + float* dedge_weight_ptr = dedge_weights.data(); + float* dmsg_ptr = dmsg.data(); + float* dunary_update_ptr = dunary_update.data(); + uint n_disp_with_warp = GetNumThreadATree(n_disp, WARP_SIZE); + uint n_thread_a_tree = min(n_disp, MAX_THREADS_PER_BLOCK / n_disp_with_warp) * n_disp_with_warp; + bool is_backward = true, is_training = true; + + std::vector dmsg_address(n_dir), edge_weight_address(n_dir); + std::vector dedge_weight_address(n_dir), msg_edge_label_address(n_dir); + std::vector msg_norm_index_address(n_dir); + std::vector param_list; + uint msg_min_size = batch * n_cv * height * width * n_disp; + uint msg_norm_size = msg_min_size / n_disp; + uint msg_edge_label_size = n_dir * msg_min_size * n_disp; + uint msg_norm_index_size = n_dir * msg_norm_size; + uint n_thread_unary = min(MAX_THREADS_PER_BLOCK, msg_min_size); + uint n_block_unary = (msg_min_size + n_thread_unary - 1) / n_thread_unary; + + for (int dir = 0; dir < n_dir; ++dir) { + edge_weight_address[dir] = edge_weight_ptr + dir * msg_norm_size; + dedge_weight_address[dir] = dedge_weight_ptr + dir * msg_norm_size; + dmsg_address[dir] = dmsg_ptr + dir * msg_min_size; + Param param(n_dir, batch, n_cv, height, width, n_disp, dir, rho, is_backward, is_training); + UpdateParam(¶m); + param_list.push_back(param); + } + + CostAggregateKernelSoftBack<<>>(param_list[0], + msg_min_size, + dcost_final_ptr, + dunary_ptr, + dmsg_ptr); + #ifdef CUDA_ERROR_CHECK + CUDAErrorCheck(); + #endif + + for (int iter = n_iter - 1; iter >= 0; --iter) { + for (int dir = 
n_dir - 1; dir >= 0; --dir) { + msg_edge_label_address[dir] = msg_edge_label_ptr + iter * msg_edge_label_size + dir * msg_edge_label_size / n_dir; + msg_norm_index_address[dir] = msg_norm_index_ptr + iter * msg_norm_index_size + dir * msg_norm_size; + + uint n_threads = batch * n_cv * param_list[dir].n_trees * n_thread_a_tree; + uint n_blocks = GetNumBlock(n_threads, n_thread_a_tree); + + // Diagonal + if (4 <= dir) { + uint h_step_abs = std::abs(param_list[dir].h_step); + uint w_step_abs = std::abs(param_list[dir].w_step); + + if (h_step_abs > w_step_abs) + DiagonalKernelNarrowSoftBack<<>>(param_list[dir], + n_threads, + n_thread_a_tree, + context_ptr, + edge_weight_address[dir], + msg_edge_label_address[dir], + msg_norm_index_address[dir], + dmsg_address[dir], + dunary_update_ptr, + dcontext_ptr, + dedge_weight_address[dir]); + else + DiagonalKernelWideSoftBack<<>>(param_list[dir], + n_threads, + n_thread_a_tree, + context_ptr, + edge_weight_address[dir], + msg_edge_label_address[dir], + msg_norm_index_address[dir], + dmsg_address[dir], + dunary_update_ptr, + dcontext_ptr, + dedge_weight_address[dir]); + } + + // Vertical + if ((2 <= dir) && (dir < 4)) + DiagonalKernelWideSoftBack<<>>(param_list[dir], + n_threads, + n_thread_a_tree, + context_ptr, + edge_weight_address[dir], + msg_edge_label_address[dir], + msg_norm_index_address[dir], + dmsg_address[dir], + dunary_update_ptr, + dcontext_ptr, + dedge_weight_address[dir]); + + // Horizontal + if (dir < 2) + HorizontalKernelSoftBack<<>>(param_list[dir], + n_threads, + n_thread_a_tree, + context_ptr, + edge_weight_address[dir], + msg_edge_label_address[dir], + msg_norm_index_address[dir], + dmsg_address[dir], + dunary_update_ptr, + dcontext_ptr, + dedge_weight_address[dir]); + + #ifdef CUDA_ERROR_CHECK + CUDAErrorCheck(); + #endif + + UpdateUnaryKernelSoftBack<<>>(param_list[dir], + msg_min_size, + dunary_update_ptr, + dunary_ptr, + dmsg_ptr); + + #ifdef CUDA_ERROR_CHECK + CUDAErrorCheck(); + #endif + + cudaMemset(dunary_update_ptr, 0, msg_min_size * sizeof(float)); + cudaMemset(dmsg_address[dir], 0, msg_min_size * sizeof(float)); + } + } + + for (uint dir = 0; dir < n_dir; ++dir) { + if (dmsg_address[dir] != nullptr) dmsg_address[dir] = nullptr; + if (msg_edge_label_address[dir] != nullptr) msg_edge_label_address[dir] = nullptr; + if (msg_norm_index_address[dir] != nullptr) msg_norm_index_address[dir] = nullptr; + if (edge_weight_address[dir] != nullptr) edge_weight_address[dir] = nullptr; + if (dedge_weight_address[dir] != nullptr) dedge_weight_address[dir] = nullptr; + } +} + +#ifdef __cplusplus + } +#endif diff --git a/cuda_code/TemporalMaxPooling_3.cu b/cuda_code/TemporalMaxPooling_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..a950aa730afb591e5987088bbd6255670fec57a3 --- /dev/null +++ b/cuda_code/TemporalMaxPooling_3.cu @@ -0,0 +1,188 @@ +#ifndef THC_GENERIC_FILE +#define THC_GENERIC_FILE "generic/TemporalMaxPooling.cu" +#else + +static inline void THNN_(TemporalMaxPooling_shapeCheck)( + THCState *state, + THCTensor *input, + THCTensor *gradOutput, + THCIndexTensor *indices, + int kW, int dW) { + int dimT = 0; // Temporal dimension + int dimF = 1; // Feature dimension + int input_w; + int input_n; + int output_w; + int ndims = input->dim(); + + if (ndims == 3) + { + dimT = 1; + dimF = 2; + } + THArgCheck(kW > 0, 5, + "kernel size should be greater than zero, but got kW: %d", kW); + THArgCheck(dW > 0, 6, + "stride should be greater than zero, but got dW: %d", dW); + + THCUNN_argCheck(state, !input->is_empty() 
&& (input->dim() == 2 || input->dim() == 3), 2, input, + "non-empty 2D or 3D (batch mode) tensor expected for input, but got: %s"); + THArgCheck(input->size(dimT) >= kW, 2, + "input sequence smaller than kernel size. Got: %d, Expected: %d", + input->size(dimT), kW); + + input_w = input->size(dimT); + input_n = input->size(dimF); + output_w = (input_w - kW) / dW + 1; + + if (gradOutput != NULL) { + THCUNN_check_dim_size(state, gradOutput, ndims, dimT, output_w); + THCUNN_check_dim_size(state, gradOutput, ndims, dimF, input_n) + } + if (indices != NULL) { + THCUNN_check_dim_size_indices(state, indices, ndims, dimT, output_w); + THCUNN_check_dim_size_indices(state, indices, ndims, dimF, input_n); + } +} + +void THNN_(TemporalMaxPooling_updateOutput)( + THCState *state, + THCTensor *input, + THCTensor *output, + THCIndexTensor *indices, + int kW, int dW) { + + int dimT = 0; // Temporal dimension + int dimF = 1; // Feature dimension + + int batch = 1; + int input_w; + int input_n; + int output_w; + int nthreads; + + real *input_data; + real *output_data; + THCIndex_t *indices_data; + + THCUNN_assertSameGPU(state, 3, input, output, indices); + THNN_(TemporalMaxPooling_shapeCheck)(state, input, NULL, NULL, kW, dW); + if (input->dim() == 3) + { + dimT = 1; + dimF = 2; + batch = input->size(0); + } + input = THCTensor_(newContiguous)(state, input); + + input_w = input->size(dimT); + input_n = input->size(dimF); + output_w = (input_w - kW) / dW + 1; + + if (input->dim() == 2) + { + THCTensor_(resize2d)(state, output, output_w, input->size(dimF)); + THCIndexTensor_(resize2d)(state, indices, output_w, input->size(dimF)); + } + else + { + THCTensor_(resize3d)(state, output, batch, output_w, input->size(dimF)); + THCIndexTensor_(resize3d)(state, indices, batch, output_w, input->size(dimF)); + } + + input_data = THCTensor_(data)(state, input); + output_data = THCTensor_(data)(state, output); + indices_data = THCIndexTensor_(data)(state, indices); + + dim3 blocks(batch); + nthreads = (output_w / 32) * 32; + if (output_w % 32 > 0) { + nthreads += 32; + } + + if (nthreads > TEMPORAL_MAX_POOLING_THREADS) { + blocks.y = nthreads / TEMPORAL_MAX_POOLING_THREADS; + if (nthreads % TEMPORAL_MAX_POOLING_THREADS > 0) { + blocks.y += 1; + } + nthreads = TEMPORAL_MAX_POOLING_THREADS; + } + + dim3 threads(nthreads); + cunn_TemporalMaxPooling_updateOutputKernel <<< blocks, threads, 0, THCState_getCurrentStream(state) >>>( + input_data, output_data, indices_data, input_w, input_n, output_w, kW, dW); + THCudaCheck(cudaGetLastError()); + THCTensor_(free)(state, input); + +} + +void THNN_(TemporalMaxPooling_updateGradInput)( + THCState *state, + THCTensor *input, + THCTensor *gradOutput, + THCTensor *gradInput, + THCIndexTensor *indices, + int kW, int dW) { + + int dimT = 0; // Temporal dimension + int dimF = 1; // Feature dimension + + int batch = 1; + int input_w; + int input_n; + int output_w; + int nthreads; + + real *gradInput_data; + real *gradOutput_data; + THCIndex_t *indices_data; + + THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, indices); + THNN_(TemporalMaxPooling_shapeCheck)(state, input, gradOutput, indices, kW, dW); + THCTensor_(resizeAs)(state, gradInput, input); + THCTensor_(zero)(state, gradInput); + + if (input->dim() == 3) + { + dimT = 1; + dimF = 2; + batch = input->size(0); + } + gradOutput = THCTensor_(newContiguous)(state, gradOutput); + + input_w = input->size(dimT); + input_n = input->size(dimF); + output_w = (input_w - kW) / dW + 1; + + gradInput_data = THCTensor_(data)(state, 
gradInput); + gradOutput_data = THCTensor_(data)(state, gradOutput); + indices_data = THCIndexTensor_(data)(state, indices); + + dim3 blocks(batch); + nthreads = (output_w / 32) * 32; + if (output_w % 32 > 0) { + nthreads += 32; + } + + if (nthreads > TEMPORAL_MAX_POOLING_THREADS) { + blocks.y = nthreads / TEMPORAL_MAX_POOLING_THREADS; + if (nthreads % TEMPORAL_MAX_POOLING_THREADS > 0) { + blocks.y += 1; + } + nthreads = TEMPORAL_MAX_POOLING_THREADS; + } + + dim3 threads(nthreads); + if (kW <= dW) { + cunn_TemporalMaxPooling_updateGradInputKernel <<< blocks, threads, 0, THCState_getCurrentStream(state) >>>( + gradInput_data, gradOutput_data, indices_data, input_w, input_n, output_w, kW, dW); + } else { + cunn_TemporalMaxPooling_updateGradInputKernelAtomic <<< blocks, threads, 0, THCState_getCurrentStream(state) >>>( + gradInput_data, gradOutput_data, indices_data, input_w, input_n, output_w, kW, dW); + } + THCudaCheck(cudaGetLastError()); + THCTensor_(free)(state, gradOutput); + +} + +#endif diff --git a/cuda_code/TestGpuSelect_7.cu b/cuda_code/TestGpuSelect_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..0c4c09879a06a7dc608ff315469c3a4609c19c16 --- /dev/null +++ b/cuda_code/TestGpuSelect_7.cu @@ -0,0 +1,205 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void testForSize(int rows, int cols, int k, bool dir, bool warp) { + using namespace faiss::gpu; + + StandardGpuResources res; + + std::vector v = randVecs(rows, cols); + HostTensor hostVal({rows, cols}); + + for (int r = 0; r < rows; ++r) { + for (int c = 0; c < cols; ++c) { + hostVal[r][c] = v[r * cols + c]; + } + } + + // row -> (val -> idx) + std::unordered_map>> + hostOutValAndInd; + for (int r = 0; r < rows; ++r) { + std::vector> closest; + + for (int c = 0; c < cols; ++c) { + closest.emplace_back(c, (float)hostVal[r][c]); + } + + auto dirFalseFn = [](std::pair& a, + std::pair& b) { + return a.second < b.second; + }; + auto dirTrueFn = [](std::pair& a, + std::pair& b) { + return a.second > b.second; + }; + + std::sort(closest.begin(), closest.end(), dir ? 
dirTrueFn : dirFalseFn); + hostOutValAndInd.emplace(r, closest); + } + + // Select top-k on GPU + DeviceTensor gpuVal( + res.getResources().get(), + makeDevAlloc(AllocType::Other, 0), + hostVal); + DeviceTensor gpuOutVal( + res.getResources().get(), + makeDevAlloc(AllocType::Other, 0), + {rows, k}); + DeviceTensor gpuOutInd( + res.getResources().get(), + makeDevAlloc(AllocType::Other, 0), + {rows, k}); + + if (warp) { + runWarpSelect(gpuVal, gpuOutVal, gpuOutInd, dir, k, 0); + } else { + runBlockSelect(gpuVal, gpuOutVal, gpuOutInd, dir, k, 0); + } + + // Copy back to CPU + HostTensor outVal(gpuOutVal, 0); + HostTensor outInd(gpuOutInd, 0); + + for (int r = 0; r < rows; ++r) { + std::unordered_map seenIndices; + + for (int i = 0; i < k; ++i) { + float gpuV = outVal[r][i]; + float cpuV = hostOutValAndInd[r][i].second; + + EXPECT_EQ(gpuV, cpuV) + << "rows " << rows << " cols " << cols << " k " << k + << " dir " << dir << " row " << r << " ind " << i; + + // If there are identical elements in a row that should be + // within the top-k, then it is possible that the index can + // differ, because the order in which the GPU will see the + // equivalent values is different than the CPU (and will remain + // unspecified, since this is affected by the choice of + // k-selection algorithm that we use) + int gpuInd = outInd[r][i]; + int cpuInd = hostOutValAndInd[r][i].first; + + // We should never see duplicate indices, however + auto itSeenIndex = seenIndices.find(gpuInd); + + EXPECT_EQ(itSeenIndex, seenIndices.end()) + << "Row " << r << " user index " << gpuInd + << " was seen at both " << itSeenIndex->second << " and " + << i; + + seenIndices[gpuInd] = i; + + if (gpuInd != cpuInd) { + // Gather the values from the original data via index; the + // values should be the same + float gpuGatherV = hostVal[r][gpuInd]; + float cpuGatherV = hostVal[r][cpuInd]; + + EXPECT_EQ(gpuGatherV, cpuGatherV) + << "rows " << rows << " cols " << cols << " k " << k + << " dir " << dir << " row " << r << " ind " << i + << " source ind " << gpuInd << " " << cpuInd; + } + } + } +} + +// General test +TEST(TestGpuSelect, test) { + for (int i = 0; i < 10; ++i) { + int rows = faiss::gpu::randVal(10, 100); + int cols = faiss::gpu::randVal(1, 30000); + int k = std::min(cols, faiss::gpu::randVal(1, GPU_MAX_SELECTION_K)); + bool dir = faiss::gpu::randBool(); + + testForSize(rows, cols, k, dir, false); + } +} + +// Test for k = 1 +TEST(TestGpuSelect, test1) { + for (int i = 0; i < 5; ++i) { + int rows = faiss::gpu::randVal(10, 100); + int cols = faiss::gpu::randVal(1, 30000); + bool dir = faiss::gpu::randBool(); + + testForSize(rows, cols, 1, dir, false); + } +} + +// Test for where k = #cols exactly (we are returning all the values, +// just sorted) +TEST(TestGpuSelect, testExact) { + for (int i = 0; i < 5; ++i) { + int rows = faiss::gpu::randVal(10, 100); + int cols = faiss::gpu::randVal(1, GPU_MAX_SELECTION_K); + bool dir = faiss::gpu::randBool(); + + testForSize(rows, cols, cols, dir, false); + } +} + +// General test +TEST(TestGpuSelect, testWarp) { + for (int i = 0; i < 10; ++i) { + int rows = faiss::gpu::randVal(10, 100); + int cols = faiss::gpu::randVal(1, 30000); + int k = std::min(cols, faiss::gpu::randVal(1, GPU_MAX_SELECTION_K)); + bool dir = faiss::gpu::randBool(); + + testForSize(rows, cols, k, dir, true); + } +} + +// Test for k = 1 +TEST(TestGpuSelect, test1Warp) { + for (int i = 0; i < 5; ++i) { + int rows = faiss::gpu::randVal(10, 100); + int cols = faiss::gpu::randVal(1, 30000); + bool dir = 
faiss::gpu::randBool(); + + testForSize(rows, cols, 1, dir, true); + } +} + +// Test for where k = #cols exactly (we are returning all the values, +// just sorted) +TEST(TestGpuSelect, testExactWarp) { + for (int i = 0; i < 5; ++i) { + int rows = faiss::gpu::randVal(10, 100); + int cols = faiss::gpu::randVal(1, GPU_MAX_SELECTION_K); + bool dir = faiss::gpu::randBool(); + + testForSize(rows, cols, cols, dir, true); + } +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + + // just run with a fixed test seed + faiss::gpu::setTestSeed(100); + + return RUN_ALL_TESTS(); +} diff --git a/cuda_code/TestHelper_1.cu b/cuda_code/TestHelper_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..ce9c2db0e22db8254a1f2a7fc3dcd617356b4aad --- /dev/null +++ b/cuda_code/TestHelper_1.cu @@ -0,0 +1,130 @@ +/*************************************************************************************** + GpuShareSat -- Copyright (c) 2020, Nicolas Prevot + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ **************************************************************************************************/ + +#include "gpuShareLib/Helper.cuh" +#include "../testUtils/TestHelper.cuh" +#include "../gpu/CompositionRoot.h" +#include "gpuShareLib/Clauses.cuh" +#include +#include "gpuShareLib/GpuClauseSharer.h" +#include "../simp/SimpSolver.h" + +using namespace Minisat; + +namespace GpuShare { + +void setDefaultOptions(GpuClauseSharerOptions &options) { + options.gpuBlockCountGuideline = 3; + options.gpuThreadsPerBlockGuideline = 32; + options.minGpuLatencyMicros = 50; +} + +GpuFixture::GpuFixture(GpuClauseSharerOptions &options, int varCount, int _solverCount) : + gpuClauseSharer(options), + logger {2, directPrint} +{ + gpuClauseSharer.setVarCount(varCount); + gpuClauseSharer.setCpuSolverCount(_solverCount); + for (int s = 0; s < _solverCount; s++) { + SimpSolver *solv = new SimpSolver(s, gpuClauseSharer, finisher, false, logger); + solvers.push_back(solv); + for (int i = 0; i < varCount; i++) { + solv->newVar(); + } + solv->prepareForSearch(); + } +} + +GpuClauseSharerForTests::GpuClauseSharerForTests(GpuClauseSharerOptions opts): GpuClauseSharerImpl(opts, [&](const std::string &str) {std::cout << str;}) { +} + +void execute(GpuClauseSharer &gpuClauseSharer) { + // if we run execute just once, it will start the gpu run but won't + // get the results back + for (int i = 0; i < 2; i++) gpuClauseSharer.gpuRun(); +} + +void GpuFixture::execute() { + for (int i = 0; i < solvers.size(); i++) { + solvers[i]->tryCopyTrailForGpu(); + } + GpuShare::execute(gpuClauseSharer); +} + +CRef GpuFixture::executeAndImportClauses() { + assert(solvers.size() == 1); + std::vector res; + executeAndImportClauses(res); + return res[0]; +} + +void GpuFixture::executeAndImportClauses(std::vector &res) { + execute(); + bool foundEmptyClause = false; + res.clear(); + for (int i = 0; i < solvers.size(); i++) { + res.push_back(solvers[i]->gpuImportClauses(foundEmptyClause)); + } +} + +void GpuFixture::checkReportedImported(int count, int instance, bool unit) { + BOOST_CHECK_EQUAL(gpuClauseSharer.getOneSolverStat(instance, reportedClauses), count); + BOOST_CHECK_EQUAL(solvers[instance]->usedWhenImported, count); + if (unit) { + BOOST_CHECK_EQUAL(gpuClauseSharer.getOneSolverStat(instance, reportedClausesUnit), count); + } +} + +GpuFixture::~GpuFixture() { + for (int i = 0; i < solvers.size(); i++) { + delete solvers[i]; + } +} + +__global__ void globalUpdateClauses(DClauseUpdates clUpdates, DClauses dClauses) { + updateClauses(clUpdates, dClauses); +} + +// often, this method is called just to make the clause counts on the host clauses right +void copyToDeviceAsync(HostClauses &hCls, cudaStream_t &stream, GpuDims gpuDims) { + Logger logger {2, directPrint}; + ContigCopier cc(logger); + copyToDeviceAsync(hCls, stream, cc, gpuDims); +} + +void GpuFixture::addClause(const std::vector &cl) { + gpuClauseSharer.addClause(-1, (int*) &cl[0], cl.size()); +} + +void copyToDeviceAsync(HostClauses &hCls, cudaStream_t &stream, ContigCopier &cc, GpuDims gpuDims) { + cc.clear(false); + ClUpdateSet updates = hCls.getUpdatesForDevice(stream, cc); + RunInfo runInfo = hCls.makeRunInfo(stream, cc); + exitIfFalse(cc.tryCopyAsync(cudaMemcpyHostToDevice, stream), POSITION); + // TODO: take GpuDims here + DClauses dClauses = runInfo.getDClauses(); + globalUpdateClauses<<>>(updates.getDClauseUpdates(), dClauses); + exitIfError(cudaStreamSynchronize(stream), POSITION); +} + +void addClause(HostClauses &hostClauses, const std::vector &cl) { + 
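+    // Wrap the raw literal array in a MinHArr view (element count plus pointer) before
+    // handing it to HostClauses::addClause, mirroring the (size, data) call just above.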
hostClauses.addClause(MinHArr((size_t) cl.size(), (Lit*) &cl[0]), cl.size()); +} + +} diff --git a/cuda_code/TestMatrix.cu b/cuda_code/TestMatrix.cu new file mode 100644 index 0000000000000000000000000000000000000000..def6d8fc9fdb76add6e1ce237fa63dc64e249711 --- /dev/null +++ b/cuda_code/TestMatrix.cu @@ -0,0 +1,546 @@ +#include + +#include +#include "Utils.h" + +#define TEST_SIZE_F1(type, flags, rowCompile, rowRuntime, colCompile, colRuntime, batchCompile, batchRuntime) \ + do{ \ + cuMat::Matrix m(rowRuntime, colRuntime, batchRuntime); \ + REQUIRE(m.rows() == rowRuntime); \ + REQUIRE(m.cols() == colRuntime); \ + REQUIRE(m.batches() == batchRuntime); \ + REQUIRE(m.size() == rowRuntime*colRuntime*batchRuntime); \ + if (m.size()>0) REQUIRE(m.data() != nullptr); \ + }while(false) + +#define TEST_SIZE_F2(rowCompile, rowRuntime, colCompile, colRuntime, batchCompile, batchRuntime) \ + do{ \ + TEST_SIZE_F1(bool, cuMat::RowMajor, rowCompile, rowRuntime, colCompile, colRuntime, batchCompile, batchRuntime); \ + TEST_SIZE_F1(bool, cuMat::ColumnMajor,rowCompile, rowRuntime, colCompile, colRuntime, batchCompile, batchRuntime); \ + TEST_SIZE_F1(int, cuMat::RowMajor, rowCompile, rowRuntime, colCompile, colRuntime, batchCompile, batchRuntime); \ + TEST_SIZE_F1(int, cuMat::ColumnMajor,rowCompile, rowRuntime, colCompile, colRuntime, batchCompile, batchRuntime); \ + TEST_SIZE_F1(float, cuMat::RowMajor, rowCompile, rowRuntime, colCompile, colRuntime, batchCompile, batchRuntime); \ + TEST_SIZE_F1(float, cuMat::ColumnMajor,rowCompile, rowRuntime, colCompile, colRuntime, batchCompile, batchRuntime); \ + TEST_SIZE_F1(double, cuMat::RowMajor, rowCompile, rowRuntime, colCompile, colRuntime, batchCompile, batchRuntime); \ + TEST_SIZE_F1(double, cuMat::ColumnMajor,rowCompile, rowRuntime, colCompile, colRuntime, batchCompile, batchRuntime); \ + }while(false) + + +TEST_CASE("instantiation_fully", "[matrix]") +{ + TEST_SIZE_F2(0, 0, 0, 0, 0, 0); + + TEST_SIZE_F2(1, 1, 1, 1, 1, 1); + TEST_SIZE_F2(8, 8, 1, 1, 1, 1); + TEST_SIZE_F2(1, 1, 8, 8, 1, 1); + TEST_SIZE_F2(1, 1, 1, 1, 8, 8); + TEST_SIZE_F2(8, 8, 8, 8, 1, 1); + TEST_SIZE_F2(8, 8, 1, 1, 8, 8); + TEST_SIZE_F2(1, 1, 8, 8, 8, 8); + + TEST_SIZE_F2(cuMat::Dynamic, 16, 4, 4, 4, 4); + TEST_SIZE_F2(4, 4, cuMat::Dynamic, 16, 4, 4); + TEST_SIZE_F2(4, 4, 4, 4, cuMat::Dynamic, 16); + TEST_SIZE_F2(cuMat::Dynamic, 16, cuMat::Dynamic, 8, 4, 4); + TEST_SIZE_F2(4, 4, cuMat::Dynamic, 16, cuMat::Dynamic, 8); + TEST_SIZE_F2(cuMat::Dynamic, 8, 4, 4, cuMat::Dynamic, 16); + TEST_SIZE_F2(cuMat::Dynamic, 8, cuMat::Dynamic, 32, cuMat::Dynamic, 16); +} + +#define TEST_SIZE_D1(type, flags, Rows, Cols, Batches) \ + do { \ + cuMat::Matrix m; \ + if (Rows > 0) {\ + REQUIRE(m.rows() == Rows); \ + } else {\ + REQUIRE(m.rows() == 0); \ + } if (Cols > 0) { \ + REQUIRE(m.cols() == Cols); \ + } else {\ + REQUIRE(m.cols() == 0); \ + } if (Batches > 0) { \ + REQUIRE(m.batches() == Batches); \ + } else {\ + REQUIRE(m.batches() == 0); \ + } if (Rows>0 && Cols>0 && Batches>0) { \ + REQUIRE(m.data() != nullptr); \ + } else {\ + REQUIRE(m.data() == nullptr); \ + } \ + } while (false) +#define TEST_SIZE_D2(rows, cols, batches) \ + do { \ + TEST_SIZE_D1(bool, cuMat::RowMajor, rows, cols, batches); \ + TEST_SIZE_D1(bool, cuMat::ColumnMajor, rows, cols, batches); \ + TEST_SIZE_D1(int, cuMat::RowMajor, rows, cols, batches); \ + TEST_SIZE_D1(int, cuMat::ColumnMajor, rows, cols, batches); \ + TEST_SIZE_D1(float, cuMat::RowMajor, rows, cols, batches); \ + TEST_SIZE_D1(float, cuMat::ColumnMajor, rows, cols, batches); 
\ + TEST_SIZE_D1(double, cuMat::RowMajor, rows, cols, batches); \ + TEST_SIZE_D1(double, cuMat::ColumnMajor, rows, cols, batches); \ + } while(false) + +TEST_CASE("instantiation_default", "[matrix]") +{ + TEST_SIZE_D2(2, 4, 8); + TEST_SIZE_D2(cuMat::Dynamic, 4, 8); + TEST_SIZE_D2(2, cuMat::Dynamic, 8); + TEST_SIZE_D2(2, 4, cuMat::Dynamic); + TEST_SIZE_D2(cuMat::Dynamic, cuMat::Dynamic, 8); + TEST_SIZE_D2(cuMat::Dynamic, 4, cuMat::Dynamic); + TEST_SIZE_D2(2, cuMat::Dynamic, cuMat::Dynamic); + TEST_SIZE_D2(cuMat::Dynamic, cuMat::Dynamic, cuMat::Dynamic); +} + +TEST_CASE("instantiation_vector", "[matrix]") +{ + cuMat::Matrix columnV(8); + REQUIRE(columnV.rows() == 1); + REQUIRE(columnV.cols() == 8); + REQUIRE(columnV.batches() == 1); + cuMat::Matrix rowV(8); + REQUIRE(rowV.rows() == 8); + REQUIRE(rowV.cols() == 1); + REQUIRE(rowV.batches() == 1); +} + +#define TEST_SIZE_M(rowCompile, rowRuntime, colCompile, colRuntime) \ + do {\ + cuMat::Matrix m(rowRuntime, colRuntime); \ + REQUIRE(m.rows() == rowRuntime); \ + REQUIRE(m.cols() == colRuntime); \ + REQUIRE(m.batches() == 1); \ + REQUIRE(m.size() == rowRuntime*colRuntime); \ + } while(0) +TEST_CASE("instantiation_matrix", "[matrix]") +{ + TEST_SIZE_M(4, 4, 8, 8); + TEST_SIZE_M(cuMat::Dynamic, 4, 8, 8); + TEST_SIZE_M(4, 4, cuMat::Dynamic, 8); + TEST_SIZE_M(cuMat::Dynamic, 4, cuMat::Dynamic, 8); +} + +TEST_CASE("instantiation_throws", "[matrix]") +{ + cuMat::Context& ctx = cuMat::Context::current(); + REQUIRE_THROWS((cuMat::Matrix(7, 6, 4))); + REQUIRE_THROWS((cuMat::Matrix(8, 7, 4))); + REQUIRE_THROWS((cuMat::Matrix(8, 6, 3))); + + REQUIRE_THROWS((cuMat::Matrix(-1, 6, 4))); + REQUIRE_THROWS((cuMat::Matrix(8, -1, 4))); + REQUIRE_THROWS((cuMat::Matrix(8, 6, -1))); + + REQUIRE_THROWS((cuMat::Matrix(-1, 6, 4))); + REQUIRE_THROWS((cuMat::Matrix(8, -1, 4))); + REQUIRE_THROWS((cuMat::Matrix(-1, 6, 4))); + REQUIRE_THROWS((cuMat::Matrix(8, 6, -1))); + REQUIRE_THROWS((cuMat::Matrix(8, -1, 4))); + REQUIRE_THROWS((cuMat::Matrix(8, 6, -1))); + + REQUIRE_THROWS((cuMat::Matrix(-1, 6, 4))); + REQUIRE_THROWS((cuMat::Matrix(8, -1, 4))); + REQUIRE_THROWS((cuMat::Matrix(8, 6, -1))); +} + + +TEST_CASE("index_computations_rowMajor", "[matrix]") +{ + cuMat::Matrix m; + for (cuMat::Index i=0; i= 0); + REQUIRE(index < m.size()); + cuMat::Index i2, j2, k2; + m.index(index, i2, j2, k2); + REQUIRE(i2 == i); + REQUIRE(j2 == j); + REQUIRE(k2 == k); + } + } + } +} +TEST_CASE("index_computations_columnMajor", "[matrix]") +{ + cuMat::Matrix m; + for (cuMat::Index i = 0; i= 0); + REQUIRE(index < m.size()); + cuMat::Index i2, j2, k2; + m.index(index, i2, j2, k2); + REQUIRE(i2 == i); + REQUIRE(j2 == j); + REQUIRE(k2 == k); + } + } + } +} + +template +__global__ void TestMatrixWriteRawKernel(dim3 virtual_size, MatrixType matrix) +{ + CUMAT_KERNEL_1D_LOOP(i, virtual_size) + matrix.setRawCoeff(i, i); + CUMAT_KERNEL_1D_LOOP_END +} +//Tests if a kernel can write the raw data +TEST_CASE("write_raw", "[matrix]") +{ + cuMat::Context& ctx = cuMat::Context::current(); + + int sx = 4; + int sy = 8; + int sz = 16; + typedef cuMat::Matrix Mat_t; + Mat_t m(sx, sy, sz); + + cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D((unsigned int) m.size(), TestMatrixWriteRawKernel); + TestMatrixWriteRawKernel <<< cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>> + (cfg.virtual_size, m); + CUMAT_CHECK_ERROR(); + + std::vector host(sx * sy * sz); + m.copyToHost(&host[0]); + for (int i=0; i +__global__ void TestMatrixReadRawKernel(dim3 virtual_size, MatrixType matrix, int* failure) +{ + 
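+    // Grid-stride over every raw coefficient; any element that does not equal its linear
+    // index raises the device-side failure flag, which the host copies back and verifies.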
CUMAT_KERNEL_1D_LOOP(i, virtual_size) + if (matrix.rawCoeff(i) != i) failure[0] = 1; + CUMAT_KERNEL_1D_LOOP_END +} +//Test if the kernel can read the raw data +TEST_CASE("read_raw", "[matrix]") +{ + cuMat::Context& ctx = cuMat::Context::current(); + + int sx = 4; + int sy = 8; + int sz = 16; + typedef cuMat::Matrix Mat_t; + Mat_t m(sx, sy, sz); + + std::vector host1(sx * sy * sz); + for (int i = 0; i successFlag(1); + CUMAT_SAFE_CALL(cudaMemset(successFlag.pointer(), 0, sizeof(int))); + + cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D((unsigned int) m.size(), TestMatrixReadRawKernel); + TestMatrixReadRawKernel <<< cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>> + (cfg.virtual_size, m, successFlag.pointer()); + CUMAT_CHECK_ERROR(); + + int successFlagHost; + cudaMemcpy(&successFlagHost, successFlag.pointer(), sizeof(int), cudaMemcpyDeviceToHost); + REQUIRE(successFlagHost == 0); +} + + +template +__global__ void TestMatrixWriteCoeffKernel(dim3 virtual_size, MatrixType matrix) +{ + CUMAT_KERNEL_3D_LOOP(i, j, k, virtual_size) + matrix.coeff(i, j, k, -1) = i + j*100 + k * 100*100; + CUMAT_KERNEL_3D_LOOP_END +} +//Tests if a kernel can write the 3d-indexed coefficients +TEST_CASE("write_coeff_columnMajor", "[matrix]") +{ + cuMat::Context& ctx = cuMat::Context::current(); + + int sx = 4; + int sy = 8; + int sz = 16; + typedef cuMat::Matrix Mat_t; + Mat_t m(sx, sy, sz); + + cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig3D(sx, sy, sz, TestMatrixWriteCoeffKernel); + TestMatrixWriteCoeffKernel <<< cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>> + (cfg.virtual_size, m); + CUMAT_CHECK_ERROR(); + + std::vector host(sx * sy * sz); + m.copyToHost(&host[0]); + int i = 0; + for (int z=0; z Mat_t; + Mat_t m(sx, sy, sz); + + cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig3D(sx, sy, sz, TestMatrixWriteCoeffKernel); + TestMatrixWriteCoeffKernel <<< cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>> + (cfg.virtual_size, m); + CUMAT_CHECK_ERROR(); + + std::vector host(sx * sy * sz); + m.copyToHost(&host[0]); + int i = 0; + for (int z = 0; z mem(24); + m.copyToHost(&mem[0]); + int i = 0; + for (int z = 0; z mat1; + REQUIRE(mat1.dataPointer().getCounter() == 1); + + cuMat::Matrix mat2(mat1); + REQUIRE(mat1.dataPointer().getCounter() == 2); + + cuMat::Matrix mat3; + mat3 = mat1; + REQUIRE(mat1.dataPointer().getCounter() == 3); + + cuMat::Matrix mat4(mat3); + REQUIRE(mat1.dataPointer().getCounter() == 4); + REQUIRE(mat4.dataPointer().getCounter() == 4); + + REQUIRE(mat1.data() == mat2.data()); + REQUIRE(mat1.data() == mat3.data()); + REQUIRE(mat1.data() == mat4.data()); +} + +TEST_CASE("implicit-transpose", "[matrix]") +{ + //implicit transposition is only allowed for vectors + + cuMat::VectorXiR v1(5); + cuMat::VectorXiC v2a = v1; + cuMat::VectorXiC v2b(v1); + cuMat::VectorXiC v2c; v2c = v1; + + cuMat::RowVectorXiR v3(5); + cuMat::RowVectorXiC v4a = v3; + cuMat::RowVectorXiC v4b(v3); + cuMat::RowVectorXiC v4c; v4c = v3; + + cuMat::ScalariR v5; + cuMat::ScalariC v6a = v5; + cuMat::ScalariC v6b(v5); + cuMat::ScalariC v6c; v6c = v5; + + //This should not compile, how can I test that? 
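+    // One possible compile-time check for the lines below (a sketch, not part of cuMat):
+    // if the disallowed matrix-to-matrix conversion is removed via SFINAE rather than a
+    // static_assert inside the constructor, std::is_constructible from <type_traits>
+    // can state the requirement directly:
+    //
+    //   static_assert(!std::is_constructible<cuMat::MatrixXiC, cuMat::MatrixXiR>::value,
+    //                 "implicit transposition must be rejected for matrices");
+    //   static_assert(std::is_constructible<cuMat::VectorXiC, cuMat::VectorXiR>::value,
+    //                 "implicit transposition must be accepted for vectors");
+    //
+    // Otherwise the usual fallback is a separate, expected-to-fail compilation target.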
+ //cuMat::MatrixXiR v7(5, 4); + //cuMat::MatrixXiC v8a = v7; + //cuMat::MatrixXiC v8b(v7); + //cuMat::MatrixXiC v8c; v8c = v7; +} + +//evalTo is deprecated +/* +// Matrix direct eval +template +void testDirectEvalTo() +{ + typedef typename cuMat::Matrix MatR; + typedef typename cuMat::Matrix MatC; + T data[2][2][3] { + { + {1, 2, -1}, + {3, 4, -2} + }, + { + {5, 6, -3}, + {7, 8, -4} + } + }; + MatR matR = MatR::fromArray(data); + MatC matC = matR+0; + + MatR targetR(2, 3, 2); + MatC targetC(2, 3, 2); + + //memcpy + targetR.setZero(); + targetC.setZero(); + CUMAT_PROFILING_RESET(); + matR.template evalTo(targetR); + matC.template evalTo(targetC); + REQUIRE(CUMAT_PROFILING_GET(EvalAny)==0); + REQUIRE(CUMAT_PROFILING_GET(DeviceMemcpy)==2); + assertMatrixEquality(matR, targetR); + assertMatrixEquality(matC, targetC); + + //transpose + targetR.setZero(); + targetC.setZero(); + CUMAT_PROFILING_RESET(); + matR.template evalTo(targetC); + matC.template evalTo(targetR); + REQUIRE(CUMAT_PROFILING_GET(EvalAny)==2); + REQUIRE(CUMAT_PROFILING_GET(EvalTranspose) == (cuMat::internal::NumTraits::IsCudaNumeric ? 2 : 0)); + REQUIRE(CUMAT_PROFILING_GET(EvalCwise) == (cuMat::internal::NumTraits::IsCudaNumeric ? 0 : 2)); + REQUIRE(CUMAT_PROFILING_GET(DeviceMemcpy)==0); + assertMatrixEquality(matR, targetR); + assertMatrixEquality(matC, targetC); + + //cwise + targetR.setZero(); + targetC.setZero(); + CUMAT_PROFILING_RESET(); + auto block1 = targetC.block(0,0,0,2,3,2); + auto block2 = targetR.block(0,0,0,2,3,2); + matR.template evalTo(block1); + matC.template evalTo(block2); + REQUIRE(CUMAT_PROFILING_GET(EvalAny)==2); + REQUIRE(CUMAT_PROFILING_GET(EvalTranspose)==0); + REQUIRE(CUMAT_PROFILING_GET(EvalCwise)==2); + REQUIRE(CUMAT_PROFILING_GET(DeviceMemcpy)==0); + assertMatrixEquality(matR, targetR); + assertMatrixEquality(matC, targetC); +} +TEST_CASE("direct evalTo", "[matrix]") +{ + SECTION("int") { + testDirectEvalTo(); + } + SECTION("float") { + testDirectEvalTo(); + } + SECTION("double") { + testDirectEvalTo(); + } +} +*/ + +//Deep clone +template +void testDeepClone() +{ + typedef typename cuMat::Matrix MatR; + typedef typename cuMat::Matrix MatC; + T data[2][2][3] { + { + {1, 2, -1}, + {3, 4, -2} + }, + { + {5, 6, -3}, + {7, 8, -4} + } + }; + MatR matR = MatR::fromArray(data); + MatC matC = matR+0; + + MatR cloneR1 = matR.deepClone(); + MatR cloneR2 = matR.template deepClone(); + MatC cloneR3 = matR.template deepClone(); + REQUIRE(matR.data() != cloneR1.data()); + REQUIRE(matR.data() != cloneR2.data()); + REQUIRE(matR.data() != cloneR3.data()); + assertMatrixEquality(matR, cloneR1); + assertMatrixEquality(matR, cloneR2); + assertMatrixEquality(matR, cloneR3); + + MatC cloneC1 = matC.deepClone(); + MatC cloneC2 = matC.template deepClone(); + MatR cloneC3 = matC.template deepClone(); + REQUIRE(matC.data() != cloneC1.data()); + REQUIRE(matC.data() != cloneC2.data()); + REQUIRE(matC.data() != cloneC3.data()); + assertMatrixEquality(matC, cloneC1); + assertMatrixEquality(matC, cloneC2); + assertMatrixEquality(matC, cloneC3); +} + +TEST_CASE("deep clone", "[matrix]") +{ + SECTION("int") { + testDeepClone(); + } + SECTION("float") { + testDeepClone(); + } + SECTION("double") { + testDeepClone(); + } +} diff --git a/cuda_code/Test_8.cu b/cuda_code/Test_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..b7a6189b5b2714da8b319bb2ca73425b6ad4bece --- /dev/null +++ b/cuda_code/Test_8.cu @@ -0,0 +1,145 @@ +#include "utility/include/LogBook.h" +#include "utility/include/Exception.h" +#include 
"utility/include/GPU.h" +#include +#include + +//------------------------------------------------------------ +// This kernel is NOT optimized for memory bandwidth. +// The total computation tasks are distributed among persistent +// threads evenly and statically. +//------------------------------------------------------------ +__global__ void Kernel(double* a, double* b, double* c, size_t numElement, int threadPerGrid) +{ + size_t globalThreadIdx = blockDim.x * blockIdx.x + threadIdx.x; + if(globalThreadIdx < threadPerGrid) + { + size_t quotient = numElement / threadPerGrid; + size_t modulus = numElement % threadPerGrid; + size_t numTaskPerThread; + size_t taskOffset; + + if(globalThreadIdx < modulus) + { + numTaskPerThread = quotient + 1; + taskOffset = (quotient + 1) * globalThreadIdx; + } + else + { + numTaskPerThread = quotient; + taskOffset = (quotient + 1) * modulus + quotient * (globalThreadIdx - modulus); + } + + for(size_t i = 0; i < numTaskPerThread; ++i) + { + size_t idx = taskOffset + i; + c[idx] = a[idx] + b[idx]; + } + } +} + +//------------------------------------------------------------ +//------------------------------------------------------------ +class TestManager +{ +public: + void Run(); + +private: + int blockPerGrid; + int threadPerBlock; + int threadPerGrid; + + std::vector a; + std::vector b; + std::vector c; + + double* a_d = nullptr; + double* b_d = nullptr; + double* c_d = nullptr; +}; + +//------------------------------------------------------------ +//------------------------------------------------------------ +void TestManager::Run() +{ + auto& logBook = Peanut::LogBook::GetLogBook(); + + + size_t numElement = 1024 * 1024; + + a.resize(numElement, 0.0); + b.resize(numElement, 0.0); + c.resize(numElement, 0.0); + + for(size_t i = 0; i < numElement; ++i) + { + a[i] = 0.25; + b[i] = 0.75; + } + + Peanut::SetGPUByCudaIndex(0); + + Peanut::ImplementPersistentThread(Kernel, + blockPerGrid, + threadPerBlock); + threadPerGrid = blockPerGrid * threadPerBlock; + logBook << "--> blockPerGrid = " << blockPerGrid << "\n" + << " threadPerBlock = " << threadPerBlock << "\n" + << " threadPerGrid = " << threadPerGrid << "\n"; + + HANDLE_GPU_ERROR(cudaMalloc(&a_d, numElement * sizeof(double))); + HANDLE_GPU_ERROR(cudaMalloc(&b_d, numElement * sizeof(double))); + HANDLE_GPU_ERROR(cudaMalloc(&c_d, numElement * sizeof(double))); + + HANDLE_GPU_ERROR(cudaMemcpy(a_d, a.data(), numElement * sizeof(double), cudaMemcpyHostToDevice)); + HANDLE_GPU_ERROR(cudaMemcpy(b_d, b.data(), numElement * sizeof(double), cudaMemcpyHostToDevice)); + + Kernel<<>>(a_d, b_d, c_d, numElement, threadPerGrid); + HANDLE_GPU_ERROR(cudaDeviceSynchronize()); + + HANDLE_GPU_ERROR(cudaMemcpy(c.data(), c_d, numElement * sizeof(double), cudaMemcpyDeviceToHost)); + + HANDLE_GPU_ERROR(cudaFree(a_d)); + HANDLE_GPU_ERROR(cudaFree(b_d)); + HANDLE_GPU_ERROR(cudaFree(c_d)); + + double sum = 0.0; + for(auto&& item : c) + { + sum += item; + } + + logBook << "--> result = " << sum << std::endl; +} + +//------------------------------------------------------------ +//------------------------------------------------------------ +int main() +{ + try + { + auto& logBook = Peanut::LogBook::GetLogBook(); + + try + { + TestManager tm; + tm.Run(); + } + catch(Peanut::PeanutException& e) + { + logBook << e.what(); + } + catch(...) + { + logBook << "--> Unhandled exception." << std::endl; + } + } + catch(...) + { + std::cout << "--> Logbook exception." 
<< std::endl; + } + + return 0; +} + diff --git a/cuda_code/Thinning_1.cu b/cuda_code/Thinning_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..e54aa9e98da43afe297741da593b4078febd0945 --- /dev/null +++ b/cuda_code/Thinning_1.cu @@ -0,0 +1,1201 @@ +// Thinning.cu +// 实现二值图像的细化算法 + +#include "Thinning.h" +#include +#include +using namespace std; + +#define uchar unsigned char + +#define HIGH 255 +#define LOW 0 + +static __global__ void _thinPet1Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) +{ + // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 + // column,r 表示 row )。 + int dstc = blockIdx.x * blockDim.x + threadIdx.x; + int dstr = blockIdx.y * blockDim.y + threadIdx.y; + + // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, + // 另一方面防止由于段错误导致程序崩溃。 + if (dstc >= tempimg.imgMeta.width - 2 || + dstr >= tempimg.imgMeta.height - 2 || dstc < 2 || dstr < 2) + return; + + // 定义目标点位置的指针。 + unsigned char *outptr; + + // 获取当前像素点在图像中的相对位置。 + int curpos = dstr * tempimg.pitchBytes + dstc; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + // 如果目标像素点的像素值为低像素, 则不进行细化处理。 + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int A = (x2 ^ x3) + (x3 ^ x4) + (x4 ^ x5) + (x5 ^ x6) + + (x6 ^ x7) + (x7 ^ x8) + (x8 ^ x1) + (x1 ^ x2); + int B = x2 + x3 + x4 + x5 + x6 + x7 + x8 + x1; + int R = x1 && x7 && x8 && + ((!y5 && x2 && x3 && !x5) || (!y2 && !x3 && x5 && x6)); + if (A == 2 && B >= 2 && B <= 6 && R == 0) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } +} + +static __global__ void _thinPet2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) +{ + // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 + // column,r 表示 row )。 + int dstc = blockIdx.x * blockDim.x + threadIdx.x; + int dstr = blockIdx.y * blockDim.y + threadIdx.y; + + // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, + // 另一方面防止由于段错误导致程序崩溃。 + if (dstc >= tempimg.imgMeta.width - 2 || + dstr >= tempimg.imgMeta.height - 2 || dstc < 2 || dstr < 2) + return; + + // 定义目标点位置的指针。 + unsigned char *outptr; + + // 获取当前像素点在图像中的相对位置。 + int curpos = dstr * tempimg.pitchBytes + dstc; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + // 如果目标像素点的像素值为低像素, 则不进行细化处理。 + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int 
posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + // int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + // uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int S0 = (x3&&x7) || (x5&&x1); + int S1 = (x1 && !x6 && (!x4 || x3)) || (x3 && !x8 && (!x6 || x5)) || + (x7 && !x4 && (!x2 || x1)) || (x5 && !x2 && (!x8 || x7)); + int B = x2 + x3 + x4 + x5 + x6 + x7 + x8 + x1; + int R = (x3 && (x1&&!x8 || x5&&!x6)) || (x7 && (!x5&&!x8 || !x1&&!x6)); + if ((!S0 && S1) && R == 0 && B >= 3) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } +} + +// 直接并行化 +// 线程数,处理多少个点有多少线程数 +__host__ int Thinning::thinPet(Image *inimg, Image *outimg) +{ + // 局部变量,错误码。 + int errcode; + cudaError_t cudaerrcode; + + // 检查输入图像,输出图像是否为空。 + if (inimg == NULL || outimg == NULL) + return NULL_POINTER; + + // 声明所有中间变量并初始化为空。 + Image *tempimg = NULL; + int *devchangecount = NULL; + + // 记录细化点数的变量,位于 host 端。 + int changeCount; + + // 记录细化点数的变量,位于 device 端。并为其申请空间。 + cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 生成暂存图像。 + errcode = ImageBasicOp::newImage(&tempimg); + if (errcode != NO_ERROR) + return errcode; + errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, + inimg->height); + if (errcode != NO_ERROR) { + return errcode; + } + + // 将输入图像 inimg 完全拷贝到输出图像 outimg ,并将 outimg 拷贝到 + // device 端。 + errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 提取输出图像 + ImageCuda outsubimgCud; + errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 提取暂存图像 + ImageCuda tempsubimgCud; + errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 + dim3 gridsize, blocksize; + blocksize.x = DEF_BLOCK_X; + blocksize.y = DEF_BLOCK_Y; + gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; + gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; + + // 赋值为 1,以便开始第一次迭代。 + changeCount = 1; + + // 开始迭代,当不可再被细化,即记录细化点数的变量 changeCount 的值为 0 时, + // 停止迭代。 + while (changeCount > 0) { + // 将 host 端的变量赋值为 0 ,并将值拷贝到 device 端的 devchangecount。 + changeCount = 0; + cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), + cudaMemcpyHostToDevice); + if 
(cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // copy ouimg to tempimg + cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, + outimg->imgData, outsubimgCud.deviceId, + outsubimgCud.pitchBytes * outimg->height); + + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 调用核函数,开始第一步细化操作。 + _thinPet1Ker<<>>(tempsubimgCud, outsubimgCud, devchangecount); + if (cudaGetLastError() != cudaSuccess) { + // 核函数出错,结束迭代函数,释放申请的变量空间。 + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + // copy ouimg to tempimg + cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, + outimg->imgData, outsubimgCud.deviceId, + outsubimgCud.pitchBytes * outimg->height); + + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 调用核函数,开始第二步细化操作。 + _thinPet2Ker<<>>(tempsubimgCud, outsubimgCud, devchangecount); + if (cudaGetLastError() != cudaSuccess) { + // 核函数出错,结束迭代函数,释放申请的变量空间 。 + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + // 将位于 device 端的 devchangecount 拷贝到 host 端上的 changeCount + // 变量,进行迭代判断。 + cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int), + cudaMemcpyDeviceToHost); + if (cudaerrcode != cudaSuccess) { + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + } + // 细化结束后释放申请的变量空间。 + cudaFree(devchangecount); + ImageBasicOp::deleteImage(tempimg); + return NO_ERROR; +} + +static __global__ void _thinPetFour1Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) +{ + // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 + // column,r 表示 row )。 + int dstc = blockIdx.x * blockDim.x + threadIdx.x; + int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; + + // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, + // 另一方面防止由于段错误导致程序崩溃。 + if (dstc >= tempimg.imgMeta.width - 2 || + dstr >= tempimg.imgMeta.height - 2 || dstc < 2 || dstr < 2) + return; + + // 定义目标点位置的指针。 + unsigned char *outptr; + + // 获取当前像素点在图像中的相对位置。 + int curpos = dstr * tempimg.pitchBytes + dstc; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + // 如果目标像素点的像素值为低像素, 则不进行细化处理。 + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int A = (x2 ^ x3) + (x3 ^ x4) + (x4 ^ x5) + (x5 ^ x6) + + (x6 ^ x7) + (x7 ^ x8) + (x8 ^ x1) + (x1 ^ x2); + int B = x2 + x3 + x4 + x5 + x6 + x7 + x8 + x1; + int R = x1 && x7 && x8 && + ((!y5 && x2 && x3 && !x5) || (!y2 && !x3 && x5 
&& x6)); + if (A == 2 && B >= 2 && B <= 6 && R == 0) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } + + for (int i = 0; i < 3; ++i) { + if (++dstr >= tempimg.imgMeta.height - 2) + return ; + curpos += tempimg.pitchBytes; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int A = (x2 ^ x3) + (x3 ^ x4) + (x4 ^ x5) + (x5 ^ x6) + + (x6 ^ x7) + (x7 ^ x8) + (x8 ^ x1) + (x1 ^ x2); + int B = x2 + x3 + x4 + x5 + x6 + x7 + x8 + x1; + int R = x1 && x7 && x8 && + ((!y5 && x2 && x3 && !x5) || (!y2 && !x3 && x5 && x6)); + if (A == 2 && B >= 2 && B <= 6 && R == 0) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } + } +} + +static __global__ void _thinPetFour2Ker(ImageCuda tempimg, ImageCuda outimg, int *devchangecount) +{ + // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 + // column,r 表示 row )。 + int dstc = blockIdx.x * blockDim.x + threadIdx.x; + int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; + + // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, + // 另一方面防止由于段错误导致程序崩溃。 + if (dstc >= tempimg.imgMeta.width - 2 || + dstr >= tempimg.imgMeta.height - 2 || dstc < 2 || dstr < 2) + return; + + // 定义目标点位置的指针。 + unsigned char *outptr; + + // 获取当前像素点在图像中的相对位置。 + int curpos = dstr * tempimg.pitchBytes + dstc; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + // 如果目标像素点的像素值为低像素, 则不进行细化处理。 + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + // int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // 
uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + // uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int S0 = (x3&&x7) || (x5&&x1); + int S1 = (x1 && !x6 && (!x4 || x3)) || (x3 && !x8 && (!x6 || x5)) || + (x7 && !x4 && (!x2 || x1)) || (x5 && !x2 && (!x8 || x7)); + int B = x2 + x3 + x4 + x5 + x6 + x7 + x8 + x1; + int R = (x3 && (x1&&!x8 || x5&&!x6)) || (x7 && (!x5&&!x8 || !x1&&!x6)); + if ((!S0 && S1) && R == 0 && B >= 3) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } + + for (int i = 0; i < 3; ++i) { + if (++dstr >= tempimg.imgMeta.height - 2) + return ; + curpos += tempimg.pitchBytes; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + // int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + // uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int S0 = (x3&&x7) || (x5&&x1); + int S1 = (x1 && !x6 && (!x4 || x3)) || (x3 && !x8 && (!x6 || x5)) || + (x7 && !x4 && (!x2 || x1)) || (x5 && !x2 && (!x8 || x7)); + int B = x2 + x3 + x4 + x5 + x6 + x7 + x8 + x1; + int R = (x3 && (x1&&!x8 || x5&&!x6)) || (x7 && (!x5&&!x8 || !x1&&!x6)); + if ((!S0 && S1) && R == 0 && B >= 3) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } + } +} + +// 直接并行化 +// 线程数,处理多少个点有多少线程数 +__host__ int Thinning::thinPetFour(Image *inimg, Image *outimg) +{ + // 局部变量,错误码。 + int errcode; + cudaError_t cudaerrcode; + + // 检查输入图像,输出图像是否为空。 + if (inimg == NULL || outimg == NULL) + return NULL_POINTER; + + // 声明所有中间变量并初始化为空。 + Image *tempimg = NULL; + int *devchangecount = NULL; + + // 记录细化点数的变量,位于 host 端。 + int changeCount; + + // 记录细化点数的变量,位于 device 端。并为其申请空间。 + cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 生成暂存图像。 + errcode = ImageBasicOp::newImage(&tempimg); + if (errcode != NO_ERROR) + return errcode; + errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, + inimg->height); + if (errcode != NO_ERROR) { + return errcode; + } + + // 
将输入图像 inimg 完全拷贝到输出图像 outimg ,并将 outimg 拷贝到 + // device 端。 + errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 提取输出图像 + ImageCuda outsubimgCud; + errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 提取暂存图像 + ImageCuda tempsubimgCud; + errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 + dim3 gridsize, blocksize; + blocksize.x = DEF_BLOCK_X; + blocksize.y = DEF_BLOCK_Y; + gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; + gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) / blocksize.y * 4; + + // 赋值为 1,以便开始第一次迭代。 + changeCount = 1; + + // 开始迭代,当不可再被细化,即记录细化点数的变量 changeCount 的值为 0 时, + // 停止迭代。 + while (changeCount > 0) { + // 将 host 端的变量赋值为 0 ,并将值拷贝到 device 端的 devchangecount。 + changeCount = 0; + cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), + cudaMemcpyHostToDevice); + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // copy ouimg to tempimg + cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, + outimg->imgData, outsubimgCud.deviceId, + outsubimgCud.pitchBytes * outimg->height); + + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 调用核函数,开始第一步细化操作。 + _thinPetFour1Ker<<>>(tempsubimgCud, outsubimgCud, devchangecount); + if (cudaGetLastError() != cudaSuccess) { + // 核函数出错,结束迭代函数,释放申请的变量空间。 + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + // copy ouimg to tempimg + cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, + outimg->imgData, outsubimgCud.deviceId, + outsubimgCud.pitchBytes * outimg->height); + + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 调用核函数,开始第二步细化操作。 + _thinPetFour2Ker<<>>(tempsubimgCud, outsubimgCud, devchangecount); + if (cudaGetLastError() != cudaSuccess) { + // 核函数出错,结束迭代函数,释放申请的变量空间 。 + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + // 将位于 device 端的 devchangecount 拷贝到 host 端上的 changeCount + // 变量,进行迭代判断。 + cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int), + cudaMemcpyDeviceToHost); + if (cudaerrcode != cudaSuccess) { + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + } + // 细化结束后释放申请的变量空间。 + cudaFree(devchangecount); + ImageBasicOp::deleteImage(tempimg); + return NO_ERROR; +} + +static __global__ void _thinPetPt1Ker(ImageCuda tempimg, ImageCuda outimg, + int *devchangecount, uchar *dev_lut) +{ + // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 + // column,r 表示 row )。 + int dstc = blockIdx.x * blockDim.x + threadIdx.x; + int dstr = blockIdx.y * blockDim.y + threadIdx.y; + + // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, + // 另一方面防止由于段错误导致程序崩溃。 + if (dstc >= tempimg.imgMeta.width - 2 || + dstr >= tempimg.imgMeta.height - 2 || dstc < 2 || dstr < 2) + return; + + // 定义目标点位置的指针。 + unsigned char *outptr; + + // 获取当前像素点在图像中的相对位置。 + int curpos = dstr * tempimg.pitchBytes + dstc; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + // 如果目标像素点的像素值为低像素, 则不进行细化处理。 + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = 
tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int index = (x1)*1 + (x2)*2 + (x3)*4 + (x4)*8 + (x5)*16 + (x6)*32 + (x7)*64 + (x8)*128 + + (y2) * 256 + (y5) * 512; + + if (dev_lut[index]) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } +} + +static __global__ void _thinPetPt2Ker(ImageCuda tempimg, ImageCuda outimg, + int *devchangecount, uchar *dev_lut) +{ + // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 + // column,r 表示 row )。 + int dstc = blockIdx.x * blockDim.x + threadIdx.x; + int dstr = blockIdx.y * blockDim.y + threadIdx.y; + + // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, + // 另一方面防止由于段错误导致程序崩溃。 + if (dstc >= tempimg.imgMeta.width - 2 || + dstr >= tempimg.imgMeta.height - 2 || dstc < 2 || dstr < 2) + return; + + // 定义目标点位置的指针。 + unsigned char *outptr; + + // 获取当前像素点在图像中的相对位置。 + int curpos = dstr * tempimg.pitchBytes + dstc; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + // 如果目标像素点的像素值为低像素, 则不进行细化处理。 + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + // int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + // uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int index = (x1)*1 + (x2)*2 + (x3)*4 + (x4)*8 + (x5)*16 + (x6)*32 + (x7)*64 + (x8)*128; + + if (dev_lut[1024 + index]) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } +} + +// 直接并行化 +// 线程数,处理多少个点有多少线程数 +__host__ int Thinning::thinPetPt(Image *inimg, Image *outimg) +{ + // 局部变量,错误码。 + int errcode; + 
cudaError_t cudaerrcode; + + // 检查输入图像,输出图像是否为空。 + if (inimg == NULL || outimg == NULL) + return NULL_POINTER; + + uchar lut[1280] = + { + 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 + }; + + uchar *dev_lut; + cudaerrcode = cudaMalloc((void **)&dev_lut, sizeof (uchar) * 1280); + if (cudaerrcode != cudaSuccess) + return CUDA_ERROR; + + cudaerrcode = cudaMemcpy(dev_lut, lut, sizeof (uchar) * 1280, + cudaMemcpyHostToDevice); + if (cudaerrcode != cudaSuccess) + return CUDA_ERROR; + + // 声明所有中间变量并初始化为空。 + Image *tempimg = NULL; + int *devchangecount = NULL; + + // 记录细化点数的变量,位于 host 端。 + int changeCount; + + // 记录细化点数的变量,位于 device 端。并为其申请空间。 + cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 生成暂存图像。 + errcode = ImageBasicOp::newImage(&tempimg); + if (errcode != NO_ERROR) + return errcode; + errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, + inimg->height); + if (errcode != NO_ERROR) { + return errcode; + } + + // 将输入图像 inimg 完全拷贝到输出图像 outimg ,并将 outimg 拷贝到 + // device 端。 + errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 提取输出图像 + ImageCuda outsubimgCud; + errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 提取暂存图像 + ImageCuda tempsubimgCud; + errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 + dim3 gridsize, blocksize; + blocksize.x = DEF_BLOCK_X; + blocksize.y = DEF_BLOCK_Y; + gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; + gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y; + + // 赋值为 1,以便开始第一次迭代。 + changeCount = 1; + + // 开始迭代,当不可再被细化,即记录细化点数的变量 changeCount 的值为 0 时, + // 停止迭代。 + while (changeCount > 0) { + // 将 host 端的变量赋值为 0 ,并将值拷贝到 device 端的 devchangecount。 + changeCount = 0; + cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), + cudaMemcpyHostToDevice); + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // copy ouimg to tempimg + cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, + outimg->imgData, outsubimgCud.deviceId, + outsubimgCud.pitchBytes * outimg->height); + + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 调用核函数,开始第一步细化操作。 + _thinPetPt1Ker<<>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); + if (cudaGetLastError() != cudaSuccess) { + // 核函数出错,结束迭代函数,释放申请的变量空间。 + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + // copy ouimg to tempimg + cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, + outimg->imgData, outsubimgCud.deviceId, + outsubimgCud.pitchBytes * outimg->height); + + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 调用核函数,开始第二步细化操作。 + _thinPetPt2Ker<<>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); + if (cudaGetLastError() != cudaSuccess) { + // 核函数出错,结束迭代函数,释放申请的变量空间 。 + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + // 将位于 device 端的 devchangecount 拷贝到 host 端上的 changeCount + // 变量,进行迭代判断。 + cudaerrcode = cudaMemcpy(&changeCount, 
devchangecount, sizeof (int), + cudaMemcpyDeviceToHost); + if (cudaerrcode != cudaSuccess) { + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + } + // 细化结束后释放申请的变量空间。 + cudaFree(devchangecount); + ImageBasicOp::deleteImage(tempimg); + return NO_ERROR; +} + +static __global__ void _thinPetPtFour1Ker(ImageCuda tempimg, ImageCuda outimg, + int *devchangecount, uchar* dev_lut) +{ + // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 + // column,r 表示 row )。 + int dstc = blockIdx.x * blockDim.x + threadIdx.x; + int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; + + // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, + // 另一方面防止由于段错误导致程序崩溃。 + if (dstc >= tempimg.imgMeta.width - 2 || + dstr >= tempimg.imgMeta.height - 2 || dstc < 2 || dstr < 2) + return; + + // 定义目标点位置的指针。 + unsigned char *outptr; + + // 获取当前像素点在图像中的相对位置。 + int curpos = dstr * tempimg.pitchBytes + dstc; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + // 如果目标像素点的像素值为低像素, 则不进行细化处理。 + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int index = (x1)*1 + (x2)*2 + (x3)*4 + (x4)*8 + (x5)*16 + (x6)*32 + (x7)*64 + (x8)*128 + + (y2) * 256 + (y5) * 512; + + if (dev_lut[index]) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } + + for (int i = 0; i < 3; ++i) { + if (++dstr >= tempimg.imgMeta.height - 2) + return ; + curpos += tempimg.pitchBytes; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = 
tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int index = (x1)*1 + (x2)*2 + (x3)*4 + (x4)*8 + (x5)*16 + (x6)*32 + (x7)*64 + (x8)*128 + + (y2) * 256 + (y5) * 512; + + if (dev_lut[index]) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } + } +} + +static __global__ void _thinPetPtFour2Ker(ImageCuda tempimg, ImageCuda outimg, + int *devchangecount, uchar* dev_lut) +{ + // dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量 (其中,c 表示 + // column,r 表示 row )。 + int dstc = blockIdx.x * blockDim.x + threadIdx.x; + int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4; + + // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, + // 另一方面防止由于段错误导致程序崩溃。 + if (dstc >= tempimg.imgMeta.width - 2 || + dstr >= tempimg.imgMeta.height - 2 || dstc < 2 || dstr < 2) + return; + + // 定义目标点位置的指针。 + unsigned char *outptr; + + // 获取当前像素点在图像中的相对位置。 + int curpos = dstr * tempimg.pitchBytes + dstc; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + // 如果目标像素点的像素值为低像素, 则不进行细化处理。 + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + // int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + // uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int index = (x1)*1 + (x2)*2 + (x3)*4 + (x4)*8 + (x5)*16 + (x6)*32 + (x7)*64 + (x8)*128; + + if (dev_lut[1024 + index]) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } + + for (int i = 0; i < 3; ++i) { + if (++dstr >= tempimg.imgMeta.height - 2) + return ; + curpos += tempimg.pitchBytes; + + // 获取当前像素点在图像中的绝对位置。 + outptr = tempimg.imgMeta.imgData + curpos; + + if (*outptr != LOW) { + // 由于图像是线性存储的,所以在这里先获得 8 邻域里三列的列索引值, + // 防止下面细化处理时重复计算。 + int posColumn1 = (dstr - 1) * tempimg.pitchBytes; + int posColumn2 = posColumn1 + tempimg.pitchBytes; + int posColumn3 = posColumn2 + tempimg.pitchBytes; + // int posColumn4 = posColumn3 + tempimg.pitchBytes; + + uchar x1 = tempimg.imgMeta.imgData[posColumn2 + 1 + dstc] == HIGH; + uchar x2 = tempimg.imgMeta.imgData[posColumn1 + 1 + dstc] == HIGH; + uchar x3 = tempimg.imgMeta.imgData[posColumn1 + 
dstc] == HIGH; + uchar x4 = tempimg.imgMeta.imgData[posColumn1 - 1 + dstc] == HIGH; + uchar x5 = tempimg.imgMeta.imgData[posColumn2 - 1 + dstc] == HIGH; + uchar x6 = tempimg.imgMeta.imgData[posColumn3 - 1 + dstc] == HIGH; + uchar x7 = tempimg.imgMeta.imgData[posColumn3 + dstc] == HIGH; + uchar x8 = tempimg.imgMeta.imgData[posColumn3 + 1 + dstc] == HIGH; + // uchar y1 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y2 = tempimg.imgMeta.imgData[posColumn4 + dstc] == HIGH; + // uchar y3 = tempimg.imgMeta.imgData[posColumn4 + 1 + dstc] == HIGH; + // uchar y4 = tempimg.imgMeta.imgData[posColumn1 + 2 + dstc] == HIGH; + // uchar y5 = tempimg.imgMeta.imgData[posColumn2 + 2 + dstc] == HIGH; + // uchar y6 = tempimg.imgMeta.imgData[posColumn3 + 2 + dstc] == HIGH; + + int index = (x1)*1 + (x2)*2 + (x3)*4 + (x4)*8 + (x5)*16 + (x6)*32 + (x7)*64 + (x8)*128; + + if (dev_lut[1024 + index]) { + outimg.imgMeta.imgData[curpos] = LOW; + // 记录删除点数的 devchangecount 值加 1 。 + *devchangecount = 1; + } + } + } +} + +// 直接并行化 +// 线程数,处理多少个点有多少线程数 +__host__ int Thinning::thinPetPtFour(Image *inimg, Image *outimg) +{ + // 局部变量,错误码。 + int errcode; + cudaError_t cudaerrcode; + + // 检查输入图像,输出图像是否为空。 + if (inimg == NULL || outimg == NULL) + return NULL_POINTER; + + // 声明所有中间变量并初始化为空。 + Image *tempimg = NULL; + int *devchangecount = NULL; + + // 记录细化点数的变量,位于 host 端。 + int changeCount; + + // 记录细化点数的变量,位于 device 端。并为其申请空间。 + cudaerrcode = cudaMalloc((void **)&devchangecount, sizeof (int)); + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + uchar lut[1280] = + { + 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 + }; + + uchar *dev_lut; + cudaerrcode = cudaMalloc((void **)&dev_lut, sizeof (uchar) * 1280); + if (cudaerrcode != cudaSuccess) + return CUDA_ERROR; + + cudaerrcode = cudaMemcpy(dev_lut, lut, sizeof (uchar) * 1280, + cudaMemcpyHostToDevice); + if (cudaerrcode != cudaSuccess) + return CUDA_ERROR; + + // 生成暂存图像。 + errcode = ImageBasicOp::newImage(&tempimg); + if (errcode != NO_ERROR) + return errcode; + errcode = ImageBasicOp::makeAtCurrentDevice(tempimg, inimg->width, + inimg->height); + if (errcode != NO_ERROR) { + return errcode; + } + + // 将输入图像 inimg 完全拷贝到输出图像 outimg ,并将 outimg 拷贝到 + // device 端。 + errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 提取输出图像 + ImageCuda outsubimgCud; + errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 提取暂存图像 + ImageCuda tempsubimgCud; + errcode = ImageBasicOp::roiSubImage(tempimg, &tempsubimgCud); + if (errcode != NO_ERROR) { + // FAIL_THIN_IMAGE_FREE; + return errcode; + } + + // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 + dim3 gridsize, blocksize; + blocksize.x = DEF_BLOCK_X; + blocksize.y = DEF_BLOCK_Y; + gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; + gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y*4 - 1) / blocksize.y*4; + + // 赋值为 1,以便开始第一次迭代。 + changeCount = 1; + + // 开始迭代,当不可再被细化,即记录细化点数的变量 changeCount 的值为 0 时, + // 停止迭代。 + while (changeCount > 0) { + // 将 host 端的变量赋值为 0 ,并将值拷贝到 device 端的 devchangecount。 + changeCount = 0; + cudaerrcode = cudaMemcpy(devchangecount, &changeCount, sizeof (int), + 
cudaMemcpyHostToDevice); + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // copy ouimg to tempimg + cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, + outimg->imgData, outsubimgCud.deviceId, + outsubimgCud.pitchBytes * outimg->height); + + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 调用核函数,开始第一步细化操作。 + _thinPetPtFour1Ker<<>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); + if (cudaGetLastError() != cudaSuccess) { + // 核函数出错,结束迭代函数,释放申请的变量空间。 + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + // copy ouimg to tempimg + cudaerrcode = cudaMemcpyPeer(tempimg->imgData, tempsubimgCud.deviceId, + outimg->imgData, outsubimgCud.deviceId, + outsubimgCud.pitchBytes * outimg->height); + + if (cudaerrcode != cudaSuccess) { + return CUDA_ERROR; + } + + // 调用核函数,开始第二步细化操作。 + _thinPetPtFour2Ker<<>>(tempsubimgCud, outsubimgCud, devchangecount, dev_lut); + if (cudaGetLastError() != cudaSuccess) { + // 核函数出错,结束迭代函数,释放申请的变量空间 。 + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + // 将位于 device 端的 devchangecount 拷贝到 host 端上的 changeCount + // 变量,进行迭代判断。 + cudaerrcode = cudaMemcpy(&changeCount, devchangecount, sizeof (int), + cudaMemcpyDeviceToHost); + if (cudaerrcode != cudaSuccess) { + // FAIL_THIN_IMAGE_FREE; + return CUDA_ERROR; + } + + } + // 细化结束后释放申请的变量空间。 + cudaFree(devchangecount); + ImageBasicOp::deleteImage(tempimg); + return NO_ERROR; +} diff --git a/cuda_code/Tiempo.cu b/cuda_code/Tiempo.cu new file mode 100644 index 0000000000000000000000000000000000000000..04ed5fa5438c1081fe8fe9f7ed4076b63357712b --- /dev/null +++ b/cuda_code/Tiempo.cu @@ -0,0 +1,85 @@ +#include +#define DIM_MAX 768 + +// Creamos el kernel. Es interesante notar como un arreglo 2D en el host se vuelve 1D en el device. +__global__ void suma(int * d_arreglo){ + + int idx_x = blockIdx.x*blockDim.x + threadIdx.x ; + int idx_y = blockIdx.y*blockDim.y + threadIdx.y ; + + for (int t = 0; t < 1000; t ++) { + if (idx_x*blockDim.x + idx_y < 1536) { + d_arreglo[idx_x*blockDim.x + idx_y] += 1 ; + __syncthreads() ; + } + } + +} + +int main(int argc, char ** argv){ + + // Definimos los arreglos de entrada, salida y aquel que ira en el device + int arreglo[DIM_MAX][DIM_MAX] ; + int arreglo_salida[DIM_MAX][DIM_MAX] ; + int * d_arreglo ; + + // Colocamos las condiciones iniciales. Un arreglo lleno de 0's + for (int x = 0; x < DIM_MAX; x ++) { + for (int y = 0; y < DIM_MAX; y ++) { + arreglo[x][y] = 0 ; + } + } + + // Alojamos d_arreglo en el device + cudaMalloc((void**) &d_arreglo, DIM_MAX*DIM_MAX*sizeof(int)) ; + + // Aqui introducimos un tipo de variable nuevo. Los detalles no son importantes, pero seran los "Events" + // que nos permitiran medir el tiempo + cudaEvent_t start, stop ; + cudaEventCreate(&start) ; + cudaEventCreate(&stop) ; + + // Se copia la memoria en el device desde el host + cudaMemcpy(d_arreglo, arreglo, DIM_MAX*DIM_MAX*sizeof(int), cudaMemcpyHostToDevice) ; + + // Definimos la dimension de la malla y los bloques + int dimB = atoi(argv[1]) ; int dimG = atoi(argv[2]) ; + dim3 dimBlock(dimB, dimB, 1) ; + dim3 dimGrid(dimG, 1, 1) ; + + // Empezamos a medir el tiempo + cudaEventRecord(start) ; + + // Lanzamos el kernel + suma<<>>(d_arreglo) ; + + // Terminamos de medir el tiempo + cudaEventRecord(stop) ; + + // Copiamos de regreso + cudaMemcpy(arreglo_salida, d_arreglo, DIM_MAX*DIM_MAX*sizeof(int), cudaMemcpyDeviceToHost) ; + + // Y terminamos con los eventos. 
La sincronizacion permite que solo el device se desempeñe sin tener + // que preocuparse por el host + cudaEventSynchronize(stop); + + // calculamos el tiempo. Esto gracias a cudaEventElapsedTime, la cual es una API ya integrada en CUDA + float milliseconds = 0; + cudaEventElapsedTime(&milliseconds, start, stop); + + // Calculamos el resultado final + int resultado = 0 ; + for (int x = 0; x < DIM_MAX; x ++) { + for (int y = 0; y < DIM_MAX; y ++) { + resultado += arreglo_salida[x][y] ; + } + } + + // Imprimimos los resultados + printf("El resultado final es %d\n", resultado) ; + printf("Tiempo de ejecucion del kernel %f ms\n", milliseconds) ; + + cudaFree(d_arreglo) ; + + return 0 ; +} diff --git a/cuda_code/TorsionSolveOnDevice.cu b/cuda_code/TorsionSolveOnDevice.cu new file mode 100644 index 0000000000000000000000000000000000000000..99a434d4f9996d8e1414ff4445ea099dc5161700 --- /dev/null +++ b/cuda_code/TorsionSolveOnDevice.cu @@ -0,0 +1,125 @@ +/* +* TorsionSolveOnDevice.cu +* +* Created on 11/7/2017 +* Author: SRB +*/ + +#include "NodeSystemDevice.h" +#include "TorsionSolveOnDevice.h" + +void TorsionSolveOnDevice( + NodeInfoVecs& nodeInfoVecs, + TorsionInfoVecs& torsionInfoVecs, + GeneralParams& generalParams) { + +const double PI = 3.14159265358979323846; +if (generalParams.totalTorsionCount>0) { + + thrust::counting_iterator startTorsionIter(0); + thrust::counting_iterator endTorsionIter(generalParams.totalTorsionCount); + + //for_each guarrantees order. This is needed for iter count and saving to torsion force vectors. + //forces are filled using 3 counters left = counter, center = counter + totalTorsionCount etc. + //Thus, in the force vector, only the first 3*totalTorsionCount entries are filled. + thrust::for_each( + thrust::make_zip_iterator( + thrust::make_tuple( + startTorsionIter, + torsionInfoVecs.leftIndex.begin(), + torsionInfoVecs.centerIndex.begin(), + torsionInfoVecs.rightIndex.begin(), + torsionInfoVecs.angleZero.begin())), + thrust::make_zip_iterator( + thrust::make_tuple( + startTorsionIter, + torsionInfoVecs.leftIndex.begin(), + torsionInfoVecs.centerIndex.begin(), + torsionInfoVecs.rightIndex.begin(), + torsionInfoVecs.angleZero.begin())) + generalParams.totalTorsionCount, + TorsionFunctor( + thrust::raw_pointer_cast(nodeInfoVecs.nodeLocX.data()), + thrust::raw_pointer_cast(nodeInfoVecs.nodeLocY.data()), + thrust::raw_pointer_cast(nodeInfoVecs.nodeLocZ.data()), + thrust::raw_pointer_cast(torsionInfoVecs.forceX.data()), + thrust::raw_pointer_cast(torsionInfoVecs.forceY.data()), + thrust::raw_pointer_cast(torsionInfoVecs.forceZ.data()), + thrust::raw_pointer_cast(nodeInfoVecs.isNodeFixed.data()), + generalParams.torsionStiffness, + generalParams.maxNodeCount, + generalParams.totalTorsionCount, + PI)); + + cudaThreadSynchronize(); + //reduce by key to get forces.Notice leftIndex is 1/3rd the length of torsion.forceX + //this vector will be sorted each iteration, so it needs to be recopied unfortunately. 
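+ // The accumulation below is a sort-then-segmented-reduce: the left, center and
+ // right node indices of every torsion element are packed into one key array of
+ // length 3*totalTorsionCount, sorted together with the per-element force
+ // contributions, and reduce_by_key then sums every contribution that shares a
+ // node index before the result is scattered onto the per-node force vectors.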
+ //fill must end before non-set id's + thrust::copy(torsionInfoVecs.leftIndex.begin(), torsionInfoVecs.leftIndex.begin() + generalParams.totalTorsionCount, + torsionInfoVecs.tempTorIndices.begin()); + + thrust::copy(torsionInfoVecs.centerIndex.begin(), torsionInfoVecs.centerIndex.begin() + generalParams.totalTorsionCount, + torsionInfoVecs.tempTorIndices.begin() + generalParams.totalTorsionCount); + + thrust::copy(torsionInfoVecs.rightIndex.begin(), torsionInfoVecs.rightIndex.begin() + generalParams.totalTorsionCount, + torsionInfoVecs.tempTorIndices.begin() + 2 * generalParams.totalTorsionCount); + + + //key, then value. Each vector returns sorted + thrust::sort_by_key(torsionInfoVecs.tempTorIndices.begin(), torsionInfoVecs.tempTorIndices.begin() + 3 * generalParams.totalTorsionCount, + thrust::make_zip_iterator( + thrust::make_tuple( + torsionInfoVecs.forceX.begin(), + torsionInfoVecs.forceY.begin(), + torsionInfoVecs.forceZ.begin())), thrust::less()); + + + thrust::fill(torsionInfoVecs.tempForceX.begin(), torsionInfoVecs.tempForceX.end(), 0); + thrust::fill(torsionInfoVecs.tempForceY.begin(), torsionInfoVecs.tempForceY.end(), 0); + thrust::fill(torsionInfoVecs.tempForceZ.begin(), torsionInfoVecs.tempForceZ.end(), 0); + thrust::fill(torsionInfoVecs.reducedIds.begin(), torsionInfoVecs.reducedIds.end(), 0); + + unsigned endKey = thrust::get<0>( + thrust::reduce_by_key( + torsionInfoVecs.tempTorIndices.begin(), + torsionInfoVecs.tempTorIndices.begin() + 3*generalParams.totalTorsionCount, + thrust::make_zip_iterator( + thrust::make_tuple( + torsionInfoVecs.forceX.begin(), + torsionInfoVecs.forceY.begin(), + torsionInfoVecs.forceZ.begin())), + torsionInfoVecs.reducedIds.begin(), + thrust::make_zip_iterator( + thrust::make_tuple( + torsionInfoVecs.tempForceX.begin(), + torsionInfoVecs.tempForceY.begin(), + torsionInfoVecs.tempForceZ.begin())), + thrust::equal_to(), CVec3Add())) - torsionInfoVecs.reducedIds.begin();//binary_pred, binary_op + + cudaThreadSynchronize(); + + //std::cout<<"endkey: "<< endKey << std::endl; + //std::cout<<"totalTorsion: "<< generalParams.totalTorsionCount << std::endl; + + thrust::for_each( + thrust::make_zip_iterator(//1st begin + thrust::make_tuple( + torsionInfoVecs.reducedIds.begin(), + torsionInfoVecs.tempForceX.begin(), + torsionInfoVecs.tempForceY.begin(), + torsionInfoVecs.tempForceZ.begin())), + thrust::make_zip_iterator(//1st end + thrust::make_tuple( + torsionInfoVecs.reducedIds.begin(), + torsionInfoVecs.tempForceX.begin(), + torsionInfoVecs.tempForceY.begin(), + torsionInfoVecs.tempForceZ.begin())) + endKey, + AddTorsionForceFunctor( + generalParams.maxNodeCount, + thrust::raw_pointer_cast(nodeInfoVecs.nodeForceX.data()), + thrust::raw_pointer_cast(nodeInfoVecs.nodeForceY.data()), + thrust::raw_pointer_cast(nodeInfoVecs.nodeForceZ.data()))); + + } + + +} \ No newline at end of file diff --git a/cuda_code/UTMatrixOperation.cu b/cuda_code/UTMatrixOperation.cu new file mode 100644 index 0000000000000000000000000000000000000000..36cb39ea7048286a58ea68238e16321cef80be03 --- /dev/null +++ b/cuda_code/UTMatrixOperation.cu @@ -0,0 +1,138 @@ +/* + This is an upper-triangularization operation on a 'nearly upper triangular' matrix. + In order to place rME and rMI in registers for optimal performance, the number of + non-zero values below the subdiagonal must be known at compile time. The only + known way to do this is templating the function. See line 31. + + This is a simplied version that just solves a single matrix of ngr*ngr size. 
+ The kernel should be launched with # of threads = ngr. +*/ + template +__global__ void kernelDecomposeRegister ( int ngr, + const int * __restrict__ ODL, // array size = ngr + double * __restrict__ MatrixArrayRM, // RM = in row-major order + double *rhs + ) + +{ + extern __shared__ double sMEM[]; + + // require 19 * ngr SMEM storage (19 kB for ngr = 128) + // ... one for current row + // ... one for column[row] of descendants + // ... one for nAL for each row + + double * __restrict__ sNormal = sMEM; + double * __restrict__ sCurrentRow = sMEM+1; + double * __restrict__ sCurrentColumn = sCurrentRow + ngr; // this really only needs to be size 16 (blockDim.x / ngr) + double * __restrict__ sRhs = sCurrentColumn + ngr; + int * __restrict__ sNAL = (int *) (sRhs + ngr); + + // require a compile-time known number of descendants to be placed in registers + double rME[NSUBDIAG]; // [r]egister [M]atrix [E]lement + unsigned int rMI[NSUBDIAG]; // [r]egister [M]atrix [I]ndex + + unsigned int tid = threadIdx.x; + unsigned int tidongr = tid/ngr; + unsigned int tidmngr = tid%ngr; + + // load first blockDim.x/ngr lines of dense matrix // this assumes ngr >= 16 // is this comment correct?? Seems out of date. + for ( int ireg=0; ireg= irow ) { + rME[ireg] -= sCurrentColumn[rMI[ireg]] * sCurrentRow[tidmngr]; + } + // update RHS + if ( tidmngr == 0 ) { + sRhs[rMI[ireg]] -= sCurrentColumn[rMI[ireg]] * sRhs[irow]; + } + } + + if ( rMI[ireg] == irow+1 && tidmngr == irow+1 ) { // if this will be the next eliminated (i.e. next iteration this will be the diagonal row) + *sNormal = 1.0/rME[ireg]; // then cache diagonal value so the row can be normalized + } + + } + __syncthreads(); + + for ( int ireg=0; ireg < NSUBDIAG; ireg++ ) + { + if ( rMI[ireg] == irow+1 ) { // if this thread has row irow+1 cached (i.e. the next row to eliminate) + // write out + if ( tidmngr > irow ) { // if the column is in the upper triangular + // normalize + rME[ireg] *= *sNormal; // normalize + sCurrentRow[tidmngr] = rME[ireg]; // write to shared + MatrixArrayRM[(tidmngr)+(irow+1)*ngr] = rME[ireg]; // also write to GMEM (as it is the fully upper-triangularized result) + } + if ( tidmngr == 0 ) { // use the zeroth thread in the row to normalize the RHS + sRhs[rMI[ireg]] *= *sNormal; + } + } + + if ( tidmngr == irow+1 ) { + if (rMI[ireg] < ngr) { + sCurrentColumn[rMI[ireg]] = rME[ireg]; + } + } + + } + + // syncthreads ------------------------------------------------ + __syncthreads(); + + } + + // output updated rhs + if ( tid < ngr ) { + rhs[tid] = sRhs[tid]; + } +} diff --git a/cuda_code/UnaryComplexKernels_6.cu b/cuda_code/UnaryComplexKernels_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..fcedb34dd7b33c41906a52c79f1d7164663e46d0 --- /dev/null +++ b/cuda_code/UnaryComplexKernels_6.cu @@ -0,0 +1,118 @@ +#define TORCH_ASSERT_NO_OPERATORS +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +// We manually overload angle because std::arg does not work with types other than c10::complex. +template +__host__ __device__ static inline scalar_t angle_wrapper(scalar_t v) { + if (at::_isnan(v)){ + return v; + } + return v < 0 ? 
M_PI : 0; +} + +template +__host__ __device__ static inline c10::complex angle_wrapper(c10::complex v) { + return std::arg(v); +} + +void angle_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.common_dtype(), "angle_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return angle_wrapper(a); + }); + }); +} + +// We manually overload real because std::real does not work types other than c10::complex. +template +__host__ __device__ static inline scalar_t real_wrapper(scalar_t v) { + return v; +} + +template +__host__ __device__ static inline c10::complex real_wrapper(c10::complex v) { + return v.real(); +} + +void real_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "real_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return real_wrapper(a); + }); + }); +} + +// We manually overload imag because std::imag does not work types other than c10::complex. +template +__host__ __device__ static inline scalar_t imag_wrapper(scalar_t v) { + return 0; +} + +template +__host__ __device__ static inline c10::complex imag_wrapper(c10::complex v) { + return v.imag(); +} + +void imag_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_ALL_TYPES_AND_COMPLEX(iter.dtype(), "imag_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return imag_wrapper(a); + }); + }); +} + +// We manually overload conj because std::conj does not work types other than c10::complex. +template +__host__ __device__ static inline scalar_t conj_wrapper(scalar_t v) { + return v; +} + +template +__host__ __device__ static inline c10::complex conj_wrapper(c10::complex v) { + return std::conj(v); +} + +// NB: Ignores the negative bit on tensors +const char conj_name[] = "conj_kernel"; +void conj_kernel_cuda(TensorIteratorBase& iter) { + auto common_dtype = iter.common_dtype(); + if (common_dtype == kComplexHalf) { + using scalar_t = c10::complex; + #if AT_USE_JITERATOR() + static const auto conj_string = jiterator_stringify( + template + T conj_kernel(T z) { + return std::conj(z); + } + ); + jitted_gpu_kernel(iter, conj_string); + #else + gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { + return conj_wrapper(a); + }); + #endif + } else { + AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( + kBool, kBFloat16, kHalf, iter.common_dtype(), "conj_cuda", [&]() { + gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { + return conj_wrapper(a); + }); + }); + } +} + +REGISTER_DISPATCH(angle_stub, &angle_kernel_cuda); +REGISTER_DISPATCH(real_stub, &real_kernel_cuda); +REGISTER_DISPATCH(imag_stub, &imag_kernel_cuda); +REGISTER_DISPATCH(conj_physical_stub, &conj_kernel_cuda); + +}} // namespace at::native diff --git a/cuda_code/UnaryGammaKernels_4.cu b/cuda_code/UnaryGammaKernels_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..78d80a4a98276ff761e36a4851abd23e0330dae5 --- /dev/null +++ b/cuda_code/UnaryGammaKernels_4.cu @@ -0,0 +1,49 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +void digamma_kernel_cuda(TensorIterator& iter) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "digamma_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return calc_digamma(a); + }); + }); +} + +void trigamma_kernel_cuda(TensorIterator& iter) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "trigamma_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + 
return calc_trigamma(a); + }); + }); +} + +void polygamma_kernel_cuda(TensorIterator& iter, int64_t n) { + switch (n) { + case 0: digamma_kernel_cuda(iter); break; + case 1: trigamma_kernel_cuda(iter); break; + default: TORCH_CHECK(false, "polygamma(n,x) is not implemented for n>=2, but was ", n); + } +} + +void lgamma_kernel_cuda(TensorIterator& iter) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "lgamma_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return ::lgamma(a); + }); + }); +} + +REGISTER_DISPATCH(digamma_stub, &digamma_kernel_cuda); +REGISTER_DISPATCH(polygamma_stub, &polygamma_kernel_cuda); +REGISTER_DISPATCH(lgamma_stub, &lgamma_kernel_cuda); + +}} // namespace at::native diff --git a/cuda_code/UnaryGammaKernels_6.cu b/cuda_code/UnaryGammaKernels_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..2f0341007e2fb73408ca3bb69c2a6a6ce3e91a1c --- /dev/null +++ b/cuda_code/UnaryGammaKernels_6.cu @@ -0,0 +1,55 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { + +void digamma_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "digamma_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return calc_digamma(a); + }); + }); +} + +void trigamma_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "trigamma_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return calc_trigamma(a); + }); + }); +} + +void polygamma_kernel_cuda(TensorIteratorBase& iter, int64_t n) { + if (n == 0) { + digamma_kernel_cuda(iter); + } else if (n == 1) { + trigamma_kernel_cuda(iter); + } else { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "polygamma_cuda", [&]() { + gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t { + return calc_polygamma(int(n), a); + }); + }); + } +} + +void lgamma_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "lgamma_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return ::lgamma(a); + }); + }); +} + +REGISTER_DISPATCH(digamma_stub, &digamma_kernel_cuda); +REGISTER_DISPATCH(polygamma_stub, &polygamma_kernel_cuda); +REGISTER_DISPATCH(lgamma_stub, &lgamma_kernel_cuda); + +}} // namespace at::native diff --git a/cuda_code/UnarySpecialOpsKernel_7.cu b/cuda_code/UnarySpecialOpsKernel_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..0cff8649f42ba666e42f28f61012ff7ffd5e0830 --- /dev/null +++ b/cuda_code/UnarySpecialOpsKernel_7.cu @@ -0,0 +1,201 @@ +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { + +void exp2_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2( + ScalarType::Half, ScalarType::BFloat16, + iter.common_dtype(), "exp2_cuda", + [&]() { + gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { + return ::exp2(a); + }); + }); +} + +void i0_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return calc_i0(a); + }); + }); +} + +void i0e_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() 
{ + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return calc_i0e(a); + }); + }); +} + +void i1_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return calc_i1(a); + }); + }); +} + +void i1e_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "i1e_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return calc_i1e(a); + }); + }); +} + +void sigmoid_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "sigmoid_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return static_cast(1) / (static_cast(1) + std::exp(-a)); + }); + }); +} + +void sinc_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2( + ScalarType::Half, ScalarType::BFloat16, + iter.common_dtype(), "sinc_cuda", + [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + if (a == scalar_t(0)) { + return scalar_t(1); + } else { + // NVCC says constexpr var is not accessible from device + scalar_t product = c10::detail::pi() * a; + return std::sin(product) / product; + } + }); + }); +} + +void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + iter.common_dtype(), + "logit_cuda", + [&]() { + using T_ACC = acc_type; + const T_ACC eps = eps_scalar.to(); + if (eps < T_ACC(0)) { + gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t { + const T_ACC x_acc = static_cast(x); + return c10::cuda::compat::log(x_acc / (T_ACC(1) - x_acc)); + }); + } else { + const T_ACC lo = eps; + const T_ACC hi = T_ACC(1) - eps; + gpu_kernel( + iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t { + const T_ACC x_acc = static_cast(x); + T_ACC z = x_acc < lo ? lo : (x_acc > hi ? 
hi : x_acc); + return c10::cuda::compat::log(z / (T_ACC(1) - z)); + }); + } + }); +} + +void ndtri_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "ndtri_cuda", [&]() { + gpu_kernel( + iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return calc_ndtri(a); }); + }); +} + +void erf_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return ::erf(a); + }); + }); +} + +void erfc_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2( + ScalarType::Half, ScalarType::BFloat16, + iter.common_dtype(), "erfc_cuda", + [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return ::erfc(a); + }); + }); +} + +void erfinv_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() { + gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t { + return ::erfinv(a); + }); + }); +} + +void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length, double beta_){ + AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){ + using T_ACC = acc_type; + const T_ACC inv_alpha = static_cast(2.0 / (window_length - 1)); + const T_ACC beta = static_cast(beta_); + const T_ACC inv_i0_beta = 1.0 / calc_i0(beta); + gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t { + T_ACC x = static_cast(a) * inv_alpha - 1; + T_ACC y = std::max(0, 1 - x * x); + return calc_i0(beta * ::sqrt(y)) * inv_i0_beta; + }); + }); +} + +void entr_kernel_cuda(TensorIteratorBase& iter) { + AT_DISPATCH_FLOATING_TYPES_AND2( + ScalarType::Half, + ScalarType::BFloat16, + iter.common_dtype(), + "entr_cuda", + [&]() { + gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t x) -> scalar_t { + if (at::_isnan(x)) { + return x; + } else if (x > 0) { + return -x * std::log(x); + } else if (x == 0) { + return 0; + } + return static_cast(-INFINITY); + }); + }); +} + +REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda); +REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda); +REGISTER_DISPATCH(special_i0e_stub, &i0e_kernel_cuda); +REGISTER_DISPATCH(special_i1_stub, &i1_kernel_cuda); +REGISTER_DISPATCH(special_i1e_stub, &i1e_kernel_cuda); +REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda); +REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda); +REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda); +REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda); +REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda); +REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda); +REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda); +REGISTER_DISPATCH(special_entr_stub, &entr_kernel_cuda); +REGISTER_DISPATCH(special_ndtri_stub, &ndtri_kernel_cuda); + +} // namespace native +} // namespace at diff --git a/cuda_code/Util_12.cu b/cuda_code/Util_12.cu new file mode 100644 index 0000000000000000000000000000000000000000..500e197d2ae6e0634fad08232fff143c0f31cfcd --- /dev/null +++ b/cuda_code/Util_12.cu @@ -0,0 +1,92 @@ +#include "Util.h" +#include + +int64_t gCudaAllocatedBytes = 0; +int64_t gCudaMaxBytes = 0; + +size_t +NextPowerOf2(size_t v) +{ + size_t result = 1; + if (INT_MAX+1U < v) { + printf("Error: NextPowerOf2(%d) too large!\n", v); + return 0; + } + while (result < v) { + result *= 2; + } + return result; +} + +bool +ReadOneLine(FILE *fp, char *line_return, size_t lineBytes) +{ + line_return[0] = 0; + int c; + int pos = 0; + + do { + c = 
fgetc(fp); + if (c == EOF || c == '\n') { + break; + } + + if (c != '\r') { + line_return[pos] = (char)c; + line_return[pos+1] = 0; + ++pos; + } + } while (c != EOF && pos < (int)lineBytes -1); + + return c != EOF; +} + +void +GetBestBlockThreadSize(int count, dim3 &threads_return, dim3 &blocks_return) +{ + if ((count / WW_NUM_THREADS_PER_BLOCK) <= 1) { + threads_return.x = count; + } else { + threads_return.x = WW_NUM_THREADS_PER_BLOCK; + threads_return.y = 1; + threads_return.z = 1; + int countRemain = count / WW_NUM_THREADS_PER_BLOCK; + if ((countRemain / WW_BLOCK_X) <= 1) { + blocks_return.x = countRemain; + blocks_return.y = 1; + blocks_return.z = 1; + } else { + blocks_return.x = WW_BLOCK_X; + countRemain /= WW_BLOCK_X; + blocks_return.y = countRemain; + blocks_return.z = 1; + } + } +} + + + + +const char * +CudaFftGetErrorString(cufftResult error) +{ + switch (error) { + case CUFFT_SUCCESS: return "CUFFT_SUCCESS"; + case CUFFT_INVALID_PLAN: return "CUFFT_INVALID_PLAN"; + case CUFFT_ALLOC_FAILED: return "CUFFT_ALLOC_FAILED"; + case CUFFT_INVALID_TYPE: return "CUFFT_INVALID_TYPE"; + case CUFFT_INVALID_VALUE: return "CUFFT_INVALID_VALUE"; + + case CUFFT_INTERNAL_ERROR: return "CUFFT_INTERNAL_ERROR"; + case CUFFT_EXEC_FAILED: return "CUFFT_EXEC_FAILED"; + case CUFFT_SETUP_FAILED: return "CUFFT_SETUP_FAILED"; + case CUFFT_INVALID_SIZE: return "CUFFT_INVALID_SIZE"; + case CUFFT_UNALIGNED_DATA: return "CUFFT_UNALIGNED_DATA"; + + case CUFFT_INCOMPLETE_PARAMETER_LIST: return "CUFFT_INCOMPLETE_PARAMETER_LIST"; + case CUFFT_INVALID_DEVICE: return "CUFFT_INVALID_DEVICE"; + case CUFFT_PARSE_ERROR: return "CUFFT_PARSE_ERROR"; + case CUFFT_NO_WORKSPACE: return "CUFFT_NO_WORKSPACE"; + default: return "unknown"; + } +} \ No newline at end of file diff --git a/cuda_code/VectorMultiply2.cu b/cuda_code/VectorMultiply2.cu new file mode 100644 index 0000000000000000000000000000000000000000..03cee23aa271c3e048f5bc5001fa08b27e56167a --- /dev/null +++ b/cuda_code/VectorMultiply2.cu @@ -0,0 +1,117 @@ +#include +#include +#include +/** +* Element-wise Vector Multiplication: C[i] = A[i] * B[i]. +* This sample is a very basic sample that implements element by element vector multiplication. +*/ +// For the CUDA runtime routines (prefixed with "cuda_") +#include +#include "device_launch_parameters.h" +/** +* CUDA Kernel Device code +* Computes the element-wise vector multiplication of A and B into C. The 3 vectors have the same number of +elements numElements. 
+*/ +__global__ void vectorMultiply(float *A, float *B, float *C, int numElements) +{ + int size = numElements * sizeof(float); + float *d_A, *d_B, *d_C; + int i = threadIdx.x + 2* blockDim.x*blockIdx.x; + if (i < numElements) C[i] = A[i] * B[i]; + if (i < numElements + blockDim.x) C[i + blockDim.x] = A[i + blockDim.x] + B[i + blockDim.x]; +} +//Host main routine +int main(void) +{ + // Error code to check return values for CUDA calls + cudaError_t err = cudaSuccess; + // Print the vector length to be used, and compute its size + float EPS = 0.00001; + int numElements = 50000; + size_t size = numElements * sizeof(float); + printf("[Vector multiplication of %d elements]\n", numElements); + // Allocate the host input vector A + float *h_A = (float *)malloc(size); + // Allocate the host input vector B + float *h_B = (float *)malloc(size); + // Allocate the host output vector C + float *h_C = (float *)malloc(size); + // Verify that allocations succeeded + if (h_A == NULL || h_B == NULL || h_C == NULL) + { + fprintf(stderr, "Failed to allocate host vectors!\n"); + exit(EXIT_FAILURE); + } + // Initialize the host input vectors + for (int i = 0; i < numElements; i++) + { + *(h_A + i) = (float)i; + //printf("h_A = %f\n", h_A[i]); + } + for (int i = 0; i < numElements; i++) + *(h_B + i) = (1 / (EPS + i)); + // Allocate the device input vector A + float *d_A = NULL; + err = cudaMalloc((void **)&d_A, size); + if (err != cudaSuccess) + { + fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + // Allocate the device input vector B + float *d_B = NULL; + err = cudaMalloc((void **)&d_B, size); + if (err != cudaSuccess) + { + fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + // Allocate the device output vector C + float *d_C = NULL; + err = cudaMalloc((void **)&d_C, size); + if (err != cudaSuccess) + { + fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + // Copy the host input vectors A and B in host memory to the device input vectors in device memory + printf("Copy input data from the host memory to the CUDA device\n"); + cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); + cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); + // Launch the VectorMultiply CUDA Kernel + int threadsPerBlock = 256; + + int blocksPerGrid = ceil(numElements / (float) threadsPerBlock); + vectorMultiply <<< blocksPerGrid, threadsPerBlock >>>(d_A, d_B, d_C, numElements); + + printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); + err = cudaGetLastError(); + if (err != cudaSuccess) + { + fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + // Copy the device result vector in device memory to the host result vector + // in host memory. 
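+ // Note: no explicit cudaDeviceSynchronize() is needed before reading h_C here;
+ // cudaMemcpy on the default stream blocks the host and only runs after the
+ // preceding kernel launch has completed.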
+ printf("Copy output data from the CUDA device to the host memory\n"); + cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); + // Verify that the result vector is correct + for (int i = 0; i < numElements; ++i) + { + if (fabs((h_A[i] * h_B[i]) - h_C[i]) > 1e-5) + { + fprintf(stderr, "Result verification failed at element %d!\n", i); + exit(EXIT_FAILURE); + } + } + printf("Test PASSED\n"); + // Free device global memory + cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); + // Free host memory + free(h_A); + free(h_B); + free(h_C); + printf("Done\n"); + return 0; +} \ No newline at end of file diff --git a/cuda_code/VectorResidual_4.cu b/cuda_code/VectorResidual_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..bd7a07bc3f3717a9f61ac7d6ccf782af97c87575 --- /dev/null +++ b/cuda_code/VectorResidual_4.cu @@ -0,0 +1,144 @@ +/** + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include // in CUDA SDK, for CUDART_NAN_F +#include +namespace faiss { namespace gpu { + +template +__global__ void calcResidual(Tensor vecs, + Tensor centroids, + Tensor vecToCentroid, + Tensor residuals) { + auto vec = vecs[blockIdx.x]; + auto residual = residuals[blockIdx.x]; + + int centroidId = vecToCentroid[blockIdx.x]; + // Vector could be invalid (containing NaNs), so -1 was the + // classified centroid + if (centroidId == -1) { + if (LargeDim) { + for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { + residual[i] = CUDART_NAN_F; + } + } else { + residual[threadIdx.x] = CUDART_NAN_F; + } + + return; + } + + auto centroid = centroids[centroidId]; + + if (LargeDim) { + for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { + residual[i] = vec[i] - ConvertTo::to(centroid[i]); + } + } else { + residual[threadIdx.x] = vec[threadIdx.x] - + ConvertTo::to(centroid[threadIdx.x]); + } +} + +template +__global__ void gatherReconstruct(Tensor listIds, + Tensor vecs, + Tensor out) { + auto id = listIds[blockIdx.x]; + auto vec = vecs[id]; + auto outVec = out[blockIdx.x]; + + Convert conv; + + for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { + outVec[i] = id == -1 ? 
0.0f : conv(vec[i]); + } +} + +template +void calcResidual(Tensor& vecs, + Tensor& centroids, + Tensor& vecToCentroid, + Tensor& residuals, + cudaStream_t stream) { + FAISS_ASSERT(vecs.getSize(1) == centroids.getSize(1)); + FAISS_ASSERT(vecs.getSize(1) == residuals.getSize(1)); + FAISS_ASSERT(vecs.getSize(0) == vecToCentroid.getSize(0)); + FAISS_ASSERT(vecs.getSize(0) == residuals.getSize(0)); + + dim3 grid(vecs.getSize(0)); + + int maxThreads = getMaxThreadsCurrentDevice(); + bool largeDim = vecs.getSize(1) > maxThreads; + dim3 block(std::min(vecs.getSize(1), maxThreads)); + + if (largeDim) { + calcResidual<<>>( + vecs, centroids, vecToCentroid, residuals); + } else { + calcResidual<<>>( + vecs, centroids, vecToCentroid, residuals); + } + + CUDA_TEST_ERROR(); +} + +template +void gatherReconstruct(Tensor& listIds, + Tensor& vecs, + Tensor& out, + cudaStream_t stream) { + FAISS_ASSERT(listIds.getSize(0) == out.getSize(0)); + FAISS_ASSERT(vecs.getSize(1) == out.getSize(1)); + + dim3 grid(listIds.getSize(0)); + + int maxThreads = getMaxThreadsCurrentDevice(); + dim3 block(std::min(vecs.getSize(1), maxThreads)); + + gatherReconstruct<<>>(listIds, vecs, out); + + CUDA_TEST_ERROR(); +} + +void runCalcResidual(Tensor& vecs, + Tensor& centroids, + Tensor& vecToCentroid, + Tensor& residuals, + cudaStream_t stream) { + calcResidual(vecs, centroids, vecToCentroid, residuals, stream); +} + +void runCalcResidual(Tensor& vecs, + Tensor& centroids, + Tensor& vecToCentroid, + Tensor& residuals, + cudaStream_t stream) { + calcResidual(vecs, centroids, vecToCentroid, residuals, stream); +} + +void runReconstruct(Tensor& listIds, + Tensor& vecs, + Tensor& out, + cudaStream_t stream) { + gatherReconstruct(listIds, vecs, out, stream); +} + +void runReconstruct(Tensor& listIds, + Tensor& vecs, + Tensor& out, + cudaStream_t stream) { + gatherReconstruct(listIds, vecs, out, stream); +} + +} } // namespace diff --git a/cuda_code/VolumeMeshRenderer-sort_1.cu b/cuda_code/VolumeMeshRenderer-sort_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..89bddcacf06aec064a2a25cb0c1f537b331d8208 --- /dev/null +++ b/cuda_code/VolumeMeshRenderer-sort_1.cu @@ -0,0 +1,169 @@ +/* +* VolumeMeshRenderer-sort.cu +* +* Copyright (C) 2012 by Universitaet Stuttgart (VIS). +* Alle Rechte vorbehalten. +*/ +#ifndef MEGAMOLPROTEIN_VOLUMEMESHRENDERER_SORT_CU_INCLUDED +#define MEGAMOLPROTEIN_VOLUMEMESHRENDERER_SORT_CU_INCLUDED + +#include "VolumeMeshRenderer.cuh" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "helper_math.h" + +/* + * Note: + * This is a VERY slow compiling piece of code (because of thrust::sort). + * Its in an extra file so that small changes on other parts of VolumeMeshRenderer + * wont lead to huge compilation times. + */ +extern "C" +cudaError CentroidReduce(uint* centroidLabelsCount, uint* centroidLabels, float4* centroidSums, uint* centroidCounts, uint* vertexLabels, float4* vertices, uint vertexCount) +{ + uint* vertexLabelsEnd = vertexLabels + vertexCount; + // Sort (reduce needs consecutive keys). + thrust::sort_by_key(thrust::device_ptr(vertexLabels), thrust::device_ptr(vertexLabelsEnd), + thrust::device_ptr(vertices)); + // Count. + thrust::reduce_by_key(thrust::device_ptr(vertexLabels), thrust::device_ptr(vertexLabelsEnd), + thrust::constant_iterator(1), thrust::device_ptr(centroidLabels), + thrust::device_ptr(centroidCounts)); + // Sum. 
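+    // This second reduce_by_key consumes the same sorted labels as keys but the float4
+    // vertex positions as values, giving a component-wise position sum per label;
+    // dividing these sums by the matching entries of centroidCounts (presumably done
+    // by the caller) yields the per-feature centroids.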
+ uint* centroidLabelsEnd = thrust::reduce_by_key(thrust::device_ptr(vertexLabels), thrust::device_ptr(vertexLabelsEnd), + thrust::device_ptr(vertices), thrust::device_ptr(centroidLabels), + thrust::device_ptr(centroidSums)).first.get(); + if (centroidLabelsEnd >= centroidLabels) { + *centroidLabelsCount = centroidLabelsEnd - centroidLabels; + } else{ + *centroidLabelsCount = 0; + } + return cudaGetLastError(); +} + +extern "C" +cudaError ComputeFeatureBBox( float* fBBoxMinX, float* fBBoxMinY, float* fBBoxMinZ, float* fBBoxMaxX, float* fBBoxMaxY, float* fBBoxMaxZ, + uint* triaLabelsMinX, uint* triaLabelsMinY, uint* triaLabelsMinZ, uint* triaLabelsMaxX, uint* triaLabelsMaxY, uint* triaLabelsMaxZ, + uint triaCount) { + // compute the bboxes of all features + + // sort the min values + thrust::sort_by_key( thrust::device_ptr(fBBoxMinX), thrust::device_ptr(fBBoxMinX + triaCount), thrust::device_ptr(triaLabelsMinX)); + thrust::sort_by_key( thrust::device_ptr(fBBoxMinY), thrust::device_ptr(fBBoxMinY + triaCount), thrust::device_ptr(triaLabelsMinY)); + thrust::sort_by_key( thrust::device_ptr(fBBoxMinZ), thrust::device_ptr(fBBoxMinZ + triaCount), thrust::device_ptr(triaLabelsMinZ)); + // sort the max values + thrust::sort_by_key( thrust::device_ptr(fBBoxMaxX), thrust::device_ptr(fBBoxMaxX + triaCount), thrust::device_ptr(triaLabelsMaxX), thrust::greater()); + thrust::sort_by_key( thrust::device_ptr(fBBoxMaxY), thrust::device_ptr(fBBoxMaxY + triaCount), thrust::device_ptr(triaLabelsMaxY), thrust::greater()); + thrust::sort_by_key( thrust::device_ptr(fBBoxMaxZ), thrust::device_ptr(fBBoxMaxZ + triaCount), thrust::device_ptr(triaLabelsMaxZ), thrust::greater()); + // sort the min values by label + thrust::stable_sort_by_key( thrust::device_ptr(triaLabelsMinX), thrust::device_ptr(triaLabelsMinX + triaCount), thrust::device_ptr(fBBoxMinX)); + thrust::stable_sort_by_key( thrust::device_ptr(triaLabelsMinY), thrust::device_ptr(triaLabelsMinY + triaCount), thrust::device_ptr(fBBoxMinY)); + thrust::stable_sort_by_key( thrust::device_ptr(triaLabelsMinZ), thrust::device_ptr(triaLabelsMinZ + triaCount), thrust::device_ptr(fBBoxMinZ)); + // sort the max values by label + thrust::stable_sort_by_key( thrust::device_ptr(triaLabelsMaxX), thrust::device_ptr(triaLabelsMaxX + triaCount), thrust::device_ptr(fBBoxMaxX)); + thrust::stable_sort_by_key( thrust::device_ptr(triaLabelsMaxY), thrust::device_ptr(triaLabelsMaxY + triaCount), thrust::device_ptr(fBBoxMaxY)); + thrust::stable_sort_by_key( thrust::device_ptr(triaLabelsMaxZ), thrust::device_ptr(triaLabelsMaxZ + triaCount), thrust::device_ptr(fBBoxMaxZ)); + // get the min/max x/y/z-value per feature + thrust::unique_by_key( thrust::device_ptr(triaLabelsMinX), thrust::device_ptr(triaLabelsMinX + triaCount), thrust::device_ptr(fBBoxMinX)); + thrust::unique_by_key( thrust::device_ptr(triaLabelsMinY), thrust::device_ptr(triaLabelsMinY + triaCount), thrust::device_ptr(fBBoxMinY)); + thrust::unique_by_key( thrust::device_ptr(triaLabelsMinZ), thrust::device_ptr(triaLabelsMinZ + triaCount), thrust::device_ptr(fBBoxMinZ)); + thrust::unique_by_key( thrust::device_ptr(triaLabelsMaxX), thrust::device_ptr(triaLabelsMaxX + triaCount), thrust::device_ptr(fBBoxMaxX)); + thrust::unique_by_key( thrust::device_ptr(triaLabelsMaxY), thrust::device_ptr(triaLabelsMaxY + triaCount), thrust::device_ptr(fBBoxMaxY)); + thrust::unique_by_key( thrust::device_ptr(triaLabelsMaxZ), thrust::device_ptr(triaLabelsMaxZ + triaCount), thrust::device_ptr(fBBoxMaxZ)); + + return cudaGetLastError(); +} + 
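+/*
+ * Worked illustration of the per-feature min/max reduction above (toy values, not part
+ * of the original data): with fBBoxMinX = {2, 5, 1, 4} and triaLabelsMinX = {1, 0, 1, 0},
+ * the value sort gives values {1, 2, 4, 5} with labels {1, 1, 0, 0}; the stable sort by
+ * label gives labels {0, 0, 1, 1} with values {4, 5, 1, 2}; unique_by_key then keeps the
+ * first value of each label run, i.e. the minima {4, 1} for labels {0, 1}. The max
+ * variants work identically but sort descending (thrust::greater), so the first value
+ * per label run is the maximum.
+ */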
+extern "C" +cudaError SortPrevTetraLabel( int2* labelPair, uint tetrahedronCount, int &labelCount) { + thrust::sort( thrust::device_ptr(labelPair), thrust::device_ptr(labelPair + tetrahedronCount), lessInt2X()); + const int numberOfUniqueValues = thrust::unique( thrust::device_ptr(labelPair), thrust::device_ptr(labelPair + tetrahedronCount), equalInt2()) - thrust::device_ptr(labelPair); + labelCount = numberOfUniqueValues; + + return cudaGetLastError(); +} + +extern "C" +cudaError TriangleVerticesToIndexList( float4* featureVertices, float4* featureVerticesOut, uint* featureVertexIdx, uint* featureVertexCnt, uint* featureVertexCntOut, + uint* featureVertexStartIdx, uint* featureVertexIdxOut, uint triaVertexCnt, uint &vertexCnt) { + thrust::sequence( thrust::device_ptr(featureVertexIdx), thrust::device_ptr(featureVertexIdx + triaVertexCnt)); + thrust::fill_n( thrust::device_ptr(featureVertexCnt), triaVertexCnt, 1); + cudaDeviceSynchronize(); + thrust::sort_by_key( thrust::device_ptr(featureVertices), + thrust::device_ptr(featureVertices + triaVertexCnt), + thrust::device_ptr(featureVertexIdx), less_float4()); + cudaDeviceSynchronize(); + float4* new_end = thrust::reduce_by_key( thrust::device_ptr(featureVertices), + thrust::device_ptr(featureVertices + triaVertexCnt), + thrust::device_ptr(featureVertexCnt), + thrust::device_ptr(featureVerticesOut), + thrust::device_ptr(featureVertexCntOut), equal_float4()).first.get(); + cudaDeviceSynchronize(); + vertexCnt = (new_end - featureVerticesOut); + thrust::exclusive_scan( thrust::device_ptr(featureVertexCntOut), thrust::device_ptr(featureVertexCntOut + vertexCnt), thrust::device_ptr(featureVertexStartIdx)); + WriteTriangleVertexIndexList( featureVertexIdx, featureVertexCntOut, featureVertexStartIdx, featureVertexIdxOut, triaVertexCnt, vertexCnt); + return cudaGetLastError(); +} + +extern "C" +cudaError TriangleEdgeList( uint* featureVertexIdxOut, uint* featureEdgeCnt, uint* featureEdgeCntOut, uint triaCnt, uint2 *featureEdges, uint2 *featureEdgesOut, uint &edgeCnt) { + thrust::fill_n( thrust::device_ptr(featureEdgeCnt), triaCnt * 3, 1); + // write edges + WriteTriangleEdgeList( featureVertexIdxOut, triaCnt, featureEdges); + cudaDeviceSynchronize(); + thrust::sort( thrust::device_ptr(featureEdges), + thrust::device_ptr(featureEdges + (triaCnt * 3)), less_uint2()); + cudaDeviceSynchronize(); + uint2* new_end = thrust::reduce_by_key( thrust::device_ptr(featureEdges), + thrust::device_ptr(featureEdges + (triaCnt * 3)), + thrust::device_ptr(featureEdgeCnt), + thrust::device_ptr(featureEdgesOut), + thrust::device_ptr(featureEdgeCntOut), equal_uint2()).first.get(); + cudaDeviceSynchronize(); + edgeCnt = (new_end - featureEdgesOut); + return cudaGetLastError(); +} + +// global variable for camera position +__device__ __constant__ float3 camPosition; + +cudaError_t copyCamPosToDevice( float3 camPos) { + cudaError_t error = cudaMemcpyToSymbol( camPosition, (void*)&camPos, sizeof(float3)); + cudaDeviceSynchronize(); + return error; +} + +/* + * greater than comparison + */ +__device__ bool greater_float4x3::operator()(const float4x3& lhs, const float4x3& rhs) const { + // use midpoint + float3 trianglePos1 = make_float3(lhs.v1 + lhs.v2 + lhs.v3)/3.0f; + float3 trianglePos2 = make_float3(rhs.v1 + rhs.v2 + rhs.v3)/3.0f; + float dist1 = length(trianglePos1 - camPosition); + float dist2 = length(trianglePos2 - camPosition); + + return (dist1 > dist2); +} + +extern "C" +cudaError SortTrianglesDevice( uint triaCnt, float4x3 *vertices, float4x3 *verticesCopy, 
float4x3 *colors, float4x3 *normals) { + thrust::sort_by_key( thrust::device_ptr(vertices), + thrust::device_ptr(vertices + triaCnt), + thrust::device_ptr(colors), greater_float4x3()); + thrust::sort_by_key( thrust::device_ptr(verticesCopy), + thrust::device_ptr(verticesCopy + triaCnt), + thrust::device_ptr(normals), greater_float4x3()); + cudaDeviceSynchronize(); + return cudaGetLastError(); +} + +#endif // MEGAMOLPROTEIN_VOLUMEMESHRENDERER_SORT_CU_INCLUDED diff --git a/cuda_code/VolumetricAveragePooling_8.cu b/cuda_code/VolumetricAveragePooling_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..56e1d6980b1a4f7b08072c4761cc5b53837a1434 --- /dev/null +++ b/cuda_code/VolumetricAveragePooling_8.cu @@ -0,0 +1,279 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +template +__global__ void cuda_VolumetricAveragePooling_updateOutput( + THCDeviceTensor input, + THCDeviceTensor output, + int kT, int kH, int kW, + int dT, int dH, int dW, + int padT, int padH, int padW, + bool count_include_pad, int offsetZ) +{ + int oCol = blockIdx.x * blockDim.x + threadIdx.x; + int oRow = blockIdx.y * blockDim.y + threadIdx.y; + int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time + int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature + + if (oRow < output.getSize(2) && oCol < output.getSize(3)) + { + Acctype sum = 0.0; + + int tstart = oFrame * dT - padT; + int hstart = oRow * dH - padH; + int wstart = oCol * dW - padW; + int tend = min(tstart + kT, input.getSize(1) + padT); + int hend = min(hstart + kH, input.getSize(2) + padH); + int wend = min(wstart + kW, input.getSize(3) + padW); + int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); + tstart = max(tstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + tend = min(tend, input.getSize(1)); + hend = min(hend, input.getSize(2)); + wend = min(wend, input.getSize(3)); + + Acctype divide_factor; + if (count_include_pad) + divide_factor = static_cast(pool_size); + else + divide_factor = static_cast((tend - tstart) * (hend - hstart) * (wend - wstart)); + + int ti, hi, wi; + for (ti = tstart; ti < tend; ++ti) + { + for (hi = hstart; hi < hend; ++hi) + { + for (wi = wstart; wi < wend; ++wi) + { + Dtype val = input[slice][ti][hi][wi]; + sum += val; + } + } + } + + output[slice][oFrame][oRow][oCol] = ScalarConvert::to(sum / divide_factor); + } +} + +// Inner-most loop size (kW) passed as template parameter for +// performance reasons. 
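+// (Making the width a compile-time constant lets the compiler unroll the innermost
+// loop over kW, which is the usual motivation for this specialization; the
+// LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH macro further below selects the instantiation
+// matching the runtime kW.)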
+// +template +__global__ void cuda_VolumetricAveragePooling_updateOutput_fixedKW( + THCDeviceTensor input, + THCDeviceTensor output, + int kT, int kH, + int dT, int dH, int dW, + int padT, int padH, int padW, + bool count_include_pad, int offsetZ) +{ + int oCol = blockIdx.x * blockDim.x + threadIdx.x; + int oRow = blockIdx.y * blockDim.y + threadIdx.y; + int oFrame = (blockIdx.z + offsetZ) % output.getSize(1); // output frame/time + int slice = (blockIdx.z + offsetZ) / output.getSize(1); // output slice/feature + + if (oRow < output.getSize(2) && oCol < output.getSize(3)) + { + Acctype sum = 0.0; + + int tstart = oFrame * dT - padT; + int hstart = oRow * dH - padH; + int wstart = oCol * dW - padW; + int tend = min(tstart + kT, input.getSize(1) + padT); + int hend = min(hstart + kH, input.getSize(2) + padH); + int wend = min(wstart + KERNEL_WIDTH, input.getSize(3) + padW); + int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); + tstart = max(tstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + tend = min(tend, input.getSize(1)); + hend = min(hend, input.getSize(2)); + wend = min(wend, input.getSize(3)); + + Acctype divide_factor; + if (count_include_pad) + divide_factor = static_cast(pool_size); + else + divide_factor = static_cast((tend - tstart) * (hend - hstart) * (wend - wstart)); + + int ti, hi, wi; + for (ti = tstart; ti < tend; ++ti) + { + for (hi = hstart; hi < hend; ++hi) + { + for (wi = wstart; wi < wend; ++wi) + { + Dtype val = input[slice][ti][hi][wi]; + sum += val; + } + } + } + + output[slice][oFrame][oRow][oCol] = ScalarConvert::to(sum / divide_factor); + } +} + +#define LAUNCH_UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \ + cuda_VolumetricAveragePooling_updateOutput_fixedKW \ + <<>>( \ + cudaInput, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW, count_include_pad, offsetZ); \ + break + +template +__global__ void cuda_VolumetricAveragePooling_updateGradInput_Stride1( + THCDeviceTensor gradOutput, + THCDeviceTensor gradInput, + int kT, int kH, int kW, + Acctype normFactor, int offsetZ) +{ + int iCol = blockIdx.x * blockDim.x + threadIdx.x; + int iRow = blockIdx.y * blockDim.y + threadIdx.y; + int iFrame = (blockIdx.z + offsetZ) % gradInput.getSize(1); // input frame/time + int slice = (blockIdx.z + offsetZ) / gradInput.getSize(1); // input slice/feature + + // guard against over-tiled threads + if (iRow < gradInput.getSize(2) && iCol < gradInput.getSize(3)) + { + Acctype sum = 0.0; + Dtype *gOut = &gradOutput[slice][max(0, iFrame - kT + 1)] + [max(0, iRow - kH + 1)][max(0, iCol - kW + 1)]; + int frameOffset = 0; + for (int oFrame = max(0, iFrame - kT + 1); + oFrame < min(iFrame + 1, gradOutput.getSize(1)); + ++oFrame) + { + int rowOffset = frameOffset; + for (int oRow = max(0, iRow - kH + 1); + oRow < min(iRow + 1, gradOutput.getSize(2)); + ++oRow) + { + int colOffset = rowOffset; + for (int oCol = max(0, iCol - kW + 1); + oCol < min(iCol + 1, gradOutput.getSize(3)); + ++oCol) + { + sum += gOut[colOffset]; + ++colOffset; + } + rowOffset += gradOutput.getSize(3); + } + frameOffset += gradOutput.getSize(2) * gradOutput.getSize(3); + } + gradInput[slice][iFrame][iRow][iCol] = ScalarConvert::to(sum * normFactor); + } +} + +template +__global__ void cuda_VolumetricAveragePooling_updateGradInput_atomicAdd( + THCDeviceTensor gradOutput, + THCDeviceTensor gradInput, + int kT, int kH, int kW, + int dT, int dH, int dW, + int padT, int padH, int padW, + bool count_include_pad, int offsetZ) +{ + int oCol = blockIdx.x * blockDim.x + threadIdx.x; + int oRow = 
blockIdx.y * blockDim.y + threadIdx.y; + int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // gradOutput frame/time + int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // gradOutput slice/feature + + // guard against over-tiled threads + if (oRow < gradOutput.getSize(2) && oCol < gradOutput.getSize(3)) + { + int tstart = oFrame * dT - padT; + int hstart = oRow * dH - padH; + int wstart = oCol * dW - padW; + int tend = min(tstart + kT, gradInput.getSize(1) + padT); + int hend = min(hstart + kH, gradInput.getSize(2) + padH); + int wend = min(wstart + kW, gradInput.getSize(3) + padW); + int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); + tstart = max(tstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + tend = min(tend, gradInput.getSize(1)); + hend = min(hend, gradInput.getSize(2)); + wend = min(wend, gradInput.getSize(3)); + + Acctype divide_factor; + if (count_include_pad) + divide_factor = static_cast(pool_size); + else + divide_factor = static_cast((tend - tstart) * (hend - hstart) * (wend - wstart)); + + Dtype val = ScalarConvert::to( + ScalarConvert::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor); + for (int iFrame = tstart; iFrame < tend; ++iFrame) + { + for (int iRow = hstart; iRow < hend; ++iRow) + { + for (int iCol = wstart; iCol < wend; ++iCol) + { + atomicAdd(&gradInput[slice][iFrame][iRow][iCol], val); + } + } + } + } +} + +template +__global__ void cuda_VolumetricAveragePooling_updateGradInput( + THCDeviceTensor gradOutput, + THCDeviceTensor gradInput, + int kT, int kH, int kW, + int dT, int dH, int dW, + int padT, int padH, int padW, + bool count_include_pad, int offsetZ) +{ + int oCol = blockIdx.x * blockDim.x + threadIdx.x; + int oRow = blockIdx.y * blockDim.y + threadIdx.y; + int oFrame = (blockIdx.z + offsetZ) % gradOutput.getSize(1); // gradOutput frame/time + int slice = (blockIdx.z + offsetZ) / gradOutput.getSize(1); // gradOutput slice/feature + + // guard against over-tiled threads + if (oRow < gradOutput.getSize(2) && oCol < gradOutput.getSize(3)) + { + int tstart = oFrame * dT - padT; + int hstart = oRow * dH - padH; + int wstart = oCol * dW - padW; + int tend = min(tstart + kT, gradInput.getSize(1) + padT); + int hend = min(hstart + kH, gradInput.getSize(2) + padH); + int wend = min(wstart + kW, gradInput.getSize(3) + padW); + int pool_size = (tend - tstart) * (hend - hstart) * (wend - wstart); + tstart = max(tstart, 0); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + tend = min(tend, gradInput.getSize(1)); + hend = min(hend, gradInput.getSize(2)); + wend = min(wend, gradInput.getSize(3)); + + Acctype divide_factor; + if (count_include_pad) + divide_factor = static_cast(pool_size); + else + divide_factor = static_cast((tend - tstart) * (hend - hstart) * (wend - wstart)); + + Dtype val = ScalarConvert::to( + ScalarConvert::to(gradOutput[slice][oFrame][oRow][oCol]) / divide_factor); + for (int iFrame = tstart; iFrame < tend; ++iFrame) + { + for (int iRow = hstart; iRow < hend; ++iRow) + { + for (int iCol = wstart; iCol < wend; ++iCol) + { + gradInput[slice][iFrame][iRow][iCol] = val; + } + } + } + } +} + +#include +#include diff --git a/cuda_code/VoxelizeILV.cu b/cuda_code/VoxelizeILV.cu new file mode 100644 index 0000000000000000000000000000000000000000..8f64e056b5f7e685150dbd09413123dca13b92c7 --- /dev/null +++ b/cuda_code/VoxelizeILV.cu @@ -0,0 +1,62 @@ +#pragma once + +#include "Common.cu" + +#include "VectorMath.cu" + +struct LineData +{ + int3 t; + int3 tStep; + int3 signDir; + int3 pos; 
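+    // pos is the voxel currently visited along the line; end (below) is the final voxel.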
+ int3 end; +}; + +__device__ +int3& GetNextVoxelInLine(LineData& data) +{ + int tmin = min(data.t); + int axis = 0; + + if(tmin == data.t.x && data.signDir.x != 0) axis = 0; + else if(tmin == data.t.y && data.signDir.y != 0) axis = 1; + else if(tmin == data.t.z && data.signDir.z != 0) axis = 2; + + data.t -= tmin; + + get(data.pos, axis) += get(data.signDir, axis); + get(data.t, axis) = get(data.tStep, axis); + return data.pos; +} + +__device__ +LineData CreateLineData(int3 v1, int3 v2) +{ + LineData data; + int3 dir = v2 - v1; + data.signDir = sign(dir); + data.pos = v1; + data.end = v2; + + int3 dir1 = make_int3(max(abs(dir.x), 1),max(abs(dir.y), 1),max(abs(dir.z), 1)); + + data.t = make_int3(dir1.y * dir1.z, dir1.x * dir1.z, dir1.x * dir1.y); + if(dir.x == 0) data.t.x = INT_MAX; + if(dir.y == 0) data.t.y = INT_MAX; + if(dir.z == 0) data.t.z = INT_MAX; + data.tStep = data.t * 2; + return data; +} + +__device__ +void voxelizeLine(int3 v1, int3 v2, unsigned char color) +{ + LineData data = CreateLineData(v1,v2); + voxelizePoint(data.pos, color); + + while(data.pos != data.end) + { + voxelizePoint(GetNextVoxelInLine(data), color); + } +} diff --git a/cuda_code/WarpSelectHalf1_4.cu b/cuda_code/WarpSelectHalf1_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..f86683157b679df37592d1b6da31c918e1451fdf --- /dev/null +++ b/cuda_code/WarpSelectHalf1_4.cu @@ -0,0 +1,20 @@ + +/** + * Copyright (c) 2015-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the CC-by-NC license found in the + * LICENSE file in the root directory of this source tree. + */ + +// Copyright 2004-present Facebook. All Rights Reserved. +#include "WarpSelectImpl.cuh" + +namespace faiss { namespace gpu { + +#ifdef FAISS_USE_FLOAT16 +WARP_SELECT_IMPL(half, true, 1, 1); +WARP_SELECT_IMPL(half, false, 1, 1); +#endif + +} } // namespace diff --git a/cuda_code/WarpingSolver_1.cu b/cuda_code/WarpingSolver_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..f9c6fdb40097289f2c9df11904ce0db0b7ea0ed9 --- /dev/null +++ b/cuda_code/WarpingSolver_1.cu @@ -0,0 +1,332 @@ +#include + +// Enabled to print a bunch of junk during solving +#define DEBUG_PRINT_SOLVER_INFO 0 + +#include "WarpingSolverParameters.h" +#include "WarpingSolverState.h" +#include "WarpingSolverUtil.h" +#include "WarpingSolverEquations.h" + +#include +#include +#include + +#include "CUDATimer.h" + +#ifdef _WIN32 +#include +#endif + +#ifdef _WIN32 +#define EXPORT __declspec(dllexport) +#else +#define EXPORT +#endif + +#define WARP_SIZE 32u +#define WARP_MASK (WARP_SIZE-1u) + +///////////////////////////////////////////////////////////////////////// +// Eval Residual +///////////////////////////////////////////////////////////////////////// + +__global__ void ResetResidualDevice(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + if (x == 0) state.d_sumResidual[0] = 0.0f; +} + +__global__ void EvalResidualDevice(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.N; // Number of block variables + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + float residual = 0.0f; + if (x < N) + { + residual = evalFDevice(x, input, state, parameters); + } + residual = warpReduce(residual); + + //This command gets the lane ID within the current warp + unsigned int laneid; + asm("mov.u32 %0, %%laneid;" : "=r"(laneid)); + if (laneid == 0) { + 
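+        // Only lane 0 of each warp carries the warp-reduced partial sum, so one
+        // atomicAdd per warp is enough to accumulate the global residual.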
atomicAdd(&state.d_sumResidual[0], residual); + } +} + +float EvalResidual(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer) +{ + float residual = 0.0f; + + const unsigned int N = input.N; // Number of block variables + ResetResidualDevice << < 1, 1, 1 >> >(input, state, parameters); + cudaSafeCall(cudaDeviceSynchronize()); + timer.startEvent("EvalResidual"); + EvalResidualDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters); + timer.endEvent(); + cudaSafeCall(cudaDeviceSynchronize()); + + residual = state.getSumResidual(); + + #ifdef _DEBUG + cudaSafeCall(cudaDeviceSynchronize()); + #endif + + return residual; +} + +// For the naming scheme of the variables see: +// http://en.wikipedia.org/wiki/Conjugate_gradient_method +// This code is an implementation of their PCG pseudo code + +__global__ void PCGInit_Kernel1(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.N; + const int x = blockIdx.x * blockDim.x + threadIdx.x; + + float d = 0.0f; + if (x < N) + { + float3 residuumA; + const float3 residuum = evalMinusJTFDevice(x, input, state, parameters, residuumA); // residuum = J^T x -F - A x delta_0 => J^T x -F, since A x x_0 == 0 + state.d_r[x] = residuum; // store for next iteration + state.d_rA[x] = residuumA; // store for next iteration + + const float3 p = state.d_precondioner[x] * residuum; // apply preconditioner M^-1 + state.d_p[x] = p; + + const float3 pA = state.d_precondionerA[x] * residuumA; // apply preconditioner M^-1 + state.d_pA[x] = pA; + + d = dot(residuum, p) + dot(residuumA, pA); // x-th term of nomimator for computing alpha and denominator for computing beta + } + + d = warpReduce(d); + if ((threadIdx.x & WARP_MASK) == 0) { + atomicAdd(state.d_scanAlpha, d); + } +} + +__global__ void PCGInit_Kernel2(unsigned int N, SolverState state) +{ + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + if (x < N) { + state.d_rDotzOld[x] = state.d_scanAlpha[0]; + state.d_delta[x] = make_float3(0.0f, 0.0f, 0.0f); + state.d_deltaA[x] = make_float3(0.0f, 0.0f, 0.0f); + } +} + +void Initialization(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer) +{ + const unsigned int N = input.N; + + const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; + const int shmem_size = sizeof(float)*THREADS_PER_BLOCK; + + if (blocksPerGrid > THREADS_PER_BLOCK) + { + std::cout << "Too many variables for this block size. 
Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl; + while (1); + } + cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float))); + timer.startEvent("PCGInit_Kernel1"); + PCGInit_Kernel1 << > >(input, state, parameters); + timer.endEvent(); + #ifdef _DEBUG + cudaSafeCall(cudaDeviceSynchronize()); + #endif + + timer.startEvent("PCGInit_Kernel2"); + PCGInit_Kernel2 << > >(N, state); + timer.endEvent(); + #ifdef _DEBUG + cudaSafeCall(cudaDeviceSynchronize()); + #endif + + #if DEBUG_PRINT_SOLVER_INFO + float temp; + cudaSafeCall( cudaMemcpy(&temp, state.d_scanAlpha, sizeof(float), cudaMemcpyDeviceToHost) ); + printf("ScanAlpha (Init): %f\n", temp); + #endif +} + +///////////////////////////////////////////////////////////////////////// +// PCG Iteration Parts +///////////////////////////////////////////////////////////////////////// + +__global__ void PCGStep_Kernel1(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.N; // Number of block variables + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + float d = 0.0f; + if (x < N) + { + float3 tmpA; + const float3 tmp = applyJTJDevice(x, input, state, parameters, tmpA); // A x p_k => J^T x J x p_k + + state.d_Ap_X[x] = tmp; // store for next kernel call + state.d_Ap_A[x] = tmpA; // store for next kernel call + + d = dot(state.d_p[x], tmp) + dot(state.d_pA[x], tmpA); // x-th term of denominator of alpha + } + + d = warpReduce(d); + if ((threadIdx.x & WARP_MASK) == 0) { + atomicAdd(state.d_scanAlpha, d); // sum over x-th terms to compute denominator of alpha inside this block + } +} + +__global__ void PCGStep_Kernel2(SolverInput input, SolverState state) +{ + const unsigned int N = input.N; + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + const float dotProduct = state.d_scanAlpha[0]; + + float b = 0.0f; + if (x < N) + { + float alpha = 0.0f; + if (dotProduct > FLOAT_EPSILON) alpha = state.d_rDotzOld[x] / dotProduct; // update step size alpha + + state.d_delta[x] = state.d_delta[x] + alpha*state.d_p[x]; // do a decent step + state.d_deltaA[x] = state.d_deltaA[x] + alpha*state.d_pA[x]; // do a decent step + + float3 r = state.d_r[x] - alpha*state.d_Ap_X[x]; // update residuum + state.d_r[x] = r; // store for next kernel call + + float3 rA = state.d_rA[x] - alpha*state.d_Ap_A[x]; // update residuum + state.d_rA[x] = rA; // store for next kernel call + + float3 z = state.d_precondioner[x] * r; // apply preconditioner M^-1 + state.d_z[x] = z; // save for next kernel call + + float3 zA = state.d_precondionerA[x] * rA; // apply preconditioner M^-1 + state.d_zA[x] = zA; // save for next kernel call + + b = dot(z, r) + dot(zA, rA); // compute x-th term of the nominator of beta + } + + b = warpReduce(b); + if ((threadIdx.x & WARP_MASK) == 0) { + atomicAdd(state.d_scanBeta, b); // sum over x-th terms to compute denominator of alpha inside this block + } +} + +__global__ void PCGStep_Kernel3(SolverInput input, SolverState state) +{ + const unsigned int N = input.N; + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + if (x < N) + { + const float rDotzNew = state.d_scanBeta[0]; // get new nominator + const float rDotzOld = state.d_rDotzOld[x]; // get old denominator + + float beta = 0.0f; + if (rDotzOld > FLOAT_EPSILON) beta = rDotzNew / rDotzOld; // update step size beta + + state.d_rDotzOld[x] = rDotzNew; // save new rDotz for next iteration + state.d_p[x] = state.d_z[x] + beta*state.d_p[x]; // update 
decent direction + state.d_pA[x] = state.d_zA[x] + beta*state.d_pA[x]; // update decent direction + } +} + +void PCGIteration(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer) +{ + const unsigned int N = input.N; // Number of block variables + + // Do PCG step + const int blocksPerGrid = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; + const int shmem_size = sizeof(float)*THREADS_PER_BLOCK; + + if (blocksPerGrid > THREADS_PER_BLOCK) + { + std::cout << "Too many variables for this block size. Maximum number of variables for two kernel scan: " << THREADS_PER_BLOCK*THREADS_PER_BLOCK << std::endl; + while (1); + } + + cudaSafeCall(cudaMemset(state.d_scanAlpha, 0, sizeof(float))); + timer.startEvent("PCGStep_Kernel1"); + PCGStep_Kernel1 << > >(input, state, parameters); + timer.endEvent(); + #ifdef _DEBUG + cudaSafeCall(cudaDeviceSynchronize()); + #endif + + cudaSafeCall(cudaMemset(state.d_scanBeta, 0, sizeof(float))); + timer.startEvent("PCGStep_Kernel2"); + PCGStep_Kernel2 << > >(input, state); + timer.endEvent(); + #ifdef _DEBUG + cudaSafeCall(cudaDeviceSynchronize()); + #endif + + timer.startEvent("PCGStep_Kernel3"); + PCGStep_Kernel3 << > >(input, state); + timer.endEvent(); + #ifdef _DEBUG + cudaSafeCall(cudaDeviceSynchronize()); + #endif +} + +///////////////////////////////////////////////////////////////////////// +// Apply Update +///////////////////////////////////////////////////////////////////////// + +__global__ void ApplyLinearUpdateDevice(SolverInput input, SolverState state, SolverParameters parameters) +{ + const unsigned int N = input.N; + const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + if (x < N) { + state.d_x[x] = state.d_x[x] + state.d_delta[x]; + state.d_a[x] = state.d_a[x] + state.d_deltaA[x]; + } +} + +void ApplyLinearUpdate(SolverInput& input, SolverState& state, SolverParameters& parameters, CUDATimer& timer) +{ + const unsigned int N = input.N; + ApplyLinearUpdateDevice << <(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(input, state, parameters); + #ifdef _DEBUG + cudaSafeCall(cudaDeviceSynchronize()); + #endif +} + +//////////////////////////////////////////////////////////////////// +// Main GN Solver Loop +//////////////////////////////////////////////////////////////////// + +extern "C" double ImageWarpingSolveGNStub(SolverInput& input, SolverState& state, SolverParameters& parameters) +{ + CUDATimer timer; + + double residual = EvalResidual(input, state, parameters, timer); + printf("%f\n", residual); + + for (unsigned int nIter = 0; nIter < parameters.nNonLinearIterations; nIter++) + { + Initialization(input, state, parameters, timer); + + for (unsigned int linIter = 0; linIter < parameters.nLinIterations; linIter++) { + PCGIteration(input, state, parameters, timer); + } + + ApplyLinearUpdate(input, state, parameters, timer); + + float residual = EvalResidual(input, state, parameters, timer); + printf("%f\n", residual); + + timer.nextIteration(); + } + + timer.evaluate(); + return residual; +} diff --git a/cuda_code/WeightNormSTDPPlasticity.cu b/cuda_code/WeightNormSTDPPlasticity.cu new file mode 100644 index 0000000000000000000000000000000000000000..59237b2b649caecfcfa3ab4a66ff87b3a7fd424b --- /dev/null +++ b/cuda_code/WeightNormSTDPPlasticity.cu @@ -0,0 +1,176 @@ +// -*- mode: c++ -*- +#include "Spike/Backend/CUDA/Plasticity/WeightNormSTDPPlasticity.hpp" + +SPIKE_EXPORT_BACKEND_TYPE(CUDA, WeightNormSTDPPlasticity); + +namespace Backend { + namespace CUDA { + 
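+    // This backend owns raw device buffers that are released in the destructor below;
+    // CudaSafeCall is assumed to be the cudaError_t-checking wrapper used throughout
+    // these CUDA backends.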
WeightNormSTDPPlasticity::~WeightNormSTDPPlasticity() { + CudaSafeCall(cudaFree(plastic_synapse_indices)); + CudaSafeCall(cudaFree(sum_squared_afferent_values)); + CudaSafeCall(cudaFree(neuron_in_plasticity_set)); + CudaSafeCall(cudaFree(initial_weights)); + CudaSafeCall(cudaFree(weight_divisor)); + } + + void WeightNormSTDPPlasticity::reset_state() { + if (total_number_of_plastic_synapses > 0) { + + // Now load values into device memory + CudaSafeCall(cudaMemcpy((void*)initial_weights, &(frontend()->initial_weights[0]), sizeof(float)*total_number_of_plastic_synapses, cudaMemcpyHostToDevice)); + + CudaSafeCall(cudaMemcpy((void*)afferent_weight_change_updater, + (void*)frontend()->afferent_weight_change_updater, + sizeof(float)*frontend()->neurs->total_number_of_neurons, cudaMemcpyHostToDevice)); + } + } + + void WeightNormSTDPPlasticity::prepare() { + + // Set up synapses backend and synaptic details + synapses_backend = dynamic_cast<::Backend::CUDA::Synapses*> + (frontend()->syns->backend()); + total_number_of_plastic_synapses = frontend()->total_number_of_plastic_synapses; + + // This learning rule requires a device side storage of a number of variables + + allocate_device_pointers(); + } + + void WeightNormSTDPPlasticity::allocate_device_pointers() { + if (total_number_of_plastic_synapses > 0){ + CudaSafeCall(cudaMalloc((void **)&plastic_synapse_indices, sizeof(int)*total_number_of_plastic_synapses)); + CudaSafeCall(cudaMemcpy((void*)plastic_synapse_indices, + (void*)&(frontend()->plastic_synapses[0]), + sizeof(int)*total_number_of_plastic_synapses, + cudaMemcpyHostToDevice)); + // Loading vectors from front-end + CudaSafeCall(cudaMalloc((void **)&sum_squared_afferent_values, sizeof(float)*frontend()->neurs->total_number_of_neurons)); + CudaSafeCall(cudaMalloc((void **)&afferent_weight_change_updater, sizeof(float)*frontend()->neurs->total_number_of_neurons)); + CudaSafeCall(cudaMalloc((void **)&neuron_in_plasticity_set, sizeof(bool)*frontend()->neurs->total_number_of_neurons)); + // Copy values + CudaSafeCall(cudaMemcpy((void*)sum_squared_afferent_values, + (void*)frontend()->sum_squared_afferent_values, + sizeof(float)*frontend()->neurs->total_number_of_neurons, cudaMemcpyHostToDevice)); + CudaSafeCall(cudaMemcpy((void*)afferent_weight_change_updater, + (void*)frontend()->afferent_weight_change_updater, + sizeof(float)*frontend()->neurs->total_number_of_neurons, cudaMemcpyHostToDevice)); + CudaSafeCall(cudaMemcpy((void*)neuron_in_plasticity_set, + (void*)frontend()->neuron_in_plasticity_set, + sizeof(bool)*frontend()->neurs->total_number_of_neurons, cudaMemcpyHostToDevice)); + + // Loading initial weights and setting weight changes to zero + CudaSafeCall(cudaMalloc((void **)&initial_weights, sizeof(float)*total_number_of_plastic_synapses)); + CudaSafeCall(cudaMalloc((void **)&weight_divisor, sizeof(float)*frontend()->neurs->total_number_of_neurons)); + } + } + + void WeightNormSTDPPlasticity::weight_normalization(){ + if (total_number_of_plastic_synapses > 0) { + CudaSafeCall(cudaMemcpy((void*)afferent_weight_change_updater, + (void*)frontend()->afferent_weight_change_updater, + sizeof(float)*frontend()->neurs->total_number_of_neurons, cudaMemcpyHostToDevice)); + + // First calculate the weight change + weight_change_calculations<<number_of_synapse_blocks_per_grid, synapses_backend->threads_per_block>>>( + synapses_backend->postsynaptic_neuron_indices, + synapses_backend->synaptic_efficacies_or_weights, + initial_weights, + afferent_weight_change_updater, + plastic_synapse_indices, + 
total_number_of_plastic_synapses); + CudaCheckError(); + weight_division_calc<<number_of_synapse_blocks_per_grid, synapses_backend->threads_per_block>>>( + sum_squared_afferent_values, + afferent_weight_change_updater, + weight_divisor, + neuron_in_plasticity_set, + frontend()->neurs->total_number_of_neurons); + CudaCheckError(); + weight_update<<number_of_synapse_blocks_per_grid, synapses_backend->threads_per_block>>>( + synapses_backend->postsynaptic_neuron_indices, + neuron_in_plasticity_set, + synapses_backend->synaptic_efficacies_or_weights, + weight_divisor, + plastic_synapse_indices, + total_number_of_plastic_synapses); + CudaCheckError(); + } + } + + __global__ void weight_change_calculations( + int* postsyn_ids, + float* current_weight, + float* initial_weights, + float* afferent_weight_change_updater, + int* d_plastic_synapse_indices, + size_t total_number_of_plastic_synapses) + { + // Global Index + int indx = threadIdx.x + blockIdx.x * blockDim.x; + + while (indx < total_number_of_plastic_synapses) { + // Get the current synapse index + int idx = d_plastic_synapse_indices[indx]; + int post_id = postsyn_ids[idx]; + float weight_change = current_weight[idx] - initial_weights[indx]; + if (weight_change != 0.0){ + float update_value = weight_change*weight_change + 2.0f*initial_weights[indx]*weight_change; + atomicAdd(&afferent_weight_change_updater[post_id], update_value); + } + indx += blockDim.x * gridDim.x; + } + } + + __global__ void weight_division_calc( + float* sum_squared_afferent_values, + float* afferent_weight_change_updater, + float* weight_divisor, + bool* neuron_in_plasticity_set, + size_t total_number_of_neurons) + { + // Global Index + int idx = threadIdx.x + blockIdx.x * blockDim.x; + + while (idx < total_number_of_neurons) { + if (neuron_in_plasticity_set[idx]) + { + if ((sum_squared_afferent_values[idx] - afferent_weight_change_updater[idx] < (sum_squared_afferent_values[idx]*0.01))) + printf("NORMALIZATION DIFF VERY LARGE. 
DANGER OF SYNAPSES ALL -> ZERO: %f, %f \n", + sum_squared_afferent_values[idx], + afferent_weight_change_updater[idx]); + weight_divisor[idx] = sqrtf(sum_squared_afferent_values[idx] + afferent_weight_change_updater[idx]) / sqrtf(sum_squared_afferent_values[idx]); + } + idx += blockDim.x * gridDim.x; + } + } + + + __global__ void weight_update( + int* postsyn_neuron, + bool* neuron_in_plasticity_set, + float* current_weight, + float* weight_divisor, + int* d_plastic_synapse_indices, + size_t total_number_of_plastic_synapses){ + + // Global Index + int indx = threadIdx.x + blockIdx.x * blockDim.x; + + while (indx < total_number_of_plastic_synapses) { + int idx = d_plastic_synapse_indices[indx]; + int postneuron = postsyn_neuron[idx]; + if (neuron_in_plasticity_set[postneuron]){ + float division_value = weight_divisor[postneuron]; + //if (division_value != 1.0) + //printf("%f, %f, %f wat \n", division_value, current_weight[idx], (current_weight[idx] / division_value)); + if (division_value != 1.0) + current_weight[idx] /= division_value; + } + indx += blockDim.x * gridDim.x; + } + } + + + } +} diff --git a/cuda_code/ZetaKernel_3.cu b/cuda_code/ZetaKernel_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..730dbaf850955076464d2b86947e5702318a1e62 --- /dev/null +++ b/cuda_code/ZetaKernel_3.cu @@ -0,0 +1,38 @@ +#define TORCH_ASSERT_NO_OPERATORS +#include +#include +#include +#include +#include +#include + +namespace at { namespace native { +namespace { + +/* + * This function is derived from the implementation of the zeta function in the Cephes Math Library. + * See note [3-Clause BSD License for the Cephes Math Library]. + */ +// See note [Jiterator] +const char zeta_name[] = "zeta"; +void zeta_kernel_cuda(TensorIteratorBase& iter) { + #ifdef USE_JITERATOR + AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() { + opmath_jitted_gpu_kernel_with_scalars(iter, zeta_string); + }); + #else + AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() { + gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t q) -> scalar_t { + return zeta(x, q); + }); + }); + #endif //jiterator +} + +} // namespace (anonymous) + +REGISTER_DISPATCH(zeta_stub, &zeta_kernel_cuda); + +}} // namespace at::native diff --git a/cuda_code/_2_tiling.cu b/cuda_code/_2_tiling.cu new file mode 100644 index 0000000000000000000000000000000000000000..baf684053e12a897b49bbca060019bb5e210d786 --- /dev/null +++ b/cuda_code/_2_tiling.cu @@ -0,0 +1,103 @@ +/* + * 5KK73 + * Eindhoven University of Technology + */ + +/* Matrix multiplication: C = A * B. + * Device code. + */ + + #include + #include "../parser.h" + + //////////////////////////////////////////////////////////////////////////////// + //! Matrix multiplication on the device: C = A * B + //! 
wA is A's width and wB is B's width + //////////////////////////////////////////////////////////////////////////////// + __global__ void + tiling_kernel( float* C, float* A, float* B, int interDim) + { + + // Declaration of the shared memory array As used to + // store the sub-matrix of A + __shared__ float As[2 * 6]; + + // Declaration of the shared memory array Bs used to + // store the sub-matrix of B + __shared__ float Bs[2 * 8]; + + // Index of the first sub-matrix of A&B processed by the block + int A_head = threadIdx.x * interDim; // {0,1,2} * 4 + int B_head = threadIdx.y ; // {0,1,2,3} + // Index of the last sub-matrix of A processed by the block + int A_tail = A_head + 4 -1 ; // width of As = 2 + // Step size used to iterate through the sub-matrices of A&B + int A_step = 2; + int B_step = 2 * interDim ; + + // Csub is used to store the element of the block sub-matrix + // that is computed by the thread + float Csub = 0; + + // Loop over all the sub-matrices of A and B + // required to compute the block sub-matrix + for (int a = A_head, b = B_head; + a <= A_tail; + a += A_step, b += B_step) { + // Load the matrices from device memory + // to shared memory; each thread loads + // one element of each matrix + As[0 + a] = A[a]; + As[1 + a] = A[a + 1]; + + Bs[0 + b] = B[b]; + Bs[1 * interDim + b] = B[b + 1 * interDim]; + __syncthreads(); + + // if (threadIdx.x == 0 && threadIdx.y == 1) { + // printf("-b: %d ; -Bs : [ %f , %f ] ; threadIdx.x : %d ; threadIdx.y : %d ; \n" ,\ + // b , Bs[0] , Bs[1] , threadIdx.x , threadIdx.y ); + // } + // Multiply the two matrices together; + // each thread computes one element + // of the block sub-matrix + // for (int k = 0; k < 12; ++k) + Csub += As[0 + a]*Bs[0 + b] + As[1 + a] * Bs[1 * interDim + b] ; + // Synchronize to make sure that the preceding + // computation is done before loading two new + // sub-matrices of A and B in the next iteration + __syncthreads(); + } + + // Write the block sub-matrix to device memory; + // each thread writes one element + C[threadIdx.x * blockDim.y + threadIdx.y ] = Csub; + } + +void parser::matmul_tiling( matrix& C) { + float* dev_a; + cudaMalloc(&dev_a, A.row * A.col * sizeof(float)); + cudaMemcpy(dev_a, A.elements, A.row * A.col * sizeof(float), cudaMemcpyHostToDevice); + + float* dev_b; + cudaMalloc(&dev_b, B.row * B.col * sizeof(float)); + cudaMemcpy(dev_b, B.elements, B.row * B.col * sizeof(float), cudaMemcpyHostToDevice); + + float* dev_c; + cudaMalloc(&dev_c, C.row * C.col * sizeof(float)); + + + dim3 block_size(3,4); + // dim3 grid_size(1); + tiling_kernel<<< 1 , block_size , 2 * sizeof(float)>>>(dev_c, dev_a, dev_b , 4); + + cudaDeviceSynchronize(); + + + cudaMemcpy(C.elements, dev_c, C.row * C.col * sizeof(float), cudaMemcpyDeviceToHost); + + cudaFree(dev_a); + cudaFree(dev_b); + cudaFree(dev_c); + return; +} \ No newline at end of file diff --git a/cuda_code/abs_value.cu b/cuda_code/abs_value.cu new file mode 100644 index 0000000000000000000000000000000000000000..4c645851b84a690c6dd7aa818081ee011ebf69b5 --- /dev/null +++ b/cuda_code/abs_value.cu @@ -0,0 +1,10 @@ +extern "C" +#include +__global__ void abs_value(int n, double *a, double *c) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; //output x + if (i < n) + { + c[i] = abs(a[i]); + } +} \ No newline at end of file diff --git a/cuda_code/activations_31.cu b/cuda_code/activations_31.cu new file mode 100644 index 0000000000000000000000000000000000000000..dfba54e9332dd442656dc3d1d045b2a4e6e27da3 --- /dev/null +++ b/cuda_code/activations_31.cu @@ 
-0,0 +1,521 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include +#include + +#include "math.hpp" +#include "types.hpp" +#include "vector_traits.hpp" +#include "grid_stride_range.hpp" +#include "execution.hpp" + +#include "../cuda4dnn/csl/stream.hpp" +#include "../cuda4dnn/csl/span.hpp" + +#include "../cuda4dnn/kernels/scale_shift.hpp" + +#include + +#include + +using namespace cv::dnn::cuda4dnn::csl; +using namespace cv::dnn::cuda4dnn::csl::device; + +namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { + + namespace raw { + template + __global__ void abs_vec(Span output, View input) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); + + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + vector_type vec; + v_load(vec, input_vPtr[i]); + for (int j = 0; j < vector_type::size(); j++) { + using device::abs; + vec.data[j] = abs(vec.data[j]); + } + v_store(output_vPtr[i], vec); + } + } + + template + __global__ void tanh_vec(Span output, View input) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); + + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + vector_type vec; + v_load(vec, input_vPtr[i]); + for (int j = 0; j < vector_type::size(); j++) { + using device::tanh; + vec.data[j] = tanh(vec.data[j]); + } + v_store(output_vPtr[i], vec); + } + } + + template + __global__ void swish_vec(Span output, View input) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); + + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + vector_type vec; + v_load(vec, input_vPtr[i]); + for (int j = 0; j < vector_type::size(); j++) { + using device::sigmoid; + vec.data[j] = vec.data[j] * sigmoid(vec.data[j]); + } + v_store(output_vPtr[i], vec); + } + } + + template + __global__ void mish_vec(Span output, View input) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); + + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + vector_type vec; + v_load(vec, input_vPtr[i]); + for (int j = 0; j < vector_type::size(); j++) { + using device::tanh; + using device::log1pexp; + vec.data[j] = vec.data[j] * tanh(log1pexp(vec.data[j])); + } + v_store(output_vPtr[i], vec); + } + } + + template + __global__ void sigmoid_vec(Span output, View input) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); + + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + vector_type vec; + v_load(vec, input_vPtr[i]); + for (int j = 0; j < vector_type::size(); j++) { + using device::sigmoid; + vec.data[j] = sigmoid(vec.data[j]); + } + v_store(output_vPtr[i], vec); + } + } + + template + __global__ void bnll_vec(Span output, View input) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); 
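+            // BNLL computes log(1 + exp(x)); the branch in the loop below uses the
+            // numerically stable form x + log1pexp(-x) when x is positive.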
+ + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + vector_type vec; + v_load(vec, input_vPtr[i]); + for (int j = 0; j < vector_type::size(); j++) { + using device::log1pexp; + vec.data[j] = vec.data[j] > T(0) ? vec.data[j] + log1pexp(-vec.data[j]) : log1pexp(vec.data[j]); + } + v_store(output_vPtr[i], vec); + } + } + + template + __global__ void elu_vec(Span output, View input) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); + + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + vector_type vec; + v_load(vec, input_vPtr[i]); + for (int j = 0; j < vector_type::size(); j++) { + using device::expm1; + vec.data[j] = vec.data[j] >= T(0) ? vec.data[j] : expm1(vec.data[j]); + } + v_store(output_vPtr[i], vec); + } + } + + template + __global__ void relu_vec(Span output, View input, T slope) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); + + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + vector_type vec; + v_load(vec, input_vPtr[i]); + for(int j = 0; j < vector_type::size(); j++) + vec.data[j] = vec.data[j] >= T(0) ? vec.data[j] : slope * vec.data[j]; + v_store(output_vPtr[i], vec); + } + } + + template + __global__ void clipped_relu_vec(Span output, View input, T floor, T ceiling) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); + + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + using device::clamp; + + vector_type vec; + v_load(vec, input_vPtr[i]); + for (int j = 0; j < vector_type::size(); j++) + vec.data[j] = clamp(vec.data[j], floor, ceiling); + v_store(output_vPtr[i], vec); + } + } + + template + __global__ void axiswise_relu_vec(Span output, View input, size_type inner_size, View slope) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); + + inner_size /= vector_type::size(); + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + const index_type c = (i / inner_size) % static_cast(slope.size()); + + vector_type vec; + v_load(vec, input_vPtr[i]); + for (int j = 0; j < vector_type::size(); j++) + vec.data[j] = vec.data[j] > T(0) ? 
vec.data[j] : vec.data[j] * slope[c]; + v_store(output_vPtr[i], vec); + } + } + + template + __global__ void power_vec(Span output, View input, T exp, T scale, T shift) { + using vector_type = get_vector_type_t; + + auto output_vPtr = vector_type::get_pointer(output.data()); + auto input_vPtr = vector_type::get_pointer(input.data()); + + for (auto i : grid_stride_range(output.size() / vector_type::size())) { + using device::pow; + + vector_type vec; + v_load(vec, input_vPtr[i]); + for (int j = 0; j < vector_type::size(); j++) + vec.data[j] = pow(shift + scale * vec.data[j], exp); + v_store(output_vPtr[i], vec); + } + } + } + + template + void launch_vectorized_abs(const Stream& stream, Span output, View input) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + + auto kernel = raw::abs_vec; + auto policy = make_policy(kernel, output.size() / N, 0, stream); + launch_kernel(kernel, policy, output, input); + } + + template + void abs(const Stream& stream, Span output, View input) { + CV_Assert(input.size() == output.size()); + + if (is_fully_aligned(output, 4) && is_fully_aligned(input, 4)) { + launch_vectorized_abs(stream, output, input); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2)) { + launch_vectorized_abs(stream, output, input); + } else { + launch_vectorized_abs(stream, output, input); + } + } + + template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input); + template void abs(const Stream& stream, Span output, View input); + + template + void launch_vectorized_tanh(const Stream& stream, Span output, View input) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + + auto kernel = raw::tanh_vec; + auto policy = make_policy(kernel, output.size() / N, 0, stream); + launch_kernel(kernel, policy, output, input); + } + + template + void tanh(const Stream& stream, Span output, View input) { + CV_Assert(input.size() == output.size()); + + if (is_fully_aligned(output, 4) && is_fully_aligned(input, 4)) { + launch_vectorized_tanh(stream, output, input); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2)) { + launch_vectorized_tanh(stream, output, input); + } else { + launch_vectorized_tanh(stream, output, input); + } + } + + template void tanh<__half>(const Stream&, Span<__half>, View<__half>); + template void tanh(const Stream&, Span, View); + + template + void launch_vectorized_swish(const Stream& stream, Span output, View input) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + + auto kernel = raw::swish_vec; + auto policy = make_policy(kernel, output.size() / N, 0, stream); + launch_kernel(kernel, policy, output, input); + } + + template + void swish(const Stream& stream, Span output, View input) { + CV_Assert(input.size() == output.size()); + + if (is_fully_aligned(output, 4) && is_fully_aligned(input, 4)) { + launch_vectorized_swish(stream, output, input); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2)) { + launch_vectorized_swish(stream, output, input); + } else { + launch_vectorized_swish(stream, output, input); + } + } + + template void swish<__half>(const Stream&, Span<__half>, View<__half>); + template void swish(const Stream&, Span, View); + + template + void launch_vectorized_mish(const Stream& stream, Span output, View input) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + + auto kernel = raw::mish_vec; + auto policy = make_policy(kernel, 
output.size() / N, 0, stream); + launch_kernel(kernel, policy, output, input); + } + + template + void mish(const Stream& stream, Span output, View input) { + CV_Assert(input.size() == output.size()); + + if (is_fully_aligned(output, 4) && is_fully_aligned(input, 4)) { + launch_vectorized_mish(stream, output, input); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2)) { + launch_vectorized_mish(stream, output, input); + } else { + launch_vectorized_mish(stream, output, input); + } + } + + template void mish<__half>(const Stream&, Span<__half>, View<__half>); + template void mish(const Stream&, Span, View); + + template + void launch_vectorized_sigmoid(const Stream& stream, Span output, View input) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + + auto kernel = raw::sigmoid_vec; + auto policy = make_policy(kernel, output.size() / N, 0, stream); + launch_kernel(kernel, policy, output, input); + } + + template + void sigmoid(const Stream& stream, Span output, View input) { + CV_Assert(input.size() == output.size()); + + if (is_fully_aligned(output, 4) && is_fully_aligned(input, 4)) { + launch_vectorized_sigmoid(stream, output, input); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2)) { + launch_vectorized_sigmoid(stream, output, input); + } else { + launch_vectorized_sigmoid(stream, output, input); + } + } + + template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>); + template void sigmoid(const Stream&, Span, View); + + template + void launch_vectorized_bnll(const Stream& stream, Span output, View input) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + + auto kernel = raw::bnll_vec; + auto policy = make_policy(kernel, output.size() / N, 0, stream); + launch_kernel(kernel, policy, output, input); + } + + template + void bnll(const Stream& stream, Span output, View input) { + CV_Assert(input.size() == output.size()); + + if (is_fully_aligned(output, 4) && is_fully_aligned(input, 4)) { + launch_vectorized_bnll(stream, output, input); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2)) { + launch_vectorized_bnll(stream, output, input); + } else { + launch_vectorized_bnll(stream, output, input); + } + } + + template void bnll<__half>(const Stream&, Span<__half>, View<__half>); + template void bnll(const Stream&, Span, View); + + template + void launch_vectorized_elu(const Stream& stream, Span output, View input) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + + auto kernel = raw::elu_vec; + auto policy = make_policy(kernel, output.size() / N, 0, stream); + launch_kernel(kernel, policy, output, input); + } + + template + void elu(const Stream& stream, Span output, View input) { + CV_Assert(input.size() == output.size()); + + if (is_fully_aligned(output, 4) && is_fully_aligned(input, 4)) { + launch_vectorized_elu(stream, output, input); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2)) { + launch_vectorized_elu(stream, output, input); + } else { + launch_vectorized_elu(stream, output, input); + } + } + + template void elu<__half>(const Stream&, Span<__half>, View<__half>); + template void elu(const Stream&, Span, View); + + template + void launch_vectorized_relu(const Stream& stream, Span output, View input, T slope) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + + auto kernel = raw::relu_vec; + auto policy = make_policy(kernel, output.size() / 
N, 0, stream); + launch_kernel(kernel, policy, output, input, slope); + } + + template + void relu(const Stream& stream, Span output, View input, T slope) { + CV_Assert(input.size() == output.size()); + + if(is_fully_aligned(output, 4) && is_fully_aligned(input, 4)) { + launch_vectorized_relu(stream, output, input, slope); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2)) { + launch_vectorized_relu(stream, output, input, slope); + } else { + launch_vectorized_relu(stream, output, input, slope); + } + } + + template void relu<__half>(const Stream&, Span<__half>, View<__half>, __half); + template void relu(const Stream&, Span, View, float); + + template + void launch_vectorized_clipped_relu(const Stream& stream, Span output, View input, T floor, T ceiling) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + + auto kernel = raw::clipped_relu_vec; + auto policy = make_policy(kernel, output.size() / N, 0, stream); + launch_kernel(kernel, policy, output, input, floor, ceiling); + } + + template + void clipped_relu(const Stream& stream, Span output, View input, T floor, T ceiling) { + CV_Assert(input.size() == output.size()); + CV_Assert(static_cast(floor) <= static_cast(ceiling)); + + if(is_fully_aligned(output, 4) && is_fully_aligned(input, 4)) { + launch_vectorized_clipped_relu(stream, output, input, floor, ceiling); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2)) { + launch_vectorized_clipped_relu(stream, output, input, floor, ceiling); + } else { + launch_vectorized_clipped_relu(stream, output, input, floor, ceiling); + } + } + + template void clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half); + template void clipped_relu(const Stream&, Span, View, float, float); + + template + void launch_vectorized_axiswise_relu(const Stream& stream, Span output, View input, std::size_t inner_size, View slope) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + CV_Assert(inner_size % N == 0); + + auto kernel = raw::axiswise_relu_vec; + auto policy = make_policy(kernel, output.size() / N, 0, stream); + launch_kernel(kernel, policy, output, input, inner_size, slope); + } + + template + void axiswise_relu(const Stream& stream, Span output, View input, std::size_t inner_size, View slope) { + CV_Assert(input.size() == output.size()); + + if (is_fully_aligned(output, 4) && is_fully_aligned(input, 4) && inner_size % 4 == 0) { + launch_vectorized_axiswise_relu(stream, output, input, inner_size, slope); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2) && inner_size % 2 == 0) { + launch_vectorized_axiswise_relu(stream, output, input, inner_size, slope); + } else { + launch_vectorized_axiswise_relu(stream, output, input, inner_size, slope); + } + } + + template void axiswise_relu<__half>(const Stream&, Span<__half>, View<__half>, std::size_t, View<__half>); + template void axiswise_relu(const Stream&, Span, View, std::size_t, View); + + template + void launch_vectorized_power(const Stream& stream, Span output, View input, T exp, T scale, T shift) { + CV_Assert(is_fully_aligned(output, N)); + CV_Assert(is_fully_aligned(input, N)); + + auto kernel = raw::power_vec; + auto policy = make_policy(kernel, output.size() / N, 0, stream); + launch_kernel(kernel, policy, output, input, exp, scale, shift); + } + + template + void power(const Stream& stream, Span output, View input, T exp, T scale, T shift) { + CV_Assert(input.size() == output.size()); + + if 
(static_cast(exp) == 1.0f) { + scale1_with_bias1(stream, output, input, scale, shift); + return; + } + + if (is_fully_aligned(output, 4) && is_fully_aligned(input, 4) && output.size()) { + launch_vectorized_power(stream, output, input, exp, scale, shift); + } else if (is_fully_aligned(output, 2) && is_fully_aligned(input, 2) && output.size()) { + launch_vectorized_power(stream, output, input, exp, scale, shift); + } else { + launch_vectorized_power(stream, output, input, exp, scale, shift); + } + } + + template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half); + template void power(const Stream&, Span, View, float, float, float); + +}}}} /* namespace cv::dnn::cuda4dnn::kernels */ diff --git a/cuda_code/adaptivethreshold_1.cu b/cuda_code/adaptivethreshold_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..615029720c1c1d86fcd3233ea8e8a65e79f73abd --- /dev/null +++ b/cuda_code/adaptivethreshold_1.cu @@ -0,0 +1,1034 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +#include "ppl/cv/cuda/adaptivethreshold.h" +#include "ppl/cv/cuda/setvalue.h" + +#include + +#include "utility.hpp" + +using namespace ppl::common; + +namespace ppl { +namespace cv { +namespace cuda { + +#define SMALL_SIZE 7 +#define SMALL_MAX_KSIZE 32 + +__host__ __device__ +void createGaussianKernel(float* coefficients, float sigma, int ksize) { + bool fixed_kernel = false; + if ((ksize & 1) == 1 && ksize <= SMALL_SIZE && sigma <= 0) { + if (ksize == 1) { + coefficients[0] = 1.f; + } + else if (ksize == 3) { + coefficients[0] = 0.25f; + coefficients[1] = 0.5f; + coefficients[2] = 0.25f; + } + else if (ksize == 5) { + coefficients[0] = 0.0625f; + coefficients[1] = 0.25f; + coefficients[2] = 0.375f; + coefficients[3] = 0.25f; + coefficients[4] = 0.0625f; + } + else { + coefficients[0] = 0.03125f; + coefficients[1] = 0.109375f; + coefficients[2] = 0.21875f; + coefficients[3] = 0.28125f; + coefficients[4] = 0.21875f; + coefficients[5] = 0.109375f; + coefficients[6] = 0.03125f; + } + fixed_kernel = true; + } + + float value = sigma > 0 ? sigma : ((ksize - 1) * 0.5f - 1) * 0.3f + 0.8f; + float scale_2x = -0.5f / (value * value); + float sum = 0.f; + + int i; + float x; + for (i = 0; i < ksize; i++) { + x = i - (ksize - 1) * 0.5f; + value = fixed_kernel ? 
coefficients[i] : std::exp(scale_2x * x * x); + if (!fixed_kernel) { + coefficients[i] = value; + } + sum += value; + } + + sum = 1.f / sum; + for (i = 0; i < ksize; i++) { + coefficients[i] *= sum; + } +} + +template +__global__ +void rowColC1Kernel0(const uchar* src, int rows, int cols, int src_stride, + int radius, float weight, int threshold_type, + uchar setted_value, int delta, uchar* dst, int dst_stride, + BorderInterpolation interpolation) { + __shared__ float data[kDimY0 * 3][(kDimX0 << 2)]; + + int element_x = ((blockIdx.x << kShiftX0) + threadIdx.x) << 2; + int element_y = (blockIdx.y << kShiftY0) + threadIdx.y; + + int bottom = element_x - radius; + int top = element_x + radius; + + int data_index, row_index; + uchar* input; + float4 value; + float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); + + bool isnt_border_block = true; + data_index = radius >> (kShiftX0 + 2); + if (blockIdx.x <= data_index) isnt_border_block = false; + data_index = (cols - radius) >> (kShiftX0 + 2); + if (blockIdx.x >= data_index) isnt_border_block = false; + + if (threadIdx.y < radius && element_x < cols) { + row_index = interpolation(rows, radius, element_y - radius); + input = (uchar*)src + row_index * src_stride; + if (isnt_border_block) { + for (int i = bottom; i <= top; i++) { + value.x = input[i]; + value.y = input[i + 1]; + value.z = input[i + 2]; + value.w = input[i + 3]; + sum += value; + } + } + else { + for (int i = bottom; i <= top; i++) { + data_index = interpolation(cols, radius, i); + value.x = input[data_index]; + data_index = interpolation(cols, radius, i + 1); + value.y = input[data_index]; + data_index = interpolation(cols, radius, i + 2); + value.z = input[data_index]; + data_index = interpolation(cols, radius, i + 3); + value.w = input[data_index]; + sum += value; + } + } + data_index = threadIdx.x << 2; + data[threadIdx.y][data_index] = sum.x; + data[threadIdx.y][data_index + 1] = sum.y; + data[threadIdx.y][data_index + 2] = sum.z; + data[threadIdx.y][data_index + 3] = sum.w; + } + + if (element_y < rows && element_x < cols) { + sum = make_float4(0.f, 0.f, 0.f, 0.f); + input = (uchar*)src + element_y * src_stride; + + if (isnt_border_block) { + for (int i = bottom; i <= top; i++) { + value.x = input[i]; + value.y = input[i + 1]; + value.z = input[i + 2]; + value.w = input[i + 3]; + sum += value; + } + } + else { + for (int i = bottom; i <= top; i++) { + data_index = interpolation(cols, radius, i); + value.x = input[data_index]; + data_index = interpolation(cols, radius, i + 1); + value.y = input[data_index]; + data_index = interpolation(cols, radius, i + 2); + value.z = input[data_index]; + data_index = interpolation(cols, radius, i + 3); + value.w = input[data_index]; + sum += value; + } + } + data_index = threadIdx.x << 2; + data[radius + threadIdx.y][data_index] = sum.x; + data[radius + threadIdx.y][data_index + 1] = sum.y; + data[radius + threadIdx.y][data_index + 2] = sum.z; + data[radius + threadIdx.y][data_index + 3] = sum.w; + } + + if (threadIdx.y < radius && element_x < cols) { + sum = make_float4(0.f, 0.f, 0.f, 0.f); + if (blockIdx.y != gridDim.y - 1) { + row_index = interpolation(rows, radius, + ((blockIdx.y + 1) << kShiftY0) + threadIdx.y); + } + else { + row_index = interpolation(rows, radius, rows + threadIdx.y); + } + input = (uchar*)src + row_index * src_stride; + + if (isnt_border_block) { + for (int i = bottom; i <= top; i++) { + value.x = input[i]; + value.y = input[i + 1]; + value.z = input[i + 2]; + value.w = input[i + 3]; + sum += value; + } + } + else { + for 
(int i = bottom; i <= top; i++) { + data_index = interpolation(cols, radius, i); + value.x = input[data_index]; + data_index = interpolation(cols, radius, i + 1); + value.y = input[data_index]; + data_index = interpolation(cols, radius, i + 2); + value.z = input[data_index]; + data_index = interpolation(cols, radius, i + 3); + value.w = input[data_index]; + sum += value; + } + } + + data_index = threadIdx.x << 2; + if (blockIdx.y != gridDim.y - 1) { + row_index = radius + kDimY0 + threadIdx.y; + } + else { + row_index = radius + (rows - (blockIdx.y << kShiftY0)) + threadIdx.y; + } + data[row_index][data_index] = sum.x; + data[row_index][data_index + 1] = sum.y; + data[row_index][data_index + 2] = sum.z; + data[row_index][data_index + 3] = sum.w; + } + __syncthreads(); + + if (element_y < rows && element_x < cols) { + top = (radius << 1) + 1; + sum = make_float4(0.f, 0.f, 0.f, 0.f); + + for (int i = 0; i < top; i++) { + data_index = threadIdx.x << 2; + value.x = data[i + threadIdx.y][data_index]; + value.y = data[i + threadIdx.y][data_index + 1]; + value.z = data[i + threadIdx.y][data_index + 2]; + value.w = data[i + threadIdx.y][data_index + 3]; + sum += value; + } + + sum.x *= weight; + sum.y *= weight; + sum.z *= weight; + sum.w *= weight; + + int4 threshold; + threshold.x = saturateCast(sum.x) - delta; + threshold.y = saturateCast(sum.y) - delta; + threshold.z = saturateCast(sum.z) - delta; + threshold.w = saturateCast(sum.w) - delta; + + input = (uchar*)src + element_y * src_stride; + value.x = input[element_x]; + value.y = input[element_x + 1]; + value.z = input[element_x + 2]; + value.w = input[element_x + 3]; + + if (threshold_type == THRESH_BINARY) { + value.x = value.x > threshold.x ? setted_value : 0; + value.y = value.y > threshold.y ? setted_value : 0; + value.z = value.z > threshold.z ? setted_value : 0; + value.w = value.w > threshold.w ? setted_value : 0; + } + else { + value.x = value.x > threshold.x ? 0 : setted_value; + value.y = value.y > threshold.y ? 0 : setted_value; + value.z = value.z > threshold.z ? 0 : setted_value; + value.w = value.w > threshold.w ? 
0 : setted_value; + } + + uchar* output = dst + element_y * dst_stride; + if (element_x < cols - 3) { + output[element_x] = saturateCast(value.x); + output[element_x + 1] = saturateCast(value.y); + output[element_x + 2] = saturateCast(value.z); + output[element_x + 3] = saturateCast(value.w); + } + else { + output[element_x] = saturateCast(value.x); + if (element_x < cols - 1) { + output[element_x + 1] = saturateCast(value.y); + } + if (element_x < cols - 2) { + output[element_x + 2] = saturateCast(value.z); + } + } + } +} + +template +__global__ +void rowColC1Kernel1(const uchar* src, int rows, int cols, int src_stride, + int ksize, int threshold_type, uchar setted_value, + int delta, uchar* dst, int dst_stride, + BorderInterpolation interpolation) { + __shared__ float data[kDimY0 * 3][(kDimX0 << 2)]; + __shared__ float kernel[SMALL_MAX_KSIZE]; + + int element_x = ((blockIdx.x << kShiftX0) + threadIdx.x) << 2; + int element_y = (blockIdx.y << kShiftY0) + threadIdx.y; + + if (threadIdx.y == 0 && threadIdx.x == 0) { + createGaussianKernel(kernel, 0, ksize); + } + __syncthreads(); + + int radius = ksize >> 1; + int bottom = element_x - radius; + int top = element_x + radius; + + int data_index, row_index, kernel_index = 0; + uchar* input; + float4 value; + float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); + + bool isnt_border_block = true; + data_index = radius >> (kShiftX0 + 2); + if (blockIdx.x <= data_index) isnt_border_block = false; + data_index = (cols - radius) >> (kShiftX0 + 2); + if (blockIdx.x >= data_index) isnt_border_block = false; + + if (threadIdx.y < radius && element_x < cols) { + row_index = interpolation(rows, radius, element_y - radius); + input = (uchar*)src + row_index * src_stride; + if (isnt_border_block) { + for (int i = bottom; i <= top; i++) { + value.x = input[i]; + value.y = input[i + 1]; + value.z = input[i + 2]; + value.w = input[i + 3]; + mulAdd(sum, value, kernel[kernel_index]); + kernel_index++; + } + } + else { + for (int i = bottom; i <= top; i++) { + data_index = interpolation(cols, radius, i); + value.x = input[data_index]; + data_index = interpolation(cols, radius, i + 1); + value.y = input[data_index]; + data_index = interpolation(cols, radius, i + 2); + value.z = input[data_index]; + data_index = interpolation(cols, radius, i + 3); + value.w = input[data_index]; + mulAdd(sum, value, kernel[kernel_index]); + kernel_index++; + } + } + data_index = threadIdx.x << 2; + data[threadIdx.y][data_index] = sum.x; + data[threadIdx.y][data_index + 1] = sum.y; + data[threadIdx.y][data_index + 2] = sum.z; + data[threadIdx.y][data_index + 3] = sum.w; + } + + if (element_y < rows && element_x < cols) { + sum = make_float4(0.f, 0.f, 0.f, 0.f); + input = (uchar*)src + element_y * src_stride; + kernel_index = 0; + + if (isnt_border_block) { + for (int i = bottom; i <= top; i++) { + value.x = input[i]; + value.y = input[i + 1]; + value.z = input[i + 2]; + value.w = input[i + 3]; + mulAdd(sum, value, kernel[kernel_index]); + kernel_index++; + } + } + else { + for (int i = bottom; i <= top; i++) { + data_index = interpolation(cols, radius, i); + value.x = input[data_index]; + data_index = interpolation(cols, radius, i + 1); + value.y = input[data_index]; + data_index = interpolation(cols, radius, i + 2); + value.z = input[data_index]; + data_index = interpolation(cols, radius, i + 3); + value.w = input[data_index]; + mulAdd(sum, value, kernel[kernel_index]); + kernel_index++; + } + } + data_index = threadIdx.x << 2; + data[radius + threadIdx.y][data_index] = sum.x; + 
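// Row pass of the separable filter: each thread accumulates a Gaussian-weighted sum of its four pixels with mulAdd() and caches it in the shared-memory tile; the column pass after __syncthreads() combines these cached partial sums.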
data[radius + threadIdx.y][data_index + 1] = sum.y; + data[radius + threadIdx.y][data_index + 2] = sum.z; + data[radius + threadIdx.y][data_index + 3] = sum.w; + } + + if (threadIdx.y < radius && element_x < cols) { + sum = make_float4(0.f, 0.f, 0.f, 0.f); + if (blockIdx.y != gridDim.y - 1) { + row_index = interpolation(rows, radius, + ((blockIdx.y + 1) << kShiftY0) + threadIdx.y); + } + else { + row_index = interpolation(rows, radius, rows + threadIdx.y); + } + input = (uchar*)src + row_index * src_stride; + kernel_index = 0; + + if (isnt_border_block) { + for (int i = bottom; i <= top; i++) { + value.x = input[i]; + value.y = input[i + 1]; + value.z = input[i + 2]; + value.w = input[i + 3]; + mulAdd(sum, value, kernel[kernel_index]); + kernel_index++; + } + } + else { + for (int i = bottom; i <= top; i++) { + data_index = interpolation(cols, radius, i); + value.x = input[data_index]; + data_index = interpolation(cols, radius, i + 1); + value.y = input[data_index]; + data_index = interpolation(cols, radius, i + 2); + value.z = input[data_index]; + data_index = interpolation(cols, radius, i + 3); + value.w = input[data_index]; + mulAdd(sum, value, kernel[kernel_index]); + kernel_index++; + } + } + + data_index = threadIdx.x << 2; + if (blockIdx.y != gridDim.y - 1) { + row_index = radius + kDimY0 + threadIdx.y; + } + else { + row_index = radius + (rows - (blockIdx.y << kShiftY0)) + threadIdx.y; + } + data[row_index][data_index] = sum.x; + data[row_index][data_index + 1] = sum.y; + data[row_index][data_index + 2] = sum.z; + data[row_index][data_index + 3] = sum.w; + } + __syncthreads(); + + if (element_y < rows && element_x < cols) { + top = (radius << 1) + 1; + sum = make_float4(0.f, 0.f, 0.f, 0.f); + kernel_index = 0; + + for (int i = 0; i < top; i++) { + data_index = threadIdx.x << 2; + value.x = data[i + threadIdx.y][data_index]; + value.y = data[i + threadIdx.y][data_index + 1]; + value.z = data[i + threadIdx.y][data_index + 2]; + value.w = data[i + threadIdx.y][data_index + 3]; + mulAdd(sum, value, kernel[kernel_index]); + kernel_index++; + } + + int4 threshold; + threshold.x = saturateCast(sum.x) - delta; + threshold.y = saturateCast(sum.y) - delta; + threshold.z = saturateCast(sum.z) - delta; + threshold.w = saturateCast(sum.w) - delta; + + input = (uchar*)src + element_y * src_stride; + value.x = input[element_x]; + value.y = input[element_x + 1]; + value.z = input[element_x + 2]; + value.w = input[element_x + 3]; + + if (threshold_type == THRESH_BINARY) { + value.x = value.x > threshold.x ? setted_value : 0; + value.y = value.y > threshold.y ? setted_value : 0; + value.z = value.z > threshold.z ? setted_value : 0; + value.w = value.w > threshold.w ? setted_value : 0; + } + else { + value.x = value.x > threshold.x ? 0 : setted_value; + value.y = value.y > threshold.y ? 0 : setted_value; + value.z = value.z > threshold.z ? 0 : setted_value; + value.w = value.w > threshold.w ? 
0 : setted_value; + } + + uchar* output = dst + element_y * dst_stride; + if (element_x < cols - 3) { + output[element_x] = saturateCast(value.x); + output[element_x + 1] = saturateCast(value.y); + output[element_x + 2] = saturateCast(value.z); + output[element_x + 3] = saturateCast(value.w); + } + else { + output[element_x] = saturateCast(value.x); + if (element_x < cols - 1) { + output[element_x + 1] = saturateCast(value.y); + } + if (element_x < cols - 2) { + output[element_x + 2] = saturateCast(value.z); + } + } + } +} + +template +__global__ +void rowBatch4Kernel0(const uchar* src, int rows, int cols, int src_stride, + int radius, float* dst, int dst_stride, + BorderInterpolation interpolation) { + int element_x = ((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 2; + int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y; + if (element_x >= cols || element_y >= rows) { + return; + } + + int origin_x = element_x - radius; + int top_x = element_x + radius; + + int data_index; + uchar* input; + uchar4 value; + float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); + + bool isnt_border_block = true; + data_index = radius >> (kBlockShiftX1 + 2); + if (blockIdx.x <= data_index) isnt_border_block = false; + data_index = (cols - radius) >> (kBlockShiftX1 + 2); + if (blockIdx.x >= data_index) isnt_border_block = false; + + input = (uchar*)src + element_y * src_stride; + if (isnt_border_block) { + for (int i = origin_x; i <= top_x; i++) { + value.x = input[i]; + value.y = input[i + 1]; + value.z = input[i + 2]; + value.w = input[i + 3]; + sum += value; + } + } + else { + for (int i = origin_x; i <= top_x; i++) { + data_index = interpolation(cols, radius, i); + value.x = input[data_index]; + data_index = interpolation(cols, radius, i + 1); + value.y = input[data_index]; + data_index = interpolation(cols, radius, i + 2); + value.z = input[data_index]; + data_index = interpolation(cols, radius, i + 3); + value.w = input[data_index]; + sum += value; + } + } + + float* output = (float*)((uchar*)dst + element_y * dst_stride); + if (element_x < cols - 3) { + output[element_x] = sum.x; + output[element_x + 1] = sum.y; + output[element_x + 2] = sum.z; + output[element_x + 3] = sum.w; + } + else { + output[element_x] = sum.x; + if (element_x < cols - 1) { + output[element_x + 1] = sum.y; + } + if (element_x < cols - 2) { + output[element_x + 2] = sum.z; + } + } +} + +template +__global__ +void rowBatch4Kernel1(const uchar* src, int rows, int cols, int src_stride, + const float* kernel, int radius, float* dst, + int dst_stride, BorderInterpolation interpolation) { + int element_x = ((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 2; + int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y; + if (element_x >= cols || element_y >= rows) { + return; + } + + int origin_x = element_x - radius; + int top_x = element_x + radius; + + int data_index, kernel_index = 0; + uchar* input; + uchar4 value; + float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); + + bool isnt_border_block = true; + data_index = radius >> (kBlockShiftX1 + 2); + if (blockIdx.x <= data_index) isnt_border_block = false; + data_index = (cols - radius) >> (kBlockShiftX1 + 2); + if (blockIdx.x >= data_index) isnt_border_block = false; + + input = (uchar*)src + element_y * src_stride; + if (isnt_border_block) { + for (int i = origin_x; i <= top_x; i++) { + value.x = input[i]; + value.y = input[i + 1]; + value.z = input[i + 2]; + value.w = input[i + 3]; + mulAdd(sum, value, kernel[kernel_index]); + kernel_index++; + } + } + else { + for (int i = 
origin_x; i <= top_x; i++) { + data_index = interpolation(cols, radius, i); + value.x = input[data_index]; + data_index = interpolation(cols, radius, i + 1); + value.y = input[data_index]; + data_index = interpolation(cols, radius, i + 2); + value.z = input[data_index]; + data_index = interpolation(cols, radius, i + 3); + value.w = input[data_index]; + mulAdd(sum, value, kernel[kernel_index]); + kernel_index++; + } + } + + float* output = (float*)((uchar*)dst + element_y * dst_stride); + if (element_x < cols - 3) { + output[element_x] = sum.x; + output[element_x + 1] = sum.y; + output[element_x + 2] = sum.z; + output[element_x + 3] = sum.w; + } + else { + output[element_x] = sum.x; + if (element_x < cols - 1) { + output[element_x + 1] = sum.y; + } + if (element_x < cols - 2) { + output[element_x + 2] = sum.z; + } + } +} + +template +__global__ +void colBatch4Kernel0(const float* buffer, int rows, int cols, + int buffer_stride, const uchar* src, int src_stride, + int radius, float weight, int threshold_type, + uchar setted_value, int delta, uchar* dst, int dst_stride, + BorderInterpolation interpolation) { + __shared__ uchar data[kBlockDimY1][kBlockDimX1 << 2]; + + int element_x = (blockIdx.x << (kBlockShiftX1 + 2)) + threadIdx.x; + int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y; + if (element_x >= cols || element_y >= rows) { + return; + } + + int origin_y = element_y - radius; + int top_y = element_y + radius; + + int data_index; + float* input0; + float value; + float sum = 0.f; + + bool isnt_border_block = true; + data_index = radius >> kBlockShiftY1; + if (blockIdx.y <= data_index) isnt_border_block = false; + data_index = (rows - radius) >> kBlockShiftY1; + if (blockIdx.y >= data_index) isnt_border_block = false; + + if (isnt_border_block) { + for (int i = origin_y; i <= top_y; i++) { + input0 = (float*)((uchar*)buffer + i * buffer_stride); + value = input0[element_x]; + sum += value; + } + } + else { + for (int i = origin_y; i <= top_y; i++) { + data_index = interpolation(rows, radius, i); + input0 = (float*)((uchar*)buffer + data_index * buffer_stride); + value = input0[element_x]; + sum += value; + } + } + + sum *= weight; + data[threadIdx.y][threadIdx.x] = saturateCast(sum); + __syncthreads(); + + if (threadIdx.x < kBlockDimX1) { + int4 value; + uchar* input1 = (uchar*)src + element_y * src_stride; + element_x = (blockIdx.x << (kBlockShiftX1 + 2)) + (threadIdx.x << 2); + value.x = input1[element_x]; + value.y = input1[element_x + 1]; + value.z = input1[element_x + 2]; + value.w = input1[element_x + 3]; + + int4 threshold; + threshold.x = data[threadIdx.y][(threadIdx.x << 2)] - delta; + threshold.y = data[threadIdx.y][(threadIdx.x << 2) + 1] - delta; + threshold.z = data[threadIdx.y][(threadIdx.x << 2) + 2] - delta; + threshold.w = data[threadIdx.y][(threadIdx.x << 2) + 3] - delta; + + if (threshold_type == THRESH_BINARY) { + value.x = value.x > threshold.x ? setted_value : 0; + value.y = value.y > threshold.y ? setted_value : 0; + value.z = value.z > threshold.z ? setted_value : 0; + value.w = value.w > threshold.w ? setted_value : 0; + } + else { + value.x = value.x > threshold.x ? 0 : setted_value; + value.y = value.y > threshold.y ? 0 : setted_value; + value.z = value.z > threshold.z ? 0 : setted_value; + value.w = value.w > threshold.w ? 
0 : setted_value; + } + + uchar* output = (uchar*)dst + element_y * dst_stride; + element_x = ((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 2; + data_index = threadIdx.x << 2; + if (element_x < cols - 3) { + output[element_x] = clip(value.x, 0, 255); + output[element_x + 1] = clip(value.y, 0, 255); + output[element_x + 2] = clip(value.z, 0, 255); + output[element_x + 3] = clip(value.w, 0, 255); + } + else if (element_x < cols) { + output[element_x] = clip(value.x, 0, 255); + if (element_x < cols - 1) { + output[element_x + 1] = clip(value.y, 0, 255); + } + if (element_x < cols - 2) { + output[element_x + 2] = clip(value.z, 0, 255); + } + } + else { + } + } +} + +template +__global__ +void colBatch4Kernel1(const float* buffer, int rows, int cols, + int buffer_stride, const uchar* src, int src_stride, + const float* kernel, int radius, int threshold_type, + uchar setted_value, int delta, uchar* dst, int dst_stride, + BorderInterpolation interpolation) { + __shared__ uchar data[kBlockDimY1][kBlockDimX1 << 2]; + + int element_x = (blockIdx.x << (kBlockShiftX1 + 2)) + threadIdx.x; + int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y; + if (element_x >= cols || element_y >= rows) { + return; + } + + int origin_y = element_y - radius; + int top_y = element_y + radius; + + int data_index, kernel_index = 0; + float* input0; + float value; + float sum = 0.f; + + bool isnt_border_block = true; + data_index = radius >> kBlockShiftY1; + if (blockIdx.y <= data_index) isnt_border_block = false; + data_index = (rows - radius) >> kBlockShiftY1; + if (blockIdx.y >= data_index) isnt_border_block = false; + + if (isnt_border_block) { + for (int i = origin_y; i <= top_y; i++) { + input0 = (float*)((uchar*)buffer + i * buffer_stride); + value = input0[element_x]; + sum += value * kernel[kernel_index]; + kernel_index++; + } + } + else { + for (int i = origin_y; i <= top_y; i++) { + data_index = interpolation(rows, radius, i); + input0 = (float*)((uchar*)buffer + data_index * buffer_stride); + value = input0[element_x]; + sum += value * kernel[kernel_index]; + kernel_index++; + } + } + + data[threadIdx.y][threadIdx.x] = saturateCast(sum); + __syncthreads(); + + if (threadIdx.x < kBlockDimX1) { + uchar4 value; + uchar* input1 = (uchar*)src + element_y * src_stride; + element_x = (blockIdx.x << (kBlockShiftX1 + 2)) + (threadIdx.x << 2); + value.x = input1[element_x]; + value.y = input1[element_x + 1]; + value.z = input1[element_x + 2]; + value.w = input1[element_x + 3]; + + int4 threshold; + threshold.x = data[threadIdx.y][(threadIdx.x << 2)] - delta; + threshold.y = data[threadIdx.y][(threadIdx.x << 2) + 1] - delta; + threshold.z = data[threadIdx.y][(threadIdx.x << 2) + 2] - delta; + threshold.w = data[threadIdx.y][(threadIdx.x << 2) + 3] - delta; + + if (threshold_type == THRESH_BINARY) { + value.x = value.x > threshold.x ? setted_value : 0; + value.y = value.y > threshold.y ? setted_value : 0; + value.z = value.z > threshold.z ? setted_value : 0; + value.w = value.w > threshold.w ? setted_value : 0; + } + else { + value.x = value.x > threshold.x ? 0 : setted_value; + value.y = value.y > threshold.y ? 0 : setted_value; + value.z = value.z > threshold.z ? 0 : setted_value; + value.w = value.w > threshold.w ? 
0 : setted_value; + } + + uchar* output = (uchar*)dst + element_y * dst_stride; + element_x = ((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 2; + data_index = threadIdx.x << 2; + if (element_x < cols - 3) { + output[element_x] = value.x; + output[element_x + 1] = value.y; + output[element_x + 2] = value.z; + output[element_x + 3] = value.w; + } + else if (element_x < cols) { + output[element_x] = value.x; + if (element_x < cols - 1) { + output[element_x + 1] = value.y; + } + if (element_x < cols - 2) { + output[element_x + 2] = value.z; + } + } + else { + } + } +} + +#define RUN_SMALL_KERNELS(Interpolation) \ +Interpolation interpolation; \ +if (adaptive_method == ADAPTIVE_THRESH_MEAN_C) { \ + rowColC1Kernel0<<>>(src, rows, cols, \ + src_stride, radius, weight, threshold_type, setted_value, int_delta, dst,\ + dst_stride, interpolation); \ +} \ +else { \ + rowColC1Kernel1<<>>(src, rows, cols, \ + src_stride, ksize, threshold_type, setted_value, int_delta, dst, \ + dst_stride, interpolation); \ +} + +#define RUN_LARAGE_KERNELS0(Interpolation) \ +Interpolation interpolation; \ +rowBatch4Kernel0<<>>(src, rows, cols, \ + src_stride, radius, buffer, pitch, interpolation); \ +colBatch4Kernel0<<>>(buffer, rows, \ + cols, pitch, src, src_stride, radius, weight, threshold_type, setted_value,\ + int_delta, dst, dst_stride, interpolation); + +#define RUN_LARAGE_KERNELS1(Interpolation) \ +Interpolation interpolation; \ +rowBatch4Kernel1<<>>(src, rows, cols, \ + src_stride, gpu_kernel, radius, buffer, pitch, interpolation); \ +colBatch4Kernel1<<>>(buffer, rows, \ + cols, pitch, src, src_stride, gpu_kernel, radius, threshold_type, \ + setted_value, int_delta, dst, dst_stride, interpolation); + +RetCode +AdaptiveThreshold(cudaStream_t stream, int rows, int cols, int src_stride, + const uchar* src, int dst_stride, uchar* dst, float max_value, + int adaptive_method, int threshold_type, int ksize, + float delta, BorderType border_type) { + PPL_ASSERT(src != nullptr); + PPL_ASSERT(dst != nullptr); + PPL_ASSERT(rows >= 1 && cols >= 1); + PPL_ASSERT(max_value != 0); + PPL_ASSERT(src_stride >= cols * (int)sizeof(uchar)); + PPL_ASSERT(dst_stride >= cols * (int)sizeof(uchar)); + PPL_ASSERT(adaptive_method == ADAPTIVE_THRESH_MEAN_C || + adaptive_method == ADAPTIVE_THRESH_GAUSSIAN_C); + PPL_ASSERT(threshold_type == THRESH_BINARY || + threshold_type == THRESH_BINARY_INV); + PPL_ASSERT((ksize & 1) == 1 && ksize > 1); + PPL_ASSERT(border_type == BORDER_REPLICATE || + border_type == BORDER_REFLECT || + border_type == BORDER_REFLECT_101 || + border_type == BORDER_DEFAULT); + + uchar setted_value = 0; + if (max_value < 0) { + Zeros(stream, rows, cols, dst_stride, dst); + return RC_SUCCESS; + } + else if (max_value < 255.f) { + setted_value = rintf(max_value); + } + else { + setted_value = 255; + } + + int int_delta = 0; + if (threshold_type == THRESH_BINARY) { + int_delta = std::ceil(delta); + } + else { + int_delta = std::floor(delta); + } + + int radius = ksize >> 1; + float weight = 1.f / (ksize * ksize); + + cudaError_t code; + if (ksize < SMALL_MAX_KSIZE) { + dim3 block, grid; + block.x = kDimX0; + block.y = kDimY0; + grid.x = divideUp(divideUp(cols, 4, 2), kDimX0, kShiftX0); + grid.y = divideUp(rows, kDimY0, kShiftY0); + + if (border_type == BORDER_REPLICATE) { + RUN_SMALL_KERNELS(ReplicateBorder); + } + else if (border_type == BORDER_REFLECT) { + RUN_SMALL_KERNELS(ReflectBorder); + } + else { + RUN_SMALL_KERNELS(Reflect101Border); + } + + code = cudaGetLastError(); + if (code != cudaSuccess) { + LOG(ERROR) << 
"CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_RUNTIME_ERROR; + } + + return RC_SUCCESS; + } + + dim3 block1, grid1; + block1.x = kBlockDimX1; + block1.y = kBlockDimY1; + grid1.x = divideUp(divideUp(cols, 4, 2), kBlockDimX1, kBlockShiftX1); + grid1.y = divideUp(rows, kBlockDimY1, kBlockShiftY1); + + dim3 block2, grid2; + block2.x = (kBlockDimX1 << 2); + block2.y = kBlockDimY1; + grid2.x = divideUp(cols, (kBlockDimX1 << 2), (kBlockShiftX1 + 2)); + grid2.y = divideUp(rows, kBlockDimY1, kBlockShiftY1); + + if (adaptive_method == ADAPTIVE_THRESH_MEAN_C) { + float* buffer; + size_t pitch; + code = cudaMallocPitch(&buffer, &pitch, cols * sizeof(float), rows); + if (code != cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_MEMORY_ERROR; + } + + if (border_type == BORDER_REPLICATE) { + RUN_LARAGE_KERNELS0(ReplicateBorder); + } + else if (border_type == BORDER_REFLECT) { + RUN_LARAGE_KERNELS0(ReflectBorder); + } + else { + RUN_LARAGE_KERNELS0(Reflect101Border); + } + + code = cudaGetLastError(); + if (code != cudaSuccess) { + cudaFree(buffer); + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_RUNTIME_ERROR; + } + + cudaFree(buffer); + } + else { + float* buffer; + size_t pitch; + code = cudaMallocPitch(&buffer, &pitch, cols * sizeof(float), rows); + if (code != cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_MEMORY_ERROR; + } + + int kernel_size = ksize * sizeof(float); + float* kernel = (float*)malloc(kernel_size); + float* gpu_kernel; + code = cudaMalloc((void**)&gpu_kernel, kernel_size); + if (code != cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_MEMORY_ERROR; + } + createGaussianKernel(kernel, 0, ksize); + code = cudaMemcpyAsync(gpu_kernel, kernel, kernel_size, + cudaMemcpyHostToDevice); + if (code != cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_MEMORY_ERROR; + } + + if (border_type == BORDER_REPLICATE) { + RUN_LARAGE_KERNELS1(ReplicateBorder); + } + else if (border_type == BORDER_REFLECT) { + RUN_LARAGE_KERNELS1(ReflectBorder); + } + else { + RUN_LARAGE_KERNELS1(Reflect101Border); + } + + code = cudaGetLastError(); + if (code != cudaSuccess) { + free(kernel); + cudaFree(buffer); + cudaFree(gpu_kernel); + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_RUNTIME_ERROR; + } + + free(kernel); + cudaFree(buffer); + cudaFree(gpu_kernel); + } + + code = cudaGetLastError(); + if (code != cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_RUNTIME_ERROR; + } + + return RC_SUCCESS; +} + +} // cuda +} // cv +} // ppl diff --git a/cuda_code/addVectors_1.cu b/cuda_code/addVectors_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..ba6be6866eb0157f5397177707ba4dcc3719355b --- /dev/null +++ b/cuda_code/addVectors_1.cu @@ -0,0 +1,9 @@ +__global__ void addVectors(const int entries, + const float *a, + const float *b, + float *ab){ + const int N = threadIdx.x + (16 * blockIdx.x); + + if(N < entries) + ab[N] = a[N] + b[N]; +} diff --git a/cuda_code/add_107.cu b/cuda_code/add_107.cu new file mode 100644 index 0000000000000000000000000000000000000000..7d5811ffe1db105e484902be5a1be6bf8b0ba7d3 --- /dev/null +++ b/cuda_code/add_107.cu @@ -0,0 +1,61 @@ + +#include "../include/cuda_context.h" + +__global__ void AddKernel(const int *a, const int *b, int *c) +{ + int i = threadIdx.x; + c[i] = a[i] + 
b[i]; +} + +void Scratchpad() +{ + const int size = 5; + const int a[size] = { 1, 2, 3, 4, 5 }; + const int b[size] = { 10, 20, 30, 40, 50 }; + int c[size] = { 0 }; + + Stopwatch sw; + cuda::CudaStream stream(cudaStreamNonBlocking); + + std::function regression = [size, &a, &b](const int* d, int) + { + int errs = 0; + for (int i = 0; i < size; i++) { + if (d[i] != a[i] + b[i]) + ++errs; + } + + return errs; + }; + + cuda::CudaContext() + .RegisterKernel(AddKernel, dim3(1), dim3(size), cuda::InputArray(a, size), cuda::InputArray(b, size), cuda::OutputArray(c, size)) + .PushStream(stream) + .InvalidateInputs() + .PushTiming(sw) + .Launch(10000) + .Sync() + .PopTiming() + .GatherOutputs() + .Sync() + .Verify() + .PopStream(); + + printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", + c[0], c[1], c[2], c[3], c[4]); + + printf("\nOperation timing: %f ms\n", sw.ms()); +} + +int main() +{ + try { + Scratchpad(); + } + + catch (const std::exception& ex) { + printf("%s\n", ex.what()); + } + + return 0; +} \ No newline at end of file diff --git a/cuda_code/add_grid_1.cu b/cuda_code/add_grid_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..086e2292696992a71f961112d7efcc91639b298f --- /dev/null +++ b/cuda_code/add_grid_1.cu @@ -0,0 +1,55 @@ +#include +#include + +//_1_: kernel to add the elements of two arrays +__global__ +void add(int n, float *x, float *y) +{ + int index = blockIdx.x * blockDim.x + threadIdx.x; + int stride = gridDim.x * blockDim.x; + for (int i = index; i < n; i+=stride) + y[i] = x[i] + y[i]; +} + +int main(void) +{ + int N = 1<<20; // 1M elements + + // float *x = new float[N]; + // float *y = new float[N]; + //_2_: Allocate Unified Memory -- accessible from CPU or GPU + float *x, *y; + cudaMallocManaged(&x, N*sizeof(float)); + cudaMallocManaged(&y, N*sizeof(float)); + + // initialize x and y arrays on the host + for (int i = 0; i < N; i++) { + x[i] = 1.0f; + y[i] = 2.0f; + } + + // Run kernel on 1M elements on the CPU + // add(N, x, y); + //_3_: launch kernel running on GPU + int blockSize = 256; + int numBlocks = (N + blockSize - 1) / blockSize; + add<<>>(N, x, y); + + + //_4_: synchronization + cudaDeviceSynchronize(); + + // Check for errors (all values should be 3.0f) + float maxError = 0.0f; + for (int i = 0; i < N; i++) + maxError = fmax(maxError, fabs(y[i]-3.0f)); + std::cout << "Max error: " << maxError << std::endl; + + //_2_: Free memory + // delete [] x; + // delete [] y; + cudaFree(x); + cudaFree(y); + + return 0; +} diff --git a/cuda_code/add_scalar_4.cu b/cuda_code/add_scalar_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..0df6132de8b49428275c0222ec9eb56d2cd9e6cc --- /dev/null +++ b/cuda_code/add_scalar_4.cu @@ -0,0 +1,12 @@ +#include "scalar.h" + +__device__ float op(float d1,float d2,float *params) { + return d2 + d1; +} + +extern "C" +__global__ void add_scalar_float(int n, int idx,float dx,float *dy,int incx,float *params,float *result,int blockSize) { + transform(n,idx,dx,dy,incx,params,result,blockSize); + } + + diff --git a/cuda_code/agg_bin.cu b/cuda_code/agg_bin.cu new file mode 100644 index 0000000000000000000000000000000000000000..71fe2506d096a7fe0900ba0b45c341110a103bd0 --- /dev/null +++ b/cuda_code/agg_bin.cu @@ -0,0 +1,26 @@ +#define THREADS _THREADS_ + +__global__ void agg_bin(const int n, + int *ind_count, + const float *rgba, + const int *inds, + float *new_rgba){ + + const int i = blockIdx.x*THREADS + threadIdx.x; + + + if (i >= n){ + return; + } + + const int ii = 4*i; + const int r = 
atomicAdd(&ind_count[4*inds[i]+3], 1);
+  const int rr = 4*r;
+
+  new_rgba[rr] = rgba[ii];
+  new_rgba[rr+1] = rgba[ii+1];
+  new_rgba[rr+2] = rgba[ii+2];
+  new_rgba[rr+3] = rgba[ii+3];
+
+}
+
diff --git a/cuda_code/all_kernels_convex_polygon_4.cu b/cuda_code/all_kernels_convex_polygon_4.cu
new file mode 100644
index 0000000000000000000000000000000000000000..673733e1042d9348e6c309bdee77cd343e185e9d
--- /dev/null
+++ b/cuda_code/all_kernels_convex_polygon_4.cu
@@ -0,0 +1,33 @@
+// Copyright (c) 2009-2018 The Regents of the University of Michigan
+// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
+
+#include "ComputeFreeVolumeGPU.cuh"
+#include "IntegratorMCMMonoGPU.cuh"
+#include "IntegratorMCMMonoImplicitGPU.cuh"
+#include "IntegratorMCMMonoImplicitNewGPU.cuh"
+
+#include "ShapeConvexPolygon.h"
+
+namespace mcm
+{
+
+namespace detail
+{
+
+//! MCM kernels for ShapeConvexPolygon
+template cudaError_t gpu_mcm_free_volume<ShapeConvexPolygon>(const mcm_free_volume_args_t &args,
+                                              const typename ShapeConvexPolygon::param_type *d_params);
+template cudaError_t gpu_mcm_update<ShapeConvexPolygon>(const mcm_args_t& args,
+                                              const typename ShapeConvexPolygon::param_type *d_params);
+template cudaError_t gpu_mcm_implicit_count_overlaps<ShapeConvexPolygon>(const mcm_implicit_args_t& args,
+                                              const typename ShapeConvexPolygon::param_type *d_params);
+template cudaError_t gpu_mcm_implicit_accept_reject<ShapeConvexPolygon>(const mcm_implicit_args_t& args,
+                                              const typename ShapeConvexPolygon::param_type *d_params);
+template cudaError_t gpu_mcm_insert_depletants_queue<ShapeConvexPolygon>(const mcm_implicit_args_new_t& args,
+                                              const typename ShapeConvexPolygon::param_type *d_params);
+template cudaError_t gpu_mcm_implicit_accept_reject_new<ShapeConvexPolygon>(const mcm_implicit_args_new_t& args,
+                                              const typename ShapeConvexPolygon::param_type *d_params);
+
+}; // end namespace detail
+
+} // end namespace mcm
diff --git a/cuda_code/all_kernels_simple_polygon_2.cu b/cuda_code/all_kernels_simple_polygon_2.cu
new file mode 100644
index 0000000000000000000000000000000000000000..dac7eac7649ed0d4804408e8c64231ff11ed20b5
--- /dev/null
+++ b/cuda_code/all_kernels_simple_polygon_2.cu
@@ -0,0 +1,33 @@
+// Copyright (c) 2009-2017 The Regents of the University of Michigan
+// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
+
+#include "ComputeFreeVolumeGPU.cuh"
+#include "IntegratorHPMCMonoGPU.cuh"
+#include "IntegratorHPMCMonoImplicitGPU.cuh"
+#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
+
+#include "ShapeSimplePolygon.h"
+
+namespace hpmc
+{
+
+namespace detail
+{
+
+//! HPMC kernels for ShapeSimplePolygon
+template cudaError_t gpu_hpmc_free_volume<ShapeSimplePolygon>(const hpmc_free_volume_args_t &args,
+                                              const typename ShapeSimplePolygon::param_type *d_params);
+template cudaError_t gpu_hpmc_update<ShapeSimplePolygon>(const hpmc_args_t& args,
+                                              const typename ShapeSimplePolygon::param_type *d_params);
+template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeSimplePolygon>(const hpmc_implicit_args_t& args,
+                                              const typename ShapeSimplePolygon::param_type *d_params);
+template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeSimplePolygon>(const hpmc_implicit_args_t& args,
+                                              const typename ShapeSimplePolygon::param_type *d_params);
+template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeSimplePolygon>(const hpmc_implicit_args_new_t& args,
+                                              const typename ShapeSimplePolygon::param_type *d_params);
+template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeSimplePolygon>(const hpmc_implicit_args_new_t& args,
+                                              const typename ShapeSimplePolygon::param_type *d_params);
+
+}; // end namespace detail
+
+} // end namespace hpmc
diff --git a/cuda_code/allocator_9.cu b/cuda_code/allocator_9.cu
new file mode 100644
index 0000000000000000000000000000000000000000..66f4cfed88b665a8810d3ba965639e2553783a3d
--- /dev/null
+++ b/cuda_code/allocator_9.cu
@@ -0,0 +1,23 @@
+#if __has_include()
+#include
+
+#include "allocator.h"
+
+namespace lython {
+namespace device{
+
+// CUDA alloc is guaranteed to be 256 aligned
+void* CUDA::malloc(std::size_t n){
+    void* ptr = nullptr;
+    cudaMalloc(&ptr, sizeof(float) * n);
+    return ptr;
+}
+
+bool CUDA::free(void* ptr, std::size_t){
+    cudaFree(ptr);
+    return true;
+}
+
+} // namespace device
+} // namespace lython
+#endif
diff --git a/cuda_code/alpha_22.cu b/cuda_code/alpha_22.cu
new file mode 100644
index 0000000000000000000000000000000000000000..cf6785b99be74db8c20fca72a74b3660d8c30acf
--- /dev/null
+++ b/cuda_code/alpha_22.cu
@@ -0,0 +1,777 @@
+{% include "./EdgeSortTest/beta.cu" %}
+{% include "./MovelistTest/beta.cu" %}
+
+__constant__ unsigned char c_ucFourPermutations[{{ fit_nr_four_permutations }}][{{ glob_nr_tile_orientations }}];
+
+struct xFourPermutation {
+    unsigned short WalkIndex;
+    uchar4 Perm;
+    __device__ xFourPermutation(unsigned short __usPermIndex);
+    __device__ unsigned short ucWalk();
+    __device__ bool bNotTraversed();
+};
+
+__device__ xFourPermutation::xFourPermutation(unsigned short __usPermIndex) {
+    this->WalkIndex = 0;
+    switch(__usPermIndex % 24){
+    case 1: this->Perm = make_uchar4(1,2,3,4); break;
+    case 2: this->Perm = make_uchar4(1,2,4,3); break;
+    case 3: this->Perm = make_uchar4(1,3,2,4); break;
+    case 4: this->Perm = make_uchar4(1,3,4,2); break;
+    case 5: this->Perm = make_uchar4(1,4,2,3); break;
+    case 6: this->Perm = make_uchar4(1,4,3,2); break;
+    case 7: this->Perm = make_uchar4(2,1,3,4); break;
+    case 8: this->Perm = make_uchar4(2,1,4,3); break;
+    case 9: this->Perm = make_uchar4(2,3,1,4); break;
+    case 10: this->Perm = make_uchar4(2,3,4,1); break;
+    case 11: this->Perm = make_uchar4(2,4,1,3); break;
+    case 12: this->Perm = make_uchar4(2,4,3,1); break;
+    case 13: this->Perm = make_uchar4(3,2,1,4); break;
+    case 14: this->Perm = make_uchar4(3,2,4,1); break;
+    case 15: this->Perm = make_uchar4(3,1,2,4); break;
+    case 16: this->Perm = make_uchar4(3,1,4,2); break;
+    case 17: this->Perm = make_uchar4(3,4,2,1); break;
+    case 18: this->Perm = make_uchar4(3,4,1,2); break;
+    case 19: this->Perm = make_uchar4(4,2,3,1); break;
+    case 20: this->Perm = make_uchar4(4,2,1,3); break;
+    case 21: this->Perm = make_uchar4(4,3,2,1); break;
+    case 22: this->Perm = make_uchar4(4,3,1,2); break;
+    case 23: this->Perm =
make_uchar4(4,1,2,3); break; + case 0: this->Perm = make_uchar4(4,1,3,2); break; + } +} + +__device__ unsigned short xFourPermutation::ucWalk() { + //Require c_ucFourPermutations to be numbers 1-4 (NOT 0-3) + this->WalkIndex++; + if (this->WalkIndex - 1 < mNrTileOrientations) { + //return this->Perm[];//this->WalkIndex-1; //c_ucFourPermutations[this->PermIndex][this->WalkIndex - 1] - 1; //TEST + switch(this->WalkIndex-1){ + case 0: return this->Perm.x-1; + case 1: return this->Perm.y-1; + case 2: return this->Perm.z-1; + case 3: return this->Perm.w-1; + } + } else return 0; +} + +__device__ bool xFourPermutation::bNotTraversed() { + //Require c_ucFourPermutations to be numbers 1-4 (NOT 0-3) + if (this->WalkIndex >= mNrTileOrientations) { + return false; + } else return true; +} + +extern "C++"{ +template +struct xLinearIterator { + unsigned short WalkIndex; + __device__ xLinearIterator(unsigned short __usPermIndex); + __device__ unsigned short ucWalk(); + __device__ bool bNotTraversed(); +}; + +template +__device__ xLinearIterator::xLinearIterator(unsigned short __usPermIndex) { + //this->WalkIndex = 0; +} + +template +__device__ unsigned short xLinearIterator::ucWalk() { + //Require c_fFourPermutations to be numbers 1-4 (NOT 0-3) + this->WalkIndex++; + if (this->WalkIndex - 1 < Length) { + return this->WalkIndex - 1; + } else return 0; +} + +template +__device__ bool xLinearIterator::bNotTraversed() { + //Require c_fFourPermutations to be numbers 1-4 (NOT 0-3) + if (this->WalkIndex >= Length) { + return false; + } else return true; +} + +struct xCell { + unsigned char data; + __device__ void set_Orient(unsigned char __uiOrient); + __device__ void set_Type(unsigned char __uiType); + __device__ unsigned char get_xType(void); + __device__ unsigned char get_xOrient(void); + __device__ unsigned char get_xCell(void); + __device__ void set_xCell(unsigned char __ucVal); +}; + +__device__ void xCell::set_Orient(unsigned char __uiOrient) { + __uiOrient = __uiOrient % mNrTileOrientations; + //unsigned char DBGVAL1 = this->data & (255-3); + //unsigned char DBGVAL2 = __uiOrient; + //unsigned char DBGVAL3 = this->data & (255-3) + __uiOrient; + //I THINK THIS FUNCTION DOES NOT WORK! 
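+  // Note on the line below: (data & (255-3)) zeroes the two low-order orientation bits, and adding __uiOrient
+  // (already reduced modulo mNrTileOrientations above, assumed here to be 4) cannot carry into the type bits,
+  // so the bit manipulation itself looks correct despite the doubt expressed in the comment above.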
+ this->data = ((this->data & (255-3) ) + __uiOrient); +} + +__device__ void xCell::set_Type(unsigned char __uiType) { +#ifndef __NON_FERMI + if (__uiType > 63) { + printf("xCell: TileType exceeded 63 limit!\n"); + } +#endif + this->data = (this->data & 3) + (__uiType << 2); +} + +__device__ void xCell::set_xCell(unsigned char __ucVal) { + this->data = __ucVal; +} + +__device__ unsigned char xCell::get_xType(void) { + return this->data >> 2; +} + +__device__ unsigned char xCell::get_xOrient(void) { + return (this->data & 3); +} + +__device__ unsigned char xCell::get_xCell(void) { + return this->data; +} + +struct xCellGrid { + union { + xCell multi_d[m_fit_DimGridX][m_fit_DimGridY][m_fit_NrRedundancyGridDepth][mWarpSize]; + xCell mix_d[m_fit_DimGridX * m_fit_DimGridY][m_fit_NrRedundancyGridDepth][mWarpSize]; + xCell one_d[m_fit_DimGridX * m_fit_DimGridY * mWarpSize * m_fit_NrRedundancyGridDepth]; + } data; + + __device__ void Initialise(xThreadInfo __xThreadInfo, unsigned char __red); + __device__ xCell get_xCell(xThreadInfo __xThreadInfo, unsigned char __x, unsigned char __y, unsigned char __red); + __device__ bool set_xCell(xThreadInfo __xThreadInfo, unsigned char __x, unsigned char __y, unsigned char __red, unsigned char __val); + __device__ xCell xGetNeighbourCell(xThreadInfo __xThreadInfo, unsigned char __x, unsigned char __y, unsigned char __red, unsigned char __dir); + __device__ uchar2 xGetNeighbourCellCoords(unsigned char __x, unsigned char __y, unsigned char __dir); + __device__ bool xCompareRed(xThreadInfo __xThreadInfo, unsigned char __red); + __device__ void print(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet); +}; + +__device__ void xCellGrid::Initialise(xThreadInfo __xThreadInfo, + unsigned char __red) { + //Surefire-version: + /*for (int i = 0; i < m_fit_DimGridX; i++) { + for (int j = 0; j < m_fit_DimGridY; j++) { + this->data.multi_d[i][j][__red][__xThreadInfo.BankId()].set_xCell(mEMPTY_CELL); + } + }*/ + /*for (int i = 0; i < m_fit_DimGridX; i++) { + for (int j = 0; j < m_fit_DimGridY; j++) { + this->data.multi_d[i*j][__red][__xThreadInfo.BankId()].set_xCell(mEMPTY_CELL); + } + }*/ + short offset = (m_fit_DimGridX*m_fit_DimGridY) % mBankSize; + short myshare = (m_fit_DimGridX*m_fit_DimGridY - offset) / mBankSize; + //short one_d_off = m_fit_DimGridX*m_fit_DimGridY*m_fit_NrRedundancyAssemblies*__xThreadInfo.BankId() + m_fit_DimGridX*m_fit_DimGridY*__red; + //_fit_DimGridX*m_fit_DimGridY; + + for(int i=0;idata.mix_d[__xThreadInfo.WarpId()*myshare + i][__red][__xThreadInfo.BankId()].set_xCell(mEMPTY_CELL); + } + if(__xThreadInfo.WarpId()==mBankSize-1){ + for(int i=0;idata.one_d[one_d_off + mBankSize*myshare + i].set_xCell(mEMPTY_CELL); + this->data.mix_d[mBankSize*myshare + i][__red][__xThreadInfo.BankId()].set_xCell(mEMPTY_CELL); + } + } + //__syncthreads(); +} + +__device__ xCell xCellGrid::get_xCell(xThreadInfo __xThreadInfo, unsigned char __x, unsigned char __y, unsigned char __red) { + if ((__x < m_fit_DimGridX) && (__y < m_fit_DimGridY)) { + return this->data.multi_d[__x][__y][__red][__xThreadInfo.BankId()]; + } else { + xCell TmpCell; + TmpCell.set_xCell(mEMPTY_CELL); + return TmpCell; + } +} + +__device__ bool xCellGrid::set_xCell(xThreadInfo __xThreadInfo, unsigned char __x, unsigned char __y, unsigned char __red, unsigned char __val) { + if ((__x < m_fit_DimGridX - 1) && (__y < m_fit_DimGridY - 1)) { + this->data.multi_d[__x][__y][__red][__xThreadInfo.BankId()].set_xCell( + __val); + return true; + } else if (__x == (m_fit_DimGridX - 1) || (__y == 
(m_fit_DimGridY - 1))) { + //UnboundUND condition! Return false. + this->data.multi_d[__x][__y][__red][__xThreadInfo.BankId()].set_xCell( + __val); + return false; + } else { + return false; + } +} + +__device__ xCell xCellGrid::xGetNeighbourCell(xThreadInfo __xThreadInfo, unsigned char __x, unsigned char __y, unsigned char __red, unsigned char __dir) { + uchar2 TmpCoords = xGetNeighbourCellCoords(__x, __y, __dir); + return this->get_xCell(__xThreadInfo, TmpCoords.x, TmpCoords.y, __red); +} + +__device__ uchar2 xCellGrid::xGetNeighbourCellCoords(unsigned char __x, unsigned char __y, unsigned char __dir) { + switch (__dir) { + case 0: //NORTH + return make_uchar2(__x, __y - 1); + //break; + case 1: //EAST + return make_uchar2(__x + 1, __y); + //break; + case 2: //SOUTH + return make_uchar2(__x, __y + 1); + //break; + case 3: //WEST + return make_uchar2(__x - 1, __y); + //break; + } + return make_uchar2(mEMPTY_CELL, mEMPTY_CELL); +} + +__device__ bool xCellGrid::xCompareRed(xThreadInfo __xThreadInfo, unsigned char __red) { + unsigned char TmpNextDir = (__red + 1) % m_fit_NrRedundancyGridDepth; + unsigned char TmpIsDifferent = 0; + for (int i = 0; i < m_fit_DimGridX * m_fit_DimGridY; i++) { + if (this->data.mix_d[i][__red][__xThreadInfo.BankId()].get_xCell() != this->data.mix_d[i][TmpNextDir][__xThreadInfo.BankId()].get_xCell() ) { + TmpIsDifferent = 1; + break; + } + } + if (!TmpIsDifferent) + return true; + else + return false; +} + +struct xFitnessGrid { + texture *grid; + __device__ unsigned char get_xCell(unsigned char i, unsigned char j); +}; + +struct xAssembly { + struct { + xCellGrid grid; + xEdgeSort edgesort; + xMoveList movelist; + xAssemblyFlags flags[mWarpSize]; + curandState *states;//[mWarpSize]; + //unsigned int synccounter[mWarpSize]; //Will be used to synchronize between Warps + float2 gravity[mWarpSize]; + int assembly_size[mWarpSize]; + int left[mWarpSize]; + int bottom[mWarpSize]; + } data; + + __device__ void Initialise(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet); + __device__ bool Assemble(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet); + __device__ bool Assemble_PreProcess(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet); + __device__ bool Assemble_PostProcess(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet); + __device__ bool Assemble_Movelist(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet); + __device__ bool Assemble_InPlace(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet); + __device__ float fEvaluateFitness(xThreadInfo __xThreadInfo);//, bool __bSingleBlockId); + //__device__ float fEvaluateFitnessForSingleGrid(xThreadInfo __xThreadInfo, xFitnessGrid *__xSingleFitnessGrid, bool __bIsSingleBlock); + __device__ bool bSynchronizeBank(xThreadInfo __xThreadInfo); +}; + +__device__ float xAssembly::fEvaluateFitness(xThreadInfo __xThreadInfo){//, bool __bSingleBlockId){ + if(__xThreadInfo.WarpId()==0){ + this->data.gravity[__xThreadInfo.BankId()].x=0; + this->data.gravity[__xThreadInfo.BankId()].y=0; + this->data.left[__xThreadInfo.BankId()]=0; + this->data.bottom[__xThreadInfo.BankId()]=0; + this->data.assembly_size[__xThreadInfo.BankId()]=0; //Only if assembly process still faulty + } + __syncthreads(); + short offset = (m_fit_DimGridX*m_fit_DimGridY) % mBankSize; + short myshare = (m_fit_DimGridX*m_fit_DimGridY - offset) / mBankSize; + + //Step0: Count how many tiles have been assembled - i.e. 
we do this as assembly process is still faulty + { + int off_x=0, off_y=0; + int sum_x=0, sum_y=0; + for(int i=0;idata.grid.data.multi_d[off_x][off_y][this->data.flags[__xThreadInfo.BankId()].get_ucRed()][__xThreadInfo.BankId()].get_xCell()!=mEMPTY_CELL){ + sum_x += 1; + //this->data.grid.data.multi_d[off_x][off_y][this->data.flags[__xThreadInfo.BankId()].get_ucRed()][__xThreadInfo.BankId()].set_xCell(__xThreadInfo.WarpId()); + } + } + if(__xThreadInfo.WarpId()==mBankSize-1){ + for(int i=0;idata.grid.data.multi_d[off_x][off_y][this->data.flags[__xThreadInfo.BankId()].get_ucRed()][__xThreadInfo.BankId()].get_xCell()!=mEMPTY_CELL){ + sum_x += 1; + } + } + } + __syncthreads(); + atomicAdd(&this->data.assembly_size[__xThreadInfo.BankId()], sum_x); + __syncthreads(); + } + + //Step1: Evaluate Center of gravity + { + int off_x=0, off_y=0; + float sum_x=0, sum_y=0; + for(int i=0;idata.grid.data.multi_d[off_x][off_y][this->data.flags[__xThreadInfo.BankId()].get_ucRed()][__xThreadInfo.BankId()].get_xCell()!=mEMPTY_CELL){ + sum_x += (float)off_x; + sum_y += (float)off_y; + } + } + if(__xThreadInfo.WarpId()==mBankSize-1){ + for(int i=0;idata.grid.data.multi_d[off_x][off_y][this->data.flags[__xThreadInfo.BankId()].get_ucRed()][__xThreadInfo.BankId()].get_xCell()!=mEMPTY_CELL){ + sum_x += (float)off_x; + sum_y += (float)off_y; + } + } + } + __syncthreads(); + atomicAdd(&this->data.gravity[__xThreadInfo.BankId()].x, sum_x); + atomicAdd(&this->data.gravity[__xThreadInfo.BankId()].y, sum_y); + __syncthreads(); + //this->data.gravity[__xThreadInfo.BankId()].x=9; + //this->data.gravity[__xThreadInfo.BankId()].y=8; + + //TODO: THere is an error in Assemble_Movelist, i.e. we have that assembly_size is one higher than anticipated... + if(__xThreadInfo.WarpId()==0){ + //this->data.assembly_size[__xThreadInfo.BankId()]--; + this->data.gravity[__xThreadInfo.BankId()].x /= this->data.assembly_size[__xThreadInfo.BankId()]; + this->data.gravity[__xThreadInfo.BankId()].y /= this->data.assembly_size[__xThreadInfo.BankId()]; + } + __syncthreads(); + } + /* + NOW calculate left, bottom - hand side weight + */ + + int off_x=0, off_y=0; + float sum_x=0, sum_y=0; + int grav_x = this->data.gravity[__xThreadInfo.BankId()].x; + int grav_y = this->data.gravity[__xThreadInfo.BankId()].y; + + for(int i=0;idata.grid.data.multi_d[off_x][off_y][this->data.flags[__xThreadInfo.BankId()].get_ucRed()][__xThreadInfo.BankId()].get_xCell()!=mEMPTY_CELL){ + if(off_x > grav_x){ + sum_x += (off_x-grav_x); + } + if(off_y > grav_y){ + sum_y += (off_y-grav_y); + } + } + } + if(__xThreadInfo.WarpId()==mBankSize-1){ + for(int i=0;idata.grid.data.multi_d[off_x][off_y][this->data.flags[__xThreadInfo.BankId()].get_ucRed()][__xThreadInfo.BankId()].get_xCell()!=mEMPTY_CELL){ + if(off_x > grav_x){ + sum_x += (off_x-grav_x); + } + if(off_y > grav_y){ + sum_y += (off_y-grav_y); + } + } + } + } + __syncthreads(); + atomicAdd(&this->data.left[__xThreadInfo.BankId()], sum_x); + atomicAdd(&this->data.bottom[__xThreadInfo.BankId()], sum_y); + __syncthreads(); +} + +__device__ void xAssembly::Initialise(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet) { + unsigned char TmpRed = this->data.flags[__xThreadInfo.BankId()].get_ucRed() % m_fit_NrRedundancyGridDepth; + this->data.grid.Initialise(__xThreadInfo, TmpRed); + this->data.movelist.Initialise(__xThreadInfo); +} + +__device__ bool xAssembly::Assemble(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet) { + bool TmpFlag = false; + this->data.flags[__xThreadInfo.BankId()].ClearAll(); + TmpFlag = true; + if 
(TmpFlag) { + //if(__xThreadInfo.WarpId() == 0){ + this->data.edgesort.Initialise(__xThreadInfo, __xGenomeSet); //TEST + //} + //__syncthreads(); + //this->Assemble_PostProcess(__xThreadInfo, __xGenomeSet); + if (TmpFlag) { + //for (int i = 0; (i < m_fit_NrRedundancyAssemblies) && (!this->data.flags[__xThreadInfo.BankId()].get_bUNDCondition()); i++) { + for (int i = 0; (i < m_fit_NrRedundancyAssemblies); i++) { + //__syncthreads(); + this->Initialise(__xThreadInfo, __xGenomeSet); //Empty out assembly grid at red + __syncthreads(); + if( (__xThreadInfo.WarpId() == 0) && (!this->data.flags[__xThreadInfo.BankId()].get_bUNDCondition()) ){ + this->data.flags[__xThreadInfo.BankId()].ClearAll(); + this->Assemble_Movelist(__xThreadInfo, __xGenomeSet); //TEST + } + //__syncthreads(); //TEST + //this->Initialise(__xThreadInfo, __xGenomeSet); //Empty out assembly grid at red //TEST + return true; //TEST + //__syncthreads(); + //__syncthreads(); +/* if (!TmpController) TmpController = this->Assemble_InPlace(__xThreadInfo, __xGenomeSet); + if (!TmpController) { + // Both assembly processes did not finish! (should NEVER happen) + return false; //Always false - indicate assembly did not finish properly (should not happen!) + } + this->data.flags[__xThreadInfo.BankId()].set_Red(i); //Choose next assembly step! +*/ + } + return true; //Always true - i.e. indicate assembly did finish (can still be UND, though) + } else { + return false; //Indicates that processing before assembly returned either single block, or UND + } + + } else { + return false; //Indicates that processing before assembly returned either single block, or UND + } + +} + +__device__ bool xAssembly::Assemble_PreProcess(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet) { + unsigned char TmpSameCounter = 0; + + //NOTE: This should work, however, not clear how to communicate that single tile without initialisation of grid! + //Check if starting tile is not empty + for (int j = 0; j < mNrTileOrientations; j++) { + if (__xGenomeSet->get_xEdgeType(__xThreadInfo, m_fit_TileIndexStartingTile, + j) == 0) + TmpSameCounter++; + } + if (TmpSameCounter == 4) { + this->data.grid.get_xCell(__xThreadInfo, m_fit_DimGridX / 2, + m_fit_DimGridY / 2, 0); + return true; //Have finished assembly - UND is false, but so is PreProcess (trigger) + } + + //Replace tile doublettes by empty tiles + //Works for any number of mNrTileOrientations and mBitLengthEdgeType <= 4 Byte! + //Note: This would be faster (but more inflexible) if tile-wise accesses! + TmpSameCounter = 0; + unsigned char DBGVAL1, DBGVAL2, DBGVAL3; + for (int k = 0; k < mNrTileTypes - 1; k++) { //Go through all Tiles X (except for last one) + for (int i = k + 1; i < mNrTileTypes; i++) { //Go through all Tiles X_r to the right + for (int j = 0; j < mNrTileOrientations; j++) { //Go through all X edges rots + TmpSameCounter = 0; + for (int l = 0; l < mNrTileOrientations; l++) { //Cycle through all X edges + if (__xGenomeSet->get_xEdgeType(__xThreadInfo, k, l) + == __xGenomeSet->get_xEdgeType(__xThreadInfo, i, (j + + l) % mNrTileOrientations)) { + TmpSameCounter++; + } + } + if (TmpSameCounter == mNrTileOrientations) { + //Have detected a doublette - replace with empty tile!! 
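+ //The loop below is read here as the intended doublette fix (the actual write is still
+ //commented out with //TEST): it would zero all four edge types of tile i, turning the
+ //duplicate into an effectively empty tile so it cannot open a second, equally valid
+ //assembly path later on.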
+ for (int l = 0; l < mNrTileOrientations; l++) { + //__xGenomeSet->set_EdgeType(__xThreadInfo, i, l, 0); //TEST + } + } + } + } + } + return true; +} + +__device__ bool xAssembly::Assemble_PostProcess(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet) { + //Optional: start at first tile and see if it can connect to any degenerate entries in EdgeSort directly + //Note: If we can refrain from assembly, then save time for grid initialisation! + unsigned char TmpBondingCounter = 0; + unsigned char TmpEdgeTypeLength = 0; + for (int j = 0; j < mNrTileOrientations; j++) { + TmpEdgeTypeLength = this->data.edgesort.get_xLength(__xThreadInfo, j); + if (TmpEdgeTypeLength > 1) { + this->data.flags[__xThreadInfo.BankId()].set_TrivialUND(); //TEST + return false; + } else if (TmpEdgeTypeLength == 0) { + TmpBondingCounter++; + } + } + + if (TmpBondingCounter == 4) { + //(Single-tile assembly: PostProcess return value is false, but UND is also false (trigger) ) + this->data.grid.set_xCell(__xThreadInfo, m_fit_DimGridX / 2, m_fit_DimGridY / 2, 0, 0); + return false; + } + //Note: (Optional) Could now check for periodicity (can return to tile X first tile starting at X at same orientation) + //Note: (Optional) Could now check for 2x2 assembly, etc (quite rare though) + //NOTE: TODO, have to check in EdgeSort whether Tile is symmetric, i.e. then remove bonding orientations + return true; +} + +__device__ bool xAssembly::Assemble_Movelist(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet) { + //Place tiletype 0 on center of grid + this->data.grid.set_xCell(__xThreadInfo, m_fit_DimGridX / 2, m_fit_DimGridY / 2, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), 0); + //Add first four moves to movelist (even iff they might be empty) + uchar2 X; //X be current position in grid + X.x = m_fit_DimGridX / 2; //X is abused here as a buffer (reset at loop head) + X.y = m_fit_DimGridY / 2; + //return false; //TEST + + { //Keep all this local + xFourPermutation BUF((int) (curand_uniform(&this->data.states[__xThreadInfo.BankId()])*24.0f)); + unsigned char index; + while (BUF.bNotTraversed()) { + index = BUF.ucWalk(); + //unsigned char DBGVAL = TmpAddPerm.WalkIndex; + this->data.movelist.bPush(__xThreadInfo, this->data.grid.xGetNeighbourCellCoords(X.x, X.y, (unsigned char) (index-1))); + this->data.grid.set_xCell( __xThreadInfo, this->data.grid.xGetNeighbourCellCoords(X.x, X.y, (unsigned char) (index-1)).x, this->data.grid.xGetNeighbourCellCoords(X.x, X.y, (unsigned char) (index-1)).y, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), mEMPTY_CELL_ML); + //this->data.grid.set_xCell( __xThreadInfo, 0, 0, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), 250); //TEST + } + } + + //this->data.assembly_size[__xThreadInfo.BankId()] = 1; + + + //We use movelist approach to assemble grids + //Will switch to in-place assembly if either movelist full, or some other pre-defined condition. + //Note: If we want mixed redundancy detection, need to implement some Single-Assembly Flag in AssemblyFlags that will switch. + //Also: SynchronizeBank() needs to be adapted to not wait for other threads iff Many-thread approach! 
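+ //Outline of the loop below (descriptive only, added for readability):
+ // 1. Pop a position X from the movelist and clear its movelist marker in the grid.
+ // 2. For every non-empty neighbour N(E_X), mirror the facing edge and ask EdgeSort for
+ //    the unique bonding tile; more than one candidate, or disagreement between
+ //    neighbours, raises TrivialUND.
+ // 3. If a unique tile T survives, compare it against the previous redundancy run
+ //    (StericUND) and try to place it; placement outside the grid raises UnboundUND.
+ // 4. Push the still-empty neighbours of X onto the movelist in a random order drawn
+ //    from xFourPermutation and mark them in the grid as mEMPTY_CELL_ML.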
+
+ xCell N; //N(E_X) be non-empty neighbouring cells
+ unsigned char Mirr; // Mirr(E_X, N(E_X)) be tile edge neighbouring E_X
+ xCell T, TmpT; // T(Mirr(E_X, N(E_X))) be potential bonding tiles
+
+ //BEGIN DEBUG
+ int DBG_MAXREP = 1;
+ int DBG_COUNTER = 0;
+ //END DEBUG
+
+ //For all elements M in Movelist (and while not UND condition detected)
+ while (this->data.movelist.get_sPos(__xThreadInfo) > 0) {
+ //BEGIN DEBUG
+ if(DBG_COUNTER >= DBG_MAXREP) return false; //debug early-out
+ //END DEBUG
+
+ //Choose position X from Movelist and remove it from Movelist
+ X = this->data.movelist.xPop(__xThreadInfo);
+ //Now remove grid marking to indicate movelist has been traversing this entry
+ this->data.grid.set_xCell( __xThreadInfo, X.x, X.y, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), mEMPTY_CELL);
+
+ T.set_xCell(mEMPTY_CELL);
+ TmpT.set_xCell(mEMPTY_CELL);
+ for (int E_X = 0; E_X < mNrTileOrientations; E_X++) {
+ //::Let N(E_X) be non-empty neighbouring cells.
+ N = this->data.grid.xGetNeighbourCell(__xThreadInfo, X.x, X.y, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), (unsigned char) E_X);
+ if ( (N.get_xCell() != mEMPTY_CELL) && (N.get_xCell() != mEMPTY_CELL_ML) ) { //For all N(E_X)
+ //::Let Mirr(E_X, N(E_X)) be the tile edge neighbouring E_X
+ unsigned char TmpMirrorCoord = (4 - N.get_xOrient() + (E_X + mNrTileOrientations / 2) % mNrTileOrientations) % mNrTileOrientations;
+ Mirr = __xGenomeSet->get_xEdgeType(__xThreadInfo, N.get_xType(), TmpMirrorCoord);
+ //For all Mirr(E_X, N(E_X)), let T(Mirr(E_X, N(E_X))) be potential bonding tiles
+ TmpT.set_xCell(this->data.edgesort.GetBondingTile( __xThreadInfo, Mirr, &this->data.states[__xThreadInfo.BankId()], &this->data.flags[__xThreadInfo.BankId()]));
+
+ //TmpT.set_Orient((TmpT.get_xOrient() + E_X) % mNrTileOrientations);
+ //NOTE: TrivialUND can arise in three ways:
+ //1. For some Mirr, there is more than 1 bonding tile T (TrivialUND raised by GetBondingTile)
+ //2. For some T, there is more than one orientation O
+ //3. T does not agree between all N
+ //Else if | T( Mirr( E_X, N(E_X) ) ) | == 0
+ //If | T( Mirr( E_X, N(E_X) ) ) | > 0
+ //Raise TrivialUND condition
+ //Else If | T( Mirr( E_X, N(E_X) ) ) | == 1
+ //if ( T.get_xCell() != mEMPTY_CELL ){ //Check if already tile there ??
+ if (TmpT.get_xCell() != mEMPTY_CELL) {
+ TmpT.set_Orient((TmpT.get_xOrient() + E_X) % mNrTileOrientations);
+ if( (TmpT.get_xCell() != T.get_xCell()) && (T.get_xCell() != mEMPTY_CELL) ){
+ //Raise TrivialUND!
+ this->data.flags[__xThreadInfo.BankId()].set_TrivialUND();
+ /*BEGIN DEBUG*/
+ this->data.grid.set_xCell( __xThreadInfo, 2, 0, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), 3<<2);
+ this->data.grid.set_xCell( __xThreadInfo, 0, 1, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), TmpT.get_xCell() << 2);
+ this->data.grid.set_xCell( __xThreadInfo, 0, 2, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), T.get_xCell() << 2);
+ /*END DEBUG*/
+ return false; //abort this pass, the TrivialUND flag records the outcome
+ }
+ T.set_xCell(TmpT.get_xCell());
+ //As Bonding Cell is rotated such that bonding edge is facing North,
+ //we need to rotate tile T such that bonding edge faces bonding site
+ //Note: bonding orientations are handled above (GetBondingTile includes orientation).
+ //::Let O(T) be all bonding orientations of T
+ //If |O(T)| > 1
+ //Else If |O(T)| = 1 --> Check Steric, if not --> Assemble
+ //Let T* be T rotated such that E_T*(E_X) == E_T(O(T))
+ //T.set_Orient((T.get_xOrient() + E_X) % mNrTileOrientations); //Rotate TmpT instead!
+ }
+ }
+ } //Now we have looked for all neighbours of X and filtered the possible bonding tiles
+ if (!this->data.flags[__xThreadInfo.BankId()].get_bUNDCondition() && T.get_xCell() != mEMPTY_CELL) {
+
+ //NOTE: StericUND can arise in two ways:
+ //1. T does not agree with tile from previous assembly run
+ //2. T does not agree with tile already at X in same run (multiple threads only)
+ xCell TmpT2;
+ if (this->data.flags[__xThreadInfo.BankId()].get_ucRed() > 0) {
+ TmpT2 = this->data.grid.get_xCell(__xThreadInfo, X.x, X.y, this->data.flags[__xThreadInfo.BankId()].get_ucRed() - 1);
+ if (TmpT2.get_xCell() != T.get_xCell()) { //We have detected steric non-determinism!
+ this->data.flags[__xThreadInfo.BankId()].set_StericUND(); //TEST
+ /*START DEBUG*/
+ this->data.grid.set_xCell( __xThreadInfo, 0, 0, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), 7<<2);
+ /*END DEBUG*/
+ return false; //abort this pass, the StericUND flag records the outcome
+ }
+ }
+
+ //If X is not BorderCell
+ //Assemble T* at X
+ //Note: set_xCell will return false in the BorderCell case!
+ if (T.get_xCell() != mEMPTY_CELL) {
+ //BEGIN DEBUG
+ DBG_COUNTER++;
+ //END DEBUG
+
+ //Now: Assemble tile!
+ if (!this->data.grid.set_xCell( __xThreadInfo, X.x, X.y, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), T.get_xCell())) {
+ this->data.flags[__xThreadInfo.BankId()].set_UnboundUND();
+ /*START DEBUG*/
+ this->data.grid.set_xCell( __xThreadInfo, 1, 0, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), 1<<2);
+ this->data.grid.set_xCell( __xThreadInfo, 0, 1, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), T.get_xCell());
+ /*END DEBUG*/
+ return false; //abort this pass, the UnboundUND flag records the outcome
+ }
+
+ xFourPermutation TmpAddPerm((int) (curand_uniform(&this->data.states[__xThreadInfo.BankId()]) * 24.0f));
+ unsigned char index2; //Buffer
+ while (TmpAddPerm.bNotTraversed()) {
+ index2 = TmpAddPerm.ucWalk();
+ //For all n(E_X)
+ N = this->data.grid.xGetNeighbourCell(__xThreadInfo, X.x, X.y, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), (unsigned char) index2);
+ //::Let n(E_X) be empty neighbour cells (i.e. no tile and not on movelist already).
+ if (N.get_xCell() == mEMPTY_CELL) {
+ this->data.movelist.bPush(__xThreadInfo, this->data.grid.xGetNeighbourCellCoords(X.x, X.y, (unsigned char) index2));
+ this->data.grid.set_xCell( __xThreadInfo, this->data.grid.xGetNeighbourCellCoords(X.x, X.y, (unsigned char) (index2)).x, this->data.grid.xGetNeighbourCellCoords(X.x, X.y, (unsigned char) (index2)).y, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), mEMPTY_CELL_ML);
+ this->data.grid.set_xCell( __xThreadInfo, 0, index2, this->data.flags[__xThreadInfo.BankId()].get_ucRed(), 99);
+ }
+ }
+ }
+ }
+ }
+ return true; //movelist exhausted without hitting an UND condition
+}
+
+__device__ unsigned char xEdgeSort::GetBondingTile(xThreadInfo __xThreadInfo,
+ short __sEdgeId, curandState *__xCurandState,
+ xAssemblyFlags *__xAssemblyFlags) {
+ //Takes: Edge Type to which the tile should bond, FitFlags which will be set according to UND conditions
+ //Returns: Cell of Bonding Tile type which is rotated such that the bonding edge is facing NORTH (0),
+ //If nothing bonds, will return mEMPTY_CELL instead.
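+ //Descriptive summary of the branches below:
+ //  exactly one bonding tile   -> return it, rotated so that the bonding edge faces NORTH;
+ //  no bonding tile            -> return mEMPTY_CELL;
+ //  more than one bonding tile -> set TrivialUND and return mEMPTY_CELL.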
+ if (this->get_xLength(__xThreadInfo, __sEdgeId) == 1) { + xCell TmpCell; + unsigned char DBGVAL2, DBGVAL3, DBGVAL = GetBondingTileOrientation( + __xThreadInfo, __sEdgeId, 0, __xAssemblyFlags); + unsigned char TmpBondBuffer = GetBondingTileOrientation(__xThreadInfo, + __sEdgeId, 0, __xAssemblyFlags); + TmpCell.set_xCell(4 - TmpBondBuffer); + TmpCell.set_Type(this->get_xData(__xThreadInfo, __sEdgeId, 0, + TmpBondBuffer)); //TEST (0 anstelle TmpCell.get_xOrient()) b-fore + return TmpCell.get_xCell(); + } else if (this->get_xLength(__xThreadInfo, __sEdgeId) == 0) { + return mEMPTY_CELL; + } else { + __xAssemblyFlags->set_TrivialUND(); + return mEMPTY_CELL; + } +} + +__device__ unsigned char xEdgeSort::GetBondingTileOrientation(xThreadInfo __xThreadInfo, unsigned char __ucEdgeId, unsigned char __ucTileId, xAssemblyFlags *__xAssemblyFlags) { + unsigned char TmpCounter = 0, TmpTile, TmpOrient = mEMPTY_CELL; + for (int i = 0; i < mNrTileOrientations; i++) { + TmpTile = this->get_xData(__xThreadInfo, __ucEdgeId, __ucTileId, i); + if (TmpTile != mEMPTY_CELL) { + TmpOrient = i; + TmpCounter++; + if (TmpCounter >= 2) { + __xAssemblyFlags->set_TrivialUND(); + break; + } + } + } + return TmpOrient; //should never be mEMPTY_CELL! + //Returns edge-id of neighbouring tile that bonds +} + +__device__ unsigned char xEdgeSort::get_xData(xThreadInfo __xThreadInfo, unsigned char __ucEdgeId, unsigned char __ucTileId, unsigned char __ucOrientation) { + return this->data.multi_d[__ucEdgeId][__ucTileId][__ucOrientation][__xThreadInfo.BankId()]; +} + +__device__ bool xAssembly::Assemble_InPlace(xThreadInfo __xThreadInfo, xGenomeSet *__xGenomeSet) { + return true; +} + +} + +__global__ void TestAssemblyKernel(unsigned char *g_ucGenomes, float *g_ucFitnessValues, unsigned char *g_ucGrids, curandState *states) +{ + __shared__ xGenomeSet s_xGenomeSet; + //__shared__ xEdgeSort s_xEdgeSort; + __shared__ xAssembly s_xAssembly; + s_xAssembly.data.states = states; + xThreadInfo r_xThreadInfo(threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y); + s_xGenomeSet.CopyFromGlobal(r_xThreadInfo, g_ucGenomes); + //s_xEdgeSort.Initialise(r_xThreadInfo, &s_xGenomeSet, -1); + s_xAssembly.Assemble(r_xThreadInfo, &s_xGenomeSet); + for(int i=0;i<4;i++){ + //s_xGenomeSet.data.multi_d[r_xThreadInfo.BankId()].data.one_d[i] = s_xEdgeSort.length.multi_d[i][r_xThreadInfo.BankId()]; + //s_xGenomeSet.data.multi_d[r_xThreadInfo.BankId()].data.one_d[i] = s_xEdgeSort.data.multi_d[6][0][i][r_xThreadInfo.BankId()]; + //s_xGenomeSet.data.multi_d[r_xThreadInfo.BankId()].data.one_d[i] = tex2D(t_ucInteractionMatrix, i, 1); + } + s_xGenomeSet.CopyToGlobal(r_xThreadInfo, g_ucGenomes); + for(int i=0;i; + +class RadarAmbiguityFunction : public ::testing::Test { +protected: + void SetUp() override + { + + pb = std::make_unique(); + pb->InitAndRunTVGenerator("01_radar", "ambgfun", "run", + {sig_size}); + + pb->NumpyToTensorView(xv, "x"); + } + + void TearDown() { pb.reset(); } + + index_t sig_size = 16; + tensor_t xv{{sig_size}}; + std::unique_ptr pb; +}; + +TEST_F(RadarAmbiguityFunction, Cut2D) +{ + MATX_ENTER_HANDLER(); + + tensor_t amf2dv( + {2 * sig_size - 1, + (index_t)pow(2, std::ceil(std::log2(2 * sig_size - 1)))}); + + signal::ambgfun(amf2dv, xv, 1e3, signal::AMGBFUN_CUT_TYPE_2D, 1.0); + MATX_TEST_ASSERT_COMPARE(pb, amf2dv, "amf_2d", 0.01); + + MATX_EXIT_HANDLER(); +} + +TEST_F(RadarAmbiguityFunction, CutDelay) +{ + MATX_ENTER_HANDLER(); + + tensor_t amf_delay_v( + {1, (index_t)pow(2, std::ceil(std::log2(2 * sig_size - 1)))}); + + 
signal::ambgfun(amf_delay_v, xv, 1e3, signal::AMGBFUN_CUT_TYPE_DELAY, 1.0); + + auto delay1d = amf_delay_v.Slice<1>({0, 0}, {matxDropDim, matxEnd}); + MATX_TEST_ASSERT_COMPARE(pb, delay1d, "amf_delay", 0.01); + + MATX_EXIT_HANDLER(); +} + +TEST_F(RadarAmbiguityFunction, CutDoppler) +{ + MATX_ENTER_HANDLER(); + + tensor_t amf_doppler_v({1, xv.Size(0) * 2 - 1}); + + signal::ambgfun(amf_doppler_v, xv, 1e3, signal::AMGBFUN_CUT_TYPE_DOPPLER, + 1.0); + + auto doppler1d = amf_doppler_v.Slice<1>({0, 0}, {matxDropDim, matxEnd}); + MATX_TEST_ASSERT_COMPARE(pb, doppler1d, "amf_doppler", 0.01); + + MATX_EXIT_HANDLER(); +} \ No newline at end of file diff --git a/cuda_code/amg.cu b/cuda_code/amg.cu new file mode 100644 index 0000000000000000000000000000000000000000..ac2983051eb77802695bb08526d0f947d80181de --- /dev/null +++ b/cuda_code/amg.cu @@ -0,0 +1,1589 @@ +/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of NVIDIA CORPORATION nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +namespace amgx +{ + +template +AMG +::AMG(AMG_Config &cfg, const std::string &cfg_scope) + : fine_h(0), fine_d(0), m_cfg(&cfg), m_cfg_scope(cfg_scope), + ref_count(1), csr_workspace(NULL), d2_workspace(NULL) +{ + cycle_iters = cfg.getParameter("cycle_iters", cfg_scope); + norm = cfg.getParameter("norm", cfg_scope); + max_levels = cfg.getParameter( "max_levels", cfg_scope ); + coarsen_threshold = cfg.getParameter("coarsen_threshold", cfg_scope); + min_fine_rows = cfg.getParameter( "min_fine_rows", cfg_scope ); + min_coarse_rows = cfg.getParameter( "min_coarse_rows", cfg_scope); + m_amg_consolidation_flag = cfg.getParameter("amg_consolidation_flag", cfg_scope); + m_consolidation_lower_threshold = cfg.getParameter("matrix_consolidation_lower_threshold", cfg_scope); + m_consolidation_upper_threshold = cfg.getParameter("matrix_consolidation_upper_threshold", cfg_scope); + m_sum_stopping_criteria = cfg.getParameter("use_sum_stopping_criteria", cfg_scope); + m_structure_reuse_levels = cfg.getParameter("structure_reuse_levels", cfg_scope); + m_amg_host_levels_rows = cfg.getParameter("amg_host_levels_rows", cfg_scope); + + if (m_consolidation_upper_threshold <= m_consolidation_lower_threshold) + { + FatalError("Error, matrix_consolidation_lower_threshold must be smaller than matrix_consolidation_upper_threshold", AMGX_ERR_CONFIGURATION); + } + + std::string solverName, new_scope, tmp_scope; + cfg.getParameter( "coarse_solver", solverName, cfg_scope, new_scope ); + + if (solverName.compare("NOSOLVER") == 0) + { + coarse_solver_d = NULL; + coarse_solver_h = NULL; + } + else + { + coarse_solver_d = SolverFactory::allocate(cfg, cfg_scope, "coarse_solver"); + coarse_solver_h = SolverFactory::allocate(cfg, cfg_scope, "coarse_solver"); + } + + //NOTE: + //if dense_lu_num_rows=0 then either you are not using dense solver (it was not selected) or the matrix size for it to be used was set to zero + //if dense_lu_max_rows=0 then either you are not using dense solver or you don't want to cap the maximum matrix size + m_dense_lu_num_rows = 0; + m_dense_lu_max_rows = 0; + + if ( solverName == "DENSE_LU_SOLVER" ) + { + m_dense_lu_num_rows = cfg.getParameter( "dense_lu_num_rows", cfg_scope ); + m_dense_lu_max_rows = cfg.getParameter( "dense_lu_max_rows", cfg_scope ); + } +} + +template +void AMG::allocate_fine_level() +{ + fine_d = AMG_LevelFactory::allocate(this, tmng); + fine_h = AMG_LevelFactory::allocate(this, tmng); +} + +// Print the settings used by amg solver +template +void AMG::printSettings() const +{ + std::cout << std::endl; + std::cout << "AMG solver settings:" << std::endl; + std::cout << "cycle_iters = " << cycle_iters << std::endl; + std::cout << "norm = " << getString(norm) << std::endl; + std::cout << "presweeps = " << getNumPresweeps() << std::endl; + std::cout << "postsweeps = " << getNumPostsweeps() << std::endl; + std::cout << "max_levels = " << max_levels << std::endl; + std::cout << "coarsen_threshold = " << coarsen_threshold << std::endl; + std::cout << "min_fine_rows = " << min_fine_rows << std::endl; + std::cout << "min_coarse_rows = " << min_coarse_rows << std::endl; + std::cout << "coarse_solver_d: " << this->coarse_solver_d->getName() + << " with scope name " << this->coarse_solver_d->getScope() << std::endl; + 
std::cout << "coarse_solver_h: " << this->coarse_solver_h->getName() + << " with scope name " << this->coarse_solver_h->getScope() << std::endl; +} + +template +AMG::~AMG() +{ + if (fine_d) { delete fine_d; } + + if (fine_h) { delete fine_h; } // Don't delete both since the hierarchies meet at some point !!! + + delete coarse_solver_d; + delete coarse_solver_h; + + if ( d2_workspace != NULL && d2_workspace != csr_workspace ) + { + typedef TemplateConfig TConfig_d; + CSR_Multiply::csr_workspace_delete( d2_workspace ); + csr_workspace = NULL; + } + + if ( csr_workspace != NULL ) + { + typedef TemplateConfig TConfig_d; + CSR_Multiply::csr_workspace_delete( csr_workspace ); + csr_workspace = NULL; + } +} + +template void logDeviceType() +{ + AMGXLOG("Devicetype", T_Config::MemSpaceInfo::getName()) +} + + +/********************************************************** + * Setups the AMG system + *********************************************************/ + +void analyze_coloring(device_vector_alloc aggregates_d, device_vector_alloc colors_d); + +template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > +class AMG_Setup +{ + public: + template< typename TConfig_hd > + static + AMG_Level *setup( AMG *amg, + AMG_Level *&level, + int min_rows, bool hybrid ) + { + typedef typename TConfig_hd::MemSpace MemorySpace; + typedef TemplateConfig TConfig_h; + typedef TemplateConfig TConfig_d; + typedef typename Matrix::IVector IVector_h; + typedef typename Matrix::IVector IVector_d; + typedef typename Matrix::VVector VVector_h; + typedef typename Matrix::VVector VVector_d; + typedef typename Matrix::MVector MVector_h; + typedef typename Matrix::MVector MVector_d; + typedef typename Matrix::IVector IVector_hd; + typedef typename Matrix::VVector VVector_hd; + typedef typename Matrix::MVector MVector_hd; + typedef typename MatPrecisionMap::Type ValueTypeA; + typedef typename VecPrecisionMap::Type ValueTypeB; + static const AMGX_MemorySpace other_memspace = MemorySpaceMap::memspace>::id; + typedef TemplateConfig TConfig1; + typedef TConfig1 T_Config1; + MemorySpace memorySpaceTag; + // The previous level. + AMG_Level *prev_level = 0L; + typedef TemplateConfig hvector_type; + typedef Vector HVector; + std::vector partition_rows(0); + HVector num_rows(1); + int64_t num_rows_global; + num_rows[0] = num_rows_global = level->getNumRows( ); + int min_partition_rows = num_rows[0]; + + if (level->getA().is_matrix_distributed()) + { + level->getA( ).manager->getComms()->global_reduce(partition_rows, num_rows, + level->getA( ), level->tag * 100 + 7); + num_rows_global = 0; + + for (int i = 0; i < partition_rows.size(); i++) + { + min_partition_rows = std::min(partition_rows[i][0], min_partition_rows); + num_rows_global += partition_rows[i][0]; + } + } + + Solver *coarseSolver = amg->getCoarseSolver( MemorySpace() ); + bool coarseSolverExists = coarseSolver != NULL; + + // Build the remaining / all the levels on the CPU. Note: level_h is NULL if all the setup happened on the GPU. 
+ while (true) + { + nvtxRange test("setup_level"); + + //Check if you reached the coarsest level (min_partition_rows is the number of rows in this partition/rank) + //NOTE: min_rows = min_coarse_rows if async framework is disabled (min_fine_rows =< min_coarse_rows) + if (amg->num_levels >= amg->max_levels || min_partition_rows <= min_rows) + { + //Check if the user wishes to use DENSE_LU_SOLVER capping the matrix the size, and the matrix size exceeds the maximum allowed + //NOTE: if dense_lu_max_rows=0 then either you are not using dense solver or you don't want to cap the maximum matrix size + if ((amg->m_dense_lu_max_rows != 0) && (min_partition_rows > amg->m_dense_lu_max_rows)) + { + amg->setCoarseSolver(NULL, MemorySpace()); + delete coarseSolver; + coarseSolver = NULL; + coarseSolverExists = false; + } + + //Check if there is no coarse solver, then setup the smoother to solve the coarsest level + if (!coarseSolverExists) + { + level->setup_smoother(); + } + + return level; + } + + // Allocate next level or use existing one + int reuse_next_level; + AMG_Level *nextLevel; + + if (!level->getNextLevel(MemorySpace()) || (amg->m_structure_reuse_levels <= amg->num_levels && amg->m_structure_reuse_levels != -1)) + { + if (level->getNextLevel(MemorySpace())) + { + delete level->getNextLevel(MemorySpace()); + } + + reuse_next_level = 0; + level->setReuseLevel(false); + nextLevel = AMG_LevelFactory::allocate(amg, level->getSmoother()->get_thread_manager()); + level->setNextLevel( nextLevel ); + } + else + { + // reuse existing next level + reuse_next_level = 1; + level->setReuseLevel(true); + nextLevel = level->getNextLevel(MemorySpace()); + /* WARNING: we do not recompute prolongation (P) and restriction (R) when we + are reusing the level structure (structure_reuse_levels > 0), but + we do need to modify an existing coarse matrix Ac=R*A*P. + Instead of calling Ac.set_initialized(0) in every path afterwards, + we wil call it here. Notice that in the if part of this statement + above when the new level is allocated it creates a new matrix which + is not initialized by default (see the matrix constructor): + AMG_Level_Factory::allocate -> Classical_AMG_LevelFactory::create -> + new Classical_AMG_Level -> new AMG_Level -> new Matrix + We are just matching this Ac.set_initialized(0) setting here. 
*/ + Matrix &Ac = nextLevel->getA(); + Ac.set_initialized(0); + } + + nextLevel->setLevelIndex( amg->num_levels ); + level->getA().template setParameter("level", amg->num_levels); + //profileLevelDown( ); + { + // only compute aggregates if we can't reuse existing ones + if (!reuse_next_level) + { + level->createCoarseVertices( ); + } + } + //set the amg_level_index for this matrix + nextLevel->getA().amg_level_index = amg->num_levels; + int64_t N = num_rows_global * level->getA().get_block_dimy(); + num_rows[0] = num_rows_global = level->getNumCoarseVertices(); + + if (level->getA().is_matrix_distributed()) + { + level->getA().manager->getComms()->global_reduce( partition_rows, num_rows, + level->getA(), level->tag * 100 + 8 ); + num_rows_global = 0; + + for (int i = 0; i < partition_rows.size(); i++) + { + num_rows_global += partition_rows[i][0]; + } + } + + // num_rows[0] contains the total number of rows across all partitions + int64_t nextN = num_rows_global * level->getA().get_block_dimy(); + + if (!level->getA().is_matrix_distributed()) + { + min_partition_rows = num_rows[0]; + } + else + { + int num_parts = level->getA().manager->getComms()->get_num_partitions(); + float avg_size = num_rows_global / num_parts; + + if (avg_size < amg->m_consolidation_lower_threshold) + { + if (level->isClassicalAMGLevel()) + { + FatalError("Consolidation with classical path not supported)", AMGX_ERR_NOT_IMPLEMENTED); + } + + int new_num_parts; + bool want_neighbors = false; + level->getA().manager->computeDestinationPartitions(amg->m_consolidation_upper_threshold, + avg_size, num_parts, new_num_parts, want_neighbors); + + if (new_num_parts != num_parts) + { + level->setIsConsolidationLevel(true); + // Modify partition_rows so that non-consolidated partitions have 0 rows + // Root partitions have total number of rows to consolidate + IVector_h row_count_part(num_parts, 0); + + for (int i = 0; i < num_parts; i++) + { + row_count_part[level->getA().manager->getDestinationPartitions()[i]] += partition_rows[i][0]; + } + + for (int i = 0; i < num_parts; i++) + { + partition_rows[i][0] = row_count_part[i]; + } + } + } + + if (!amg->m_sum_stopping_criteria) + { + min_partition_rows = INT_MAX; + + for (int i = 0; i < partition_rows.size(); i++) + { + // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation + // If classical AMG, include all partitions + if ( level->isClassicalAMGLevel() || (!(level->isClassicalAMGLevel()) && partition_rows[i][0] != 0)) + { + min_partition_rows = std::min(partition_rows[i][0], min_partition_rows); + } + } + } + else + { + // use sum instead of min + min_partition_rows = 0; + + for (int i = 0; i < partition_rows.size(); i++) + { + // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation + // If classical AMG, include all partitions + if ( level->isClassicalAMGLevel() || (!(level->isClassicalAMGLevel()) && partition_rows[i][0] != 0)) + { + min_partition_rows += partition_rows[i][0]; + } + } + } + } + + // stop here if next level size is < min_rows + if ( nextN <= amg->coarsen_threshold * N && nextN != N && min_partition_rows >= min_rows ) + { + level->createCoarseMatrices(); + // Resize coarse vectors. 
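+ //xc and bc are the coarse-level solution and right-hand side (sized to getNextLevelSize()),
+ //r is the residual on the current level (sized to the FULL view of A); the tags below are
+ //derived from nextLevel->tag, following the same tag scheme used for the global reductions above.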
+ int nextSize = level->getNextLevelSize(); + level->getxc( ).resize( nextSize ); + level->getxc().set_block_dimy(level->getA( ).get_block_dimy()); + level->getxc().set_block_dimx(1); + level->getxc().tag = nextLevel->tag * 100 + 1; + level->getbc( ).resize( nextSize ); + level->getbc().set_block_dimy(level->getA( ).get_block_dimy()); + level->getbc().set_block_dimx(1); + level->getbc().tag = nextLevel->tag * 100 + 0; + int size, offset; + level->getA().getOffsetAndSizeForView(FULL, &offset, &size); + level->getr().resize( size * level->getA( ).get_block_dimy() ); + level->getr().set_block_dimy(level->getA( ).get_block_dimy()); + level->getr().set_block_dimx(1); + level->getr().tag = nextLevel->tag * 100 + 2; + } + else + { + // delete next level that we just created + level->deleteNextLevel( memorySpaceTag ); + } + + if (!level->isCoarsest() || !coarseSolverExists) + { + level->setup_smoother(); + } + + if (level->isCoarsest()) + { + break; + } + + // If consolidation level and not root partition, break; + if (!level->getA().is_matrix_singleGPU() && level->isConsolidationLevel() + && !level->getA().manager->isRootPartition()) + { + amg->setCoarseSolver(NULL, MemorySpace()); + delete coarseSolver; + coarseSolver = NULL; + coarseSolverExists = false; + break; + } + + nextLevel->setup(); + // Move to the next level. + prev_level = level; + level = nextLevel; + // Increment the level counter. + amg->num_levels++; + } //end of while(true) + + return prev_level; + } + + template< typename TConfig_hd > + static + AMG_Level *setup_v2( AMG *amg, + AMG_Level *&level, + int min_rows, bool hybrid ) + { + typedef typename TConfig_hd::MemSpace MemorySpace; + typedef TemplateConfig TConfig_h; + typedef TemplateConfig TConfig_d; + typedef typename Matrix::IVector IVector_h; + typedef typename Matrix::IVector IVector_d; + typedef typename Matrix::VVector VVector_h; + typedef typename Matrix::VVector VVector_d; + typedef typename Matrix::MVector MVector_h; + typedef typename Matrix::MVector MVector_d; + typedef typename Matrix::IVector IVector_hd; + typedef typename Matrix::VVector VVector_hd; + typedef typename Matrix::MVector MVector_hd; + typedef typename MatPrecisionMap::Type ValueTypeA; + typedef typename VecPrecisionMap::Type ValueTypeB; + MemorySpace memorySpaceTag; + // The previous level. + AMG_Level *prev_level = 0L; + typedef TemplateConfig hvector_type; + typedef Vector HVector; + std::vector partition_rows(0); + HVector num_rows(1); + int64_t num_rows_global; + num_rows[0] = num_rows_global = level->getNumRows( ); + int min_partition_rows = INT_MAX, offset = 0, n = 0, num_parts = 1, num_active_parts = 0; + float avg_size; + + if (level->getA().is_matrix_distributed()) + { + num_parts = level->getA().manager->getComms()->get_num_partitions(); + level->getA( ).manager->getComms()->global_reduce(partition_rows, num_rows, + level->getA( ), level->tag * 100 + 7); + num_rows_global = 0; + + for (int i = 0; i < partition_rows.size(); i++) + { + if (partition_rows[i][0] != 0) + { + min_partition_rows = std::min(partition_rows[i][0], min_partition_rows); + num_active_parts++; + } + + num_rows_global += partition_rows[i][0]; + } + + if (min_partition_rows == INT_MAX) + { + min_partition_rows = 0; + } + } + + IVector_h row_count_part(num_parts, 0); + Solver *coarseSolver = amg->getCoarseSolver( MemorySpace() ); + bool coarseSolverExists = coarseSolver != NULL; + + // Build the remaining / all the levels on the CPU. Note: level_h is NULL if all the setup happened on the GPU. 
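+ //setup_v2 follows the same level-by-level structure as setup() above; the differences,
+ //described briefly: (a) before coarsening it may glue (consolidate) the distributed matrix
+ //when the average rows per rank fall below m_consolidation_lower_threshold (only on the
+ //finest level unless COARSE_CLA_CONSO is enabled), and (b) ranks whose owned row count
+ //drops to zero after gluing give up their local coarse solver.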
+ while (true) + { + // Glue matrices of the current level + avg_size = num_rows_global / num_parts; + // Allow to glue other levels tha 0 if COARSE_CLA_CONSO is true +#if COARSE_CLA_CONSO + + if (level->getA().is_matrix_distributed() && avg_size < amg->m_consolidation_lower_threshold) + { +#else + + if (level->getA().is_matrix_distributed() && avg_size < amg->m_consolidation_lower_threshold && level->getLevelIndex() == 0) + { +#endif + // Just remove level->getLevelIndex() == 0 in the previous test to allow coarse level consolidation +#ifdef AMGX_WITH_MPI + level->getA().manager->setIsGlued(false); + int new_num_parts = glue_level(amg, level, num_active_parts); + + if (new_num_parts && new_num_parts != num_active_parts) + { + if (level->getA().manager->global_id() == 0) + { + std::cout << "Level " << level->getLevelIndex() << " has been consolidated : " << num_active_parts << " --> " << new_num_parts << std::endl; + } + + // this is for coarse level consolidation + if (level->getLevelIndex() > 0) + { + level->setIsConsolidationLevel(true); + } + + level->setup(); + num_active_parts = new_num_parts; + // Modify partition_rows so that non-consolidated partitions have 0 rows + // Root partitions have total number of rows to consolidate + num_rows[0] = level->getNumRows(); + level->getA().manager->getComms()->global_reduce( partition_rows, num_rows, + level->getA(), level->tag * 100 + 33 ); + // Update some local arrays and variables + num_rows_global = 0; + + for (int i = 0; i < partition_rows.size(); i++) + { + num_rows_global += partition_rows[i][0]; + } + + for (int i = 0; i < num_parts; i++) + { + row_count_part[level->getA().manager->getDestinationPartitions()[i]] += partition_rows[i][0]; + } + + for (int i = 0; i < num_parts; i++) + { + partition_rows[i][0] = row_count_part[i]; + } + } + else + { + level->getA().manager->setIsGlued(false); + } + +#endif + } + + level->getA().getOffsetAndSizeForView(OWNED, &offset, &n); + + if (!n) + { + // no coarse solver for empty matrices? 
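+ // (After gluing, a rank may legitimately own zero rows; such a rank simply drops its
+ //  local coarse solver instead of setting up a solver for an empty coarsest system.)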
+ // maybe we can deal with this in classical amg cycle + amg->setCoarseSolver(NULL, MemorySpace()); + delete coarseSolver; + coarseSolver = NULL; + coarseSolverExists = false; + } + + //Check if you reached the coarsest level (min_partition_rows is the number of rows in this partition/rank) + //NOTE: min_rows = min_coarse_rows if async framework is disabled (min_fine_rows =< min_coarse_rows) + if (amg->num_levels >= amg->max_levels || min_partition_rows <= min_rows) + { +#if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT + asyncmanager::singleton()->waitall(); +#endif + + //Check if the user wishes to use DENSE_LU_SOLVER capping the matrix the size, and the matrix size exceeds the maximum allowed + //NOTE: if dense_lu_max_rows=0 then either you are not using dense solver or you don't want to cap the maximum matrix size + if ((amg->m_dense_lu_max_rows != 0) && (min_partition_rows > amg->m_dense_lu_max_rows)) + { + amg->setCoarseSolver(NULL, MemorySpace()); + delete coarseSolver; + coarseSolver = NULL; + coarseSolverExists = false; + } + + //Check if there is no coarse solver, then setup the smoother to solve the coarsest level + // If n is 0 then the matrix is consolidated so we don't setup the smoother + // We always setup the smoother on finest level + if (!coarseSolverExists) + { + level->setup_smoother(); + } + + return level; + } + + // Allocate next level or use existing one + int reuse_next_level; + AMG_Level *nextLevel; + + if (!level->getNextLevel(MemorySpace()) || (amg->m_structure_reuse_levels <= amg->num_levels && amg->m_structure_reuse_levels != -1)) + { + if (level->getNextLevel(MemorySpace())) + { + delete level->getNextLevel(MemorySpace()); + } + + reuse_next_level = 0; + level->setReuseLevel(false); + nextLevel = AMG_LevelFactory::allocate(amg, level->getSmoother()->get_thread_manager()); + level->setNextLevel( nextLevel ); + } + else + { + // reuse existing next level + reuse_next_level = 1; + level->setReuseLevel(true); + nextLevel = level->getNextLevel(MemorySpace()); + /* WARNING: we do not recompute prolongation (P) and restriction (R) when we + are reusing the level structure (structure_reuse_levels > 0), but + we do need to modify an existing coarse matrix Ac=R*A*P. + Instead of calling Ac.set_initialized(0) in every path afterwards, + we wil call it here. Notice that in the if part of this statement + above when the new level is allocated it creates a new matrix which + is not initialized by default (see the matrix constructor): + AMG_Level_Factory::allocate -> Classical_AMG_LevelFactory::create -> + new Classical_AMG_Level -> new AMG_Level -> new Matrix + We are just matching this Ac.set_initialized(0) setting here. 
*/ + Matrix &Ac = nextLevel->getA(); + Ac.set_initialized(0); + } + + nextLevel->setLevelIndex( amg->num_levels ); + level->getA().template setParameter("level", amg->num_levels); +#if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT + + if (async_global::singleton()->using_async_coloring) + { + struct task_setupsmoother : public task + { + AMG_Level *level; + bool coarseSolverExists; + + int profiler_color() {return 0x00ffff;} + std::string name() { return "setup_smoother"; } + void run() + { + // Setup smoother unless coarseSolver exists and reached coarsest level + if ( !( level->isCoarsest() && coarseSolverExists ) ) + { + level->setup_smoother(); + } + } + }; + task_setupsmoother *task_setupsmoother_ = new task_setupsmoother; + task_setupsmoother_->level = level; + task_setupsmoother_->coarseSolverExists = coarseSolverExists; + // create the aggregates (aggregation) or coarse points (classical) + level->createCoarseVertices( ); + enqueue_async(asyncmanager::singleton()->main_thread_queue(0), task_setupsmoother_); + } + else +#endif + { + // only compute aggregates if we can't reuse existing ones + if (!reuse_next_level) + { + level->createCoarseVertices( ); + } + } + + //set the amg_level_index for this matrix + nextLevel->getA().amg_level_index = amg->num_levels; + int64_t N = num_rows_global * level->getA().get_block_dimy(); + num_rows[0] = num_rows_global = level->getNumCoarseVertices(); + + // Do reduction across all partitions + if (level->getA().is_matrix_distributed()) + { + level->getA().manager->getComms()->global_reduce( partition_rows, num_rows, + level->getA(), level->tag * 100 + 8 ); + num_rows_global = 0; + + for (int i = 0; i < partition_rows.size(); i++) + { + num_rows_global += partition_rows[i][0]; + } + } + + // num_rows[0] contains the total number of rows across all partitions + int64_t nextN = num_rows_global * level->getA().get_block_dimy(); + + if (!level->getA().is_matrix_distributed()) + { + min_partition_rows = num_rows[0]; + } + else + { + // level->setIsConsolidationLevel(true); // coaese root partions exited some time in classical + if (!amg->m_sum_stopping_criteria) + { + min_partition_rows = INT_MAX; + + for (int i = 0; i < partition_rows.size(); i++) + { + // Before we did + // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation + // If classical AMG, include all partitions + if (partition_rows[i][0] != 0) + { + min_partition_rows = std::min(partition_rows[i][0], min_partition_rows); + } + } + + // if we exit the previous loop with min_partition_rows == INT_MAX it means all next size are 0 + if (min_partition_rows == INT_MAX) + { + min_partition_rows = 0; + } + } + else + { + // use sum instead of min + min_partition_rows = 0; + + for (int i = 0; i < partition_rows.size(); i++) + { + // If aggregation AMG, ignore partitions with 0 rows, since those are caused by consolidation + // If classical AMG, include all partitions + if (partition_rows[i][0] != 0) + { + min_partition_rows += partition_rows[i][0]; + } + } + } + } + + // stop here if next level size is < min_rows + if ( nextN <= amg->coarsen_threshold * N && nextN != N && min_partition_rows >= min_rows ) + { + level->createCoarseMatrices(); + // Resize coarse vectors. 
+ int nextSize = level->getNextLevelSize(); + level->getxc( ).resize( nextSize ); + level->getxc().set_block_dimy(level->getA( ).get_block_dimy()); + level->getxc().set_block_dimx(1); + level->getxc().tag = nextLevel->tag * 100 + 1; + level->getbc( ).resize( nextSize ); + level->getbc().set_block_dimy(level->getA( ).get_block_dimy()); + level->getbc().set_block_dimx(1); + level->getbc().tag = nextLevel->tag * 100 + 0; + int size, offset; + level->getA().getOffsetAndSizeForView(FULL, &offset, &size); + level->getr().resize( size * level->getA( ).get_block_dimy() ); + level->getr().set_block_dimy(level->getA( ).get_block_dimy()); + level->getr().set_block_dimx(1); + level->getr().tag = nextLevel->tag * 100 + 2; + } + else + { + // delete next level that we just created + level->deleteNextLevel( memorySpaceTag ); + } + +#if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT + + if (async_global::singleton()->using_async_coloring) + { + //cancel the CPU coloring task if the GPU is idle + cudaStreamSynchronize(thrust::global_thread_handle::get_stream()); + enqueue_async(asyncmanager::singleton()->global_parallel_queue, async_global::singleton()->cancel_cpu_coloring_task); + //wait for every spawning task + asyncmanager::singleton()->waitall(); + } + else +#endif + + // If n is 0 then the matrix is consolidated so we don't setup the smoother + if (!level->isCoarsest() || (!coarseSolverExists)) + { + level->setup_smoother(); + } + + if (level->isCoarsest()) + { + break; + } + + // Barrier (might be removed) + // ****************************************** + if (level->getA().is_matrix_distributed()) { level->getA().manager->getComms()->barrier(); } + + // ****************************************** + nextLevel->setup(); + nextLevel->getA().setResources(level->getA().getResources()); +#if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT + + // color the matrix ASAP + if (!nextmin_fine_rowsmin_fine_rowsmin_fine_rowsLevel->getA().is_matrix_setup()) + { + nextLevel->getA().setupMatrix(nextLevel->getSmoother(), *amg->m_cfg, false); + } + +#endif + // Move to the next level. + prev_level = level; + level = nextLevel; + // Increment the level counter. 
+ amg->num_levels++; + } //end of while(true) + +#if 0 //AMGX_ASYNCCPU_PROOF_OF_CONCEPT + cudaStreamSynchronize(thrust::global_thread_handle::threadStream[getCurrentThreadId()]); + thrust::global_thread_handle::threadStream[getCurrentThreadId()] = 0; +#endif + return prev_level; + } + + template< typename TConfig_hd > + static + int glue_level(AMG *amg, AMG_Level *&level, int num_active_parts) + { +#ifdef AMGX_WITH_MPI + if (level->getA().manager->getComms() != NULL) + { + MPI_Comm A_com, temp_com; + int new_num_parts, n_global, num_parts, avg; + bool wantneighbors = true; + A_com = level->getA().manager->getComms()->get_mpi_comm(); + + if (level->getA().manager->part_offsets_h.size() == 0) // create part_offsets_h & part_offsets + { + create_part_offsets(A_com, level->getA()); + } + + n_global = level->getA().manager->part_offsets_h.back(); + num_parts = level->getA().manager->getComms()->get_num_partitions(); + avg = n_global / num_parts; + level->getA().manager->computeDestinationPartitions(amg->m_consolidation_upper_threshold, + avg, num_parts, new_num_parts, wantneighbors); + + if (new_num_parts != num_active_parts) + { + // Compute consolidation info + compute_glue_info(level->getA()); + // Compute a temporary splited communicator to glue matrices + temp_com = compute_glue_matrices_communicator(level->getA()); + // glue_matrices does the following : unpack --> glue --> upload --> repack + glue_matrices(level->getA(), A_com, temp_com); + return new_num_parts; + } + else + { + return num_active_parts; + } + } + else + { + return 0; + } +#else + return 0; +#endif + } + + template< typename TConfig0, AMGX_MemorySpace MemSpace0, AMGX_MemorySpace MemSpace1 > + static + void + setup( AMG *amg, Matrix &A ) + { + typedef typename TConfig0::template setMemSpace::Type TConfig1; + typedef typename MemorySpaceMap::Type MemorySpace0; + typedef typename MemorySpaceMap::Type MemorySpace1; + MemorySpace0 memorySpaceTag0; + MemorySpace1 memorySpaceTag1; + + // delete zero level from other memoryspace + if (amg->getFinestLevel(memorySpaceTag1) != NULL) + { + delete amg->getFinestLevel(memorySpaceTag1); + AMG_Level *level_0_1 = NULL; + amg->setFinestLevel(level_0_1); + } + + int min_fine_rows = amg->min_fine_rows; + int min_coarse_rows = amg->min_coarse_rows; + // Make sure the number of fine rows is never smaller than min_coarse_rows. + min_fine_rows = std::max( min_fine_rows, min_coarse_rows ); + // Reset AMG hierarchy. + amg->num_levels = 1; + // Build levels on the first device. + AMG_Level *level_0 = amg->getFinestLevel(memorySpaceTag0), *prev_level_0 = 0L; + + // if resetup + if (level_0->isSetup() && amg->m_structure_reuse_levels == 0) + { + delete level_0; + level_0 = AMG_LevelFactory::allocate(amg); + amg->setFinestLevel( level_0 ); + } + + level_0->setA(A); + level_0->setLevelIndex( 0 ); + level_0->setup(); + + if (level_0->isClassicalAMGLevel() && amg->m_amg_consolidation_flag == 1 && level_0->getA().is_matrix_distributed()) + { +#ifdef AMGX_WITH_MPI + + if (amg->m_consolidation_lower_threshold == 0 ) // m_consolidation_lower_threshold is unset + { + int root = 0; + int max = 0, min = 0; + MPI_Comm comm = level_0->getA().manager->getComms()->get_mpi_comm(); + + if (level_0->getA().manager->global_id() == 0 ) + { + size_t avail, total; + cudaMemGetInfo (&avail, &total); + size_t used = level_0->bytes(); // Memory used by the finest level. 
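+ //The heuristic below, spelled out with illustrative numbers (not taken from any real run):
+ //  hierarchy ~ 6 * used               (assume the whole hierarchy is ~6x the finest level)
+ //  strength  = (total - 1 GB) / hierarchy
+ //  max rows  = strength * rows / 6    (only if strength > 1; otherwise max = 1, min = 0)
+ //Example: total = 16 GB and used = 1 GB give hierarchy = 6 GB,
+ //strength = (16 - 1) / 6 = 2.5 and max = 2.5 * rows / 6, i.e. roughly 0.42 * rows per rank.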
+ size_t hierarchy = 6 * used; // Estimation of the size of the hierarchy + size_t overhead = 1000000000; // 1GB of storage for other AMGX stuff + // The Strength factor represents how many time a matrix like the one we localy have can fit into this GPU + // This is based on the one we have on the finest level on rank 0 and considering the total hierarchy can be 6x larger + double strength = (static_cast(total - overhead)) / hierarchy; + + // The sum of memory required by coarse levels should be (approximately) smaller or equal than 6x the memory requiered by the finest level. + // This assume a good load balencing + // We should check when we glue matrices that we are not going out of memory. + if (strength > 1.0) + { + int rows = level_0->getNumRows(); + max = (strength * rows) / 6; // We divide by 6 because we increase the size of the following coarse levels by increasing the size of the current matrix + + if (max > 0) + { + min = max - 1; + } + else + { + max = 1; + min = 0; + } + } + else + { + max = 1; + min = 0; + } + } + + MPI_Bcast( &max, 1, MPI_INT, root, comm ); + MPI_Bcast( &min, 1, MPI_INT, root, comm ); + amg->m_consolidation_lower_threshold = min; + amg->m_consolidation_upper_threshold = max; + } + + if (amg->m_consolidation_lower_threshold > 0) + { + prev_level_0 = setup_v2( amg, level_0, min_fine_rows, min_fine_rows > min_coarse_rows ); // entering in gluing path + } + else +#endif + { + prev_level_0 = setup( amg, level_0, min_fine_rows, min_fine_rows > min_coarse_rows ); // no glue because the matrix is too big + } + } + else + { + prev_level_0 = setup( amg, level_0, min_fine_rows, min_fine_rows > min_coarse_rows ); // usual path / aggregation consolidation path + } + + // Move to the other memory space if needed. + if ( min_fine_rows == min_coarse_rows ) + { + Solver *coarseSolver = amg->getCoarseSolver( memorySpaceTag0 ); + + if ( coarseSolver ) + { + coarseSolver->setup( level_0->getA(), false ); + } + } + else + { + AMG_Level *level_1 = AMG_LevelFactory::allocate(amg); + amg->setFinestLevel( level_1 ); + level_1->getA( ).copy( level_0->getA( ) ); + level_1->setLevelIndex( level_0->getLevelIndex( ) ); + level_1->setup(); + + // Make that level the next one in the hierarchy. + if ( prev_level_0 ) + { + prev_level_0->setNextLevel( level_1 ); + assert( prev_level_0->getNextLevel( memorySpaceTag0 ) == level_0 ); + prev_level_0->deleteNextLevel( memorySpaceTag0 ); + } + + // Build the hierarchy. + setup( amg, level_1, min_coarse_rows, false ); + // Build the coarse solver. + Solver *coarseSolver = amg->getCoarseSolver( memorySpaceTag1 ); + + if ( coarseSolver ) + { + coarseSolver->setup( level_1->getA(), false ); + } + } + + // Used only for device modes without hybrid mode. After reaching level where numrows <= amg_host_levels_rows + // it creates copy of the hierarchy starting with this level. + // This is experimental feauture intended to measure scaling of the solve part when coarse levels are on the host. 
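+ //Descriptive outline of the block below: walk the device hierarchy until a level with at
+ //most m_amg_host_levels_rows rows is found, copy that level and every coarser one to the
+ //host (transfer_from + setup + smoother), delete the remaining device tail, and relink the
+ //last device level to the first host level so the solve phase continues on the host.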
+ if (amg->m_amg_host_levels_rows > 0) + { + AMG_Level *d_cur_lvl = amg->getFinestLevel(memorySpaceTag0); + AMG_Level *h_cur_lvl = NULL, *h_prev_lvl = NULL; + AMG_Level *last_dev_lvl = NULL; + AMG_Level *first_host_lvl = NULL; + + while (d_cur_lvl != NULL) + { + if (d_cur_lvl->getNumRows() <= amg->m_amg_host_levels_rows) + { + break; + } + + last_dev_lvl = d_cur_lvl; + d_cur_lvl = d_cur_lvl->getNextLevel( memorySpaceTag0 ); + } + + if (d_cur_lvl != NULL) + { + while (d_cur_lvl != NULL) + { + h_cur_lvl = AMG_LevelFactory::allocate(amg, amg->tmng); + h_cur_lvl->transfer_from(d_cur_lvl); + h_cur_lvl->setup(); + + if (amg->getCoarseSolver(memorySpaceTag0) != NULL) + { + //remove coarse solver on the device + delete amg->getCoarseSolver(memorySpaceTag0); + amg->setCoarseSolver(NULL, memorySpaceTag0); + // it should exist for the host, but check nevertheless + Solver *coarseSolver = amg->getCoarseSolver( memorySpaceTag1 ); + bool coarseSolverExists = coarseSolver != NULL; + + if (!coarseSolverExists) + { + FatalError("Need to recrreate coarse solver got the host", AMGX_ERR_NOT_IMPLEMENTED); + } + } + else + { + h_cur_lvl->setup_smoother(); + } + + if (first_host_lvl == NULL) + { + first_host_lvl = h_cur_lvl; + } + + if (h_prev_lvl != NULL) + { + h_prev_lvl->setNextLevel(h_cur_lvl); + } + + h_prev_lvl = h_cur_lvl; + h_cur_lvl = NULL; + d_cur_lvl = d_cur_lvl->getNextLevel(memorySpaceTag0); + } + + // cleanup unnecessary device hierarchy part + delete last_dev_lvl->getNextLevel(memorySpaceTag0); + // link last device level to the first host level + last_dev_lvl->setNextLevel(first_host_lvl); + last_dev_lvl->resetNextLevel(memorySpaceTag0); + // tell amg that there are host levels + amg->setFinestLevel( first_host_lvl ); + } + } + + MemoryInfo::updateMaxMemoryUsage(); + logDeviceType( ); + logDeviceType( ); + } +}; + +/********************************************************** + * Solves the AMG system + *********************************************************/ +template< class T_Config > +class AMG_Solve +{ + typedef T_Config TConfig; + static const AMGX_VecPrecision vecPrec = TConfig::vecPrec; + static const AMGX_MatPrecision matPrec = TConfig::matPrec; + static const AMGX_IndPrecision indPrec = TConfig::indPrec; + typedef typename TConfig::MemSpace MemorySpace; + + typedef Matrix Matrix_hd; + typedef Vector Vector_hd; + typedef Vector > Vector_h; + typedef T_Config TConfig_hd; + typedef AMG AMG_Class; + + public: + + static void solve_iteration( AMG_Class *amg, Vector_hd &b, Vector_hd &x) + { + cudaStreamSynchronize(0); + nvtxRange amg_si("amg_solve_iteration"); + MemorySpace memorySpaceTag; + AMG_Level *fine = amg->getFinestLevel( memorySpaceTag ); + assert(fine != NULL); + CycleFactory::generate( amg, fine, b, x ); + fine->unsetInitCycle(); + // Note: this sometimes takes too much time on host making GPU idle. + // Solve is not that important for memory - main mem usage comes from setup. + // Disabling this call for now + //MemoryInfo::updateMaxMemoryUsage(); + cudaStreamSynchronize(0); + } + +}; +// Setup the hierarchy to solve on host/device. 
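+// A minimal usage sketch in comment form (the vector and matrix names are illustrative only):
+//   amg.setup(A);                    // build the hierarchy for a host or device matrix
+//   amg.solve_init(b, x, xIsZero);   // mark the finest level for an init cycle when x == 0
+//   amg.solve_iteration(b, x);       // run one multigrid cycle, generated via CycleFactory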
+template +void AMG::setup( Matrix_h &A ) +{ + if ( m_dense_lu_num_rows > 0 ) + { + min_coarse_rows = m_dense_lu_num_rows / A.get_block_dimy(); + } + + // read reuse structure levels option from config in case it has been changed + // this allows fine control over the reuse of hierarchies if setup/solve is called multiple times + m_structure_reuse_levels = m_cfg->getParameter("structure_reuse_levels", m_cfg_scope); + AMG_Setup::template setup( this, A ); + + // Don't need the workspace anymore + if ( d2_workspace != NULL && d2_workspace != csr_workspace ) + { + typedef TemplateConfig TConfig_d; + CSR_Multiply::csr_workspace_delete( d2_workspace ); + csr_workspace = NULL; + } + + if ( csr_workspace != NULL ) + { + typedef TemplateConfig TConfig_d; + CSR_Multiply::csr_workspace_delete( csr_workspace ); + csr_workspace = NULL; + } +} + +template +void AMG::setup( Matrix_d &A ) +{ + if ( m_dense_lu_num_rows > 0 ) + { + min_coarse_rows = m_dense_lu_num_rows / A.get_block_dimy(); + } + + // read reuse structure levels option from config in case it has been changed + // this allows fine control over the reuse of hierarchies if setup/solve is called multiple times + m_structure_reuse_levels = m_cfg->getParameter("structure_reuse_levels", m_cfg_scope); + AMG_Setup::template setup( this, A ); + + // Don't need the workspace anymore + if ( d2_workspace != NULL && d2_workspace != csr_workspace ) + { + typedef TemplateConfig TConfig_d; + CSR_Multiply::csr_workspace_delete( d2_workspace ); + csr_workspace = NULL; + } + + if ( csr_workspace != NULL ) + { + typedef TemplateConfig TConfig_d; + CSR_Multiply::csr_workspace_delete( csr_workspace ); + csr_workspace = NULL; + } +} + +// Setup the hierarchy to solve on host. +template +void AMG::setup( AMG_Level *level ) +{ + AMG_Setup::template setup( this, level, 2, false ); +} + +template +void AMG::setup( AMG_Level *level ) +{ + AMG_Setup::template setup( this, level, 2, false ); +} + +template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > +void +AMG::solve_init( Vector_d &b, Vector_d &x, bool xIsZero) +{ + if (xIsZero) + { + fine_d->setInitCycle(); + } +} + +template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > +void +AMG::solve_init( Vector_h &b, Vector_h &x, bool xIsZero) +{ + if (xIsZero) + { + fine_h->setInitCycle(); + } +} + +template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > +void +AMG::solve_iteration( Vector_d &b, Vector_d &x) +{ + AMGX_CPU_PROFILER( "AMG::solve_iteration " ); + AMG_Solve::solve_iteration( this, b, x); +} + +template< AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec > +void +AMG::solve_iteration( Vector_h &b, Vector_h &x) +{ + AMGX_CPU_PROFILER( "AMG::solve_iteration " ); + AMG_Solve::solve_iteration( this, b, x); +} + + +template +void AMG::getGridStatisticsString(std::stringstream &ss) +{ + AMG_Level *level_d = this->fine_d; + AMG_Level *level_h = this->fine_h; + int64_t total_rows = 0; + int64_t total_nnz = 0; + float total_size = 0; + ss << "AMG Grid:\n"; + ss << " Number of Levels: " << this->num_levels << endl; + AMGXLOG("Number of Levels", this->num_levels) + ss << std::setw(15) << "LVL" << std::setw(13) << "ROWS" << std::setw(18) << "NNZ" + << std::setw(10) << "SPRSTY" << std::setw(15) << "Mem (GB)" << std::endl; + ss << " --------------------------------------------------------------\n"; + + while (level_d != NULL) + { + int has_diag = level_d->getA( 
).hasProps(DIAG) ? 1 : 0; + int64_t num_rows = (int)(level_d->getA( ).get_num_rows() * level_d->getA( ).get_block_dimy()); + int64_t nnz = (int)((level_d->getA( ).get_num_nz() + + has_diag * level_d->getA( ).get_num_rows()) * level_d->getA( ).get_block_dimy() + * level_d->getA( ).get_block_dimx()); + float size = level_d->bytes(true) / 1024.0 / 1024 / 1024; + + // If aggregation AMG, skip this if # of neighbors = 0, since we're consolidating + // If classical AMG, we need to enter here since ranks are allowed to have 0 rows (or no neighbors) + if ( !level_d->getA().is_matrix_singleGPU() || + (level_d->isClassicalAMGLevel() && level_d->getA().is_matrix_distributed()) ) + { + level_d->getA().manager->global_reduce_sum(&num_rows); + level_d->getA().manager->global_reduce_sum(&nnz); + level_d->getA().manager->global_reduce_sum(&size); + } + + total_rows += num_rows; + total_nnz += nnz; + total_size += size; + double sparsity = nnz / (double) ( num_rows * num_rows); + ss << std::setw(12) << level_d->getLevelIndex( ) << "(D)" + << std::setw(13) << num_rows + << std::setw(18) << nnz + << std::setw(10) << std::setprecision(3) << sparsity + << std::setw(15) << size + << std::setprecision(6) << std::endl; + level_d = level_d->getNextLevel( device_memory( ) ); + } + + while (level_h != NULL) + { + int has_diag = level_h->getA( ).hasProps(DIAG) ? 1 : 0; + int64_t num_rows = (int)(level_h->getA( ).get_num_rows() * level_h->getA( ).get_block_dimy()); + int64_t nnz = (int)((level_h->getA( ).get_num_nz() + + has_diag * level_h->getA( ).get_num_rows()) * level_h->getA( ).get_block_dimy() + * level_h->getA( ).get_block_dimx()); + float size = level_h->bytes(true) / 1024.0 / 1024 / 1024; + + // If aggregation AMG, skip this if # of neighbors = 0, since we're consolidating + // If classical AMG, we need to enter here since ranks are allowed to have 0 rows (or no neighbors) + if ( !level_h->getA().is_matrix_singleGPU() || + (level_h->isClassicalAMGLevel() && level_h->getA().is_matrix_distributed()) ) + { + level_h->getA().manager->global_reduce_sum(&num_rows); + level_h->getA().manager->global_reduce_sum(&nnz); + level_h->getA().manager->global_reduce_sum(&size); + } + + total_rows += num_rows; + total_nnz += nnz; + total_size += size; + double sparsity = nnz / (double) ( num_rows * num_rows); + ss << std::setw(12) << level_h->getLevelIndex( ) << "(H)" + << std::setw(13) << num_rows + << std::setw(18) << nnz + << std::setw(10) << std::setprecision(3) << sparsity + << std::setw(15) << size + << std::setprecision(6) << std::endl; + level_h = level_h->getNextLevel( host_memory( ) ); + } + + int64_t fine_rows; + int64_t fine_nnz; + + if (this->fine_h) + { + fine_rows = this->fine_h->getA( ).get_num_rows() * this->fine_h->getA( ).get_block_dimy(); + fine_nnz = this->fine_h->getA( ).get_block_dimy() * this->fine_h->getA( ).get_block_dimx() + * ( this->fine_h->getA( ).get_num_nz() + + (this->fine_h->getA( ).hasProps(DIAG) ? this->fine_h->getA( ).get_num_rows() : 0) ) ; + + if (this->fine_h->getA().is_matrix_distributed()) + { + this->fine_h->getA().manager->global_reduce_sum(&fine_rows); + this->fine_h->getA().manager->global_reduce_sum(&fine_nnz); + } + } + else + { + fine_rows = this->fine_d->getA( ).get_num_rows() * this->fine_d->getA( ).get_block_dimy() ; + fine_nnz = this->fine_d->getA( ).get_block_dimy() * this->fine_d->getA( ).get_block_dimx() + * ( this->fine_d->getA( ).get_num_nz() + + (this->fine_d->getA( ).hasProps(DIAG) ? 
this->fine_d->getA( ).get_num_rows() : 0) ); + + if (this->fine_d->getA().is_matrix_distributed()) + { + this->fine_d->getA().manager->global_reduce_sum(&fine_rows); + this->fine_d->getA().manager->global_reduce_sum(&fine_nnz); + } + } + + ss << " --------------------------------------------------------------\n"; + ss << " Grid Complexity: " << total_rows / (double) fine_rows << std::endl; + ss << " Operator Complexity: " << total_nnz / (double) fine_nnz << std::endl; + ss << " Total Memory Usage: " << total_size << " GB" << std::endl; + ss << " --------------------------------------------------------------\n"; +} + +template +void AMG::printGridStatistics( ) +{ + std::stringstream ss; + this->getGridStatisticsString(ss); + amgx_output(ss.str().c_str(), static_cast(ss.str().length())); +} + +template +void AMG::getGridStatisticsString2(std::stringstream &ss) +{ + AMG_Level *level_d = this->fine_d; + AMG_Level *level_h = this->fine_h; + int total_rows = 0; + int total_nnz = 0; + float total_size = 0; + ss << " multigrid levels:\n"; + + while (level_d != NULL) + { + int has_diag = level_d->getA( ).hasProps(DIAG) ? 1 : 0; + total_rows += (int)(level_d->getA( ).get_num_rows() * level_d->getA( ).get_block_dimy()); + total_nnz += (int)((level_d->getA( ).get_num_nz() + has_diag * level_d->getA( ).get_num_rows()) * level_d->getA( ).get_block_dimy() * level_d->getA( ).get_block_dimx()); + float size = level_d->bytes() / 1024.0 / 1024 / 1024; + total_size += size; + ss << std::setw(5) << level_d->getLevelIndex( ) << " " + << std::setw(5) << level_d->getA( ).get_num_rows() << std::endl; + level_d = level_d->getNextLevel( device_memory( ) ); + } + + while (level_h != NULL) + { + int has_diag = level_h->getA( ).hasProps(DIAG) ? 1 : 0; + total_rows += (int)(level_h->getA( ).get_num_rows() * level_h->getA( ).get_block_dimy()); + total_nnz += (int)((level_h->getA( ).get_num_nz() + has_diag * level_h->getA( ).get_num_rows()) * level_h->getA( ).get_block_dimy() * level_h->getA( ).get_block_dimx()); + float size = level_h->bytes() / 1024.0 / 1024 / 1024; + total_size += size; + ss << std::setw(5) << level_h->getLevelIndex( ) << " " + << std::setw(5) << level_h->getA( ).get_num_rows() << std::endl; + level_h = level_h->getNextLevel( host_memory( ) ); + } +} + +template +void AMG::printGridStatistics2( ) +{ + std::stringstream ss; + this->getGridStatisticsString2(ss); + amgx_output(ss.str().c_str(), static_cast(ss.str().length())); +} + +using std::scientific; +using std::fixed; + +// print a line of length l, starting at character s +void printLine(const int l, const int s) +{ + std::stringstream ss; + ss << setw(s) << " "; + + for (int i = 0; i < l; i++) + { + ss << "-"; + } + + ss << endl; + amgx_output(ss.str().c_str(), static_cast(ss.str().length())); +} + +template +void AMG::printCoarsePoints() +{ +#ifdef DEBUG + typedef std::vector iVec; + typedef std::vector::iterator iVecIter; + ofstream coarsePoints("coarse_points.dat"); + iVec originalRows; + AMG_Level *level_d = fine_d; + + while ( level_d != NULL ) + { + originalRows = level_d->getOriginalRows(); + level_d = level_d->next_d; + + if (level_d == NULL) + { + break; + } + + coarsePoints << level_d->level_id << " " << level_d->getNumRows() << endl; + + for (iVecIter it = originalRows.begin(); it != originalRows.end(); ++it) + { + coarsePoints << *it << endl; + } + } + + AMG_Level *level_h = fine_h; + + while ( level_h != NULL ) + { + originalRows = level_h->getOriginalRows(); + level_h = level_h->next_h; + + if (level_h == NULL) + { + break; + } + + 
coarsePoints << level_h->level_id << " " << level_h->getNumRows() << endl;
+
+        for (iVecIter it = originalRows.begin(); it != originalRows.end(); ++it)
+        {
+            coarsePoints << *it << endl;
+        }
+    }
+
+    coarsePoints.close();
+#endif
+}
+
+template
+void AMG::printConnections()
+{
+#ifdef DEBUG
+    ofstream connFile("connections.dat");
+    AMG_Level *level_d = fine_d;
+    Matrix_d ATemp_d;
+
+    while (level_d != NULL)
+    {
+        connFile << level_d->level_id << " " << level_d->getNumRows() << endl;
+        ATemp_d = level_d->getA();
+
+        for (int i = 0; i < ATemp_d.get_num_rows(); i++)
+        {
+            // get the row offset & num rows
+            int offset = ATemp_d.row_offsets[i];
+            int numEntries = ATemp_d.row_offsets[i + 1] - offset;
+            // # of connections is numEntries - 1 (ignoring diagonal)
+            // this->numConnections.push_back(numEntries-1);
+            connFile << numEntries - 1 << " ";
+
+            // loop over non-zeros and add non-diagonal terms
+            for (int j = offset; j < offset + numEntries; j++)
+            {
+                int columnIndex = ATemp_d.column_indices[j];
+
+                if (i != columnIndex)
+                {
+                    // this->connections.push_back(columnIndex);
+                    connFile << columnIndex << " ";
+                }
+            }
+
+            connFile << endl;
+        }
+
+        level_d = level_d->next_d;
+    }
+
+    AMG_Level *level_h = fine_h;
+    Matrix_h ATemp_h;
+
+    while (level_h != NULL)
+    {
+        connFile << level_h->level_id << " " << level_h->getNumRows() << endl;
+        ATemp_h = level_h->getA();
+
+        for (int i = 0; i < ATemp_h.get_num_rows(); i++)
+        {
+            // get the row offset & num rows
+            int offset = ATemp_h.row_offsets[i];
+            int numEntries = ATemp_h.row_offsets[i + 1] - offset;
+            // # of connections is numEntries - 1 (ignoring diagonal)
+            // this->numConnections.push_back(numEntries-1);
+            connFile << numEntries - 1 << " ";
+
+            // loop over non-zeros and add non-diagonal terms
+            for (int j = offset; j < offset + numEntries; j++)
+            {
+                int columnIndex = ATemp_h.column_indices[j];
+
+                if (i != columnIndex)
+                {
+                    // this->connections.push_back(columnIndex);
+                    connFile << columnIndex << " ";
+                }
+            }
+
+            connFile << endl;
+        }
+
+        level_h = level_h->next_h;
+    }
+
+#endif
+}
+
+/****************************************
+ * Explicit instantiations
+ ***************************************/
+// real valued case
+template class AMG;
+template class AMG;
+template class AMG;
+
+// complex valued case
+template class AMG;
+template class AMG;
+template class AMG;
+
+} // namespace amgx
+
diff --git a/cuda_code/ampere_tf32_tensorop_gemm_1.cu b/cuda_code/ampere_tf32_tensorop_gemm_1.cu
new file mode 100644
index 0000000000000000000000000000000000000000..cba1f39e9dd852d3711a092964a8f5f8a949e14f
--- /dev/null
+++ b/cuda_code/ampere_tf32_tensorop_gemm_1.cu
@@ -0,0 +1,267 @@
+/***************************************************************************************************
+ * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ *modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright notice,
+ *this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *notice, this list of conditions and the following disclaimer in the
+ *documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the NVIDIA CORPORATION nor the names of its
+ *contributors may be used to endorse or promote products derived from this
+ *software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
+ *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+
+/**
+Please check example 07 and 08 for the basics of tensor op gemm kernels. On
+NVIDIA Ampere architecture, most concepts still hold. The two main differences
+are
+
+1. NVIDIA Ampere architecture introduces a new series of tensor core
+instructions (see include/cutlass/arch/mma_sm80.h) which are more efficient on
+Ampere.
+
+2. NVIDIA Ampere architecture uses cp_async() to build multistage software
+pipeline to better hide latency (see
+include/cutlass/gemm/threadblock/mma_multistage.h)
+
+Moreover, NVIDIA Ampere architecture starts supporting tfloat32 (see
+include/cutlass/tfloat32.h) data types in tensor cores. One big advantage is
+that we can load in fp32 data and convert them implicitly to tf32 inside the
+GEMM kernel which means no change is needed to accelerate traditional fp32 data
+by using NVIDIA Ampere architecture.
+*/
+
+#include <iostream>
+
+#include "cutlass/cutlass.h"
+#include "cutlass/gemm/device/gemm.h"
+#include "cutlass/util/host_tensor.h"
+#include "cutlass/util/reference/device/gemm.h"
+#include "cutlass/util/reference/host/tensor_compare.h"
+#include "cutlass/util/reference/host/tensor_copy.h"
+#include "cutlass/util/reference/host/tensor_fill.h"
+#include "cutlass/util/tensor_view_io.h"
+#include "helper.h"
+
+// The code section below describes datatype for input, output matrices and
+// computation between elements in input matrices.
+using ElementAccumulator = float;  // <- data type of accumulator
+using ElementComputeEpilogue =
+    ElementAccumulator;            // <- data type of epilogue operations
+using ElementInputA = float;       // <- data type of elements in input matrix A
+using ElementInputB = float;       // <- data type of elements in input matrix B
+using ElementOutput = float;       // <- data type of elements in output matrix D
+
+// The code section below describes matrix layout of input and output matrices.
+// Column Major for Matrix A, Row Major for Matrix B and Row Major for Matrix C +using LayoutInputA = cutlass::layout::RowMajor; +using LayoutInputB = cutlass::layout::ColumnMajor; +using LayoutOutput = cutlass::layout::RowMajor; + +// This code section describes whether you want to use tensor cores or regular +// SIMT cores on GPU SM +using MMAOp = cutlass::arch::OpClassTensorOp; + +// This code section describes CUDA SM architecture number +using SmArch = cutlass::arch::Sm80; + +// This code section describes the tile size a thread block will compute +using ShapeMMAThreadBlock = + cutlass::gemm::GemmShape<128, 128, 16>; // <- threadblock tile M = 128, + // N = 128, K = 16 +// This code section describes tile size a warp will compute +using ShapeMMAWarp = + cutlass::gemm::GemmShape<64, 64, + 16>; // <- warp tile M = 64, N = 64, K = 16 +// This code section describes the size of MMA op +using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = + // 16, N = 8, K = 8 + +// This code section describes how threadblocks are scheduled on GPU +using SwizzleThreadBlock = + cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? + +// This code section describes the epilogue part of the kernel +using EpilogueOp = cutlass::epilogue::thread::LinearCombination< + ElementOutput, // <- data type of output matrix + 128 / cutlass::sizeof_bits:: + value, // <- the number of elements per vectorized + // memory access. For a byte, it's 16 + // elements. This becomes the vector width of + // math instructions in the epilogue too + ElementAccumulator, // <- data type of accumulator + ElementComputeEpilogue>; // <- data type for alpha/beta in linear + // combination function + +// Number of pipelines you want to use +constexpr int NumStages = 4; + +using Gemm = cutlass::gemm::device::Gemm< + ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, + LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, + ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; + +int run() { + const int length_m = 5120; + const int length_n = 4096; + const int length_k = 4096; + + // Create a tuple of problem size for matrix multiplication + cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); + + // Initialize tensors using CUTLASS helper functions + cutlass::HostTensor tensor_a( + problem_size.mk()); // <- Create matrix A with dimensions M x K + cutlass::HostTensor tensor_b( + problem_size.kn()); // <- Create matrix B with dimensions K x N + cutlass::HostTensor tensor_c( + problem_size.mn()); // <- Create matrix C with dimensions M x N + cutlass::HostTensor tensor_d( + problem_size.mn()); // <- Create matrix D with dimensions M x N + // used to store output from CUTLASS kernel + cutlass::HostTensor tensor_ref_d( + problem_size.mn()); // <- Create matrix D with dimensions M x N + // used to store output from reference kernel + + // Fill input and output matrices on host using CUTLASS helper functions + cutlass::reference::host::TensorFillRandomUniform( + tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), + 0); // <- Fill matrix A on host with uniform-distribution random + // data + cutlass::reference::host::TensorFillRandomUniform( + tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), + 0); // <- Fill matrix B on host with uniform-distribution random + // data + cutlass::reference::host::TensorFillRandomUniform( + tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), + 0); // <- Fill matrix C on host 
with uniform-distribution random + // data + cutlass::reference::host::TensorFill( + tensor_d.host_view()); // <- fill matrix D on host with zeros + cutlass::reference::host::TensorFill( + tensor_ref_d.host_view()); // <- fill matrix D for reference on + // host with zeros + + // Copy data from host to GPU + tensor_a.sync_device(); + tensor_b.sync_device(); + tensor_c.sync_device(); + tensor_d.sync_device(); + tensor_ref_d.sync_device(); + + // Initialize alpha and beta for dot product computation + ElementComputeEpilogue alpha = ElementComputeEpilogue(1); + ElementComputeEpilogue beta = ElementComputeEpilogue(0); + + // Split K dimension into 1 partitions + int split_k_slices = 1; + + // Create a tuple of gemm kernel arguments. This is later passed as + // arguments to launch instantiated CUTLASS kernel + typename Gemm::Arguments arguments{ + problem_size, // <- problem size of matrix multiplication + tensor_a.device_ref(), // <- reference to matrix A on device + tensor_b.device_ref(), // <- reference to matrix B on device + tensor_c.device_ref(), // <- reference to matrix C on device + tensor_d.device_ref(), // <- reference to matrix D on device + {alpha, beta}, // <- tuple of alpha and beta + split_k_slices}; // <- k-dimension split factor + + // Using the arguments, query for extra workspace required for matrix + // multiplication computation + size_t workspace_size = Gemm::get_workspace_size(arguments); + + // Allocate workspace memory + cutlass::device_memory::allocation workspace(workspace_size); + + // Instantiate CUTLASS kernel depending on templates + Gemm gemm_op; + + // Initialize CUTLASS kernel with arguments and workspace pointer + cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); + CUTLASS_CHECK(status); + + // Launch initialized CUTLASS kernel + status = gemm_op(); + CUTLASS_CHECK(status); + + // Create instantiation for device reference gemm kernel + cutlass::reference::device::Gemm + gemm_device; + + // Launch device reference gemm kernel + gemm_device(problem_size, alpha, tensor_a.device_ref(), + tensor_b.device_ref(), beta, tensor_c.device_ref(), + tensor_ref_d.device_ref()); + + // Wait for kernels to finish + cudaDeviceSynchronize(); + + // Copy output data from CUTLASS and reference kernel to host for comparison + tensor_d.sync_host(); + tensor_ref_d.sync_host(); + + // Check if output from CUTLASS kernel and reference kernel are equal or not + bool passed = cutlass::reference::host::TensorEquals( + tensor_d.host_view(), tensor_ref_d.host_view()); + + std::cout << (passed ? "Passed" : "Failed") << std::endl; + + return (passed ? 0 : -1); +} + +int main() { + bool notSupported = false; + + // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are + // first available in CUDA 11.0. + // + // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. + if (!(__CUDACC_VER_MAJOR__ >= 11)) { + std::cerr << "Ampere Tensor Core operations must be compiled with CUDA " + "11.0 Toolkit or later." + << std::endl; + notSupported = true; + } + + cudaDeviceProp props; + + cudaError_t error = cudaGetDeviceProperties(&props, 0); + if (error != cudaSuccess) { + std::cerr << "cudaGetDeviceProperties() returned an error: " + << cudaGetErrorString(error) << std::endl; + return -1; + } + + if (!((props.major * 10 + props.minor) >= 80)) { + std::cerr << "Turing Tensor Core operations must be run on a machine " + "with compute capability at least 80." 
+ << std::endl; + notSupported = true; + } + + if (notSupported) { + // Returning zero so this test passes on older Toolkits. Its actions are + // no-op. + return 0; + } + + return run(); +} diff --git a/cuda_code/angle_force_with_atom_energy_impl_4.cu b/cuda_code/angle_force_with_atom_energy_impl_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..6891427b77d8544584d306576b05a3ed95e6a172 --- /dev/null +++ b/cuda_code/angle_force_with_atom_energy_impl_4.cu @@ -0,0 +1,89 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh" +#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh" +#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh" + +__global__ void AngleForceWithAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, + const VECTOR *scaler, const int *atom_a, const int *atom_b, + const int *atom_c, const float *angle_k, const float *angle_theta0, + VECTOR *frc, float *atom_energy) { + int angle_i = blockDim.x * blockIdx.x + threadIdx.x; + if (angle_i < angle_numbers) { + int atom_i = atom_a[angle_i]; + int atom_j = atom_b[angle_i]; + int atom_k = atom_c[angle_i]; + + float theta0 = angle_theta0[angle_i]; + float k = angle_k[angle_i]; + float k2 = k; + + VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]); + VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]); + + float rij_2 = 1. / (drij * drij); + float rkj_2 = 1. 
/ (drkj * drkj); + float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2); + + float costheta = drij * drkj * rij_1_rkj_1; + costheta = fmaxf(-0.999999, fminf(costheta, 0.999999)); + float theta = acosf(costheta); + + float dtheta = theta - theta0; + k = -2 * k * dtheta / sinf(theta); + + float common_factor_cross = k * rij_1_rkj_1; + float common_factor_self = k * costheta; + + VECTOR fi = common_factor_self * rij_2 * drij - common_factor_cross * drkj; + VECTOR fk = common_factor_self * rkj_2 * drkj - common_factor_cross * drij; + + atomicAdd(&frc[atom_i].x, fi.x); + atomicAdd(&frc[atom_i].y, fi.y); + atomicAdd(&frc[atom_i].z, fi.z); + + atomicAdd(&frc[atom_k].x, fk.x); + atomicAdd(&frc[atom_k].y, fk.y); + atomicAdd(&frc[atom_k].z, fk.z); + + fi = -fi - fk; + + atomicAdd(&frc[atom_j].x, fi.x); + atomicAdd(&frc[atom_j].y, fi.y); + atomicAdd(&frc[atom_j].z, fi.z); + + atomicAdd(&atom_energy[atom_i], k2 * dtheta * dtheta); + } +} + +void AngleForceWithAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, + const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, + float *frc_f, float *ene, cudaStream_t stream) { + size_t thread_per_block = 128; + size_t block_per_grid = ceilf(static_cast(angle_numbers) / 128); + UNSIGNED_INT_VECTOR *uint_crd = + const_cast(reinterpret_cast(uint_crd_f)); + VECTOR *frc = const_cast(reinterpret_cast(frc_f)); + VECTOR *scaler = const_cast(reinterpret_cast(scaler_f)); + + AngleForceWithAtomEnergyKernel<<>>( + angle_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, angle_k, angle_theta0, frc, ene); + return; +} +void AngleForceWithAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, + const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, + float *frc_f, float *ene, cudaStream_t stream); diff --git a/cuda_code/argsort_2.cu b/cuda_code/argsort_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..666cf62f9b2851e09f143c8faad76b33d329bdac --- /dev/null +++ b/cuda_code/argsort_2.cu @@ -0,0 +1,182 @@ +/** + * \file dnn/src/cuda/argsort/argsort.cu + * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ */ + +#include "./argsort.cuh" +#include "./bitonic_sort.cuh" +#include "megdnn/basic_types.h" +#include "src/cuda/utils.cuh" + +#include "src/cuda/cub/device/device_radix_sort.cuh" +#include "src/cuda/cub/device/device_segmented_radix_sort.cuh" + +using namespace megdnn; +using namespace cuda; + +namespace { +struct StridedOffsetIterator { + int bias, stride; + + StridedOffsetIterator(int bias_, int stride_) + : bias(bias_), stride(stride_) {} + + __device__ __forceinline__ int operator[](int i) const { + return stride * i + bias; + } +}; + +bool use_bitonic(uint32_t /*M*/, uint32_t N) { + // bitonic sort is preferred when N is small (alwyas faster than radix sort) + return N <= BITONIC_SORT_MAX_LENGTH; +} + +bool use_segmented(uint32_t M, uint32_t /*N*/) { + // an empirical value: + // sort(1, 1e6): 0.574ms + // segsort({1,2,8,16}, 1e6): 7-8ms + // sort(1, 1e7): 3.425ms + // segsort({1,2,8,16}, 1e7): 71-84ms + // + // segsort is about 7x-10x slower than sort on small batches, so we can + // expect it to be faster than sort when batch is large enough. + return M >= 8; +} + +__global__ void kern_arange(int* dst, uint32_t n, uint32_t mod) { + uint32_t i = threadIdx.x + blockIdx.x * blockDim.x; + if (i < n) { + dst[i] = i % mod; + } +} + +template +size_t get_sort_workspace(uint32_t M, uint32_t N, bool is_ascending) { + if (use_bitonic(M, N)) { + return 0; + } + return argsort::cub_sort_pairs(is_ascending, NULL, 0, NULL, NULL, NULL, NULL, + M, N, 0, sizeof(float)*8, NULL); +} +} // anonymous namespace + +template +MEGDNN_NOINLINE size_t argsort::cub_sort_pairs( + bool is_ascending, void* workspace, size_t workspace_size, + const KeyType* keys_in, KeyType* keys_out, const ValueType* values_in, + ValueType* values_out, uint32_t M, uint32_t N, int begin_bit, int end_bit,cudaStream_t stream){ + cudaError_t err; + if (use_segmented(M, N)) { + if (is_ascending) { + err = cub::DeviceSegmentedRadixSort::SortPairs( + workspace, workspace_size, keys_in, keys_out, values_in, + values_out, N * M, M, StridedOffsetIterator(0, N), + StridedOffsetIterator(N, N), begin_bit, end_bit, stream); + cuda_check(err); + } else { + err = cub::DeviceSegmentedRadixSort::SortPairsDescending( + workspace, workspace_size, keys_in, keys_out, values_in, + values_out, N * M, M, StridedOffsetIterator(0, N), + StridedOffsetIterator(N, N), begin_bit, end_bit, stream); + cuda_check(err); + } + } else { + if (is_ascending) { + for (size_t i = 0; i < M; ++i) { + err = cub::DeviceRadixSort::SortPairs( + workspace, workspace_size, keys_in + N * i, + keys_out + N * i, values_in + N * i, values_out + N * i, + N, begin_bit, end_bit, stream); + cuda_check(err); + if (!keys_in) { + return workspace_size; + } + } + } else { + for (size_t i = 0; i < M; ++i) { + err = cub::DeviceRadixSort::SortPairsDescending( + workspace, workspace_size, keys_in + N * i, + keys_out + N * i, values_in + N * i, values_out + N * i, + N, begin_bit, end_bit, stream); + cuda_check(err); + if (!keys_in) { + return workspace_size; + } + } + } + } + return workspace_size; +} + +size_t argsort::get_fwd_workspace_in_bytes(uint32_t M, uint32_t N, DType dtype, + bool is_ascending, + bool iptr_src_given) { + size_t size = 0; + switch (dtype.enumv().ev) { +#define cb(ctype) \ + case DTypeTrait::enumv: \ + size = get_sort_workspace(M, N, is_ascending); \ + break; + ARGSORT_FOREACH_CTYPE(cb) +#undef cb + default: + megdnn_throw("argsort only supports float, int32 and float16"); + } + if (!iptr_src_given) { + size = DIVUP(size, sizeof(float)) * sizeof(float) + M * N * 
sizeof(int); + } + return size; +} + +template +void argsort::forward(const dtype* sptr, dtype* dptr, int* iptr, + void* workspace, uint32_t M, uint32_t N, + bool is_ascending, cudaStream_t stream, + const int* iptr_src) { + size_t wk_size = get_sort_workspace(M, N, is_ascending); + if (!iptr_src) { + int* ptr = reinterpret_cast(static_cast(workspace) + + DIVUP(wk_size, sizeof(float)) * + sizeof(float)); + kern_arange<<>>(ptr, M * N, N); + iptr_src = ptr; + } + + if (use_bitonic(M, N)) { + cuda_check(bitonic_sort(M, N, sptr, iptr_src, dptr, iptr, is_ascending, + stream)); + } else { + cub_sort_pairs(is_ascending, workspace, wk_size, sptr, dptr, iptr_src, + iptr, M, N, 0, sizeof(float)*8, stream); + } +} + +namespace megdnn { +namespace cuda { + +#define INST_CUB_SORT(dtype) \ +template MEGDNN_NOINLINE size_t argsort::cub_sort_pairs(bool, \ + void*, size_t, const dtype*, dtype*, \ + const dtype*, dtype*, uint32_t, uint32_t,\ + int, int, cudaStream_t); + +#define INST_FORWARD(dtype) \ +template void argsort::forward(const dtype*, dtype*, int*, void*, \ + uint32_t, uint32_t, bool, cudaStream_t, \ + const int*); + +ARGSORT_FOREACH_CTYPE(INST_FORWARD) +INST_CUB_SORT(uint32_t) +INST_CUB_SORT(uint64_t) +#undef INST_CUB_SORT +#undef INST_FORWARD +} +} // namespace megdnn +// vim: ft=cuda syntax=cuda.doxygen + diff --git a/cuda_code/arquivo3_2.cu b/cuda_code/arquivo3_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..5d62fdf76db6d0ecf359c34b2b3cf2031d14c7f2 --- /dev/null +++ b/cuda_code/arquivo3_2.cu @@ -0,0 +1,241 @@ +#include +#include +#include +#include +#include + +// Utilities and system includes +#include "arquivo3.h" +#include "comm/comm.h" +#include "comm/funcs.h" + + + +const char *sSDKname3 = "conjugateGradient"; + +/* genTridiag: generate a random tridiagonal symmetric matrix */ +void genTridiag_3(int *I, int *J, float *val, int N, int nz) +{ + I[0] = 0, J[0] = 0, J[1] = 1; + val[0] = (float)rand()/RAND_MAX + 10.0f; + val[1] = (float)rand()/RAND_MAX; + int start; + + for (int i = 1; i < N; i++) + { + if (i > 1) + { + I[i] = I[i-1]+3; + } + else + { + I[1] = 2; + } + + start = (i-1)*3 + 2; + J[start] = i - 1; + J[start+1] = i; + + if (i < N-1) + { + J[start+2] = i + 1; + } + + val[start] = val[start-1]; + val[start+1] = (float)rand()/RAND_MAX + 10.0f; + + if (i < N-1) + { + val[start+2] = (float)rand()/RAND_MAX; + } + } + + I[N] = nz; +} + +extern "C" void funcao3(){ + + + int M = 0, N = 0, nz = 0, *I = NULL, *J = NULL; + float *val = NULL; + const float tol = 1e-5f; + const int max_iter = 10000; + float *x; + float *rhs; + float a, b, na, r0, r1; + int *d_col, *d_row; + float *d_val, *d_x, dot; + float *d_r, *d_p, *d_Ax; + int k; + float alpha, beta, alpham1; + + int i,j,iter; + + + /* Generate a random tridiagonal symmetric matrix in CSR format */ + FILE *arquivo; + + arquivo = fopen("tamanho_matriz","r"); + + int tamanho; + + fscanf(arquivo,"%d",&tamanho); + + M = N = tamanho; + + //M = N = 10485760; + nz = (N-2)*3 + 4; + I = (int *)malloc(sizeof(int)*(N+1)); + J = (int *)malloc(sizeof(int)*nz); + val = (float *)malloc(sizeof(float)*nz); + genTridiag_3(I, J, val, N, nz); + + x = (float *)malloc(sizeof(float)*N); + rhs = (float *)malloc(sizeof(float)*N); + + for (i = 0; i < N; i++) + { + rhs[i] = 1.0; + x[i] = 0.0; + } + + + /* Get handle to the CUBLAS context */ + cublasHandle_t cublasHandle = 0; + cublasStatus_t cublasStatus; + cublasStatus = cublasCreate(&cublasHandle); + + //checkCudaErrors(cublasStatus); + + /* Get handle to the CUSPARSE context */ + 
cusparseHandle_t cusparseHandle = 0; + cusparseStatus_t cusparseStatus; + cusparseStatus = cusparseCreate(&cusparseHandle); + + //checkCudaErrors(cusparseStatus); + + cusparseMatDescr_t descr = 0; + cusparseStatus = cusparseCreateMatDescr(&descr); + + //checkCudaErrors(cusparseStatus); + + cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL); + cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO); + + cudaMalloc((void **)&d_col, nz*sizeof(int)); + cudaMalloc((void **)&d_row, (N+1)*sizeof(int)); + cudaMalloc((void **)&d_val, nz*sizeof(float)); + cudaMalloc((void **)&d_x, N*sizeof(float)); + cudaMalloc((void **)&d_r, N*sizeof(float)); + cudaMalloc((void **)&d_p, N*sizeof(float)); + cudaMalloc((void **)&d_Ax, N*sizeof(float)); + + cudaMemcpy(d_col, J, nz*sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_row, I, (N+1)*sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_val, val, nz*sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(d_r, rhs, N*sizeof(float), cudaMemcpyHostToDevice); + + + alpha = 1.0; + alpham1 = -1.0; + beta = 0.0; + r0 = 0.; + + float rsum, diff, err; + + for(iter=0;iter<20;iter++){ + receiveMessage("funcao3","funcao2", FLOAT, (void*)d_x, N*N); + receiveMessage("funcao3","funcao2", INT, (void*)d_col, nz); + receiveMessage("funcao3","funcao2", INT, (void*)d_row, N+1); + receiveMessage("funcao3","funcao2", FLOAT, (void*)d_val, nz); + + + cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax); + + cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); + cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); + + k = 1; + + while (r1 > tol*tol && k <= max_iter) + { + if (k > 1) + { + b = r1 / r0; + cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1); + cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); + } + else + { + cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1); + } + + cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); + cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); + a = r1 / dot; + + cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); + na = -a; + cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); + + r0 = r1; + cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); + cudaThreadSynchronize(); + //printf("iteration = %3d, residual = %e\n", k, sqrt(r1)); + k++; + } + + cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost); + + rsum = 0.0; + diff = 0.0; + err = 0.0; + + for (i = 0; i < N; i++) + { + rsum = 0.0; + + for (j = I[i]; j < I[i+1]; j++) + { + rsum += val[j]*x[J[j]]; + } + + diff = fabs(rsum - rhs[i]); + + if (diff > err) + { + err = diff; + } + } + //generate the matrix again + genTridiag_3(I, J, val, N, nz); + cudaMemcpy(d_col, J, nz*sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_row, I, (N+1)*sizeof(int), cudaMemcpyHostToDevice); + cudaMemcpy(d_val, val, nz*sizeof(float), cudaMemcpyHostToDevice); + } + + cusparseDestroy(cusparseHandle); + cublasDestroy(cublasHandle); + + free(I); + free(J); + free(val); + free(x); + free(rhs); + cudaFree(d_col); + cudaFree(d_row); + cudaFree(d_val); + cudaFree(d_x); + cudaFree(d_r); + cudaFree(d_p); + cudaFree(d_Ax); + + + + + + + + +} diff --git a/cuda_code/array_35.cu b/cuda_code/array_35.cu new file mode 100644 index 
0000000000000000000000000000000000000000..f20115ad4de3acb01375a0c162e341b422aa3fb9
--- /dev/null
+++ b/cuda_code/array_35.cu
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+
+#include "array/array.h"
+
+#include
+#include "test_utils.h"
+
+#include
+#include
+
+namespace MLCommon {
+namespace Array {
+
+class ArrayTest : public ::testing::Test {
+ protected:
+  void SetUp() override {}
+  void TearDown() override {}
+};
+
+typedef ArrayTest MakeMonotonicTest;
+TEST_F(MakeMonotonicTest, Result) {
+  cudaStream_t stream;
+  CUDA_CHECK(cudaStreamCreate(&stream));
+
+  int m = 12;
+
+  float *data, *actual, *expected;
+
+  allocate(data, m, true);
+  allocate(actual, m, true);
+  allocate(expected, m, true);
+
+  float *data_h =
+    new float[m]{1.0, 2.0, 2.0, 2.0, 2.0, 3.0, 8.0, 7.0, 8.0, 8.0, 25.0, 80.0};
+
+  float *expected_h =
+    new float[m]{1.0, 2.0, 2.0, 2.0, 2.0, 3.0, 5.0, 4.0, 5.0, 5.0, 6.0, 7.0};
+
+  updateDevice(data, data_h, m, stream);
+  updateDevice(expected, expected_h, m, stream);
+
+  make_monotonic(actual, data, m, stream);
+
+  CUDA_CHECK(cudaStreamSynchronize(stream));
+
+  ASSERT_TRUE(devArrMatch(actual, expected, m, Compare(), stream));
+
+  CUDA_CHECK(cudaStreamDestroy(stream));
+  CUDA_CHECK(cudaFree(data));
+  CUDA_CHECK(cudaFree(actual));
+
+  delete[] data_h;      // allocated with new[], so release with delete[]
+  delete[] expected_h;
+}
+};  // namespace Array
+};  // namespace MLCommon
diff --git a/cuda_code/array_36.cu b/cuda_code/array_36.cu
new file mode 100644
index 0000000000000000000000000000000000000000..95365f23fabdc60aa5d29f607d989b0a67ef38ae
--- /dev/null
+++ b/cuda_code/array_36.cu
@@ -0,0 +1,300 @@
+// This file is distributed under the MIT license.
+// See the LICENSE file for details.
+ +#include +#include +#include // memcpy + +#include +#include +#include +#include +#include + +#include + +#include + +using namespace visionaray; + + +//------------------------------------------------------------------------------------------------- +// Test reverse iterators +// + +template +__device__ void iota(FwdIt first, FwdIt last, T value) +{ + for (auto it = first; it != last; ++it) + { + *it = value++; + } +} + +template +__global__ void kernel_reverse_it(bool* mem, Array /* */) +{ + Array arr1; + iota(arr1.begin(), arr1.end(), 0); + + // Test non-const iterators for writing + Array arr2; + iota(arr2.rbegin(), arr2.rend(), 0); + + + size_t i = 0; + + // Test const reverse iterators obtained implicitly through rbegin() and rend() + auto it1 = arr1.rbegin(); + auto it2 = arr2.begin(); + for (; it1 != arr1.rend() && it2 != arr2.end(); ++it1, ++it2) + { + mem[i++] = *it1 == *it2; + } + + // Test const reverse iterators obtained through crbegin() and crend() + auto cit1 = arr1.crbegin(); + auto cit2 = arr2.cbegin(); + for (; cit1 != arr1.crend() && cit2 != arr2.cend(); ++cit1, ++cit2) + { + mem[i++] = *cit1 == *cit2; + } +} + +TEST(ArrayCU, ReverseIt) +{ + static const size_t N = 50; + + thrust::device_vector d_result(N * 2); + thrust::fill(d_result.begin(), d_result.end(), false); + + kernel_reverse_it<<<1, 1>>>( + thrust::raw_pointer_cast(d_result.data()), + array{} + ); + + thrust::host_vector h_result(d_result); + + for (auto b : h_result) + { + EXPECT_TRUE(b); + } +} + + +//------------------------------------------------------------------------------------------------- +// Test array::fill() +// + +template +__global__ void kernel_fill(T value, T* mem, Array /* */) +{ + Array arr; + arr.fill(value); + + // Copy to global memory so we can compare on the host + memcpy(mem, arr.data(), sizeof(arr)); +} + +TEST(ArrayCU, Fill) +{ + static const size_t N = 50; + thrust::device_vector d_result(N); + int value = 23; + + kernel_fill<<<1, 1>>>( + value, + thrust::raw_pointer_cast(d_result.data()), + array{} + ); + + thrust::host_vector h_result(d_result); + + for (size_t i = 0; i < N; ++i) + { + EXPECT_EQ(h_result[i], value); + } +} + + +//------------------------------------------------------------------------------------------------- +// Test array::swap() +// + +template +__global__ void kernel_swap(T* mem, Array /* */) +{ + Array arr1; + Array arr2; + + memcpy(arr1.data(), mem, sizeof(arr1)); + memcpy(arr2.data(), mem + arr1.size(), sizeof(arr2)); + + arr1.swap(arr2); + + memcpy(mem, arr1.data(), sizeof(arr1)); + memcpy(mem + arr1.size(), arr2.data(), sizeof(arr2)); +} + +TEST(ArrayCU, Swap) +{ + static const size_t N = 50; + + thrust::host_vector h_data(N * 2); + std::fill(h_data.data(), h_data.data() + N, 23); + std::fill(h_data.data() + N, h_data.data() + h_data.size(), 24); + thrust::device_vector d_data(h_data); + + kernel_swap<<<1, 1>>>( + thrust::raw_pointer_cast(d_data.data()), + array{} + ); + + thrust::copy(d_data.begin(), d_data.end(), h_data.begin()); + + for (size_t i = 0; i < N; ++i) + { + EXPECT_EQ(h_data[i], 24); + } + + for (size_t i = N; i < N * 2; ++i) + { + EXPECT_EQ(h_data[i], 23); + } + +} + + +//------------------------------------------------------------------------------------------------- +// Test interoperability with thrust::swap() +// + +template +__global__ void kernel_thrust_swap(T* mem, Array /* */) +{ + Array arr1; + Array arr2; + + memcpy(arr1.data(), mem, sizeof(arr1)); + memcpy(arr2.data(), mem + arr1.size(), sizeof(arr2)); + + thrust::swap(arr1, 
arr2); + + memcpy(mem, arr1.data(), sizeof(arr1)); + memcpy(mem + arr1.size(), arr2.data(), sizeof(arr2)); +} + +TEST(ArrayCU, ThrustSwap) +{ + static const size_t N = 50; + + thrust::host_vector h_data(N * 2); + std::fill(h_data.data(), h_data.data() + N, 23); + std::fill(h_data.data() + N, h_data.data() + h_data.size(), 24); + thrust::device_vector d_data(h_data); + + kernel_thrust_swap<<<1, 1>>>( + thrust::raw_pointer_cast(d_data.data()), + array{} + ); + + thrust::copy(d_data.begin(), d_data.end(), h_data.begin()); + + for (size_t i = 0; i < N; ++i) + { + EXPECT_EQ(h_data[i], 24); + } + + for (size_t i = N; i < N * 2; ++i) + { + EXPECT_EQ(h_data[i], 23); + } + +} + + +//------------------------------------------------------------------------------------------------- +// Test comparisons +// + +__global__ void kernel_compare(bool* result) +{ + const int N = 50; + + array arr1; + arr1.fill(23); + + array arr2; + arr2.fill(24); + + array arr3; + arr3.fill(23); + + array arr4; + array arr5; + for (int i = 0; i < N; ++i) + { + arr4[i] = i; + arr5[i] = (i + 1) % N; + } + + result[0] = ( arr1 == arr1 ); + result[1] = ( arr1 != arr2 ); + result[2] = ( arr2 != arr1 ); + result[3] = ( arr1 == arr3 ); + result[4] = ( arr3 == arr1 ); + result[5] = ( arr4 != arr5 ); + result[6] = ( arr5 != arr4 ); +} + + +TEST(ArrayCU, Compare) +{ + thrust::device_vector d_result(7); + thrust::fill(d_result.begin(), d_result.end(), false); + + kernel_compare<<<1, 1>>>(thrust::raw_pointer_cast(d_result.data())); + + thrust::host_vector h_result(d_result); + + for (auto b : h_result) + { + EXPECT_TRUE(b); + } +} + + +//------------------------------------------------------------------------------------------------- +// Test element access with thrust::get() +// + +__global__ void kernel_get(bool* mem) +{ + array arr; + thrust::get<0>(arr) = 0; + thrust::get<1>(arr) = 1; + thrust::get<2>(arr) = 2; + + mem[0] = arr[0] == 0; + mem[1] = arr[1] == 1; + mem[2] = arr[2] == 2; + + mem[3] = thrust::get<0>(arr) == 0; + mem[4] = thrust::get<1>(arr) == 1; + mem[5] = thrust::get<2>(arr) == 2; +} + +TEST(ArrayCU, Get) +{ + thrust::device_vector d_result(6); + thrust::fill(d_result.begin(), d_result.end(), false); + + kernel_get<<<1, 1>>>(thrust::raw_pointer_cast(d_result.data())); + + thrust::host_vector h_result(d_result); + + for (auto b : h_result) + { + EXPECT_TRUE(b); + } +} diff --git a/cuda_code/assert.cu b/cuda_code/assert.cu new file mode 100644 index 0000000000000000000000000000000000000000..ee5bd45e22df812efba439746ff3fd2612deac25 --- /dev/null +++ b/cuda_code/assert.cu @@ -0,0 +1,82 @@ +// clang-format off +#include +#include + +__global__ void init_random_numbers(unsigned int seed) { + printf("seed = %d\n", seed); + atomicAdd((int *)(12312433432), 123); + atomicAdd((float *)(12312433432), 123.0f); + __threadfence_block(); // membar.cta + __threadfence(); // membar.gl + __threadfence_system(); // membar.sys + assert(seed != 0); +} + + +// How LLVM deals with CUDA kernels with huge structs as parameters: +struct Arg { + float x[128]; + int y[128]; +}; + + +/* llvm IR of function below: + ; Function Attrs: convergent noinline nounwind optnone +define dso_local void @_Z20test_struct_argument3Arg(%struct.Arg* byval align 4) #0 { + %2 = alloca %printf_args.0 + %3 = getelementptr inbounds %struct.Arg, %struct.Arg* %0, i32 0, i32 0 + %4 = getelementptr inbounds [128 x float], [128 x float]* %3, i64 0, i64 123 + %5 = load float, float* %4, align 4 + %6 = fpext float %5 to double + %7 = getelementptr inbounds %struct.Arg, 
%struct.Arg* %0, i32 0, i32 1 + %8 = getelementptr inbounds [128 x i32], [128 x i32]* %7, i64 0, i64 53 + %9 = load i32, i32* %8, align 4 + %10 = getelementptr inbounds %printf_args.0, %printf_args.0* %2, i32 0, i32 0 + store double %6, double* %10, align 8 + %11 = getelementptr inbounds %printf_args.0, %printf_args.0* %2, i32 0, i32 1 + store i32 %9, i32* %11, align 4 + %12 = bitcast %printf_args.0* %2 to i8* + %13 = call i32 @vprintf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str3, i32 0, i32 0), i8* %12) + ret void +} +*/ +__global__ void test_struct_argument(Arg arg) { + printf("%f %d\n", arg.x[123], arg.y[53]); +} + + +/* llvm IR of function below: +; Function Attrs: convergent noinline nounwind optnone +define dso_local void @_Z24test_struct_argument_ptrP3Arg(%struct.Arg*) #0 { +%2 = alloca %struct.Arg*, align 8 +%3 = alloca %printf_args.1 +store %struct.Arg* %0, %struct.Arg** %2, align 8 +%4 = load %struct.Arg*, %struct.Arg** %2, align 8 +%5 = getelementptr inbounds %struct.Arg, %struct.Arg* %4, i32 0, i32 0 +%6 = getelementptr inbounds [128 x float], [128 x float]* %5, i64 0, i64 123 +%7 = load float, float* %6, align 4 +%8 = fpext float %7 to double +%9 = load %struct.Arg*, %struct.Arg** %2, align 8 +%10 = getelementptr inbounds %struct.Arg, %struct.Arg* %9, i32 0, i32 1 +%11 = getelementptr inbounds [128 x i32], [128 x i32]* %10, i64 0, i64 53 +%12 = load i32, i32* %11, align 4 +%13 = getelementptr inbounds %printf_args.1, %printf_args.1* %3, i32 0, i32 0 +store double %8, double* %13, align 8 +%14 = getelementptr inbounds %printf_args.1, %printf_args.1* %3, i32 0, i32 1 +store i32 %12, i32* %14, align 4 +%15 = bitcast %printf_args.1* %3 to i8* +%16 = call i32 @vprintf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str3, i32 0, i32 0), i8* %15) +ret void +} +*/ +__global__ void test_struct_argument_ptr(Arg *arg) { + printf("%f %d\n", arg->x[123], arg->y[53]); +} + +int main() { + init_random_numbers<<<1024, 1024>>>(1); + Arg arg; + test_struct_argument<<<1, 1>>>(arg); + return 0; +} +// clang-format on diff --git a/cuda_code/async.kernel.cu b/cuda_code/async.kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..6418ef0d48f3bc4ac34c041ab4a86b02c21004b1 --- /dev/null +++ b/cuda_code/async.kernel.cu @@ -0,0 +1,13 @@ +#include + +__global__ void myKernel(int64_t *dA) { + int id = blockIdx.x * blockDim.x + threadIdx.x; + dA[id] = dA[id] + 1; +} + +extern "C" { + void kernel(int64_t *ptr) { + myKernel<<<1,128>>>(ptr); + cudaDeviceSynchronize(); + } +} \ No newline at end of file diff --git a/cuda_code/async_reduce_9.cu b/cuda_code/async_reduce_9.cu new file mode 100644 index 0000000000000000000000000000000000000000..cb23f3d01bb89adadccd913c49ac8737a613b80f --- /dev/null +++ b/cuda_code/async_reduce_9.cu @@ -0,0 +1,78 @@ +#include "hip/hip_runtime.h" +#include +#include +#include +#include + +#if __cplusplus >= 201103L +#include +#endif + +// This example demonstrates two ways to achieve algorithm invocations that are asynchronous with +// the calling thread. +// +// The first method wraps a call to thrust::reduce inside a __global__ function. Since __global__ function +// launches are asynchronous with the launching thread, this achieves asynchrony. The result of the reduction +// is stored to a pointer to CUDA global memory. The calling thread waits for the result of the reduction to +// be ready by synchronizing with the CUDA stream on which the __global__ function is launched. 
+// +// The second method uses the C++11 library function, std::async, to create concurrency. The lambda function +// given to std::async returns the result of thrust::reduce to a std::future. The calling thread can use the +// std::future to wait for the result of the reduction. This method requires a compiler which supports +// C++11-capable language and library constructs. + +template +__global__ void reduce_kernel(Iterator first, Iterator last, T init, BinaryOperation binary_op, Pointer result) +{ + *result = thrust::reduce(thrust::cuda::par, first, last, init, binary_op); +} + +int main() +{ + size_t n = 1 << 20; + thrust::device_vector data(n, 1); + thrust::device_vector result(1, 0); + + // method 1: call thrust::reduce from an asynchronous CUDA kernel launch + + // create a CUDA stream + hipStream_t s; + hipStreamCreate(&s); + + // launch a CUDA kernel with only 1 thread on our stream + hipLaunchKernelGGL(HIP_KERNEL_NAME(reduce_kernel), dim3(1), dim3(1), 0, s, data.begin(), data.end(), 0, thrust::plus(), result.data()); + + // wait for the stream to finish + hipStreamSynchronize(s); + + // our result should be ready + assert(result[0] == n); + + hipStreamDestroy(s); + + // reset the result + result[0] = 0; + +#if __cplusplus >= 201103L + // method 2: use std::async to create asynchrony + + // copy all the algorithm parameters + auto begin = data.begin(); + auto end = data.end(); + unsigned int init = 0; + auto binary_op = thrust::plus(); + + // std::async captures the algorithm parameters by value + // use std::launch::async to ensure the creation of a new thread + std::future future_result = std::async(std::launch::async, [=] + { + return thrust::reduce(begin, end, init, binary_op); + }); + + // wait on the result and check that it is correct + assert(future_result.get() == n); +#endif + + return 0; +} + diff --git a/cuda_code/attackSponge.cu b/cuda_code/attackSponge.cu new file mode 100644 index 0000000000000000000000000000000000000000..b05ff47424dbe9f45de7cfc6576b7f30eb580b61 --- /dev/null +++ b/cuda_code/attackSponge.cu @@ -0,0 +1,1067 @@ +/** + * A simple implementation of Blake2b's and BlaMka's internal permutation + * in the form of a sponge. + * + * Author: The Lyra PHC team (http://www.lyra2.net/) -- 2015. + * + * This software is hereby placed in the public domain. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include + +#include "attackLyra2.h" +#include "attackSponge.h" + + + +//#if (nPARALLEL > 1) +__device__ uint64_t sizeSlicedRows; +//#endif //nParallel > 1 + +/** + * Execute G function, with all 12 rounds for Blake2 and BlaMka, and 24 round for half-round BlaMka. 
+ * + * @param v A 1024-bit (16 uint64_t) array to be processed by Blake2b's or BlaMka's G function + */ +__device__ inline static void spongeLyra(uint64_t *v) { + int i; + +#if (SPONGE == 0) + for (i = 0; i < 12; i++){ + ROUND_LYRA(i); + } +#elif (SPONGE == 1) + for (i = 0; i < 12; i++){ + ROUND_LYRA_BLAMKA(i); + } +#elif (SPONGE == 2) + uint64_t t0,t1,t2; + + for (i = 0; i < 24; i++){ + HALF_ROUND_LYRA_BLAMKA(i); + } +#endif +} + +/** + * Executes a reduced version of G function with only RHO round + * @param v A 1024-bit (16 uint64_t) array to be processed by Blake2b's or BlaMka's G function + */ +__device__ inline static void reducedSpongeLyra(uint64_t *v) { + int i; + +#if (SPONGE == 0) + for (i = 0; i < RHO; i++){ + ROUND_LYRA(i); + } +#elif (SPONGE == 1) + for (i = 0; i < RHO; i++){ + ROUND_LYRA_BLAMKA(i); + } +#elif (SPONGE == 2) + uint64_t t0,t1,t2; + + for (i = 0; i < RHO; i++){ + HALF_ROUND_LYRA_BLAMKA(i); + } +#endif +} + +/** + * Performs the initial organization of parameters + * And starts the setup phase. + * Initializes the Sponge's State + * Sets the passwords + salt + params and makes the padding + * Absorb this data to the state. + * From setup: + * Initializes M[0] + * Initializes M[1] + * Initializes M[2] + * + * @param memMatrixGPU Matrix start + * @param pkeysGPU The derived keys of each thread + * @param kLen Desired key length + * @param pwdGPU User password + * @param pwdlen Password length + * @param saltGPU Salt + * @param saltlen Salt length + * @param timeCost Parameter to determine the processing time (T) + * @param nRows Matrix total number of rows + * @param nCols Matrix total number of columns + * @param nBlocksInput The number of blocks to be absorbed + * @param totalPasswords Total number of passwords being tested + */ +__global__ void bootStrapGPU(uint64_t * memMatrixGPU, unsigned char * pkeysGPU, unsigned int kLen, unsigned char *pwdGPU, unsigned int pwdlen, unsigned char *saltGPU, unsigned int saltlen, unsigned int timeCost, unsigned int nRows, unsigned int nCols, uint64_t nBlocksInput, unsigned int totalPasswords) { + int i; + // Size of each chunk that each thread will work with + //updates global sizeSlicedRows; + sizeSlicedRows = (nRows / nPARALLEL) * ROW_LEN_INT64; + byte *ptrByte; + byte *ptrByteSource; + int threadNumber; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + uint64_t sliceStart = threadNumber*sizeSlicedRows; + uint64_t thStart = ((uint64_t) (threadNumber / nPARALLEL)); + + //============= Padding (password + salt + params) with 10*1 ===============// + //OBS.:The memory matrix will temporarily hold the password: not for saving memory, + //but this ensures that the password copied locally will be overwritten as soon as possible + ptrByte = (byte*) & memMatrixGPU[sliceStart]; + ptrByteSource = (byte*) & pwdGPU[thStart * pwdlen]; + + //First, we clean enough blocks for the password, salt, params and padding + for (i = 0; i < nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES; i++) { + ptrByte[i] = (byte) 0; + } + + //Prepends the password + memcpy(ptrByte, ptrByteSource, pwdlen); + ptrByte += pwdlen; + + //The indexed salt + ptrByteSource = (byte*) & saltGPU[thStart * saltlen]; + + //Concatenates the salt + memcpy(ptrByte, ptrByteSource, saltlen); + ptrByte += saltlen; + + //Concatenates the basil: every integer passed as parameter, in the order they are provided by the interface + memcpy(ptrByte, &kLen, sizeof (int)); + ptrByte += sizeof (int); + 
memcpy(ptrByte, &pwdlen, sizeof (int)); + ptrByte += sizeof (int); + memcpy(ptrByte, &saltlen, sizeof (int)); + ptrByte += sizeof (int); + memcpy(ptrByte, &timeCost, sizeof (int)); + ptrByte += sizeof (int); + memcpy(ptrByte, &nRows, sizeof (int)); + ptrByte += sizeof (int); + memcpy(ptrByte, &nCols, sizeof (int)); + ptrByte += sizeof (int); + +#if (nPARALLEL > 1) + //The difference from sequential version: + //Concatenates the total number of threads + int p = nPARALLEL; + memcpy(ptrByte, &p, sizeof (int)); + ptrByte += sizeof (int); + //Concatenates thread number + int thread = threadNumber % nPARALLEL; + memcpy(ptrByte, &thread, sizeof (int)); + + ptrByte += sizeof (int); +#endif //nParallel > 1 + + //Now comes the padding + *ptrByte = 0x80; //first byte of padding: right after the password + + //resets the pointer to the start of the memory matrix + ptrByte = (byte*) & memMatrixGPU[sliceStart]; + ptrByte += nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES - 1; //sets the pointer to the correct position: end of incomplete block + *ptrByte ^= 0x01; //last byte of padding: at the end of the last incomplete block + } +} + +/** + * Initializes the Sponge State. The first 512 bits are set to zeros and the remainder + * receive Blake2b's IV as per Blake2b's specification. Note: Even though sponges + * typically have their internal state initialized with zeros, Blake2b's G function + * has a fixed point: if the internal state and message are both filled with zeros. the + * resulting permutation will always be a block filled with zeros; this happens because + * Blake2b does not use the constants originally employed in Blake2 inside its G function, + * relying on the IV for avoiding possible fixed points. + * + * @param state The 1024-bit array to be initialized + * @param totalPasswords Total number of passwords being tested + */ +__global__ void initState(uint64_t state[/*16*/], unsigned int totalPasswords) { + int threadNumber; + uint64_t start; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + start = threadNumber * STATESIZE_INT64; + //First 512 bis are zeros + state[start + 0] = 0x0ULL; + state[start + 1] = 0x0ULL; + state[start + 2] = 0x0ULL; + state[start + 3] = 0x0ULL; + state[start + 4] = 0x0ULL; + state[start + 5] = 0x0ULL; + state[start + 6] = 0x0ULL; + state[start + 7] = 0x0ULL; + //Remainder BLOCK_LEN_BLAKE2_SAFE_BYTES are reserved to the IV + state[start + 8] = blake2b_IV[0]; + state[start + 9] = blake2b_IV[1]; + state[start + 10] = blake2b_IV[2]; + state[start + 11] = blake2b_IV[3]; + state[start + 12] = blake2b_IV[4]; + state[start + 13] = blake2b_IV[5]; + state[start + 14] = blake2b_IV[6]; + state[start + 15] = blake2b_IV[7]; + } +} + +/** + * Performs an absorb operation for a single block (BLOCK_LEN_BLAKE2_SAFE_INT64 + * words of type uint64_t), using G function as the internal permutation + * + * @param state The current state of the sponge + * @param in The block to be absorbed (BLOCK_LEN_BLAKE2_SAFE_INT64 words) + */ +__device__ inline void absorbBlockBlake2Safe(uint64_t *state, const uint64_t *in) { + //XORs the first BLOCK_LEN_BLAKE2_SAFE_INT64 words of "in" with the current state + state[0] ^= in[0]; + state[1] ^= in[1]; + state[2] ^= in[2]; + state[3] ^= in[3]; + state[4] ^= in[4]; + state[5] ^= in[5]; + state[6] ^= in[6]; + state[7] ^= in[7]; + + //Applies the transformation f to the sponge's state + spongeLyra(state); +} + +/** + * Performs a initial absorb operation + * Absorbs salt, 
password and the other parameters + * + * @param memMatrixGPU Matrix start + * @param stateThreadGPU The current state of the sponge + * @param stateIdxGPU Index of the threads, to be absorbed + * @param nBlocksInput The number of blocks to be absorbed + * @param totalPasswords Total number of passwords being tested + */ +__global__ void absorbInput(uint64_t * memMatrixGPU, uint64_t * stateThreadGPU, uint64_t *stateIdxGPU, uint64_t nBlocksInput, unsigned int totalPasswords) { + uint64_t *ptrWord; + uint64_t *threadState; + int threadNumber; + uint64_t kP; + uint64_t sliceStart; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + sliceStart = threadNumber*sizeSlicedRows; + threadState = (uint64_t *) & stateThreadGPU[threadNumber * STATESIZE_INT64]; + + //Absorbing salt, password and params: this is the only place in which the block length is hard-coded to 512 bits, for compatibility with Blake2b and BlaMka + ptrWord = (uint64_t *) & memMatrixGPU[sliceStart]; //threadSliceMatrix; + for (kP = 0; kP < nBlocksInput; kP++) { + absorbBlockBlake2Safe(threadState, ptrWord); //absorbs each block of pad(pwd || salt || params) + ptrWord += BLOCK_LEN_BLAKE2_SAFE_INT64; //BLOCK_LEN_BLAKE2_SAFE_INT64; //goes to next block of pad(pwd || salt || params) + } + } +} + +/** + * Performs a reduced squeeze operation for a single row, from the highest to + * the lowest index, using the reduced-round G function as the + * internal permutation + * + * @param state The current state of the sponge + * @param rowOut Row to receive the data squeezed + * @param totalPasswords Total number of passwords being tested + */ +__global__ void reducedSqueezeRow0(uint64_t* rowOut, uint64_t* state, unsigned int totalPasswords) { + int threadNumber; + uint64_t sliceStart; + uint64_t stateStart; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + stateStart = threadNumber * STATESIZE_INT64; + sliceStart = threadNumber * sizeSlicedRows; + + uint64_t* ptrWord = &rowOut[sliceStart + (N_COLS - 1) * BLOCK_LEN_INT64]; //In Lyra2: pointer to M[0][C-1] + int i, j; + //M[0][C-1-col] = H.reduced_squeeze() + for (i = 0; i < N_COLS; i++) { + for (j = 0; j < BLOCK_LEN_INT64; j++) { + ptrWord[j] = state[stateStart + j]; + } + + //Goes to next block (column) that will receive the squeezed data + ptrWord -= BLOCK_LEN_INT64; + + //Applies the reduced-round transformation f to the sponge's state + reducedSpongeLyra(&state[stateStart]); + } + } +} + +/** + * Performs a reduced duplex operation for a single row, from the highest to + * the lowest index of its columns, using the reduced-round G function + * as the internal permutation + * + * @param state The current state of the sponge + * @param rowIn Matrix start (base row) + * @param first Index used with rowIn to calculate wich row will feed the sponge + * @param second Index used with rowIn to calculate wich row will be feeded with sponge state + * @param totalPasswords Total number of passwords being tested + */ +__global__ void reducedDuplexRow1and2(uint64_t *rowIn, uint64_t *state, unsigned int totalPasswords, int first, int second) { + int i, j; + + int threadNumber; + uint64_t sliceStart; + uint64_t stateStart; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + stateStart = threadNumber * STATESIZE_INT64; + sliceStart = threadNumber * 
sizeSlicedRows; + + //Row to feed the sponge + uint64_t* ptrWordIn = (uint64_t*) & rowIn[sliceStart + first * ROW_LEN_INT64]; //In Lyra2: pointer to prev + //Row to receive the sponge's output + uint64_t* ptrWordOut = (uint64_t*) & rowIn[sliceStart + second * ROW_LEN_INT64 + (N_COLS - 1) * BLOCK_LEN_INT64]; //In Lyra2: pointer to row + + for (i = 0; i < N_COLS; i++) { + + //Absorbing "M[0][col]" + for (j = 0; j < BLOCK_LEN_INT64; j++) { + state[stateStart + j] ^= (ptrWordIn[j]); + } + + //Applies the reduced-round transformation f to the sponge's state + reducedSpongeLyra(&state[stateStart]); + + //M[1][C-1-col] = M[0][col] XOR rand + for (j = 0; j < BLOCK_LEN_INT64; j++) { + ptrWordOut[j] = ptrWordIn[j] ^ state[stateStart + j]; + } + + //Input: next column (i.e., next block in sequence) + ptrWordIn += BLOCK_LEN_INT64; + //Output: goes to previous column + ptrWordOut -= BLOCK_LEN_INT64; + } + } +} + +/** + * Performs a duplexing operation over + * "M[rowInOut0][col] [+] M[rowInOut1][col] [+] M[rowIn0][col_0] [+] M[rowIn1][col_1]", + * where [+] denotes wordwise addition, ignoring carries between words. The value of + * "col_0" is computed as "lsw(rot^2(rand)) mod N_COLS", and "col_1" as + * "lsw(rot^3(rand)) mod N_COLS", where lsw() means "the least significant word" + * where rot is a right rotation by 'omega' bits (e.g., 1 or more words) + * N_COLS is a system parameter, and "rand" corresponds + * to the sponge's output for each column absorbed. + * The same output is then employed to make + * "M[rowInOut0][col] = M[rowInOut0][col] XOR rand" and + * "M[rowInOut1][col] = M[rowInOut1][col] XOR rot(rand)". + * + * @param memMatrixGPU Matrix start + * @param state The current state of the sponge + * @param prev0 Another row used only as input + * @param prev1 Stores the previous value of row1 + * @param row0 Row used as input and to receive output after rotation + * @param row1 Pseudorandom indice to a row from another slice, used only as input + * @param totalPasswords Total number of passwords being tested + */ + __device__ void reducedDuplexRowWandering_P1(uint64_t *memMatrixGPU, uint64_t *state, uint64_t prev0, uint64_t row0, uint64_t row1, uint64_t prev1, unsigned int totalPasswords) { + int threadNumber; + uint64_t sliceStart; + uint64_t stateStart; + uint64_t randomColumn0; //In Lyra2: col0 + uint64_t randomColumn1; //In Lyra2: col1 + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + stateStart = threadNumber * STATESIZE_INT64; + sliceStart = threadNumber * sizeSlicedRows; + + + uint64_t* ptrWordInOut0 = (uint64_t *) & memMatrixGPU[sliceStart + (row0 * ROW_LEN_INT64)]; //In Lyra2: pointer to row0 + uint64_t* ptrWordInOut1 = (uint64_t *) & memMatrixGPU[sliceStart + (row1 * ROW_LEN_INT64)]; //In Lyra2: pointer to row0_p + uint64_t* ptrWordIn0; //In Lyra2: pointer to prev0 + uint64_t* ptrWordIn1; //In Lyra2: pointer to prev1 + + int i, j; + + for (i = 0; i < N_COLS; i++) { + //col0 = lsw(rot^2(rand)) mod N_COLS + //randomColumn0 = ((uint64_t)state[stateStart + 4] & (N_COLS-1))*BLOCK_LEN_INT64; /*(USE THIS IF N_COLS IS A POWER OF 2)*/ + randomColumn0 = ((uint64_t) state[stateStart + 4] % N_COLS) * BLOCK_LEN_INT64; /*(USE THIS FOR THE "GENERIC" CASE)*/ + ptrWordIn0 = (uint64_t *) & memMatrixGPU[sliceStart + (prev0 * ROW_LEN_INT64) + randomColumn0]; + + //col0 = LSW(rot^3(rand)) mod N_COLS + //randomColumn1 = ((uint64_t)state[stateStart + 6] & (N_COLS-1))*BLOCK_LEN_INT64; /*(USE THIS IF N_COLS IS A POWER 
OF 2)*/ + randomColumn1 = ((uint64_t) state[stateStart + 6] % N_COLS) * BLOCK_LEN_INT64; /*(USE THIS FOR THE "GENERIC" CASE)*/ + ptrWordIn1 = (uint64_t *) & memMatrixGPU[sliceStart + (prev1 * ROW_LEN_INT64) + randomColumn1]; + + //Absorbing "M[row0] [+] M[row1] [+] M[prev0] [+] M[prev1]" + for (j = 0; j < BLOCK_LEN_INT64; j++) { + state[stateStart + j] ^= (ptrWordInOut0[j] + ptrWordInOut1[j] + ptrWordIn0[j] + ptrWordIn1[j]); + } + + //Applies the reduced-round transformation f to the sponge's state + reducedSpongeLyra(&state[stateStart]); + + //M[rowInOut0][col] = M[rowInOut0][col] XOR rand + for (j = 0; j < BLOCK_LEN_INT64; j++) { + ptrWordInOut0[j] ^= state[stateStart + j]; + } + + //M[rowInOut1][col] = M[rowInOut1][col] XOR rot(rand) + //rot(): right rotation by 'omega' bits (e.g., 1 or more words) + //we rotate 2 words for compatibility with the SSE implementation + for (j = 0; j < BLOCK_LEN_INT64; j++){ + ptrWordInOut1[j] ^= state[stateStart + ((j+2) % BLOCK_LEN_INT64) ]; + } + + //Goes to next block + ptrWordInOut0 += BLOCK_LEN_INT64; + ptrWordInOut1 += BLOCK_LEN_INT64; + + } + } +} + +/** + * Performs a duplexing operation over + * "M[rowInOut0][col] [+] M[rowInP][col] [+] M[rowIn0][col_0]", + * where [+] denotes wordwise addition, ignoring carries between words. The value of + * "col_0" is computed as "LSW(rot^3(rand)) mod N_COLS",where LSW means "the less significant word" + * (assuming 64-bit words), rot is a 128-bit rotation to the right, + * N_COLS is a system parameter, and "rand" corresponds + * to the sponge's output for each column absorbed. + * The same output is then employed to make + * "M[rowInOut0][col] = M[rowInOut0][col] XOR rand". + * + * @param memMatrixGPU Matrix start + * @param state The current state of the sponge + * @param prev0 Another row used only as input + * @param row0 Row used as input and to receive output after rotation + * @param rowP Pseudorandom indice to a row from another slice, used only as input + * @param window Visitation window (equals a half slice) + * @param jP Index to another slice of matrix + * @param totalPasswords Total number of passwords being tested + */ + __device__ void reducedDuplexRowWanderingParallel(uint64_t *memMatrixGPU, uint64_t *state, uint64_t prev0, uint64_t row0, uint64_t rowP, uint64_t window, uint64_t jP, unsigned int totalPasswords) { + int threadNumber; + uint64_t sliceStart; + uint64_t stateStart; + uint64_t sliceStartjP; + uint64_t randomColumn0; //In Lyra2: col0 + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + stateStart = threadNumber * STATESIZE_INT64; + sliceStart = threadNumber * sizeSlicedRows; + + //jP slice must be inside the password´s thread pool + //The integer part of threadNumber/nPARALLEL multiplied by nPARALLEL is the Base Slice Start for the password thread pool + sliceStartjP = ((((uint64_t) (threadNumber / nPARALLEL)) * nPARALLEL) + jP) * sizeSlicedRows; + + //Row used as input and to receive output after rotation + uint64_t* ptrWordInOut0 = (uint64_t *) & memMatrixGPU[sliceStart + (row0 * ROW_LEN_INT64)]; //In Lyra2: pointer to row0 + //Row used only as input + uint64_t* ptrWordInP = (uint64_t *) & memMatrixGPU[sliceStartjP + (rowP * ROW_LEN_INT64)]; //In Lyra2: pointer to row0_p + //Another row used only as input + uint64_t* ptrWordIn0; //In Lyra2: pointer to prev0 + + int i, j; + + for (i = 0; i < N_COLS; i++) { + //col0 = LSW(rot^3(rand)) mod N_COLS + //randomColumn0 = ((uint64_t)state[stateStart + 6] & 
(N_COLS-1))*BLOCK_LEN_INT64; /*(USE THIS IF N_COLS IS A POWER OF 2)*/ + randomColumn0 = ((uint64_t) state[stateStart + 6] % N_COLS) * BLOCK_LEN_INT64; /*(USE THIS FOR THE "GENERIC" CASE)*/ + + ptrWordIn0 = (uint64_t *) & memMatrixGPU[sliceStart + (prev0 * ROW_LEN_INT64) + randomColumn0]; + + //Absorbing "M[row0] [+] M[prev0] [+] M[rowP]" + for (j = 0; j < BLOCK_LEN_INT64; j++) { + state[stateStart + j] ^= (ptrWordInOut0[j] + ptrWordIn0[j] + ptrWordInP[j]); + } + + //Applies the reduced-round transformation f to the sponge's state + reducedSpongeLyra(&state[stateStart]); + + //M[rowInOut0][col] = M[rowInOut0][col] XOR rand + for (j = 0; j < BLOCK_LEN_INT64; j++) { + ptrWordInOut0[j] ^= state[stateStart + j]; + } + + //Goes to next block + ptrWordInOut0 += BLOCK_LEN_INT64; + ptrWordInP += BLOCK_LEN_INT64; + + } + } +} + +/** + * Performs an absorb operation of single column from "in", the + * said column being pseudorandomly picked in the range [0, BLOCK_LEN_INT64[, + * using the full-round G function as the internal permutation + * + * @param state The current state of the sponge + * @param in Matrix start + * @param row0 The row whose column (BLOCK_LEN_INT64 words) should be absorbed + * @param randomColumn0 The random column to be absorbed + * @param totalPasswords Total number of passwords being tested + */ +__device__ void absorbRandomColumn(uint64_t *in, uint64_t *state, uint64_t row0, uint64_t randomColumn0, unsigned int totalPasswords) { + int i; + int threadNumber; + uint64_t sliceStart; + uint64_t stateStart; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + stateStart = threadNumber * STATESIZE_INT64; + sliceStart = threadNumber * sizeSlicedRows; + + uint64_t* ptrWordIn = (uint64_t*) & in[sliceStart + (row0 * ROW_LEN_INT64) + randomColumn0]; + + //absorbs the column picked + for (i = 0; i < BLOCK_LEN_INT64; i++) { + state[stateStart + i] ^= ptrWordIn[i]; + } + + //Applies the full-round transformation f to the sponge's state + spongeLyra(&state[stateStart]); + } +} + +/** + * Wandering phase: performs the visitation loop + * Visitation loop chooses pseudo random rows (row0 and row1) based in state content + * And performs a reduced-round duplexing operation over: + * "M[row0][col] [+] M[row1][col] [+] M[prev0][col0] [+] M[prev1][col1] + * Updating both M[row0] and M[row1] using the output to make: + * M[row0][col] = M[row0][col] XOR rand; + * M[row1][col] = M[row1][col] XOR rot(rand) + * Where rot() is a right rotation by 'omega' bits (e.g., 1 or more words) + * + * @param stateThreadGPU The current state of the sponge + * @param memMatrixGPU Array that will receive the data squeezed + * @param timeCost Parameter to determine the processing time (T) + * @param nRows Number of rows + * @param totalPasswords Total number of passwords being tested + * @param prev0 Stores the previous value of row0, the last row ever initialized + * @param prev1 Stores the previous value of row1 + */ +__device__ void wanderingPhaseGPU2_P1(uint64_t * memMatrixGPU, uint64_t * stateThreadGPU, unsigned int timeCost, uint64_t nRows, unsigned int totalPasswords, uint64_t prev0, uint64_t prev1) { + uint64_t wCont; //Time Loop iterator + uint64_t row0; //row0: sequentially written during Setup; randomly picked during Wandering + uint64_t row1; //rowP: revisited during Setup, and then read [and written]; randomly picked during Wandering + uint64_t threadNumber; + + uint64_t stateStart; + + + // Thread index: + threadNumber 
= (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + stateStart = threadNumber * STATESIZE_INT64; + + for (wCont = 0; wCont < timeCost * nRows; wCont++) { + //Selects a pseudorandom indices row0 and rowP (row0 = LSW(rand) mod wnd and rowP = LSW(rot(rand)) mod wnd) + //------------------------------------------------------------------------------------------ + //(USE THIS IF window IS A POWER OF 2) + //row0 = (((uint64_t)stateThreadGPU[stateStart + 0]) & nRows); + //row1 = (((uint64_t)stateThreadGPU[stateStart + 2]) & nRows); + //(USE THIS FOR THE "GENERIC" CASE) + row0 = (((uint64_t) stateThreadGPU[stateStart + 0]) % nRows); //row0 = lsw(rand) mod nRows + row1 = (((uint64_t) stateThreadGPU[stateStart + 2]) % nRows); //row1 = lsw(rot(rand)) mod nRows + //we rotate 2 words for compatibility with the SSE implementation + + //Performs a reduced-round duplexing operation over "M[row0][col] [+] M[row1][col] [+] M[prev0][col0] [+] M[prev1][col1], updating both M[row0] and M[row1] + //M[row0][col] = M[row0][col] XOR rand; + //M[row1][col] = M[row1][col] XOR rot(rand) rot(): right rotation by 'omega' bits (e.g., 1 or more words) + reducedDuplexRowWandering_P1(memMatrixGPU, stateThreadGPU, prev0, row0, row1, prev1, totalPasswords); + + //update prev: they now point to the last rows ever updated + prev0 = row0; + prev1 = row1; + + } + + //============================ Wrap-up Phase ===============================// + //Absorbs one last block of the memory matrix with the full-round sponge + absorbRandomColumn(memMatrixGPU, stateThreadGPU, row0, 0, totalPasswords); + } + +} + +/** + * Wandering phase: performs the visitation loop + * Visitation loop chooses pseudo random rows (row0 and rowP) based in state content + * And performs a reduced-round duplexing operation over: + * M[row0] [+] Mj[rowP] [+] M[prev0] + * Updating M[row0] using the output from reduced-round duplexing (rand): + * M[row0][col] = M[row0][col] XOR rand + * + * @param stateThreadGPU The current state of the sponge + * @param memMatrixGPU Array that will receive the data squeezed + * @param timeCost Parameter to determine the processing time (T) + * @param sizeSlice Number of rows for each thread + * @param totalPasswords Total number of passwords being tested + * @param sqrt To control step changes in visitation + * @param prev0 Stores the previous value of row0, the last row ever initialized + */ +__device__ void wanderingPhaseGPU2(uint64_t * memMatrixGPU, uint64_t * stateThreadGPU, unsigned int timeCost, uint64_t sizeSlice, unsigned int totalPasswords, uint64_t sqrt, uint64_t prev0) { + uint64_t wCont; //Time Loop iterator + uint64_t window; //Visitation window (used to define which rows can be revisited during Setup) + uint64_t row0; //row0: sequentially written during Setup; randomly picked during Wandering + + uint64_t rowP; //rowP: revisited during Setup, and then read [and written]; randomly picked during Wandering + uint64_t jP; //Index to another thread + uint64_t threadNumber; + + uint64_t stateStart; + + uint64_t off0; //complementary offsets to calculate row0 + uint64_t offP; //complementary offsets to calculate rowP + uint64_t offTemp; + + uint64_t sync = sqrt; + + uint64_t halfSlice = sizeSlice / 2; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + stateStart = threadNumber * STATESIZE_INT64; + + window = halfSlice; + off0 = 0; + offP = window; + + for (wCont = 0; wCont < timeCost * 
sizeSlice; wCont++) { + //Selects pseudorandom indices row0 and rowP (row0 = LSW(rand) mod wnd and rowP = LSW(rot(rand)) mod wnd) + //------------------------------------------------------------------------------------------ + //(USE THIS IF window IS A POWER OF 2) + //row0 = off0 + (((uint64_t)stateThreadGPU[stateStart + 0]) & (window-1)); + //row0P = offP + (((uint64_t)stateThreadGPU[stateStart + 2]) & (window-1)); + //(USE THIS FOR THE "GENERIC" CASE) + row0 = off0 + (((uint64_t) stateThreadGPU[stateStart + 0]) % window); + rowP = offP + (((uint64_t) stateThreadGPU[stateStart + 2]) % window); + + //Selects a pseudorandom index jP (LSW(rot^2 (rand)) mod p) + jP = ((uint64_t) stateThreadGPU[stateStart + 4]) % nPARALLEL; + + //Performs a reduced-round duplexing operation over M[row0] [+] Mj[rowP] [+] M[prev0], updating M[row0] + //M[row0][col] = M[row0][col] XOR rand; + reducedDuplexRowWanderingParallel(memMatrixGPU, stateThreadGPU, prev0, row0, rowP, window, jP, totalPasswords); + + //update prev0: it now points to the last row ever updated + prev0 = row0; + + if (wCont == sync) { + sync += sqrt; + offTemp = off0; + off0 = offP; + offP = offTemp; + __syncthreads(); + } + } + __syncthreads(); + + //============================ Wrap-up Phase ===============================// + //Absorbs one last block of the memory matrix with the full-round sponge + absorbRandomColumn(memMatrixGPU, stateThreadGPU, row0, 0, totalPasswords); + } + +} + +/** + * Performs a duplexing operation over + * "M[rowInOut][col] [+] M[rowIn0][col] [+] M[rowIn1][col]", where [+] denotes + * wordwise addition, ignoring carries between words, for all values of "col" + * in the [0,N_COLS[ interval. The output of this operation, "rand", is then + * employed to make + * "M[rowOut][(N_COLS-1)-col] = M[rowIn0][col] XOR rand" and + * "M[rowInOut][col] = M[rowInOut][col] XOR rot(rand)", + * where rot is a right rotation by 'omega' bits (e.g., 1 or more words) + * and N_COLS is a system parameter.
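+ * In this kernel, rowIn0 and rowIn1 correspond to M[prev0] and M[prev1], rowInOut to M[row1], and rowOut to M[row0], all inside the calling thread's slice.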
+ * + * @param state The current state of the sponge + * @param memMatrixGPU Matrix start + * @param prev0 Index to calculate rowIn0, the previous row0 + * @param prev1 Index to calculate rowIn1 + * @param row0 Index to calculate rowOut, the row being initialized + * @param row1 Index to calculate rowInOut, the row to be revisited and updated + * @param totalPasswords Total number of passwords being tested + */ +__device__ void reducedDuplexRowFilling_P1(uint64_t *state, uint64_t *memMatrixGPU, uint64_t prev0, uint64_t prev1, uint64_t row0, uint64_t row1, unsigned int totalPasswords) { + int i, j; + int threadNumber; + + uint64_t sliceStart; + uint64_t stateStart; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + stateStart = threadNumber * STATESIZE_INT64; + sliceStart = threadNumber * sizeSlicedRows; //sizeSlicedRows = (nRows/nPARALLEL) * ROW_LEN_INT64 + + //Row used only as input (rowIn0 or M[prev0]) + uint64_t* ptrWordIn0 = (uint64_t *) & memMatrixGPU[sliceStart + prev0 * ROW_LEN_INT64]; //In Lyra2: pointer to prev0, the last row ever initialized + + //Another row used only as input (rowIn1 or M[prev1]) + uint64_t* ptrWordIn1 = (uint64_t *) & memMatrixGPU[sliceStart + prev1 * ROW_LEN_INT64]; //In Lyra2: pointer to prev1, the last row ever revisited and updated + + //Row used as input and to receive output after rotation (rowInOut or M[row1]) + uint64_t* ptrWordInOut = (uint64_t *) & memMatrixGPU[sliceStart + row1 * ROW_LEN_INT64]; //In Lyra2: pointer to row1, to be revisited and updated + + //Row receiving the output (rowOut or M[row0]) + uint64_t* ptrWordOut = (uint64_t *) & memMatrixGPU[sliceStart + (row0 * ROW_LEN_INT64) + ((N_COLS - 1) * BLOCK_LEN_INT64)]; //In Lyra2: pointer to row0, to be initialized + + for (i = 0; i < N_COLS; i++) { + //Absorbing "M[row1] [+] M[prev0] [+] M[prev1]" + for (j = 0; j < BLOCK_LEN_INT64; j++) { + state[stateStart + j] ^= (ptrWordInOut[j] + ptrWordIn0[j] + ptrWordIn1[j]); + } + + //Applies the reduced-round transformation f to the sponge's state + reducedSpongeLyra(&state[stateStart]); + + //M[row0][col] = M[prev0][col] XOR rand + for (j = 0; j < BLOCK_LEN_INT64; j++) { + ptrWordOut[j] = ptrWordIn0[j] ^ state[stateStart + j]; + } + + //M[row1][col] = M[row1][col] XOR rot(rand) + //rot(): right rotation by 'omega' bits (e.g., 1 or more words) + //we rotate 2 words for compatibility with the SSE implementation + for (j = 0; j < BLOCK_LEN_INT64; j++) { + ptrWordInOut[j] ^= state[stateStart + ((j + 2) % BLOCK_LEN_INT64)]; // BLOCK_LEN_INT64 = 12 + } + + //Inputs: next column (i.e., next block in sequence) + ptrWordInOut += BLOCK_LEN_INT64; + ptrWordIn0 += BLOCK_LEN_INT64; + ptrWordIn1 += BLOCK_LEN_INT64; + //Output: goes to previous column + ptrWordOut -= BLOCK_LEN_INT64; + } + } +} + + + +/** + * Performs a duplexing operation over + * "M[rowInOut][col] [+] M[rowIn0][col] [+] M[rowIn1][col]", where [+] denotes + * wordwise addition, ignoring carries between words, , for all values of "col" + * in the [0,N_COLS[ interval. The output of this operation, "rand", is then + * employed to make + * "M[rowOut][(N_COLS-1)-col] = M[rowIn0][col] XOR rand" and + * "M[rowInOut][col] = M[rowInOut][col] XOR rot(rand)", + * where rot is a right rotation by 'omega' bits (e.g., 1 or more words) + * and N_COLS is a system parameter. 
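+ * In this kernel, rowIn0 corresponds to M[prev0] in the calling thread's slice, rowIn1 (Mj[prevP]) and rowInOut (Mj[rowP]) live in the slice of thread jP, and rowOut is M[row0] in the calling thread's slice.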
+ * + * @param state The current state of the sponge + * @param memMatrixGPU Matrix start + * @param prev0 Index to calculate rowIn0, the previous row0 + * @param prevP Index to calculate rowIn1 + * @param row0 Index to calculate rowOut, the row being initialized + * @param rowP Index to calculate rowInOut, the row to be revisited and updated + * @param jP Index to another slice of matrix (slice belonging to another thread) + * @param totalPasswords Total number of passwords being tested + */ +__device__ void reducedDuplexRowFilling(uint64_t *state, uint64_t *memMatrixGPU, uint64_t prev0, uint64_t prevP, uint64_t row0, uint64_t rowP, uint64_t jP, unsigned int totalPasswords) { + int i, j; + int threadNumber; + + uint64_t sliceStart; + uint64_t sliceStartjP; + uint64_t stateStart; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + stateStart = threadNumber * STATESIZE_INT64; + sliceStart = threadNumber * sizeSlicedRows; //sizeSlicedRows = (nRows/nPARALLEL) * ROW_LEN_INT64 + //jP slice must be inside the password´s thread pool + //The integer part of threadNumber/nPARALLEL multiplied by nPARALLEL is the Base Slice Start for the password thread pool + sliceStartjP = ((((uint64_t) (threadNumber / nPARALLEL)) * nPARALLEL) + jP) * sizeSlicedRows; + + //Row used only as input + uint64_t* ptrWordIn0 = (uint64_t *) & memMatrixGPU[sliceStart + prev0 * ROW_LEN_INT64]; //In Lyra2: pointer to prev0, the last row ever initialized + + //Another row used only as input + uint64_t* ptrWordIn1 = (uint64_t *) & memMatrixGPU[sliceStartjP + (prevP * ROW_LEN_INT64)]; //In Lyra2: pointer to prev1, the last row ever revisited and updated + + //Row used as input and to receive output after rotation + uint64_t* ptrWordInOut = (uint64_t *) & memMatrixGPU[sliceStartjP + (rowP * ROW_LEN_INT64)]; //In Lyra2: pointer to row1, to be revisited and updated + + //Row receiving the output + uint64_t* ptrWordOut = (uint64_t *) & memMatrixGPU[sliceStart + (row0 * ROW_LEN_INT64) + ((N_COLS - 1) * BLOCK_LEN_INT64)]; //In Lyra2: pointer to row0, to be initialized + + for (i = 0; i < N_COLS; i++) { + //Absorbing "M[rowP] [+] M[prev0] [+] M[prev1]" + for (j = 0; j < BLOCK_LEN_INT64; j++) { + state[stateStart + j] ^= (ptrWordInOut[j] + ptrWordIn0[j] + ptrWordIn1[j]); + } + + //Applies the reduced-round transformation f to the sponge's state + reducedSpongeLyra(&state[stateStart]); + + //M[row0][col] = M[prev0][col] XOR rand + for (j = 0; j < BLOCK_LEN_INT64; j++) { + ptrWordOut[j] = ptrWordIn0[j] ^ state[stateStart + j]; + } + + //M[rowP][col] = M[rowP][col] XOR rot(rand) + //rot(): right rotation by 'omega' bits (e.g., 1 or more words) + //we rotate 2 words for compatibility with the SSE implementation + for (j = 0; j < BLOCK_LEN_INT64; j++) { + ptrWordInOut[j] ^= state[stateStart + ((j + 2) % BLOCK_LEN_INT64)]; // BLOCK_LEN_INT64 = 12 + } + + //Inputs: next column (i.e., next block in sequence) + ptrWordInOut += BLOCK_LEN_INT64; + ptrWordIn0 += BLOCK_LEN_INT64; + ptrWordIn1 += BLOCK_LEN_INT64; + //Output: goes to previous column + ptrWordOut -= BLOCK_LEN_INT64; + } + } +} + +/** + * Performs matrix initialization and calls wandering phase + * During setup, performs a reduced-round duplexing operation over: + * "M[row1][col] [+] M[prev0][col] [+] M[prev1][col]", filling M[row0] and updating Mj[row1] + * M[row0][N_COLS-1-col] = M[prev0][col] XOR rand; + * M[row1][col] = Mj[row1][col] XOR rot(rand) + * Where rot() is a right rotation by 'omega' bits 
(e.g., 1 or more words) + * and N_COLS is a system parameter. + * + * @param memMatrixGPU Matrix start + * @param stateThreadGPU The current state of the sponge + * @param sizeSlice Number of rows for each thread (nRows when P=1) + * @param totalPasswords Total number of passwords being tested + * @param timeCost Parameter to determine the processing time (T) + */ +__global__ void setupPhaseWanderingGPU_P1(uint64_t * memMatrixGPU, uint64_t * stateThreadGPU, uint64_t sizeSlice, unsigned int totalPasswords, unsigned int timeCost) { + int64_t gap = 1; //Modifier to the step, assuming the values 1 or -1 + uint64_t step = 1; //Visitation step (used during Setup to dictate the sequence in which rows are read) + uint64_t window = 2; //Visitation window (used to define which rows can be revisited during Setup) + uint64_t sqrt = 2; //Square of window (i.e., square(window)), when a window is a square number; + //otherwise, sqrt = 2*square(window/2) + + uint64_t row0 = 3; //row0: sequentially written during Setup; randomly picked during Wandering + uint64_t prev0 = 2; //prev0: stores the previous value of row0 + uint64_t row1 = 1; //row1: revisited during Setup, and then read [and written]; randomly picked during Wandering + uint64_t prev1 = 0; //prev1: stores the previous value of row1 + + int threadNumber; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + //Filling Loop + for (row0 = 3; row0 < sizeSlice; row0++) { + //Performs a reduced-round duplexing operation over "M[row1][col] [+] M[prev0][col] [+] M[prev1][col]", filling M[row0] and updating M[row1] + //M[row0][N_COLS-1-col] = M[prev0][col] XOR rand; + //M[row1][col] = M[row1][col] XOR rot(rand) rot(): right rotation by 'omega' bits (e.g., 1 or more words) + reducedDuplexRowFilling_P1(stateThreadGPU, memMatrixGPU, prev0, prev1, row0, row1, totalPasswords); + + //Updates the "prev" indices: the rows more recently updated + prev0 = row0; + prev1 = row1; + + //updates the value of row1: deterministically picked, with a variable step + row1 = (row1 + step) & (window - 1); + + //Checks if all rows in the window where visited. + if (row1 == 0) { + window *= 2; //doubles the size of the re-visitation window + step = sqrt + gap; //changes the step + gap = -gap; //inverts the modifier to the step + if (gap == -1) { + sqrt *= 2; //Doubles sqrt every other iteration + } + } + } + + //Now goes to Wandering Phase and the Absorb from Wrap-up + //============================ Wandering Phase =============================// + //=====Iteratively overwrites pseudorandom cells of the memory matrix=======// + wanderingPhaseGPU2_P1(memMatrixGPU, stateThreadGPU, timeCost, sizeSlice, totalPasswords, prev0, prev1); + } +} + +/** + * Performs matrix initialization and calls wandering phase + * During setup, performs a reduced-round duplexing operation over: + * "Mj[rowP][col] [+] Mi[prev0][col] [+] Mj[prevP][col]", filling Mi[row0] and updating Mj[rowP] + * M[row0][N_COLS-1-col] = M[prev0][col] XOR rand; + * Mj[rowP][col] = Mj[rowP][col] XOR rot(rand) + * Where rot() is a right rotation by 'omega' bits (e.g., 1 or more words) + * and N_COLS is a system parameter. 
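+ * Each thread fills only the rows of its own slice Mi, while the revisited row Mj[rowP] belongs to the slice of thread jP, which changes at every synchronization point.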
+ * + * @param memMatrixGPU Matrix start + * @param stateThreadGPU The current state of the sponge + * @param sizeSlice Number of rows for each thread + * @param totalPasswords Total number of passwords being tested + * @param timeCost Parameter to determine the processing time (T) + */ +__global__ void setupPhaseWanderingGPU(uint64_t * memMatrixGPU, uint64_t * stateThreadGPU, uint64_t sizeSlice, unsigned int totalPasswords, unsigned int timeCost) { + uint64_t step = 1; //Visitation step (used during Setup and Wandering phases) + uint64_t window = 2; //Visitation window (used to define which rows can be revisited during Setup) + int64_t gap = 1; //Modifier to the step, assuming the values 1 or -1 + + uint64_t row0 = 3; //row0: sequentially written during Setup; randomly picked during Wandering + uint64_t prev0 = 2; //prev0: stores the previous value of row0 + uint64_t rowP = 1; //rowP: revisited during Setup, and then read [and written]; randomly picked during Wandering + uint64_t prevP = 0; //prevP: stores the previous value of rowP + uint64_t jP; //Index to another thread, starts with threadNumber + uint64_t sync = 4; //Synchronize counter + uint64_t sqrt = 2; //Square root of window (i.e., sqrt(window)), when window is a square number; + //otherwise, sqrt = 2*sqrt(window/2) + + int threadNumber; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + //jP must be in the thread pool of the same password + jP = threadNumber % nPARALLEL; + + //Filling Loop + for (row0 = 3; row0 < sizeSlice; row0++) { + //Performs a reduced-round duplexing operation over "Mj[rowP][col] [+] Mi[prev0][col] [+] Mj[prevP][col]", filling Mi[row0] and updating Mj[rowP] + //Mi[row0][N_COLS-1-col] = Mi[prev0][col] XOR rand; + //Mj[rowP][col] = Mj[rowP][col] XOR rot(rand) rot(): right rotation by 'omega' bits (e.g., 1 or more words) + reducedDuplexRowFilling(stateThreadGPU, memMatrixGPU, prev0, prevP, row0, rowP, jP, totalPasswords); + + //Updates the "prev" indices: the rows most recently updated + prev0 = row0; + prevP = rowP; + + //updates the value of rowP: deterministically picked, with a variable step + rowP = (rowP + step) & (window - 1); + + //Checks if all rows in the window were visited.
+ if (rowP == 0) { + window *= 2; //doubles the size of the re-visitation window + step = sqrt + gap; //changes the step + gap = -gap; //inverts the modifier to the step + if (gap == -1) { + sqrt *= 2; //Doubles sqrt every other iteration + } + } + if (row0 == sync) { + sync += sqrt / 2; //increment synchronize counter + jP = (jP + 1) % nPARALLEL; //change the visitation thread + __syncthreads(); + } + } + + //Waits all threads + __syncthreads(); + + //Now goes to Wandering Phase and the Absorb from Wrap-up + //============================ Wandering Phase =============================// + //=====Iteratively overwrites pseudorandom cells of the memory matrix=======// + wanderingPhaseGPU2(memMatrixGPU, stateThreadGPU, timeCost, sizeSlice, totalPasswords, sqrt, prev0); + + } +} + +/** + * Performs a squeeze operation, using G function as the + * internal permutation + * + * @param state The current state of the sponge + * @param out Array that will receive the data squeezed + * @param len The number of bytes to be squeezed into the "out" array + * @param totalPasswords Total number of passwords being tested + */ +__global__ void squeeze(uint64_t *state, byte *out, unsigned int len, unsigned int totalPasswords) { + int i; + int fullBlocks = len / BLOCK_LEN_BYTES; + + int threadNumber; + uint64_t stateStart; + + // Thread index: + threadNumber = (blockIdx.x * blockDim.x) + threadIdx.x; + + if (threadNumber < (nPARALLEL * totalPasswords)) { + + stateStart = threadNumber * STATESIZE_INT64; + + byte *ptr = (byte *) & out[threadNumber * len]; + + //Squeezes full blocks + for (i = 0; i < fullBlocks; i++) { + memcpy(ptr, &state[stateStart], BLOCK_LEN_BYTES); + spongeLyra(&state[stateStart]); + ptr += BLOCK_LEN_BYTES; + } + + //Squeezes remaining bytes + memcpy(ptr, &state[stateStart], (len % BLOCK_LEN_BYTES)); + } +} + +/** + Prints an array of unsigned chars + */ +void printArray(unsigned char *array, unsigned int size, char *name) { + int i; + printf("%s: ", name); + for (i = 0; i < size; i++) { + printf("%2x|", array[i]); + } + printf("\n"); +} diff --git a/cuda_code/attrs_3.cu b/cuda_code/attrs_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..4f7692f3fc14918d37a43f68ec8682c1ebfc6007 --- /dev/null +++ b/cuda_code/attrs_3.cu @@ -0,0 +1,439 @@ +/* +* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +#include +#include +#include +#include +#include +#include + +#include "NVStrings.h" + +#include "./NVStringsImpl.h" +#include "../custring_view.cuh" +#include "../unicode/is_flags.h" +#include "../util.h" + +// this will return the number of characters for each string +unsigned int NVStrings::len(int* lengths, bool todevice) +{ + unsigned int count = size(); + if( lengths==0 || count==0 ) + return count; + + auto execpol = rmm::exec_policy(0); + int* d_rtn = lengths; + if( !todevice ) + d_rtn = device_alloc(count,0); + + custring_view** d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( dstr ) + d_rtn[idx] = dstr->chars_count(); + else + d_rtn[idx] = -1; + }); + // + //printCudaError(cudaDeviceSynchronize(),"nvs-len"); + size_t size = thrust::reduce(execpol->on(0), d_rtn, d_rtn+count, (size_t)0, + []__device__(int lhs, int rhs) { + if( lhs < 0 ) + lhs = 0; + if( rhs < 0 ) + rhs = 0; + return lhs + rhs; + }); + + if( !todevice ) + { // copy result back to host + CUDA_TRY( cudaMemcpyAsync(lengths,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)size; +} + +// this will return the number of bytes for each string +size_t NVStrings::byte_count(int* lengths, bool todevice) +{ + unsigned int count = size(); + if( count==0 ) + return 0; + + auto execpol = rmm::exec_policy(0); + int* d_rtn = lengths; + if( !lengths ) + todevice = false; // makes sure we free correctly + if( !todevice ) + d_rtn = device_alloc(count,0); + + custring_view** d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + if( dstr ) + d_rtn[idx] = dstr->size(); + else + d_rtn[idx] = -1; + }); + // + //printCudaError(cudaDeviceSynchronize(),"nvs-bytes"); + size_t size = thrust::reduce(execpol->on(0), d_rtn, d_rtn+count, (size_t)0, + []__device__(int lhs, int rhs) { + if( lhs < 0 ) + lhs = 0; + if( rhs < 0 ) + rhs = 0; + return lhs + rhs; + }); + if( !todevice ) + { // copy result back to host + if( lengths ) + CUDA_TRY( cudaMemcpyAsync(lengths,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)size; +} + + +// +unsigned int NVStrings::isalnum( bool* results, bool todevice ) +{ + unsigned int count = size(); + if( count==0 || results==0 ) + return 0; + auto execpol = rmm::exec_policy(0); + unsigned char* d_flags = get_unicode_flags(); + bool* d_rtn = results; + if( !todevice ) + d_rtn = device_alloc(count,0); + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_flags, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + bool brc = false; + if( dstr ) + { + brc = !dstr->empty(); // alnum requires at least one character + for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ ) + { + unsigned int uni = u82u(*itr); + unsigned int flg = uni <= 0x00FFFF ? 
d_flags[uni] : 0; + brc = IS_ALPHANUM(flg); + } + } + d_rtn[idx] = brc; + }); + // count the number of trues + int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true ); + if( !todevice ) + { // copy result back to host + CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)matches; +} + +unsigned int NVStrings::isalpha( bool* results, bool todevice ) +{ + unsigned int count = size(); + if( count==0 || results==0 ) + return 0; + auto execpol = rmm::exec_policy(0); + unsigned char* d_flags = get_unicode_flags(); + bool* d_rtn = results; + if( !todevice ) + d_rtn = device_alloc(count,0); + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_flags, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + bool brc = false; + if( dstr ) + { + brc = !dstr->empty(); // alpha requires at least one character + for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ ) + { + unsigned int uni = u82u(*itr); + unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0; + brc = IS_ALPHA(flg); + } + } + d_rtn[idx] = brc; + }); + // count the number of trues + int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true); + if( !todevice ) + { // copy result back to host + CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)matches; +} + +// +unsigned int NVStrings::isdigit( bool* results, bool todevice ) +{ + unsigned int count = size(); + if( count==0 || results==0 ) + return 0; + auto execpol = rmm::exec_policy(0); + unsigned char* d_flags = get_unicode_flags(); + bool* d_rtn = results; + if( !todevice ) + d_rtn = device_alloc(count,0); + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_flags, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + bool brc = false; + if( dstr ) + { + brc = !dstr->empty(); // digit requires at least one character + for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ ) + { + unsigned int uni = u82u(*itr); + unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0; + brc = IS_DIGIT(flg); + } + } + d_rtn[idx] = brc; + }); + // count the number of trues + int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true); + if( !todevice ) + { // copy result back to host + CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)matches; +} + +unsigned int NVStrings::isspace( bool* results, bool todevice ) +{ + unsigned int count = size(); + if( count==0 || results==0 ) + return 0; + auto execpol = rmm::exec_policy(0); + unsigned char* d_flags = get_unicode_flags(); + bool* d_rtn = results; + if( !todevice ) + d_rtn = device_alloc(count,0); + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_flags, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + bool brc = false; + if( dstr ) + { + brc = !dstr->empty(); // space requires at least one character + for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ ) + { + unsigned int uni = u82u(*itr); + unsigned int flg = uni <= 0x00FFFF ? 
d_flags[uni] : 0; + brc = IS_SPACE(flg); + } + } + d_rtn[idx] = brc; + }); + // count the number of trues + int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true); + if( !todevice ) + { // copy result back to host + CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)matches; +} + +unsigned int NVStrings::isdecimal( bool* results, bool todevice ) +{ + unsigned int count = size(); + if( count==0 || results==0 ) + return 0; + auto execpol = rmm::exec_policy(0); + unsigned char* d_flags = get_unicode_flags(); + bool* d_rtn = results; + if( !todevice ) + d_rtn = device_alloc(count,0); + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_flags, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + bool brc = false; + if( dstr ) + { + brc = !dstr->empty(); // decimal requires at least one character + for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ ) + { + unsigned int uni = u82u(*itr); + unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0; + brc = IS_DECIMAL(flg); + } + } + d_rtn[idx] = brc; + }); + // count the number of trues + int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true); + if( !todevice ) + { // copy result back to host + CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)matches; +} + +unsigned int NVStrings::isnumeric( bool* results, bool todevice ) +{ + unsigned int count = size(); + if( count==0 || results==0 ) + return 0; + auto execpol = rmm::exec_policy(0); + unsigned char* d_flags = get_unicode_flags(); + bool* d_rtn = results; + if( !todevice ) + d_rtn = device_alloc(count,0); + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_flags, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + bool brc = false; + if( dstr ) + { + brc = !dstr->empty(); // numeric requires at least one character + for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ ) + { + unsigned int uni = u82u(*itr); + unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0; + brc = IS_NUMERIC(flg); + } + } + d_rtn[idx] = brc; + }); + // count the number of trues + int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true); + if( !todevice ) + { // copy result back to host + CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)matches; +} + +unsigned int NVStrings::islower( bool* results, bool todevice ) +{ + unsigned int count = size(); + if( count==0 || results==0 ) + return 0; + auto execpol = rmm::exec_policy(0); + unsigned char* d_flags = get_unicode_flags(); + bool* d_rtn = results; + if( !todevice ) + d_rtn = device_alloc(count,0); + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_flags, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + bool brc = false; + if( dstr ) + { + brc = !dstr->empty(); // requires at least one character + for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ ) + { + unsigned int uni = u82u(*itr); + unsigned int flg = (uni <= 0x00FFFF ? 
d_flags[uni] : 0); + brc = !IS_ALPHA(flg) || IS_LOWER(flg); + } + } + d_rtn[idx] = brc; + }); + // count the number of trues + int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true); + if( !todevice ) + { // copy result back to host + CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)matches; +} + +unsigned int NVStrings::isupper( bool* results, bool todevice ) +{ + unsigned int count = size(); + if( count==0 || results==0 ) + return 0; + auto execpol = rmm::exec_policy(0); + unsigned char* d_flags = get_unicode_flags(); + bool* d_rtn = results; + if( !todevice ) + d_rtn = device_alloc(count,0); + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_flags, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + bool brc = false; + if( dstr ) + { + brc = !dstr->empty(); // requires at least one character + for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ ) + { + unsigned int uni = u82u(*itr); + unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0); + brc = !IS_ALPHA(flg) || IS_UPPER(flg); + } + } + d_rtn[idx] = brc; + }); + // count the number of trues + int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true); + if( !todevice ) + { // copy result back to host + CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)matches; +} + +unsigned int NVStrings::is_empty( bool* results, bool todevice ) +{ + unsigned int count = size(); + if( count==0 || results==0 ) + return 0; + auto execpol = rmm::exec_policy(0); + bool* d_rtn = results; + if( !todevice ) + d_rtn = device_alloc(count,0); + custring_view_array d_strings = pImpl->getStringsPtr(); + thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator(0), count, + [d_strings, d_rtn] __device__(unsigned int idx){ + custring_view* dstr = d_strings[idx]; + bool brc = true; // null is empty + if( dstr ) + brc = dstr->empty(); // requires at least one character + d_rtn[idx] = brc; + }); + // count the number of trues + int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true); + if( !todevice ) + { // copy result back to host + CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost)) + RMM_FREE(d_rtn,0); + } + return (unsigned int)matches; +} + diff --git a/cuda_code/avl-fine.cu b/cuda_code/avl-fine.cu new file mode 100644 index 0000000000000000000000000000000000000000..5b5762926ffe9b05aca107496f404a45b8398b56 --- /dev/null +++ b/cuda_code/avl-fine.cu @@ -0,0 +1,225 @@ +#include +#include +#include + +typedef struct node { + int data; + struct node *parent; + struct node *left; + struct node *right; + int height; + int sema; +} node; + +__device__ node* global_root = NULL; + +__device__ volatile int MASTER_LOCK = 0; + +__device__ int lock(node* n) { + int status = atomicExch(&n->sema, 1); + return (!status && !MASTER_LOCK); +} + +__device__ void unlock(node* n) { + atomicExch(&n->sema, 0); +} + +__device__ node* new_node(int val, node* parent) { + node *tmp = (node *) malloc(sizeof(node)); + tmp->data = val; + tmp->parent = parent; + tmp->left = tmp->right = NULL; + tmp->height = 1; + tmp->sema = 0; + return tmp; +} + +__device__ node* find(node* root, int key) { + if (root == NULL) return NULL; + + if (root->data == key) return root; + else if (root->data > key) return 
find(root->left, key); + else return find(root->right, key); +} + +__device__ int height(node *root) +{ + if (root == NULL) + return 0; + return root->height; +} + +__device__ int get_balance(node *root) +{ + if (root == NULL) + return 0; + return height(root->left) - height(root->right); +} + + +__device__ node* left_rotate(node* root, node* parent) +{ + node* temp1 = root->right; + node* temp2 = temp1->left; + + temp1->left = root; + root->parent = temp1; + root->right = temp2; + + if (temp2) + temp2->parent = root; + + root->height = max(height(root->left), height(root->right))+1; + temp1->height = max(height(temp1->left), height(temp1->right))+1; + + temp1->parent = parent; + return temp1; +} + +__device__ node* right_rotate(node* root, node* parent) +{ + node* temp1 = root->left; + node* temp2 = temp1->right; + + temp1->right = root; + root->parent = temp1; + root->left = temp2; + + if(temp2) + temp2->parent = root; + + root->height = max(height(root->left), height(root->right))+1; + temp1->height = max(height(temp1->left), height(temp1->right))+1; + + temp1->parent = parent; + return temp1; +} + + +__device__ void rebalance(node* root, int key) { + root->height = max(height(root->left),height(root->right)) + 1; + int balance = get_balance(root); + + // Left Left Case + node* p = root->parent; + if (balance > 1 && key < root->left->data) { + if (p) { + if (root->data < p->data) + p->left = right_rotate(root, p); + else + p->right = right_rotate(root, p); + } + else + global_root = right_rotate(root, global_root); + } + + // Right Right Case + else if (balance < -1 && key > root->right->data) { + if (p) { + if (root->data < p->data) + p->left = left_rotate(root, p); + else + p->right = left_rotate(root, p); + } + else + global_root = left_rotate(root, global_root); + } + + // Left Right Case + else if (balance > 1 && key > root->left->data) { + root->left = left_rotate(root->left, root); + if (p) { + if (root->data < p->data) + p->left = right_rotate(root, p); + else + p->right = right_rotate(root, p); + } + else + global_root = right_rotate(root, global_root); + } + + // Right Left Case + else if (balance < -1 && key < root->right->data) + { + root->right = right_rotate(root->right, root); + if (p) { + if (root->data < p->data) + p->left = left_rotate(root, p); + else + p->right = left_rotate(root, p); + } + else + global_root = left_rotate(root, global_root); + } + else { + if (root->parent) + rebalance(root->parent, key); + } + return; +} + + +__device__ void insert(node* root, int key) { + + if (root == NULL) { // Empty Tree + root = new_node(key, NULL); + return; + } + + int acquired = lock(root); + + if (acquired) { + if (key < root->data) { + if (root->left == NULL) { // Can be inserted to the immediate left + root->left = new_node(key, root); + unlock(root); + while (!atomicExch((int*)&MASTER_LOCK, 1)); + rebalance(root, key); + atomicExch((int*)&MASTER_LOCK, 0); + return; + } else { // Release this Node and proceed + unlock(root); + insert(root->left, key); + } + } else { + if (root->right == NULL) { // Can be inserted to the immediate right + root->right = new_node(key, root); + unlock(root); + while (!atomicExch((int*)&MASTER_LOCK, 1)); + rebalance(root, key); + atomicExch((int*)&MASTER_LOCK, 0); + return; + } else { + unlock(root); // Release this Node and proceed + insert(root->right, key); + } + } + + + } else { + insert(root, key); + } +} + + +__device__ void pre_order(node* root) +{ + if(root != NULL) + { + printf("%d ", root->data); + pre_order(root->left); + 
pre_order(root->right); + } + return; +} + +__device__ void in_order(node* root) +{ + if(root != NULL) + { + in_order(root->left); + printf("%d ", root->data); + in_order(root->right); + } + return; +} diff --git a/cuda_code/awkward_RegularArray_getitem_next_at.cu b/cuda_code/awkward_RegularArray_getitem_next_at.cu new file mode 100644 index 0000000000000000000000000000000000000000..be74e012210604c393c1f6511ce9138a0f041f0c --- /dev/null +++ b/cuda_code/awkward_RegularArray_getitem_next_at.cu @@ -0,0 +1,28 @@ +// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE + +enum class REGULARARRAY_GETITEM_NEXT_AT_ERRORS { + IND_OUT_OF_RANGE // message: "index out of range" +}; + +template +__global__ void +awkward_RegularArray_getitem_next_at(T* tocarry, + int64_t at, + int64_t length, + int64_t size, + uint64_t invocation_index, + uint64_t* err_code) { + if (err_code[0] == NO_ERROR) { + int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; + int64_t regular_at = at; + if (regular_at < 0) { + regular_at += size; + } + if (!(0 <= regular_at && regular_at < size)) { + RAISE_ERROR(REGULARARRAY_GETITEM_NEXT_AT_ERRORS::IND_OUT_OF_RANGE) + } + if (thread_id < length) { + tocarry[thread_id] = (thread_id * size) + regular_at; + } + } +} diff --git a/cuda_code/axpy_12.cu b/cuda_code/axpy_12.cu new file mode 100644 index 0000000000000000000000000000000000000000..343f803b9f4e992ce31fe74294722ce52ec54521 --- /dev/null +++ b/cuda_code/axpy_12.cu @@ -0,0 +1,58 @@ +#include +#include + +__global__ void daxpy_(int n, double a, double *x, double *y) +{ + int tid = threadIdx.x + blockIdx.x * blockDim.x; + int stride = gridDim.x * blockDim.x; + + for (; tid < n; tid += stride) { + y[tid] += a * x[tid]; + } +} + +__global__ void saxpy_(int n, float a, float *x, float *y) +{ + int tid = threadIdx.x + blockIdx.x * blockDim.x; + int stride = gridDim.x * blockDim.x; + + for (; tid < n; tid += stride) { + y[tid] += a * x[tid]; + } +} + +int main(void) +{ + int i; + const int n = 10000; + double a = 3.4; + double x[n], y[n], y_ref[n]; + double *x_, *y_; + + // initialise data and calculate reference values on CPU + for (i=0; i < n; i++) { + x[i] = sin(i) * 2.3; + y[i] = cos(i) * 1.1; + y_ref[i] = a * x[i] + y[i]; + } + + // allocate + copy initial values + cudaMalloc((void **) &x_, sizeof(double) * n); + cudaMalloc((void **) &y_, sizeof(double) * n); + cudaMemcpy(x_, x, sizeof(double) * n, cudaMemcpyHostToDevice); + cudaMemcpy(y_, y, sizeof(double) * n, cudaMemcpyHostToDevice); + + // calculate axpy on GPU + daxpy_<<<32,256>>>(n, a, x_, y_); + + // copy result back to host and print with reference + printf(" initial: %f %f %f %f ... %f %f\n", + y[0], y[1], y[2], y[3], y[n-2], y[n-1]); + cudaMemcpy(y, y_, sizeof(double) * n, cudaMemcpyDeviceToHost); + printf("reference: %f %f %f %f ... %f %f\n", + y_ref[0], y_ref[1], y_ref[2], y_ref[3], y_ref[n-2], y_ref[n-1]); + printf(" result: %f %f %f %f ... %f %f\n", + y[0], y[1], y[2], y[3], y[n-2], y[n-1]); + + return 0; +} diff --git a/cuda_code/ball_query_gpu_12.cu b/cuda_code/ball_query_gpu_12.cu new file mode 100644 index 0000000000000000000000000000000000000000..cfc2eebb1b6528cda5b6a95849eefbba5237448b --- /dev/null +++ b/cuda_code/ball_query_gpu_12.cu @@ -0,0 +1,59 @@ +// Copyright (c) Facebook, Inc. and its affiliates. +// +// This source code is licensed under the MIT license found in the +// LICENSE file in the root directory of this source tree. 
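+// Ball query: for each of the m query points in new_xyz, gathers the indices of up to nsample points of xyz lying within radius, padding the output with the first index found.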
+ +#include +#include +#include + +#include "cuda_utils.h" + +// input: new_xyz(b, m, 3) xyz(b, n, 3) +// output: idx(b, m, nsample) +__global__ void query_ball_point_kernel(int b, int n, int m, float radius, + int nsample, + const float *__restrict__ new_xyz, + const float *__restrict__ xyz, + int *__restrict__ idx) { + int batch_index = blockIdx.x; + xyz += batch_index * n * 3; + new_xyz += batch_index * m * 3; + idx += m * nsample * batch_index; + + int index = threadIdx.x; + int stride = blockDim.x; + + float radius2 = radius * radius; + for (int j = index; j < m; j += stride) { + float new_x = new_xyz[j * 3 + 0]; + float new_y = new_xyz[j * 3 + 1]; + float new_z = new_xyz[j * 3 + 2]; + for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) { + float x = xyz[k * 3 + 0]; + float y = xyz[k * 3 + 1]; + float z = xyz[k * 3 + 2]; + float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + + (new_z - z) * (new_z - z); + if (d2 < radius2) { + if (cnt == 0) { + for (int l = 0; l < nsample; ++l) { + idx[j * nsample + l] = k; + } + } + idx[j * nsample + cnt] = k; + ++cnt; + } + } + } +} + +void query_ball_point_kernel_wrapper(int b, int n, int m, float radius, + int nsample, const float *new_xyz, + const float *xyz, int *idx) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + query_ball_point_kernel<<>>( + b, n, m, radius, nsample, new_xyz, xyz, idx); + + CUDA_CHECK_ERRORS(); +} diff --git a/cuda_code/ball_query_gpu_31.cu b/cuda_code/ball_query_gpu_31.cu new file mode 100644 index 0000000000000000000000000000000000000000..5eed6e63eabb7a4ac73f7299d597efbc393284f8 --- /dev/null +++ b/cuda_code/ball_query_gpu_31.cu @@ -0,0 +1,140 @@ +#include +#include +#include + +#include "ball_query_gpu.h" +#include "cuda_utils.h" + + +__global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample, + const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { + // new_xyz: (B, M, 3) + // xyz: (B, N, 3) + // output: + // idx: (B, M, nsample) + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || pt_idx >= m) return; + + new_xyz += bs_idx * m * 3 + pt_idx * 3; + xyz += bs_idx * n * 3; + idx += bs_idx * m * nsample + pt_idx * nsample; + + float radius2 = radius * radius; + float new_x = new_xyz[0]; + float new_y = new_xyz[1]; + float new_z = new_xyz[2]; + + int cnt = 0; + for (int k = 0; k < n; ++k) { + float x = xyz[k * 3 + 0]; + float y = xyz[k * 3 + 1]; + float z = xyz[k * 3 + 2]; + float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); + if (d2 < radius2){ + if (cnt == 0){ + for (int l = 0; l < nsample; ++l) { + idx[l] = k; + } + } + idx[cnt] = k; + ++cnt; + if (cnt >= nsample) break; + } + } +} + + +void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \ + const float *new_xyz, const float *xyz, int *idx, cudaStream_t stream) { + // new_xyz: (B, M, 3) + // xyz: (B, N, 3) + // output: + // idx: (B, M, nsample) + + cudaError_t err; + + dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + ball_query_kernel_fast<<>>(b, n, m, radius, nsample, new_xyz, xyz, idx); + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void ball_query_dilated_kernel_fast(int b, int n, int m, float 
max_radius, float min_radius, int nsample, + const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { + // new_xyz: (B, M, 3) + // xyz: (B, N, 3) + // output: + // idx: (B, M, nsample) + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || pt_idx >= m) return; + + new_xyz += bs_idx * m * 3 + pt_idx * 3; + xyz += bs_idx * n * 3; + idx += bs_idx * m * nsample + pt_idx * nsample; + + float radius1 = max_radius * max_radius; + float radius2 = min_radius * min_radius; + float new_x = new_xyz[0]; + float new_y = new_xyz[1]; + float new_z = new_xyz[2]; + + int cnt = 0; + for (int k = 0; k < n; ++k) { + float x = xyz[k * 3 + 0]; + float y = xyz[k * 3 + 1]; + float z = xyz[k * 3 + 2]; + float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); + if (d2 == 0){ + if (cnt == 0){ + for (int l = 0; l < nsample; ++l) { + idx[l] = k; + } + } + idx[cnt] = k; + ++cnt; + if (cnt >= nsample) break; + } + if (d2 >= radius2 && d2 < radius1){ + if (cnt == 0){ + for (int l = 0; l < nsample; ++l) { + idx[l] = k; + } + } + idx[cnt] = k; + ++cnt; + if (cnt >= nsample) break; + } + } +} + + + +void ball_query_dilated_kernel_launcher_fast(int b, int n, int m, float max_radius, float min_radius, int nsample, \ + const float *new_xyz, const float *xyz, int *idx, cudaStream_t stream) { + // new_xyz: (B, M, 3) + // xyz: (B, N, 3) + // output: + // idx: (B, M, nsample) + + cudaError_t err; + + dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + ball_query_dilated_kernel_fast<<>>(b, n, m, max_radius, min_radius, nsample, new_xyz, xyz, idx); + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} \ No newline at end of file diff --git a/cuda_code/bar.cu b/cuda_code/bar.cu new file mode 100644 index 0000000000000000000000000000000000000000..71ba21fa784ea5a7cbc8f7f027e83e872a7e063c --- /dev/null +++ b/cuda_code/bar.cu @@ -0,0 +1,38 @@ +#include +#include + +#include +#include + +#include "cudaCheck.h" + +__global__ +void bar() { + printf("bar\n"); +} + +#ifdef MAY_CRASH +__global__ +void crash() { + bar<<<1,1>>>(); + cudaDeviceSynchronize(); +} +#endif // MAY_CRASH + +void wrapper() { + bar<<<1,1>>>(); + cudaCheck(cudaGetLastError()); + cudaDeviceSynchronize(); + cudaCheck(cudaGetLastError()); +} + +struct Me { + + Me() { + std::cout << "Loaded" << std::endl; + wrapper(); + } + +}; + +Me me; diff --git a/cuda_code/barrier_diverge_test6.cu b/cuda_code/barrier_diverge_test6.cu new file mode 100644 index 0000000000000000000000000000000000000000..edd972aebdeed6dafcd3c724466ae419f3c87b65 --- /dev/null +++ b/cuda_code/barrier_diverge_test6.cu @@ -0,0 +1,81 @@ +#include +#include +#include +#include + +using namespace std; +#define imin(a,b) (a= N/2){ + //int t0=0; + //int t1=0; + //for ( int j = 0 ;j < tid; j++) { + // a[N + j] = a[tid]; + //} + b[tid - N/2] = a[tid - N/2] + a[tid]; + __syncthreads(); + __syncthreads(); + a[tid - N/2] = b[tid - N/2]; + //__syncthreads(); + //__syncthreads(); + + } else { + __syncthreads(); + //int t0 = a[tid]; + //int t1 = a[tid + N/2]; + a[tid] = b[tid]; + b[tid] *= 2; + __syncthreads(); + //__syncthreads(); + //__syncthreads(); + + } + //__syncthreads(); + } +// else +// c[gid] = 1; +// c[gid] = c[gid] + 1; +} +//__global__ void dot2(int *a, int *b, int*c) { 
+//int gid = threadIdx.x + blockIdx.x * blockDim.x; +//int i = a[gid]; +//int j = b[gid]; +//int k = i + j; +//} +//__global__ void mykernel(int *data){ +// atomicAdd(data, 10); +//} + +int main(){ + int *a; + int *dev_a; + + a = new int[N]; + for (int i = 0; i < N; i++) { + a[i] = i; + } + cudaMalloc((void **)&dev_a, sizeof(int) * N); + cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice); + dot<<>>(dev_a); + cudaMemcpy( a, dev_a, N*sizeof(int), cudaMemcpyDeviceToHost); + #define sum_sq(x) (x*(x+1)*(2*x+1)/6) + + for (int i = 0;i < N/2; i++) + printf("%d %d\n", a[i], a[i + N/2]); + cudaFree(dev_a); + + + delete[] a; +} diff --git a/cuda_code/base_17.cu b/cuda_code/base_17.cu new file mode 100644 index 0000000000000000000000000000000000000000..aaee470c229f98f085e1d8477374da483a55f6a5 --- /dev/null +++ b/cuda_code/base_17.cu @@ -0,0 +1,49 @@ +// Copyright 2016, National University of Defense Technology +// Authors: Xuhao Chen +#include +#include "common.h" +#include "cutil_subset.h" + +__global__ void vector_add(int n, ValueType *a, ValueType *b, ValueType *c) { + int tid = blockIdx.x * blockDim.x + threadIdx.x; + for (int id = tid; id < n; id += blockDim.x * gridDim.x) + c[id] = a[id] + b[id]; +} + +void run_gpu(int num, ValueType *h_a, ValueType *h_b, ValueType *h_c) { + ValueType *d_a, *d_b, *d_c; + CUDA_SAFE_CALL(cudaMalloc((void **)&d_a, num * sizeof(ValueType))); + CUDA_SAFE_CALL(cudaMalloc((void **)&d_b, num * sizeof(ValueType))); + CUDA_SAFE_CALL(cudaMalloc((void **)&d_c, num * sizeof(ValueType))); + CUDA_SAFE_CALL(cudaMemcpy(d_a, h_a, num * sizeof(ValueType), cudaMemcpyHostToDevice)); + CUDA_SAFE_CALL(cudaMemcpy(d_b, h_b, num * sizeof(ValueType), cudaMemcpyHostToDevice)); + int nthreads = BLOCK_SIZE; + //int nblocks = 1; + int nblocks = (num - 1) / nthreads + 1; + vector_add<<>>(num, d_a, d_b, d_c); + CUDA_SAFE_CALL(cudaMemcpy(h_c, d_c, num * sizeof(ValueType), cudaMemcpyDeviceToHost)); + cudaFree(d_a); + cudaFree(d_b); + cudaFree(d_c); +} + +int main(int argc, char *argv[]) { + int num = 1024 * 1024; + if(argc == 2) num = atoi(argv[1]); + + ValueType *h_a = (ValueType *)malloc(num * sizeof(ValueType)); + ValueType *h_b = (ValueType *)malloc(num * sizeof(ValueType)); + ValueType *h_c = (ValueType *)malloc(num * sizeof(ValueType)); + for(int i = 0; i < num; i ++) { + h_a[i] = 1; + h_b[i] = 1; + } + run_gpu(num, h_a, h_b, h_c); + for(int i = 0; i < 16; i ++) { + printf("c[%d]=%f\n", i, h_c[i]); + } + free(h_a); + free(h_b); + free(h_c); + return 0; +} diff --git a/cuda_code/batch_norm_39.cu b/cuda_code/batch_norm_39.cu new file mode 100644 index 0000000000000000000000000000000000000000..40d677a8f16d342a1d85444a3042c3f95ad9d624 --- /dev/null +++ b/cuda_code/batch_norm_39.cu @@ -0,0 +1,758 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2017 by Contributors + * \file batch_norm.cu + * \brief CUDA Batch Normalization code + * \author Chris Olivier, Bing Xu, Da Zheng + * Adapted from Torch +*/ +#include +#include +#include "batch_norm-inl.h" + +#define WRITE_DATA_FLAG 1 +#define WRITE_GAMMA_FLAG 2 +#define WRITE_BETA_FLAG 4 +#define FIX_GAMMA_FLAG 8 +#define IS_TRAINING_FLAG 16 +#define USE_GLOBAL_STATS_FLAG 32 +#define ADDTO_DATA_FLAG (1 << 6) +#define ADDTO_GAMMA_FLAG (1 << 7) +#define ADDTO_BETA_FLAG (1 << 8) + +#if MXNET_USE_CUDNN == 1 +#include "./cudnn/cudnn_batch_norm-inl.h" +#endif + +#include "../../common/cuda_utils.h" +#include "../../../include/mxnet/tensor_blob.h" + +using namespace mxnet; + +/*! \brief inverse standard deviation <-> variance */ +#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$))) +#define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$)) + +namespace mxnet { +namespace op { +namespace batchnorm { +namespace cuda { + +static const unsigned WARP_SIZE = 32; + +// The maximum number of threads in a block +static const unsigned MAX_BLOCK_SIZE = 512U; + +template +struct ScalarConvert { + static __host__ __device__ __forceinline__ Out to(const In v) { return (Out) v; } +}; + +// Number of threads in a block given an input size up to MAX_BLOCK_SIZE +static unsigned getNumThreads(int nElem, const bool smaller) { + unsigned threadSizes[5] = {32, 64, 128, 256, MAX_BLOCK_SIZE}; + const int maxi = smaller ? 4 : 5; + for (int i = 0; i != maxi; ++i) { + if (static_cast(nElem) <= threadSizes[i]) { + return threadSizes[i]; + } + } + return smaller ? (MAX_BLOCK_SIZE >> 1) : MAX_BLOCK_SIZE; +} + +// Returns the index of the most significant 1 bit in `val`. +__device__ __forceinline__ int getMSB(int val) { + return 31 - __clz(val); +} + +template +struct Float2 { + AccReal v1, v2; + __device__ Float2() {} + __device__ Float2(DType v1, DType v2) + : v1(ScalarConvert::to(v1)) + , v2(ScalarConvert::to(v2)) {} + __device__ Float2(DType v) + : v1(ScalarConvert::to(v)) + , v2(ScalarConvert::to(v)) {} + __device__ Float2(int v) + : v1(ScalarConvert::to(v)) + , v2(ScalarConvert::to(v)) {} + __device__ Float2 &operator+=(const Float2 &a) { + v1 += a.v1; + v2 += a.v2; + return *this; + } +}; + +template +struct SumOp { + __device__ SumOp(const DeviceTensor t) : tensor(t) {} + __device__ __forceinline__ AccReal operator()(int batch, int plane, int n) { + return ScalarConvert::to(tensor.get_ref(batch, plane, n)); + } + const DeviceTensor tensor; +}; + +template +struct VarOp { + __device__ VarOp(AccReal m, const DeviceTensor t) + : mean(m) + , tensor(t) { + } + __device__ __forceinline__ AccReal operator()(int batch, int plane, int n) { + DType val = tensor.get_ref(batch, plane, n); + return (val - mean) * (val - mean); + } + const AccReal mean; + const DeviceTensor tensor; +}; + +template +struct GradOp { + __device__ GradOp(AccReal m, const DeviceTensor i, const DeviceTensor g) + : mean(m), input(i), gradOutput(g) {} + __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { + const DType g = gradOutput.get_ref(batch, plane, n); + const DType c = ScalarConvert::to(input.get_ref(batch, plane, n) - mean); + return Float2(g, g * c); + } + const AccReal mean; + const DeviceTensor input; + const DeviceTensor gradOutput; +}; + +#if CUDA_VERSION >= 9000 +#define FULLMASK 0xFFFFFFFF +#define __shfl_xor(...) 
__shfl_xor_sync(FULLMASK, __VA_ARGS__) +#endif + +// Sum across all threads within a warp +template +static __device__ __forceinline__ T warpSum(T val) { +#if __CUDA_ARCH__ >= 300 +for (int i = 0; i < getMSB(WARP_SIZE); ++i) { + val += __shfl_xor(val, 1 << i, WARP_SIZE); + } +#else +__shared__ T values[MAX_BLOCK_SIZE]; +values[threadIdx.x] = val; +__threadfence_block(); +const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; +for (int i = 1; i < WARP_SIZE; i++) { +val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; +} +#endif +return val; +} + +template +static __device__ __forceinline__ Float2 warpSum(Float2 value) { + value.v1 = warpSum(value.v1); + value.v2 = warpSum(value.v2); + return value; +} + +// Sum across (batch, x/y/z) applying Op() pointwise +template +static __device__ T reduce(Op op, DeviceTensor tensor, int plane) { + T sum = (T) 0; + for (int batch = 0; batch < tensor.OuterSize(); ++batch) { + for (int x = threadIdx.x; x < tensor.InnerSize(); x += blockDim.x) { + sum += op(batch, plane, x); + } + } + + // sum over NumThreads within a warp + sum = warpSum(sum); + + // 'transpose', and reduce within warp again + __shared__ T shared[32]; + __syncthreads(); + if (threadIdx.x % WARP_SIZE == 0) { + shared[threadIdx.x / WARP_SIZE] = sum; + } + if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { + // zero out the other entries in shared + shared[threadIdx.x] = (T) 0; + } + __syncthreads(); + if (threadIdx.x / WARP_SIZE == 0) { + sum = warpSum(shared[threadIdx.x]); + if (threadIdx.x == 0) { + shared[0] = sum; + } + } + __syncthreads(); + + // Everyone picks it up, should be broadcast into the whole gradInput + return shared[0]; +} + +template +__global__ void BatchNormalizationUpdateOutputInferenceKernel( + DeviceTensor input, + DeviceTensor output, + DeviceTensor1 runningMean, + DeviceTensor1 runningVar, + DeviceTensor1 saveMean, + DeviceTensor1 saveInvStd, + DeviceTensor1 weight, + DeviceTensor1 bias, + const DType epsilon, + const uint32_t flags) { + int plane = blockIdx.x; + + AccReal invstd = VARIANCE_TO_INVSTD(runningVar[plane], epsilon); + AccReal mean = ScalarConvert::to(runningMean[plane]); + AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0) + ? ScalarConvert::to(weight[plane]) + : ScalarConvert::to(1); + AccReal beta = bias.numElements() > 0 ? 
ScalarConvert::to(bias[plane]) + : ScalarConvert::to(0); + if (threadIdx.x == 0) { + saveMean[plane] = runningMean[plane]; + saveInvStd[plane] = VARIANCE_TO_INVSTD(runningVar[plane], epsilon); + if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0 + && weight.numElements() > 0) { + weight[plane] = AccReal(1); + } + } + // Write normalized and update the output + for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) { + for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) { + const DType inp = input.get_ref(batch, plane, x); + output.get_ref(batch, plane, x) = + ScalarConvert::to(gamma * (inp - mean) * invstd + beta); + } + } +} + +template +__global__ void BatchNormalizationUpdateOutputKernel( + DeviceTensor input, + DeviceTensor output, + DeviceTensor1 weight, + DeviceTensor1 bias, + const AccReal epsilon, + const AccReal momentum, + DeviceTensor1 runningMean, + DeviceTensor1 runningVar, + DeviceTensor1 saveMean, + DeviceTensor1 saveInvStd, + const uint32_t flags) { + const int plane = blockIdx.x; + const int N = input.OuterSize() * input.InnerSize(); + + const AccReal norm = AccReal(1) / N; + + // Compute the mean and variance across (batch, x/y/z) + const AccReal mean = reduce( + SumOp(input), input, plane) * norm; + __syncthreads(); + const AccReal varN = reduce(VarOp(mean, input), + input, plane); + AccReal invStd = 0; + if (varN != AccReal(0) || epsilon != AccReal(0)) { + invStd = AccReal(1.0) / sqrt(varN * norm + epsilon); + } + + // Save the mean, variance, and moving averages + if (threadIdx.x == 0) { + // For one item (0th) per plane (channel), write the per-channel data (ie mean, variance, etc) + // Momentum based writeback + saveMean[plane] = ScalarConvert::to(mean); + saveInvStd[plane] = invStd; + if ((flags & WRITE_GAMMA_FLAG) != 0 && (flags & FIX_GAMMA_FLAG) != 0 + && weight.numElements() > 0) { + weight[plane] = AccReal(1); + } + } + + // Write normalized and update the output + const AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0) + ? ScalarConvert::to(weight[plane]) + : ScalarConvert::to(1); + const AccReal beta = bias.numElements() > 0 ? 
ScalarConvert::to(bias[plane]) + : ScalarConvert::to(0); + for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) { + for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) { + const DType inp = input.get_ref(batch, plane, x); + output.get_ref(batch, plane, x) = + ScalarConvert::to(gamma * (inp - mean) * invStd + beta); + } + } +} + +template +struct CUDATensors { + DeviceTensor1 gradWeight; + DeviceTensor1 gradBias; + DeviceTensor1 weight; + DeviceTensor1 runningMean; + DeviceTensor1 runningVar; + DeviceTensor1 saveMean; + DeviceTensor1 saveInvStd; +}; + +template +static __global__ void BatchNormalizationBackwardKernel( + const DeviceTensor input, + const DeviceTensor gradOutput, + DeviceTensor gradInput, + CUDATensors tensors, + const uint32_t flags, + const AccReal momentum, + const double eps) { + int plane = blockIdx.x; + int N = gradOutput.OuterSize() * gradOutput.InnerSize(); + + const bool is_train_and_not_global_stats = + (flags & IS_TRAINING_FLAG) != 0 && (flags & USE_GLOBAL_STATS_FLAG) == 0; + + AccReal mean, invstd; + if (is_train_and_not_global_stats) { + mean = ScalarConvert::to(tensors.saveMean[plane]); + invstd = tensors.saveInvStd[plane]; + } else { + mean = ScalarConvert::to(tensors.runningMean[plane]); + invstd = VARIANCE_TO_INVSTD(tensors.runningVar[plane], eps); + } + + const AccReal weightVal = ((flags & FIX_GAMMA_FLAG) == 0 && tensors.weight.numElements() > 0) ? + ScalarConvert::to(tensors.weight[plane]) : AccReal(1); + const AccReal norm = AccReal(1) / N; + + // Compute two values across (batch, x/y/z) in one pass: + // 1. Sum(gradOutput) + // 2. DotProduct(input - mean, gradOutput) + GradOp g(mean, input, gradOutput); + Float2< DType, AccReal > res = reduce < Float2 < DType, AccReal >, + GradOp< DType, AccReal, DeviceTensor >, DeviceTensor > (g, gradOutput, plane); + const AccReal gradOutputSum = res.v1; + const AccReal dotP = res.v2; + + const AccReal gradMean = gradOutputSum * norm; + const AccReal projScale = dotP * norm * invstd * invstd; + const AccReal gradScale = invstd * weightVal; + + if (threadIdx.x == 0 && is_train_and_not_global_stats) { + const AccReal localVariance = INVSTD_TO_VARIANCE(tensors.saveInvStd[plane], eps); + const AccReal localMean = tensors.saveMean[plane]; + + // update running averages + tensors.runningMean[plane] = tensors.runningMean[plane] + * momentum + localMean * (AccReal(1) - momentum); + tensors.runningVar[plane] = tensors.runningVar[plane] + * momentum + localVariance * (AccReal(1) - momentum); + } + + if (gradInput.Size() > 0 && (flags & (WRITE_DATA_FLAG | ADDTO_DATA_FLAG)) != 0) { + const bool grad_write = flags & WRITE_DATA_FLAG; + if (grad_write) { + for (int batch = 0, nbatch = gradOutput.OuterSize(); batch < nbatch; ++batch) { + for (int x = threadIdx.x, nx = gradOutput.InnerSize(); x < nx; x += blockDim.x) { + const DType gradOut = gradOutput.get_ref(batch, plane, x); + if (is_train_and_not_global_stats) { + const DType inp = input.get_ref(batch, plane, x); + const AccReal proj = (inp - mean) * projScale; + gradInput.get_ref(batch, plane, x) = + ScalarConvert::to((gradOut - proj - gradMean) * gradScale); + } else { + gradInput.get_ref(batch, plane, x) = ScalarConvert::to( + gradOut * gradScale); + } + } + } + } else { + // grad addto + for (int batch = 0, nbatch = gradOutput.OuterSize(); batch < nbatch; ++batch) { + for (int x = threadIdx.x, nx = gradOutput.InnerSize(); x < nx; x += blockDim.x) { + const DType gradOut = gradOutput.get_ref(batch, plane, x); + if 
(is_train_and_not_global_stats) { + const DType inp = input.get_ref(batch, plane, x); + const AccReal proj = (inp - mean) * projScale; + gradInput.get_ref(batch, plane, x) += + ScalarConvert::to((gradOut - proj - gradMean) * gradScale); + } else { + gradInput.get_ref(batch, plane, x) += ScalarConvert::to( + gradOut * gradScale); + } + } + } + } + } + + if (tensors.gradWeight.numElements() > 0 && threadIdx.x == 0 && + (flags & (WRITE_GAMMA_FLAG | ADDTO_GAMMA_FLAG)) != 0) { + if ((flags & FIX_GAMMA_FLAG) == 0) { + if (flags & WRITE_GAMMA_FLAG) + tensors.gradWeight[plane] = ScalarConvert::to(dotP * invstd); + else + tensors.gradWeight[plane] += ScalarConvert::to(dotP * invstd); + } else { + tensors.gradWeight[plane] = DType(0); + } + } + + if (tensors.gradBias.numElements() > 0 && threadIdx.x == 0 && + (flags & (WRITE_BETA_FLAG | ADDTO_BETA_FLAG)) != 0) { + if (flags & WRITE_BETA_FLAG) + tensors.gradBias[plane] = ScalarConvert::to(gradOutputSum); + else + tensors.gradBias[plane] += ScalarConvert::to(gradOutputSum); + } +} + +template +struct DeviceTensor { + public: + inline DeviceTensor() {} + inline DeviceTensor(DType *p, const int *size) + : dptr_(p) { + for (int i = 0; i < Dim; ++i) { + size_[i] = size ? size[i] : 0; + } + } + + MSHADOW_XINLINE unsigned getSize(const int i) const { + return size_[i]; + } + + MSHADOW_XINLINE int numElements() const { + int n = 1; + for (int i = 0; i < Dim; ++i) { + n *= size_[i]; + } + return n; + } + + MSHADOW_XINLINE DType &operator()(const size_t batch, + const size_t plane, + const size_t x) const { + int offset = 0; + + offset *= size_[0]; + offset += batch; + + offset *= size_[1]; + offset += plane; + + offset *= size_[2]; + offset += x; + + return *(const_cast(dptr_ + offset)); + } + + MSHADOW_XINLINE DType &operator[](const size_t x) const { + return *(dptr_ + x); + } + + MSHADOW_XINLINE size_t InnerSize() const { + size_t sz = 1; + for (size_t i = 2; i < Dim; ++i) { + sz *= size_[i]; + } + return sz; + } + + MSHADOW_XINLINE size_t ChannelCount() const { + return size_[1]; + } + + DType *dptr_; + int size_[Dim]; +}; + +template +static DeviceTensor devicetensor(const TBlob &blob) { + CHECK_EQ(blob.type_flag_, mshadow::DataType::kFlag); + DType *data = blob.dptr(); + const int inDim = blob.shape_.ndim(); + if (inDim == Dim) { + DeviceTensor tensor(data, nullptr); + for (int i = 0; i < Dim; ++i) { + tensor.size_[i] = blob.size(i); + } + return tensor; + } + + // View in which the last dimensions are collapsed or expanded as needed + int size[Dim]; + for (int i = 0; i < Dim || i < inDim; ++i) { + if (i < Dim && i < inDim) { + size[i] = blob.size(i); + } else if (i < Dim) { + size[i] = 1; + } else { + size[Dim - 1] *= blob.size(i); + } + } + return DeviceTensor(data, &size[0]); +} + + +#define DeviceTensor1 DeviceTensor + +using namespace mxnet::op; + +template +static void BatchNormalizationUpdateOutput(mshadow::Stream *s, + const OpContext &ctx, + const BatchNormParam& param, + const std::vector &in_data, + const std::vector &out_data, + const std::vector &aux_states, + const uint32_t flags, + double momentum, + double eps) { + batchnorm::BNTensor3 input = batchnorm::BNTensor3( + in_data[batchnorm::kData], param.axis); + batchnorm::BNTensor3 output = batchnorm::BNTensor3( + out_data[batchnorm::kOut], param.axis); + DeviceTensor1 weight = devicetensor(in_data[batchnorm::kGamma]); + DeviceTensor1 bias = devicetensor(in_data[batchnorm::kBeta]); + DeviceTensor1 runningMean = devicetensor(aux_states[batchnorm::kMovingMean]); + DeviceTensor1 runningVar = 
devicetensor(aux_states[batchnorm::kMovingVar]); + DeviceTensor1 saveMean = devicetensor(out_data[batchnorm::kMean]); + DeviceTensor1 saveInvStd = devicetensor(out_data[batchnorm::kVar]); + + DCHECK_GT(weight.numElements(), 0); + + if ((flags & IS_TRAINING_FLAG) == 0 || (flags & USE_GLOBAL_STATS_FLAG) != 0) { + dim3 blocks(input.ChannelCount()); + dim3 threads(batchnorm::cuda::getNumThreads(input.InnerSize(), false)); + BatchNormalizationUpdateOutputInferenceKernel> + <<< blocks, threads, 0, mshadow::Stream::GetStream(s) >>> ( + input, output, runningMean, runningVar, saveMean, + saveInvStd, weight, bias, eps, flags); + } else { + dim3 blocks(input.ChannelCount()); + dim3 threads(batchnorm::cuda::getNumThreads(input.InnerSize(), false)); + BatchNormalizationUpdateOutputKernel> + << < blocks, threads, 0, mshadow::Stream::GetStream(s) >> > ( + input, output, weight, bias, eps, momentum, runningMean, runningVar, + saveMean, saveInvStd, flags); + } + MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormalizationUpdateOutput); +} + +template +static void BatchNormalizationBackward(mshadow::Stream *s, + const OpContext &ctx, + const BatchNormParam& param, + const std::vector &out_grad, + const std::vector &in_data, + const std::vector &out_data, + const std::vector &in_grad, + const std::vector &aux_states, + const uint32_t flags, + double momentum, + double eps) { + batchnorm::BNTensor3 input = batchnorm::BNTensor3( + in_data[batchnorm::kData], param.axis); + batchnorm::BNTensor3gradOutput = batchnorm::BNTensor3( + out_grad[batchnorm::kOut], param.axis); + batchnorm::BNTensor3gradInput = batchnorm::BNTensor3( + in_grad[batchnorm::kData], param.axis); + + CHECK_EQ(gradOutput.Size(), gradInput.Size()); + + CUDATensors tensors; + + tensors.gradWeight = devicetensor(in_grad[batchnorm::kGamma]); + tensors.gradBias = devicetensor(in_grad[batchnorm::kBeta]); + tensors.weight = devicetensor(in_data[batchnorm::kGamma]); + tensors.runningMean = devicetensor(aux_states[batchnorm::kMovingMean]); + tensors.runningVar = devicetensor(aux_states[batchnorm::kMovingVar]); + tensors.saveMean = devicetensor(out_data[batchnorm::kMean]); + tensors.saveInvStd = devicetensor(out_data[batchnorm::kVar]); + + DCHECK_GT(tensors.weight.numElements(), 0); +#ifdef NDEBUG + constexpr bool SMALLER_THREADS = false; +#else + constexpr bool SMALLER_THREADS = true; +#endif + dim3 blocks(gradOutput.ChannelCount()); + dim3 threads(batchnorm::cuda::getNumThreads(gradOutput.InnerSize(), SMALLER_THREADS)); + BatchNormalizationBackwardKernel> + <<< blocks, threads, 0, mshadow::Stream::GetStream(s) >>> ( + input, gradOutput, gradInput, tensors, flags, momentum, eps); + MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormalizationBackward); +} + +} // namespace cuda +} // namespace batchnorm + +template +static inline uint32_t SetupFlags(const OpContext &ctx, + const BatchNormParam& params, + const std::vector &req) { + uint32_t flags = 0; + flags |= ctx.is_train ? IS_TRAINING_FLAG : 0; + flags |= params.fix_gamma ? FIX_GAMMA_FLAG : 0; + flags |= params.use_global_stats ? 
USE_GLOBAL_STATS_FLAG : 0; + if (IsBNWriting(req[batchnorm::kData])) { + flags |= WRITE_DATA_FLAG; + } else if (req[batchnorm::kData] == kAddTo) { + flags |= ADDTO_DATA_FLAG; + } + if (IsBNWriting(req[batchnorm::kGamma])) { + flags |= WRITE_GAMMA_FLAG; + } else if (req[batchnorm::kGamma] == kAddTo) { + flags |= ADDTO_GAMMA_FLAG; + } + if (IsBNWriting(req[batchnorm::kBeta])) { + flags |= WRITE_BETA_FLAG; + } else if (req[batchnorm::kBeta] == kAddTo) { + flags |= ADDTO_BETA_FLAG; + } + return flags; +} + +/*! \brief Forward batch-norm pass on GPU */ +template +void BatchNormForwardImpl(mshadow::Stream *stream, + const OpContext &ctx, const BatchNormParam& param_, + const std::vector &in_data, + const std::vector &req, + const std::vector &out_data, + const std::vector &aux_states) { + batchnorm::cuda::BatchNormalizationUpdateOutput( + stream, + ctx, + param_, + in_data, + out_data, + aux_states, + SetupFlags(ctx, param_, req), + param_.momentum, + param_.eps); + MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormOp_DoForward_gpu); +} + +/*! \brief Backward batch-norm pass on GPU */ +template +void BatchNormBackwardImpl(mshadow::Stream *stream, + const OpContext &ctx, const BatchNormParam& param_, + const std::vector &out_grad, + const std::vector &in_data, + const std::vector &out_data, + const std::vector &req, + const std::vector &in_grad, + const std::vector &aux_states) { + batchnorm::cuda::BatchNormalizationBackward( + stream, + ctx, + param_, + out_grad, + in_data, + out_data, + in_grad, + aux_states, + SetupFlags(ctx, param_, req), + param_.momentum, + param_.eps); + MSHADOW_CUDA_POST_KERNEL_CHECK(BatchNormOp_DoBackward_gpu); +} + +#if MXNET_USE_CUDNN == 1 +template +static CuDNNBatchNormOp &GetCuDNNOp(const BatchNormParam& param) { +#if DMLC_CXX11_THREAD_LOCAL + static thread_local CuDNNBatchNormOp op; +#else + static MX_THREAD_LOCAL CuDNNBatchNormOp op; +#endif + op.Init(param); + return op; +} +#endif + +template<> +void BatchNormCompute(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + BatchNormParam param = nnvm::get(attrs.parsed); + CHECK_EQ(inputs.size(), 5U); + std::vector in_data(inputs.begin(), inputs.begin() + 3); + std::vector aux_states(inputs.begin() + 3, inputs.end()); + int dtype = inputs[0].type_flag_; + mxnet::TShape shape = inputs[0].shape_; + + param.axis = mxnet::op::batchnorm::GetRealAxis(shape, param.axis); +#if MXNET_USE_CUDNN == 1 + if (!param.use_global_stats && !param.cudnn_off) { + MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { + GetCuDNNOp(param).Forward(ctx, in_data, req, outputs, aux_states); + }) + } else { + MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, { + BatchNormForward(ctx, param, in_data, req, outputs, aux_states); + }) + } +#else + MSHADOW_REAL_TYPE_SWITCH_EX(inputs[0].type_flag_, DType, AccReal, { + BatchNormForward(ctx, param, in_data, req, outputs, aux_states); + }); +#endif +} + +template<> +void BatchNormGradCompute(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + CHECK_EQ(inputs.size(), 8U); + BatchNormParam param = nnvm::get(attrs.parsed); + int dtype = inputs[0].type_flag_; + mxnet::TShape shape = inputs[0].shape_; + + param.axis = mxnet::op::batchnorm::GetRealAxis(shape, param.axis); +#if MXNET_USE_CUDNN == 1 + if (!param.use_global_stats && !param.cudnn_off) { + MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { + GetCuDNNOp(param).Backward(ctx, inputs, req, outputs); + }) + } 
else { + MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, { + BatchNormBackward(ctx, param, inputs, req, outputs); + }) + } +#else + MSHADOW_REAL_TYPE_SWITCH_EX(dtype, DType, AccReal, { + BatchNormBackward(ctx, param, inputs, req, outputs); + }); +#endif +} + +NNVM_REGISTER_OP(BatchNorm) +.set_attr("FCompute", BatchNormCompute); + +NNVM_REGISTER_OP(_backward_BatchNorm) +.set_attr("FCompute", BatchNormGradCompute); + +} // namespace op +} // namespace mxnet diff --git a/cuda_code/batch_normalization_4.cu b/cuda_code/batch_normalization_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..d88012a1b042004aa62c77299337f1a8fd36c05f --- /dev/null +++ b/cuda_code/batch_normalization_4.cu @@ -0,0 +1,144 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "cudakernel/nn/batch_normalization.h" +#include "cudakernel/math/math.h" +#include "cudakernel/common/common.h" +#include "cudakernel/common/divmod_fast.h" +#include "ppl/nn/common/tensor_shape.h" +#include + +template +__device__ T ppl_get_std(T var_val, T eps) +{ + return 1.f / sqrtf(var_val + eps); +} + +template <> +__device__ half ppl_get_std(half var_val, half eps) +{ +#if __CUDA_ARCH__ >= 600 && __CUDACC_VER_MAJOR__ >= 9 + return __hdiv(half(1), hsqrt(__hadd(var_val, eps))); +#else + return half(0); +#endif +} + +template +__global__ void ppl_cukernel_batchnorm_withmeanvar( + int64_t num_elems, + DivModFast channel_fast, + int channels, + const T* input, + const T* scale, + const T* B, + const T* mean, + const T* var, + float eps, + T* output) +{ + typedef Math OpMath; + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= num_elems) + return; + T t_eps = (T)eps; + int c_idx = 0; + int inner_offset = 0; + channel_fast.divmod(index, c_idx, inner_offset); + c_idx = c_idx % channels; + + T scale_val = scale[c_idx]; + T B_val = B[c_idx]; + T mean_val = mean[c_idx]; + T var_val = var[c_idx]; + T std = ppl_get_std(var_val, t_eps); + output[index] = OpMath::add(OpMath::mul(OpMath::mul(OpMath::sub(input[index], mean_val), std), scale_val), B_val); +} + +template +__global__ void ppl_cukernel_batchnorm_withmeanvar_nhwc( + int64_t num_elems, + DivModFast channel_fast, + int pad_channels, + const T* input, + const T* scale, + const T* B, + const T* mean, + const T* var, + float eps, + T* output) +{ + typedef Math OpMath; + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= num_elems) + return; + T t_eps = (T)eps; + int c_idx = 0; + int outer_offset = 0; + channel_fast.divmod(index, outer_offset, c_idx); + + T scale_val = scale[c_idx]; + T B_val = B[c_idx]; + T mean_val = mean[c_idx]; + T var_val = var[c_idx]; + T std = ppl_get_std(var_val, t_eps); + int nhwc_index = outer_offset * pad_channels + c_idx; + output[nhwc_index] = 
OpMath::add(OpMath::mul(OpMath::mul(OpMath::sub(input[nhwc_index], mean_val), std), scale_val), B_val); +} + +ppl::common::RetCode PPLCUDABatchNormalizationForwardImp( + cudaStream_t stream, + ppl::nn::TensorShape* input_shape, + const void* input, + ppl::nn::TensorShape* scale_shape, + const void* scale, + // share scale shape + const void* B, + const void* mean, + const void* var, + ppl::nn::TensorShape* output_shape, + void* output, + float epsilon) +{ + int dim_count = input_shape->GetDimCount(); + int batch = input_shape->GetDim(0); + int channels = dim_count >= 2 ? input_shape->GetDim(1) : 1; + int hw_count = 1; + for (int it = 2; it < dim_count; ++it) + hw_count *= input_shape->GetDim(it); + int64_t num_elems = output_shape->GetElementsExcludingPadding(); + int block_size = 256; + int grid_size = DivUp(num_elems, block_size); + + if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NDARRAY) { + DivModFast channel_fast(hw_count); + if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) { + ppl_cukernel_batchnorm_withmeanvar<<>>(num_elems, channel_fast, channels, (const float*)input, (const float*)scale, (const float*)B, (const float*)mean, (const float*)var, epsilon, (float*)output); + } else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) { + ppl_cukernel_batchnorm_withmeanvar<<>>(num_elems, channel_fast, channels, (const half*)input, (const half*)scale, (const half*)B, (const half*)mean, (const half*)var, epsilon, (half*)output); + } + } else { + DivModFast channel_fast(channels); + int pad_channels = dim_count >= 2 ? input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1) : 1; + if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) { + ppl_cukernel_batchnorm_withmeanvar_nhwc<<>>(num_elems, channel_fast, pad_channels, (const float*)input, (const float*)scale, (const float*)B, (const float*)mean, (const float*)var, epsilon, (float*)output); + } else if (output_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) { + ppl_cukernel_batchnorm_withmeanvar_nhwc<<>>(num_elems, channel_fast, pad_channels, (const half*)input, (const half*)scale, (const half*)B, (const half*)mean, (const half*)var, epsilon, (half*)output); + } + } + return ppl::common::RC_SUCCESS; +} diff --git a/cuda_code/batch_normalization_7.cu b/cuda_code/batch_normalization_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..033224ea01d06da000490990c9b7850e12196303 --- /dev/null +++ b/cuda_code/batch_normalization_7.cu @@ -0,0 +1,642 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. 
See the License for the specific language governing +// permissions and limitations under the license. +//////////////////////////////////////////////////////////////////////////////// + +#define LBANN_BATCH_NORMALIZATION_LAYER_INSTANTIATE +#include "lbann/layers/regularizers/batch_normalization.hpp" +#include "lbann/utils/cuda.hpp" + +#include "batch_norm_helpers.hpp" + +namespace lbann { + +namespace { + +/** CUDA kernel to compute channel sums. + * Sums and squares of sums are used to compute mean and variance. + */ +template +__global__ void channel_sums_kernel( + El::Int channel_height, + El::Int width, + const TensorDataType * __restrict__ data, El::Int data_ldim, + TensorDataType * __restrict__ sums, + TensorDataType * __restrict__ sqsums) { + + // Indices + const El::Int tid = threadIdx.x; + const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; + const El::Int bidy = blockIdx.y; + + // Initialize shared memory + __shared__ TensorDataType shared_sums[block_size]; + __shared__ TensorDataType shared_sqsums[block_size]; + + // Compute row sums in shared memory + TensorDataType private_sum = 0; + TensorDataType private_sqsum = 0; + if (gidx < channel_height) { + const auto& row = gidx + bidy * channel_height; + for (El::Int col = 0; col < width; ++col) { + const auto& x = data[row + col * data_ldim]; + private_sum += x; + private_sqsum += x * x; + } + } + shared_sums[tid] = private_sum; + shared_sqsums[tid] = private_sqsum; + + // Compute channel sum with shared memory reduction + /// @todo unroll loops + for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { + __syncthreads(); + if(tid < stride) { + shared_sums[tid] += shared_sums[tid + stride]; + shared_sqsums[tid] += shared_sqsums[tid + stride]; + } + } + + // Output channel sum to global memory + if (tid == 0) { + cuda::atomic_add(&sums[bidy], shared_sums[0]); + cuda::atomic_add(&sqsums[bidy], shared_sqsums[0]); + } + +} + +/** CUDA kernel to compute statistics. + * On input, global_mean and global_var are assumed to contain sums + * and squares of sums, respectively. + */ +template +__global__ void compute_statistics_kernel( + El::Int num_sums, + El::Int num_per_sum, + TensorDataType epsilon, + TensorDataType decay, + TensorDataType * __restrict__ global_mean, + TensorDataType * __restrict__ global_var, + TensorDataType * __restrict__ global_running_mean, + TensorDataType * __restrict__ global_running_var) { + + const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; + const El::Int num_threads = blockDim.x * gridDim.x; + for (El::Int i = gid; i < num_sums; i += num_threads) { + + TensorDataType num_per_sum_dt = TensorDataType(num_per_sum); + // Compute mean and variance + const auto& mean = global_mean[i] / num_per_sum_dt; + const auto& sqmean = global_var[i] / num_per_sum_dt; + auto var = num_per_sum_dt * (sqmean - mean * mean) / TensorDataType(num_per_sum - 1); + var = var > epsilon ? var : epsilon; + global_mean[gid] = mean; + global_var[gid] = var; + + // Compute running statistics + auto& running_mean = global_running_mean[gid]; + auto& running_var = global_running_var[gid]; + running_mean = decay * running_mean + (TensorDataType(1.0) - decay) * mean; + running_var = decay * running_var + (TensorDataType(1.0) - decay) * var; + + } + +} + +/** CUDA kernel to apply batch normalization. 
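+ * Grid layout: blockIdx.y selects the channel and threads stride over the
+ * channel entries. Per channel c the output is
+ *   y = scale[c] * (x - mean[c]) * rsqrt(var[c] + epsilon) + bias[c].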
*/ +template +__global__ void batch_normalization_kernel( + El::Int channel_height, + El::Int width, + const TensorDataType * __restrict__ global_input, El::Int input_ldim, + const TensorDataType * __restrict__ global_mean, + const TensorDataType * __restrict__ global_var, + TensorDataType epsilon, + const TensorDataType * __restrict__ global_scale, + const TensorDataType * __restrict__ global_bias, + TensorDataType * __restrict__ global_output, El::Int output_ldim) { + + // Indices + const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; + const El::Int bidy = blockIdx.y; + + // Copy batch normalization parameters to private memory + const auto& mean = global_mean[bidy]; + const auto& var = global_var[bidy]; + const auto& scale = global_scale[bidy]; + const auto& bias = global_bias[bidy]; + + // Get reciprocal of standard deviation + const auto& inv_stdev = cuda::rsqrt(var + epsilon); + + // Apply batch normalization + if (gidx < channel_height) { + const auto& row = gidx + bidy * channel_height; + for (El::Int col = 0; col < width; ++col) { + const auto& x = global_input[row + col * input_ldim]; + const auto& xhat = (x - mean) * inv_stdev; + const auto& y = scale * xhat + bias; + global_output[row + col * output_ldim] = y; + } + } + +} + +/** CUDA kernel to compute gradients w.r.t. batch norm parameters. */ +template +__global__ void backprop1_kernel( + El::Int channel_height, + El::Int width, + const TensorDataType * __restrict__ global_input, + El::Int input_ldim, + const TensorDataType * __restrict__ global_gradient_wrt_output, + El::Int gradient_wrt_output_ldim, + const TensorDataType * __restrict__ global_mean, + const TensorDataType * __restrict__ global_var, + TensorDataType epsilon, + const TensorDataType * __restrict__ global_scale, + TensorDataType * __restrict__ global_dscale, + TensorDataType * __restrict__ global_dbias, + TensorDataType * __restrict__ global_dmean, + TensorDataType * __restrict__ global_dvar) { + + // Indices + const El::Int tid = threadIdx.x; + const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; + const El::Int bidy = blockIdx.y; + + // Initialize shared memory + __shared__ TensorDataType shared_dscale[block_size]; + __shared__ TensorDataType shared_dbias[block_size]; + __shared__ TensorDataType shared_dmean[block_size]; + __shared__ TensorDataType shared_dvar[block_size]; + + // Copy batch normalization parameters to private memory + const auto& mean = global_mean[bidy]; + const auto& var = global_var[bidy]; + const auto& scale = global_scale[bidy]; + + // Compute useful constants + const TensorDataType zero = TensorDataType(0); + const auto& inv_stdev = cuda::rsqrt(var + epsilon); + const auto& dvar_factor = inv_stdev * inv_stdev * inv_stdev / TensorDataType(2); + + // Compute row-wise gradient contributions in shared memory + auto dscale = zero; + auto dbias = zero; + auto dmean = zero; + auto dvar = zero; + if (gidx < channel_height) { + const auto& row = gidx + bidy * channel_height; + for(El::Int col = 0; col < width; ++col) { + const auto& x = global_input[row + col * input_ldim]; + const auto& xhat = (x - mean) * inv_stdev; + const auto& dy = global_gradient_wrt_output[row + col * gradient_wrt_output_ldim]; + dscale += dy * xhat; + dbias += dy; + const auto& dxhat = dy * scale; + dmean += - dxhat * inv_stdev; + dvar += - dxhat * (x - mean) * dvar_factor; + } + } + shared_dscale[tid] = dscale; + shared_dbias[tid] = dbias; + shared_dmean[tid] = dmean; + shared_dvar[tid] = dvar; + + // Compute gradients with shared memory reduction + // 
@todo unroll loops + for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { + __syncthreads(); + if (tid < stride) { + shared_dscale[tid] += shared_dscale[tid + stride]; + shared_dbias[tid] += shared_dbias[tid + stride]; + shared_dmean[tid] += shared_dmean[tid + stride]; + shared_dvar[tid] += shared_dvar[tid + stride]; + } + } + + // Output channel sum to global memory + if (tid == 0) { + cuda::atomic_add(&global_dscale[bidy], shared_dscale[0]); + cuda::atomic_add(&global_dbias[bidy], shared_dbias[0]); + cuda::atomic_add(&global_dmean[bidy], shared_dmean[0]); + cuda::atomic_add(&global_dvar[bidy], shared_dvar[0]); + } + +} + +/** CUDA kernel to compute gradients w.r.t. input. */ +template +__global__ void backprop2_kernel( + El::Int channel_height, + El::Int local_width, + El::Int num_per_sum, + const TensorDataType * __restrict__ global_input, + El::Int input_ldim, + const TensorDataType * __restrict__ global_gradient_wrt_output, + El::Int gradient_wrt_output_ldim, + const TensorDataType * __restrict__ global_mean, + const TensorDataType * __restrict__ global_var, + TensorDataType epsilon, + const TensorDataType * __restrict__ global_scale, + const TensorDataType * __restrict__ global_dmean, + const TensorDataType * __restrict__ global_dvar, + TensorDataType * __restrict__ global_gradient_wrt_input, + El::Int gradient_wrt_input_ldim) { + + // Indices + const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; + const El::Int bidy = blockIdx.y; + + // Copy batch normalization parameters to private memory + const auto& mean = global_mean[bidy]; + const auto& var = global_var[bidy]; + const auto& scale = global_scale[bidy]; + const auto& dmean = global_dmean[bidy]; + const auto& dvar = global_dvar[bidy]; + + // Compute useful constants + const auto& inv_stdev = cuda::rsqrt(var + epsilon); + const auto& dmean_term = dmean / TensorDataType(num_per_sum); + const auto& dvar_term = dvar * TensorDataType(2) / TensorDataType(num_per_sum - 1); + + // Apply batch normalization + if (gidx < channel_height) { + const auto& row = gidx + bidy * channel_height; + for (El::Int col = 0; col < local_width; ++col) { + const auto& x = global_input[row + col * input_ldim]; + const auto& dy = global_gradient_wrt_output[row + col * gradient_wrt_output_ldim]; + const auto& dxhat = dy * scale; + auto& dx = global_gradient_wrt_input[row + col * gradient_wrt_input_ldim]; + dx = dxhat * inv_stdev + dmean_term + dvar_term * (x - mean); + } + } + +} + +} // namespace + +#ifdef LBANN_HAS_DISTCONV + +template +void batch_normalization_distconv_adapter::fp_compute() { + assert_always(Dev == El::Device::GPU); + assert_always(T_layout == data_layout::DATA_PARALLEL); + + using ValuesGetter = bn_details::SafeWeightsAccessor; + + auto &l = dynamic_cast&>(this->layer()); + + const bool is_training = + l.m_model->get_execution_context().get_execution_mode() == execution_mode::training; + auto& local_running_mean = + ValuesGetter::mutable_values(this->get_weights(2)).Matrix(); + auto& local_running_var = + ValuesGetter::mutable_values(this->get_weights(3)).Matrix(); + + assert0(dc::tensor::View( + m_scale, l.weights_values(0).LockedMatrix().LockedBuffer())); + assert0(dc::tensor::View( + m_bias, l.weights_values(1).LockedMatrix().LockedBuffer())); + assert0(dc::tensor::View( + m_running_mean, local_running_mean.Buffer())); + assert0(dc::tensor::View( + m_running_var, local_running_var.Buffer())); + + m_bn->forward_stage1(this->get_prev_activations(), m_mean, + m_var, is_training); + + if (l.m_statistics_group_size == 
0) { + l.m_comm->allreduce(*l.m_mean_and_var, l.m_mean_and_var->RedundantComm(), + El::mpi::SUM); + } else if (l.m_statistics_group_size == 1) { + // Local aggregation + } else { + LBANN_ERROR("statics_group_size must be either 0 or 1 for now."); + } + + m_bn->forward_stage2(this->get_prev_activations(), + m_mean, m_var, m_running_mean, + m_running_var, m_scale, m_bias, + this->get_activations(), is_training); +} + +template +void batch_normalization_distconv_adapter::bp_compute() { + assert_always(Dev == El::Device::GPU); + assert_always(T_layout == data_layout::DATA_PARALLEL); + + auto &l = dynamic_cast&>(this->layer()); + + // Check execution mode + const bool is_training = + l.m_model->get_execution_context().get_execution_mode() == execution_mode::training; + assert_always(is_training); + + assert0(dc::tensor::View( + m_scale, l.weights_values(0).LockedMatrix().LockedBuffer())); + + m_bn->backward_stage1(this->get_prev_activations(), + this->get_prev_error_signals(), + m_mean, m_var, m_scale, + m_scale_gradient, m_bias_gradient, + m_mean_gradient, m_var_gradient); + + // Verbatim copy from bp_compute_gpu + // Accumulate gradients + if (is_training) { + if (l.m_statistics_group_size == 0) { + l.m_comm->allreduce(*l.m_mean_and_var_gradient, + l.m_mean_and_var_gradient->RedundantComm(), + El::mpi::SUM); + } + } else { + Zero(*l.m_mean_and_var_gradient); + } + + auto* scale_optimizer = l.get_weights(0).get_optimizer(); + if (scale_optimizer != nullptr) { + scale_optimizer->add_to_gradient(*l.m_scale_gradient, TensorDataType{1}, true); + } + auto* bias_optimizer = l.get_weights(1).get_optimizer(); + if (bias_optimizer != nullptr) { + bias_optimizer->add_to_gradient(*l.m_bias_gradient, TensorDataType{1}, true); + } + + m_bn->backward_stage2(this->get_prev_activations(), this->get_prev_error_signals(), + m_mean, m_var, m_scale, m_mean_gradient, m_var_gradient, + this->get_error_signals()); +} + +#endif // LBANN_HAS_DISTCONV + +template +void batch_normalization_layer::fp_compute() { +#ifdef LBANN_HAS_DISTCONV + if (this->distconv_enabled()) { + get_distconv_adapter().fp_compute(); + return; + } +#endif // LBANN_HAS_DISTCONV + + const bool is_training = this->m_model->get_execution_context().get_execution_mode() == execution_mode::training; + + // CUDA objects + CHECK_CUDA(cudaSetDevice(El::GPUManager::Device())); + auto&& stream = El::GPUManager::Stream(); + + // Matrices + const auto& input = this->get_prev_activations(); + const auto& local_input = input.LockedMatrix(); + auto& local_output = this->get_local_activations(); + + // Matrix parameters + const auto& width = input.Width(); + const auto& local_width = local_input.Width(); + const auto& output_dims = this->get_output_dims(); + const auto& num_channels = output_dims[0]; + const auto& channel_size = this->get_output_size() / num_channels; + + // Compute statistics + if (is_training) { + using ValuesGetter = bn_details::SafeWeightsAccessor; + + // Local matrices + auto& local_mean = this->m_mean_v->Matrix(); + auto& local_var = this->m_var_v->Matrix(); + auto& local_running_mean = + ValuesGetter::mutable_values(this->get_weights(2)).Matrix(); + auto& local_running_var = + ValuesGetter::mutable_values(this->get_weights(3)).Matrix(); + + // Compute sums and sums of squares + El::Zero(local_mean); + El::Zero(local_var); + if (!local_input.IsEmpty()) { + const El::Int block_size = 256; + dim3 block_dims, grid_dims; + block_dims.x = block_size; + grid_dims.x = (channel_size + block_size - 1) / block_size; + grid_dims.y = num_channels; + 
channel_sums_kernel + <<>>( + channel_size, local_width, + local_input.LockedBuffer(), local_input.LDim(), + local_mean.Buffer(), local_var.Buffer()); + } + El::Int num_per_sum; + if (this->m_statistics_group_size == 0) { + // Global statistics aggregation; allreduce on fused buffer. + this->m_comm->allreduce(*this->m_mean_and_var, this->m_mean_and_var->RedundantComm(), + El::mpi::SUM); + num_per_sum = channel_size * width; + } else if (this->m_statistics_group_size == 1) { + // Local aggregation, no allreduce needed. + num_per_sum = channel_size * local_width; + } else { + // Grouped batchnorm. Allreduce on fused buffer. + this->m_comm->allreduce(*this->m_mean_and_var, + this->m_comm->get_packed_group_comm(this->m_statistics_group_size), + El::mpi::SUM); + if (this->m_num_per_sum_cache.count(width) == 0) { + num_per_sum = channel_size * local_width; + num_per_sum = this->m_comm->allreduce( + num_per_sum, this->m_comm->get_packed_group_comm(this->m_statistics_group_size)); + this->m_num_per_sum_cache[width] = num_per_sum; + } else { + num_per_sum = this->m_num_per_sum_cache[width]; + } + } + + // Compute minibatch statistics + if (num_per_sum <= 1) { + El::Fill(local_var, TensorDataType(1.0)); + } else if (num_channels > 0) { + const El::Int block_dim = 256; + const El::Int grid_dim = (num_channels + block_dim - 1) / block_dim; + compute_statistics_kernel<<>>( + num_channels, num_per_sum, this->m_epsilon, this->m_decay, + local_mean.Buffer(), local_var.Buffer(), + local_running_mean.Buffer(), local_running_var.Buffer()); + } + + } + + // Apply batch normalization + const auto& local_scale = this->weights_values(0).LockedMatrix(); + const auto& local_bias = this->weights_values(1).LockedMatrix(); + const auto& local_mean = (is_training ? + this->m_mean_v->LockedMatrix() : + this->weights_values(2).LockedMatrix()); + const auto& local_var = (is_training ? + this->m_var_v->LockedMatrix() : + this->weights_values(3).LockedMatrix()); + if (!local_input.IsEmpty()) { + const El::Int block_size = 256; + dim3 block_dims, grid_dims; + block_dims.x = block_size; + grid_dims.x = (channel_size + block_size - 1) / block_size; + grid_dims.y = num_channels; + batch_normalization_kernel + <<>>( + channel_size, local_width, + local_input.LockedBuffer(), local_input.LDim(), + local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, + local_scale.LockedBuffer(), local_bias.LockedBuffer(), + local_output.Buffer(), local_output.LDim()); + } + +} + +template +void batch_normalization_layer::bp_compute() { +#ifdef LBANN_HAS_DISTCONV + if (this->distconv_enabled()) { + get_distconv_adapter().bp_compute(); + return; + } +#endif // LBANN_HAS_DISTCONV + + const bool is_training = this->m_model->get_execution_context().get_execution_mode() == execution_mode::training; + + // CUDA objects + CHECK_CUDA(cudaSetDevice(El::GPUManager::Device())); + auto&& stream = El::GPUManager::Stream(); + + // Matrices + const auto& local_scale = this->weights_values(0).LockedMatrix(); + const auto& local_mean = (is_training ? + this->m_mean_v->LockedMatrix() : + this->weights_values(2).LockedMatrix()); + const auto& local_var = (is_training ? 
+ this->m_var_v->LockedMatrix() : + this->weights_values(3).LockedMatrix()); + const auto& input = this->get_prev_activations(); + const auto& local_input = input.LockedMatrix(); + const auto& local_gradient_wrt_output = this->get_local_prev_error_signals(); + auto& local_gradient_wrt_input = this->get_local_error_signals(); + auto& local_mean_gradient = this->m_mean_gradient_v->Matrix(); + auto& local_var_gradient = this->m_var_gradient_v->Matrix(); + auto& local_scale_gradient = this->m_scale_gradient->Matrix(); + auto& local_bias_gradient = this->m_bias_gradient->Matrix(); + + // Matrix parameters + const auto& width = input.Width(); + const auto& local_width = local_input.Width(); + const auto& output_dims = this->get_output_dims(); + const auto& num_channels = output_dims[0]; + const auto& channel_size = this->get_output_size() / num_channels; + + // Compute local gradients + // Compute gradients w.r.t. batch norm parameters + El::Zero(local_scale_gradient); + El::Zero(local_bias_gradient); + El::Zero(local_mean_gradient); + El::Zero(local_var_gradient); + if (!local_input.IsEmpty()) { + const El::Int block_size = 256; + dim3 block_dims, grid_dims; + block_dims.x = block_size; + grid_dims.x = (channel_size + block_size - 1) / block_size; + grid_dims.y = num_channels; + backprop1_kernel + <<>>( + channel_size, local_width, + local_input.LockedBuffer(), local_input.LDim(), + local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), + local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, + local_scale.LockedBuffer(), + local_scale_gradient.Buffer(), local_bias_gradient.Buffer(), + local_mean_gradient.Buffer(), local_var_gradient.Buffer()); + } + + // Accumulate gradients + if (is_training) { + if (this->m_statistics_group_size == 0) { + // Global aggregation; allreduce on fused buffer. + this->m_comm->allreduce(*this->m_mean_and_var_gradient, + this->m_mean_and_var_gradient->RedundantComm(), + El::mpi::SUM); + } else if (this->m_statistics_group_size > 1) { + // Grouped batchnorm; allreduce on fused buffer. + this->m_comm->allreduce(*this->m_mean_and_var_gradient, + this->m_comm->get_packed_group_comm(this->m_statistics_group_size), + El::mpi::SUM); + } + } else { + // Zero fused buffer. + El::Zero(*this->m_mean_and_var_gradient); + } + auto* scale_optimizer = this->get_weights(0).get_optimizer(); + if (scale_optimizer != nullptr) { + scale_optimizer->add_to_gradient(*this->m_scale_gradient, TensorDataType(1.0), true); + } + auto* bias_optimizer = this->get_weights(1).get_optimizer(); + if (bias_optimizer != nullptr) { + bias_optimizer->add_to_gradient(*this->m_bias_gradient, TensorDataType(1.0), true); + } + + // Compute error signal + El::Int num_per_sum; + if (this->m_statistics_group_size == 0) { + // Global statistics aggregation. + num_per_sum = channel_size * width; + } else if (this->m_statistics_group_size == 1) { + // Local aggregation. + num_per_sum = channel_size * local_width; + } else { + // Grouped batchnorm. + num_per_sum = this->m_num_per_sum_cache[width]; // This was computed in FP. 
+ } + if (num_per_sum <= 1) { + El::Zero(local_gradient_wrt_input); + } else if (!local_input.IsEmpty()) { + const El::Int block_size = 256; + dim3 block_dims, grid_dims; + block_dims.x = block_size; + grid_dims.x = (channel_size + block_size - 1) / block_size; + grid_dims.y = num_channels; + backprop2_kernel + <<>>( + channel_size, local_width, num_per_sum, + local_input.LockedBuffer(), local_input.LDim(), + local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(), + local_mean.LockedBuffer(), local_var.LockedBuffer(), this->m_epsilon, + local_scale.LockedBuffer(), + local_mean_gradient.LockedBuffer(), local_var_gradient.LockedBuffer(), + local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); + } + +} + +#define PROTO(T) \ + template class batch_normalization_layer + +#define LBANN_INSTANTIATE_GPU_HALF +#include "lbann/macros/instantiate.hpp" + +} // namespace lbann diff --git a/cuda_code/batch_renorm_layer.cu b/cuda_code/batch_renorm_layer.cu new file mode 100644 index 0000000000000000000000000000000000000000..18130c4ec24964c6caea853360ca3fa19a9cdf90 --- /dev/null +++ b/cuda_code/batch_renorm_layer.cu @@ -0,0 +1,311 @@ +#include +#include + +#include "caffe/layers/batch_renorm_layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + + template + __global__ void R_D_CUT(const int n, Dtype* r, Dtype* d + , Dtype cur_r_max, Dtype cur_r_min, Dtype cur_d_max, Dtype cur_d_min) { + CUDA_KERNEL_LOOP(index, n) { + r[index] = __min(cur_r_max, __max(r[index], cur_r_min)); + d[index] = __min(cur_d_max, __max(d[index], cur_d_min)); + } + } + + template + void BatchReNormLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + int num = bottom[0]->shape(0); + int spatial_dim = bottom[0]->count() / (channels_*bottom[0]->shape(0)); + Dtype iter; + caffe_copy(1, this->blobs_[3]->gpu_data(), &iter); + int step = int(iter) / iter_size_; + bool first_iter_in_step = (int(iter)%iter_size_ == 0); + + //LOG(INFO) << this->layer_param_.name() << " iter:" << iter << ", step:" << step; + + /* Use top[1], top[2], top[3], and top[4] for temp, x_norm, all_r, and all_d */ + Blob& temp = *top[1]; + Blob& x_norm = *top[2]; + Blob& all_r = *top[3]; + Blob& all_d = *top[4]; + + + if (bottom[0] != top[0]) { + caffe_copy(bottom[0]->count(), bottom_data, top_data); + } + + if (use_global_stats_) { + // use the stored mean/variance estimates. + Dtype m_counter; + caffe_copy(1, this->blobs_[2]->gpu_data(), &m_counter); + const Dtype scale_factor = m_counter == 0 ? 0 : 1 / m_counter; + caffe_gpu_scale(variance_.count(), scale_factor, + this->blobs_[0]->gpu_data(), mean_.mutable_gpu_data()); + caffe_gpu_scale(variance_.count(), scale_factor, + this->blobs_[1]->gpu_data(), variance_.mutable_gpu_data()); + if (0 == Caffe::worker_id()) { + LOG(INFO) << this->layer_param_.name() << " iter:" << iter << ", m_counter:" << m_counter; + } + } + else { + // compute mean + caffe_gpu_gemv(CblasNoTrans, channels_ * num, spatial_dim, + 1. 
/ (num * spatial_dim), bottom_data, + spatial_sum_multiplier_.gpu_data(), 0., + num_by_chans_.mutable_gpu_data()); + caffe_gpu_gemv(CblasTrans, num, channels_, 1., + num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0., + mean_.mutable_gpu_data()); + } + + // subtract mean + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1, + batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0., + num_by_chans_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, channels_ * num, + spatial_dim, 1, -1, num_by_chans_.gpu_data(), + spatial_sum_multiplier_.gpu_data(), 1., top_data); + + if (!use_global_stats_) { + // compute variance using var(X) = E((X-EX)^2) + caffe_gpu_powx(top[0]->count(), top_data, Dtype(2), + temp.mutable_gpu_data()); // (X-EX)^2 + caffe_gpu_gemv(CblasNoTrans, channels_ * num, spatial_dim, + 1. / (num * spatial_dim), temp.gpu_data(), + spatial_sum_multiplier_.gpu_data(), 0., + num_by_chans_.mutable_gpu_data()); + caffe_gpu_gemv(CblasTrans, num, channels_, 1., + num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0., + variance_.mutable_gpu_data()); // E((X_EX)^2) + + if (step >= step_to_init_ && first_iter_in_step) + { + Dtype m_counter; + caffe_copy(1, this->blobs_[2]->gpu_data(), &m_counter); + const Dtype scale_factor = 1. / m_counter; + caffe_gpu_scale(variance_.count(), scale_factor, this->blobs_[0]->gpu_data(), + mean_glb_.mutable_gpu_data()); + caffe_gpu_scale(variance_.count(), scale_factor, this->blobs_[1]->gpu_data(), + variance_glb_.mutable_gpu_data()); + caffe_gpu_add_scalar(variance_.count(), eps_, variance_glb_.mutable_gpu_data()); + caffe_gpu_powx(variance_.count(), this->variance_glb_.gpu_data(), Dtype(0.5), + this->variance_glb_.mutable_gpu_data()); + } + + // Backup the variance here so we can calculate moving average at + // the backward phase + caffe_copy(variance_.count(), variance_.gpu_data(), + variance_back_.mutable_gpu_data()); + // compute and save moving average + //Dtype moving_average_fraction = first_iter_in_step ? moving_average_fraction_ : 1.0; + //this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction; + //this->blobs_[2]->mutable_cpu_data()[0] += 1; + //caffe_gpu_axpby(mean_.count(), Dtype(1), mean_.gpu_data(), + // moving_average_fraction, this->blobs_[0]->mutable_gpu_data()); + //int m = bottom[0]->count() / channels_; + //Dtype bias_correction_factor = m > 1 ? Dtype(m) / (m - 1) : 1; + //caffe_gpu_axpby(variance_.count(), bias_correction_factor, + // variance_.gpu_data(), moving_average_fraction, + // this->blobs_[1]->mutable_gpu_data()); + } + + // normalize variance + caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data()); + caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5), + variance_.mutable_gpu_data()); + + // replicate variance to input size + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1, + batch_sum_multiplier_.gpu_data(), variance_.gpu_data(), 0., + num_by_chans_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, channels_ * num, + spatial_dim, 1, 1., num_by_chans_.gpu_data(), + spatial_sum_multiplier_.gpu_data(), 0., temp.mutable_gpu_data()); + caffe_gpu_div(temp.count(), top_data, temp.gpu_data(), top_data); + // TODO(cdoersch): The caching is only needed because later in-place layers + // might clobber the data. Can we skip this if they won't? 
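+    // At this point top_data holds the plain batch-norm output
+    //   x_hat = (x - mean) / sqrt(var + eps),
+    // e.g. (illustrative numbers) x = 3.0, mean = 1.0, var = 3.99, eps = 0.01
+    // gives x_hat = 2.0 / sqrt(4.0) = 1.0. The copy below caches x_hat for the
+    // backward pass; the block after it then applies the batch-renorm
+    // correction y = r * x_hat + d, broadcast per channel.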
+    caffe_copy(x_norm.count(), top_data,
+      x_norm.mutable_gpu_data());
+
+    if (!use_global_stats_ && step >= step_to_init_)
+    {
+      Dtype cur_r_max = __max(1, __min(1 + (step - step_to_init_ + 1)*(r_max_ - 1) / (step_to_r_max_ - step_to_init_), r_max_));
+      Dtype cur_r_min = 1. / cur_r_max;
+      Dtype cur_d_max = __max(0, __min((step - step_to_init_ + 1)*d_max_ / (step_to_d_max_ - step_to_init_), d_max_));
+      Dtype cur_d_min = -cur_d_max;
+
+      // r = sigma_batch / sigma_global, d = (mu_batch - mu_global) / sigma_global
+      caffe_gpu_div(variance_.count(), variance_.gpu_data(), variance_glb_.gpu_data(),
+        r_.mutable_gpu_data());
+
+      caffe_copy(variance_.count(), mean_.gpu_data(), d_.mutable_gpu_data());
+      caffe_gpu_axpby(variance_.count(), Dtype(-1), mean_glb_.gpu_data(),
+        Dtype(1), d_.mutable_gpu_data());
+      caffe_gpu_div(variance_.count(), d_.gpu_data(), variance_glb_.gpu_data(),
+        d_.mutable_gpu_data());
+
+      // clip r and d to their allowed ranges; launch follows the usual Caffe
+      // one-thread-per-element pattern
+      R_D_CUT<Dtype><<<CAFFE_GET_BLOCKS(variance_.count()), CAFFE_CUDA_NUM_THREADS>>>(
+        variance_.count(), r_.mutable_gpu_data(), d_.mutable_gpu_data(), cur_r_max, cur_r_min
+        , cur_d_max, cur_d_min);
+      CUDA_POST_KERNEL_CHECK;
+
+      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
+        batch_sum_multiplier_.gpu_data(), r_.gpu_data(), 0.,
+        num_by_chans_.mutable_gpu_data());
+      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
+        spatial_dim, 1, 1., num_by_chans_.gpu_data(),
+        spatial_sum_multiplier_.gpu_data(), 0., all_r.mutable_gpu_data());
+      caffe_gpu_mul(temp.count(), top_data, all_r.gpu_data(), top_data);
+
+      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
+        batch_sum_multiplier_.gpu_data(), d_.gpu_data(), 0.,
+        num_by_chans_.mutable_gpu_data());
+      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
+        spatial_dim, 1, 1., num_by_chans_.gpu_data(),
+        spatial_sum_multiplier_.gpu_data(), 0., all_d.mutable_gpu_data());
+      caffe_gpu_add(temp.count(), top_data, all_d.gpu_data(), top_data);
+    }
+  }
+
+  template <typename Dtype>
+  void BatchReNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
+    const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
+    const Dtype* top_diff;
+
+    /* Use top[1], top[2], top[3], and top[4] for temp, x_norm, all_r, and all_d */
+    Blob<Dtype>& temp = *top[1];
+    Blob<Dtype>& x_norm = *top[2];
+    Blob<Dtype>& all_r = *top[3];
+    Blob<Dtype>& all_d = *top[4];
+
+    Dtype iter;
+    caffe_copy(1, this->blobs_[3]->gpu_data(), &iter);
+    int step = int(iter) / iter_size_;
+
+    if (bottom[0] != top[0]) {
+      top_diff = top[0]->gpu_diff();
+    }
+    else {
+      caffe_copy(x_norm.count(), top[0]->gpu_diff(), x_norm.mutable_gpu_diff());
+      top_diff = x_norm.gpu_diff();
+    }
+    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+
+    if (use_global_stats_)
+    {
+      caffe_gpu_div(temp.count(), top_diff, temp.gpu_data(), bottom_diff);
+      return;
+    }
+
+    // compute and save moving average
+    if (!use_global_stats_) {
+      const Dtype* m_counter = this->blobs_[2]->gpu_data();
+      Dtype* m_counter_diff = this->blobs_[2]->mutable_gpu_diff();
+
+      const Dtype* m_average = this->blobs_[0]->gpu_data();
+      Dtype* m_average_diff = this->blobs_[0]->mutable_gpu_diff();
+
+      const Dtype* m_variance = this->blobs_[1]->gpu_data();
+      Dtype* m_variance_diff = this->blobs_[1]->mutable_gpu_diff();
+      int m = bottom[0]->count()/channels_;
+      Dtype bias_correction_factor = m > 1 ?
Dtype(m)/(m-1) : 1; + + // We only apply moving average fraction on worker 0 + //if (0 == Caffe::worker_id()) { + { + caffe_gpu_axpy(this->blobs_[2]->count(), moving_average_fraction_ - Dtype(1), + m_counter, m_counter_diff); + caffe_gpu_axpy(mean_.count(), moving_average_fraction_ - Dtype(1), + m_average, m_average_diff); + caffe_gpu_axpy(variance_back_.count(), moving_average_fraction_ - Dtype(1), + m_variance, m_variance_diff); + } + + caffe_gpu_add_scalar(this->blobs_[2]->count(), Dtype(1), m_counter_diff); + caffe_gpu_axpy(mean_.count(), Dtype(1), mean_.gpu_data(), m_average_diff); + caffe_gpu_axpy(variance_back_.count(), bias_correction_factor, + variance_back_.gpu_data(), m_variance_diff); + } + + const Dtype* top_data = x_norm.gpu_data(); + int num = bottom[0]->shape()[0]; + int spatial_dim = bottom[0]->count() / (channels_*bottom[0]->shape(0)); + // if Y = (X-mean(X))/(sqrt(var(X)+eps)), then + // + // dE(Y)/dX = + // (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y) + // ./ sqrt(var(X) + eps) + // + // where \cdot and ./ are hadamard product and elementwise division, + // respectively, dE/dY is the top diff, and mean/var/sum are all computed + // along all dimensions except the channels dimension. In the above + // equation, the operations allow for expansion (i.e. broadcast) along all + // dimensions except the channels dimension where required. + + // sum(dE/dY \cdot Y) + caffe_gpu_mul(temp.count(), top_data, top_diff, bottom_diff); + caffe_gpu_gemv(CblasNoTrans, channels_ * num, spatial_dim, 1., + bottom_diff, spatial_sum_multiplier_.gpu_data(), 0., + num_by_chans_.mutable_gpu_data()); + caffe_gpu_gemv(CblasTrans, num, channels_, 1., + num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0., + mean_.mutable_gpu_data()); + + // reshape (broadcast) the above + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1, + batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0., + num_by_chans_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, channels_ * num, + spatial_dim, 1, 1., num_by_chans_.gpu_data(), + spatial_sum_multiplier_.gpu_data(), 0., bottom_diff); + + // sum(dE/dY \cdot Y) \cdot Y + caffe_gpu_mul(temp.count(), top_data, bottom_diff, bottom_diff); + + // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y + caffe_gpu_gemv(CblasNoTrans, channels_ * num, spatial_dim, 1., + top_diff, spatial_sum_multiplier_.gpu_data(), 0., + num_by_chans_.mutable_gpu_data()); + caffe_gpu_gemv(CblasTrans, num, channels_, 1., + num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0., + mean_.mutable_gpu_data()); + // reshape (broadcast) the above to make + // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1, + batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0., + num_by_chans_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num * channels_, + spatial_dim, 1, 1., num_by_chans_.gpu_data(), + spatial_sum_multiplier_.gpu_data(), 1., bottom_diff); + + // dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y + caffe_gpu_axpby(temp.count(), Dtype(1), top_diff, + Dtype(-1. / (num * spatial_dim)), bottom_diff); + + // note: temp still contains sqrt(var(X)+eps), computed during the forward + // pass. 
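+    // Scalar reference for the same update, per channel c and element i
+    // (illustrative restatement of the derivation above; the GEMV/GEMM calls
+    // realize it with per-channel broadcasting, and the division below applies
+    // the final 1 / sqrt(var + eps) factor stored in temp):
+    //   m        = num * spatial_dim
+    //   mean_dy  = sum_i(dy[c][i]) / m
+    //   mean_dyy = sum_i(dy[c][i] * y[c][i]) / m
+    //   dx[c][i] = (dy[c][i] - mean_dy - mean_dyy * y[c][i]) / sqrt(var[c] + eps)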
+ caffe_gpu_div(temp.count(), bottom_diff, temp.gpu_data(), bottom_diff); + + + if (!use_global_stats_ && step >= step_to_init_) + { + caffe_gpu_mul(temp.count(), bottom_diff, all_r.gpu_data(), bottom_diff); + } + + if (this->phase_ == TRAIN && 0 == Caffe::worker_id()) + { + caffe_gpu_add_scalar(this->blobs_[3]->count(), Dtype(1), + this->blobs_[3]->mutable_gpu_diff()); + } + } + + INSTANTIATE_LAYER_GPU_FUNCS(BatchReNormLayer); + + +} // namespace caffe diff --git a/cuda_code/batchnorm_grad_impl.cu b/cuda_code/batchnorm_grad_impl.cu new file mode 100644 index 0000000000000000000000000000000000000000..f62541422c34006b7327f3c7f20a6c2a65d18a05 --- /dev/null +++ b/cuda_code/batchnorm_grad_impl.cu @@ -0,0 +1,123 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include "batchnorm_grad_impl.cuh" +#include "include/cuda_runtime.h" +#include "include/cuda_fp16.h" + +const int kWarpSize = 32; +const int kBlockSize = 1024; +const int kNumWarps = 32; + +template +__global__ void BatchNormGradKernel(T *x_input, T *dy, float *scale, float *save_mean, float *save_variance, T *dx, + float *bn_scale, float *bn_bias, double epsilon, int N, int C, int H, int W) { + __shared__ T shared_dy[kNumWarps]; + __shared__ T shared_p[kNumWarps]; + int warpId = threadIdx.x / kWarpSize; + int laneId = threadIdx.x % kWarpSize; + + int plane = blockIdx.x; + int plane_size = N * H * W; + + T invstd = static_cast(1) / static_cast(sqrt(save_variance[plane] + epsilon)); + T scale_val = scale != nullptr ? static_cast(scale[plane]) : static_cast(1); + T grad_scale = invstd * scale_val; + + T mean = static_cast(save_mean[plane]); + T dy_sum = static_cast(0); + T dot_p = static_cast(0); + + if (threadIdx.x < kNumWarps) { + shared_dy[threadIdx.x] = static_cast(0); + shared_p[threadIdx.x] = static_cast(0); + } + __syncthreads(); + + // Compute three values across (Batch, Height, Width) in one pass: + // 1. dx + // 2. Sum(dy) + // 3. DotProduct(x - mean, dy) + for (int x = threadIdx.x; x < plane_size; x += blockDim.x) { + int index = (x / (H * W) * C * H * W) + (plane * H * W) + (x % (H * W)); + dx[index] = static_cast(dy[index] * grad_scale); + dy_sum += dy[index]; + dot_p += (x_input[index] - mean) * dy[index]; + } + __syncthreads(); + + // Warp reduction + for (int offset = kWarpSize / 2; offset > 0; offset /= 2) { + T other_dy = __shfl_down_sync(0xffffffff, dy_sum, offset); + T other_p = __shfl_down_sync(0xffffffff, dot_p, offset); + dy_sum += other_dy; + dot_p += other_p; + } + __syncwarp(); + + // Move warp-reduction result to shared memory + if (laneId == 0) { + shared_dy[warpId] = dy_sum; + shared_p[warpId] = dot_p; + } + __syncthreads(); + + // Shared memory reduction + // There are exactly 32 items in shared memory, can be reduced within one warp. 
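+  // The branch below is the second level of a standard two-level sum
+  // reduction: level one reduced within each warp via __shfl_down_sync, and
+  // warp 0 now reduces the kNumWarps partial sums parked in shared memory.
+  // A generic sketch of the per-warp step (helper name is illustrative, not
+  // part of this file):
+  //
+  //   template <typename T>
+  //   __device__ T WarpReduceSum(T val) {
+  //     for (int offset = kWarpSize / 2; offset > 0; offset /= 2)
+  //       val += __shfl_down_sync(0xffffffff, val, offset);
+  //     return val;  // total is valid in lane 0 of each warp
+  //   }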
+ if (warpId == 0) { + dy_sum = shared_dy[laneId]; + dot_p = shared_p[laneId]; + __syncwarp(); + for (int offset = kWarpSize / 2; offset > 0; offset /= 2) { + T other_dy = __shfl_down_sync(0xffffffff, dy_sum, offset); + T other_p = __shfl_down_sync(0xffffffff, dot_p, offset); + dy_sum += other_dy; + dot_p += other_p; + } + __syncwarp(); + } + + // Compute bn_scale & bn_bias + if (threadIdx.x == 0) { + bn_scale[plane] = static_cast(dot_p * invstd); + } + + if (threadIdx.x == 0) { + bn_bias[plane] = static_cast(dy_sum); + } +} + +template +void CalBatchNormGrad(T *x, T *dy, float *scale, float *save_mean, float *save_variance, T *dx, float *bn_scale, + float *bn_bias, double epsilon, int N, int C, int H, int W, cudaStream_t cuda_stream) { + BatchNormGradKernel<<>>(x, dy, scale, save_mean, save_variance, dx, bn_scale, bn_bias, + epsilon, N, C, H, W); +} + +template CUDA_LIB_EXPORT void CalBatchNormGrad(float *x, float *dy, float *scale, float *save_mean, + float *save_variance, float *dx, float *bn_scale, float *bn_bias, + double epsilon, int N, int C, int H, int W, + cudaStream_t cuda_stream); + +template CUDA_LIB_EXPORT void CalBatchNormGrad(half *x, half *dy, float *scale, float *save_mean, + float *save_variance, half *dx, float *bn_scale, float *bn_bias, + double epsilon, int N, int C, int H, int W, + cudaStream_t cuda_stream); diff --git a/cuda_code/bb_flip_13.cu b/cuda_code/bb_flip_13.cu new file mode 100644 index 0000000000000000000000000000000000000000..e3d61f10b4495f752759a8251d441c89bef8cbdb --- /dev/null +++ b/cuda_code/bb_flip_13.cu @@ -0,0 +1,151 @@ +// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +namespace dali { + +/// @param output - output bounding boxes +/// @param input - input bounding boxes +/// @param num_boxes - number of bounding boxes in the input +/// @param sample_indices - when using per-sample flip, contains sample indices for each +/// bounding box in the input tensor list +/// @param per_sample_horizontal - per-sample flag indicating whether bounding boxes from +// a given sample should be flipped horizontally; may by NULL +/// @param per_sample_vertical - per-sample flag indicating whether bounding boxes from +// a given sample should be flipped vertically; may be NULL +/// @param global_horizontal - whether to flip horizontally; overriden by +/// per_sample_horizontal, if specified +/// @param global_vertical - whether to flip vertically; overriden by +/// per_sample_vertical, if specified +template +__global__ void BbFlipKernel(float *output, const float *input, size_t num_boxes, + bool global_horizontal, const int *per_sample_horizontal, + bool global_vertical, const int *per_sample_vertical, + const int *sample_indices) { + size_t idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= num_boxes) + return; + + bool h = per_sample_horizontal + ? per_sample_horizontal[sample_indices[idx]] + : global_horizontal; + bool v = per_sample_vertical + ? 
per_sample_vertical[sample_indices[idx]] + : global_vertical; + + const auto *in = &input[4 * idx]; + auto *out = &output[4 * idx]; + if (ltrb) { + out[0] = h ? 1.0f - in[2] : in[0]; + out[1] = v ? 1.0f - in[3] : in[1]; + out[2] = h ? 1.0f - in[0] : in[2]; + out[3] = v ? 1.0f - in[1] : in[3]; + } else { + // No range checking required if the parenthesis is respected in the two lines below. + // If the original bounding box satisfies the condition that x + w <= 1.0f, then the expression + // 1.0f - (x + w) is guaranteed to yield a non-negative result. QED. + out[0] = h ? 1.0f - (in[0] + in[2]) : in[0]; + out[1] = v ? 1.0f - (in[1] + in[3]) : in[1]; + out[2] = in[2]; // width and + out[3] = in[3]; // height remain unaffected + } +} + + +void BbFlip::RunImpl(Workspace *ws) { + auto &input = ws->Input(0); + auto&output = ws->Output(0); + + DALI_ENFORCE(IsType(input.type()), "Expected input data as float;" + " got " + input.type().name()); + DALI_ENFORCE(input.size() % 4 == 0, + "Input data size must be a multiple of 4 if it contains bounding boxes;" + " got " + std::to_string(input.size())); + + ArgValue horz("horizontal", spec_, ws); + ArgValue vert("vertical", spec_, ws); + bool ltrb = spec_.GetArgument("ltrb"); + + auto stream = ws->stream(); + + const auto num_boxes = input.size() / 4; + + const int *sample_idx = nullptr; + const int *per_sample_horz = nullptr; + const int *per_sample_vert = nullptr; + + // contains a map from box index to sample index - used + // for accessing per-sample horz/vert arguments. + Tensor sample_idx_tensor; + + if (horz.IsTensor() || vert.IsTensor()) { + std::vector indices; + indices.reserve(num_boxes); + + // populate the index map + auto shape = input.shape(); + for (int sample = 0; sample < shape.size(); sample++) { + auto dim = shape[sample].size(); + + DALI_ENFORCE(dim < 2 || shape[sample][dim-1] == 4, + "If bounding box tensor is >= 2D, innermost dimension must be 4"); + DALI_ENFORCE(dim > 1 || shape[sample][0] % 4 == 0, + "Flat representation of bouding boxes must have size divisible by 4"); + + size_t sample_boxes = dim == 2 ? shape[sample][0] : shape[sample][0] / 4; + for (size_t i = 0; i < sample_boxes ; i++) { + indices.push_back(sample); + } + } + sample_idx_tensor.Copy(indices, stream); + + if (horz.IsTensor()) + per_sample_horz = horz.AsGPU(stream)->data(); + + if (vert.IsTensor()) + per_sample_vert = vert.AsGPU(stream)->data(); + + sample_idx = sample_idx_tensor.data(); + } + + output.ResizeLike(input); + + if (num_boxes == 0) { + return; + } + + const unsigned block = num_boxes < 1024 ? 
num_boxes : 1024; + const unsigned grid = (num_boxes + block - 1) / block; + + if (ltrb) { + BbFlipKernel<<>>( + output.mutable_data(), input.data(), num_boxes, + !per_sample_horz && horz[0], per_sample_horz, + !per_sample_vert && vert[0], per_sample_vert, + sample_idx); + } else { + BbFlipKernel<<>>( + output.mutable_data(), input.data(), num_boxes, + !per_sample_horz && horz[0], per_sample_horz, + !per_sample_vert && vert[0], per_sample_vert, + sample_idx); + } +} + +DALI_REGISTER_OPERATOR(BbFlip, BbFlip, GPU); + +} // namespace dali diff --git a/cuda_code/benchmark_16.cu b/cuda_code/benchmark_16.cu new file mode 100644 index 0000000000000000000000000000000000000000..0886c20229266bd41cf803f0381a55d70b593307 --- /dev/null +++ b/cuda_code/benchmark_16.cu @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2020-2021 dePaul Miller (dsm220@lehigh.edu) + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include + +using namespace megakv; + +double stage1(MegaKV *s, std::vector> reqs, GPUData *data) { + auto start = std::chrono::high_resolution_clock::now(); + s->preprocess_hashes(reqs, data); + s->preprocess_rest(reqs, data); + auto end = std::chrono::high_resolution_clock::now(); + return std::chrono::duration(end - start).count() * 1e3; +} + +double stage2(MegaKV *s, GPUData *data, cudaStream_t stream) { + auto start = std::chrono::high_resolution_clock::now(); + s->moveTo(data, stream); + s->execute(data, stream); + s->moveFrom(data, stream); + cudaStreamSynchronize(stream); + auto end = std::chrono::high_resolution_clock::now(); + return std::chrono::duration(end - start).count() * 1e3; +} + +double stage3(MegaKV *s, std::vector> reqs, GPUData *data, std::shared_ptr resp) { + auto start = std::chrono::high_resolution_clock::now(); + s->postprocess(reqs, resp, data); + auto end = std::chrono::high_resolution_clock::now(); + return std::chrono::duration(end - start).count() * 1e3; +} + +struct stage1args { + + stage1args(MegaKV *s, std::vector> &&reqs, GPUData *data) : s(s), reqs(reqs), data(data) { + + } + + MegaKV *s; + std::vector> reqs; + GPUData *data; +}; + +void CUDART_CB stage1CallBack(cudaStream_t stream, cudaError_t status, void *userData) { + auto args = (stage1args *) userData; + stage1(args->s, args->reqs, args->data); +} + +void asyncStage2(MegaKV *s, GPUData *data, cudaStream_t stream) { + s->moveTo(data, stream); + s->execute(data, stream); + s->moveFrom(data, stream); +} + +struct stage3args { + + stage3args(MegaKV *s, std::vector> &&reqs, GPUData *data, std::shared_ptr resp) : s(s), reqs(reqs), + data(data), resp(resp) {} + + MegaKV *s; + std::vector> reqs; + GPUData *data; + std::shared_ptr resp; +}; + +void CUDART_CB stage3CallBack(cudaStream_t stream, cudaError_t status, void *userData) { + auto args = (stage3args *) userData; + stage3(args->s, args->reqs, args->data, args->resp); +} + +int main() { + + int batches = 100; + int nstreams = 10; + + MegaKV *s = new MegaKV(1000000); + + GPUData **data = new GPUData *[nstreams]; + for (int i = 0; i < nstreams; i++) { + data[i] = new GPUData(); + } + + auto b = new std::vector>[batches]; + for (int i = 0; i < batches; i++) { + for (int k = 0; k < BLOCKS; k++) { + std::shared_ptr r = std::make_shared(); + for (int j = 0; j < 512; j++) { + r->reqs[j] = {(i < 1) ? 
REQUEST_INSERT : REQUEST_GET, std::to_string((unsigned) (rand() % 100000)), + "test"}; + } + b[i].push_back(r); + } + } + std::vector> resp; + for (int i = 0; i < nstreams; i++) { + resp.push_back(std::make_shared(BLOCKS * THREADS_PER_BLOCK)); + } + cudaStream_t streams[nstreams]; + std::mutex streamMtx[nstreams]; + std::chrono::high_resolution_clock::time_point starts[nstreams]; + + for (int i = 0; i < nstreams; i++) gpuErrchk(cudaStreamCreate(&streams[i])); + + int dataElement = 0; + + auto start = std::chrono::high_resolution_clock::now(); + + tbb::parallel_pipeline(nstreams, + tbb::make_filter(tbb::filter::serial_in_order, [&](tbb::flow_control &fc) { + if (dataElement == batches) { + fc.stop(); + return 0; + } + return dataElement++; + }) & tbb::make_filter(tbb::filter::parallel, [&](int i) { + streamMtx[i % nstreams].lock(); + starts[i % nstreams] = std::chrono::high_resolution_clock::now(); + stage1(s, std::move(b[i]), data[i % nstreams]); + return i; + }) & tbb::make_filter(tbb::filter::parallel, [&](int i) { + s->moveTo(data[i % nstreams], streams[i % nstreams]); + s->execute(data[i % nstreams], streams[i % nstreams]); + s->moveFrom(data[i % nstreams], streams[i % nstreams]); + cudaStreamSynchronize(streams[i % nstreams]); + return i; + }) & tbb::make_filter(tbb::filter::parallel, [&](int i) { + stage3(s, std::move(b[i]), data[i % nstreams], resp[i % nstreams]); + auto startTmp = starts[i % nstreams]; + streamMtx[i % nstreams].unlock(); + double dur = std::chrono::duration(std::chrono::high_resolution_clock::now() - startTmp).count(); + std::cerr << dur * 1e3 << std::endl; + }) + ); + + auto end = std::chrono::high_resolution_clock::now(); + std::cout << batches * BLOCKS * THREADS_PER_BLOCK / std::chrono::duration(end - start).count() / 1e6 + << std::endl; + + delete s; + for (int i = 0; i < nstreams; i++) { + delete data[i]; + } + + delete[] data; + + return 0; +} \ No newline at end of file diff --git a/cuda_code/benchmark_IterativeMapSimple100x10.cu b/cuda_code/benchmark_IterativeMapSimple100x10.cu new file mode 100644 index 0000000000000000000000000000000000000000..5b3fa7f419aa607a03468d16017689fda6dbb91f --- /dev/null +++ b/cuda_code/benchmark_IterativeMapSimple100x10.cu @@ -0,0 +1,1183 @@ +#include +#include +#include +#include +#include + +#include +#include + +using namespace std; + + +/* ----- BEGIN Shared Library Export ----- */ +// taken from http://stackoverflow.com/questions/2164827/explicitly-exporting-shared-library-functions-in-linux + +#if defined(_MSC_VER) + // Microsoft + #define EXPORT __declspec(dllexport) + #define IMPORT __declspec(dllimport) +#elif defined(_GCC) + // GCC + #define EXPORT __attribute__((visibility("default"))) + #define IMPORT +#else + // do nothing and hope for the best? + #define EXPORT + #define IMPORT + #pragma warning Unknown dynamic link import/export semantics. +#endif +/* ----- END Shared Library Export ----- */ + +/* ----- BEGIN Class Type ----- */ +typedef int obj_id_t; +typedef int class_id_t; +/* ----- END Class Type ----- */ + +/* ----- BEGIN Environment (lexical variables) ----- */ +// environment_struct must be defined later +typedef struct environment_struct environment_t; +/* ----- END Environment (lexical variables) ----- */ + + +/* ----- BEGIN Forward declarations ----- */ +typedef struct result_t result_t; +/* ----- END Forward declarations ----- */ + +// Define program result variable. Also contains benchmark numbers. 
+result_t *program_result; + +// Variables for measuring time +chrono::high_resolution_clock::time_point start_time; +chrono::high_resolution_clock::time_point end_time; + +/* ----- BEGIN Macros ----- */ +#define timeStartMeasure() start_time = chrono::high_resolution_clock::now(); + +#define timeReportMeasure(result_var, variable_name) \ +end_time = chrono::high_resolution_clock::now(); \ +result_var->time_##variable_name = result_var->time_##variable_name + chrono::duration_cast(end_time - start_time).count(); +/* ----- END Macros ----- */ +struct indexed_struct_4_lt_int_int_int_int_gt_t +{ + int field_0; +int field_1; +int field_2; +int field_3; +}; + +/* ----- BEGIN Structs ----- */ +struct variable_size_array_t { + void *content; + int size; + + variable_size_array_t(void *content_ = NULL, int size_ = 0) : content(content_), size(size_) { }; + + static const variable_size_array_t error_return_value; +}; + +// error_return_value is used in case a host section terminates abnormally +const variable_size_array_t variable_size_array_t::error_return_value = + variable_size_array_t(NULL, 0); + +/* ----- BEGIN Union Type ----- */ +typedef union union_type_value { + obj_id_t object_id; + int int_; + float float_; + bool bool_; + void *pointer; + variable_size_array_t variable_size_array; + + __host__ __device__ union_type_value(int value) : int_(value) { }; + __host__ __device__ union_type_value(float value) : float_(value) { }; + __host__ __device__ union_type_value(bool value) : bool_(value) { }; + __host__ __device__ union_type_value(void *value) : pointer(value) { }; + __host__ __device__ union_type_value(variable_size_array_t value) : variable_size_array(value) { }; + + __host__ __device__ static union_type_value from_object_id(obj_id_t value) + { + return union_type_value(value); + } + + __host__ __device__ static union_type_value from_int(int value) + { + return union_type_value(value); + } + + __host__ __device__ static union_type_value from_float(float value) + { + return union_type_value(value); + } + + __host__ __device__ static union_type_value from_bool(bool value) + { + return union_type_value(value); + } + + __host__ __device__ static union_type_value from_pointer(void *value) + { + return union_type_value(value); + } + + __host__ __device__ static union_type_value from_variable_size_array_t(variable_size_array_t value) + { + return union_type_value(value); + } +} union_v_t; + +typedef struct union_type_struct +{ + class_id_t class_id; + union_v_t value; + + __host__ __device__ union_type_struct( + class_id_t class_id_ = 0, union_v_t value_ = union_v_t(0)) + : class_id(class_id_), value(value_) { }; + + static const union_type_struct error_return_value; +} union_t; + +// error_return_value is used in case a host section terminates abnormally +const union_type_struct union_t::error_return_value = union_type_struct(0, union_v_t(0)); +/* ----- END Union Type ----- */ + +typedef struct result_t { + variable_size_array_t result; + int last_error; + + uint64_t time_setup_cuda; + uint64_t time_prepare_env; + uint64_t time_kernel; + uint64_t time_free_memory; + uint64_t time_transfer_memory; + uint64_t time_allocate_memory; + + // Memory management + vector *device_allocations; +} result_t; +/* ----- END Structs ----- */ + +struct array_command_1 { + // Ikra::Symbolic::ArrayIndexCommand + indexed_struct_4_lt_int_int_int_int_gt_t *result; + __host__ __device__ array_command_1(indexed_struct_4_lt_int_int_int_int_gt_t *result = NULL) : result(result) { } +}; +struct array_command_2 { + // 
Ikra::Symbolic::ArrayCombineCommand + int *result; + array_command_1 *input_0; + __host__ __device__ array_command_2(int *result = NULL, array_command_1 *input_0 = NULL) : result(result), input_0(input_0) { } +}; +struct array_command_3 { + // Ikra::Symbolic::FixedSizeArrayInHostSectionCommand + int *result; + variable_size_array_t input_0; + __host__ __device__ array_command_3(int *result = NULL, variable_size_array_t input_0 = variable_size_array_t::error_return_value) : result(result), input_0(input_0) { } + int size() { return input_0.size; } +}; +struct array_command_5 { + // Ikra::Symbolic::ArrayIndexCommand + indexed_struct_4_lt_int_int_int_int_gt_t *result; + __host__ __device__ array_command_5(indexed_struct_4_lt_int_int_int_int_gt_t *result = NULL) : result(result) { } +}; +struct array_command_4 { + // Ikra::Symbolic::ArrayCombineCommand + int *result; + array_command_3 *input_0; + array_command_5 *input_1; + __host__ __device__ array_command_4(int *result = NULL, array_command_3 *input_0 = NULL, array_command_5 *input_1 = NULL) : result(result), input_0(input_0), input_1(input_1) { } +}; +struct array_command_6 { + // Ikra::Symbolic::ArrayCombineCommand + int *result; + array_command_4 *input_0; + array_command_5 *input_1; + __host__ __device__ array_command_6(int *result = NULL, array_command_4 *input_0 = NULL, array_command_5 *input_1 = NULL) : result(result), input_0(input_0), input_1(input_1) { } +}; +struct array_command_8 { + // Ikra::Symbolic::ArrayCombineCommand + int *result; + array_command_6 *input_0; + array_command_5 *input_1; + __host__ __device__ array_command_8(int *result = NULL, array_command_6 *input_0 = NULL, array_command_5 *input_1 = NULL) : result(result), input_0(input_0), input_1(input_1) { } +}; +struct array_command_10 { + // Ikra::Symbolic::ArrayCombineCommand + int *result; + array_command_8 *input_0; + array_command_5 *input_1; + __host__ __device__ array_command_10(int *result = NULL, array_command_8 *input_0 = NULL, array_command_5 *input_1 = NULL) : result(result), input_0(input_0), input_1(input_1) { } +}; +struct array_command_12 { + // Ikra::Symbolic::ArrayCombineCommand + int *result; + array_command_10 *input_0; + array_command_5 *input_1; + __host__ __device__ array_command_12(int *result = NULL, array_command_10 *input_0 = NULL, array_command_5 *input_1 = NULL) : result(result), input_0(input_0), input_1(input_1) { } +}; +struct environment_struct +{ +}; + +// TODO: There should be a better to check if _block_k_2_ is already defined +#ifndef _block_k_2__func +#define _block_k_2__func +__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + { + return (indices.field_2 % 133777); + } +} + +#endif + + +__global__ void kernel_137(environment_t *_env_, int _num_threads_, int *_result_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})); + } +} + + + +// TODO: There should be a better to check if _block_k_2_ is already defined +#ifndef _block_k_2__func +#define _block_k_2__func +__device__ int _block_k_2_(environment_t *_env_, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + { + return (indices.field_2 % 133777); + } +} + +#endif + + +__global__ void kernel_139(environment_t *_env_, int _num_threads_, int *_result_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if 
(_tid_ < _num_threads_) + { + + + _result_[_tid_] = _block_k_2_(_env_, ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})); + } +} + + + + +__global__ void kernel_141(environment_t *_env_, int _num_threads_, int *_result_, int *_array_143_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _array_143_[_tid_]; + } +} + + + + + + + + + + + +// TODO: There should be a better to check if _block_k_4_ is already defined +#ifndef _block_k_4__func +#define _block_k_4__func +__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_2)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_6_ is already defined +#ifndef _block_k_6__func +#define _block_k_6__func +__device__ int _block_k_6_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_1)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_8_ is already defined +#ifndef _block_k_8__func +#define _block_k_8__func +__device__ int _block_k_8_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_3)) % 1337); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_10_ is already defined +#ifndef _block_k_10__func +#define _block_k_10__func +__device__ int _block_k_10_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_0)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_12_ is already defined +#ifndef _block_k_12__func +#define _block_k_12__func +__device__ int _block_k_12_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_1)) % 1377); + } +} + +#endif + + +__global__ void kernel_144(environment_t *_env_, int _num_threads_, int *_result_, int *_array_146_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _block_k_12_(_env_, _block_k_10_(_env_, _block_k_8_(_env_, _block_k_6_(_env_, _block_k_4_(_env_, _array_146_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})); + } +} + + + + +__global__ void kernel_147(environment_t *_env_, int _num_threads_, int *_result_, int *_array_149_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _array_149_[_tid_]; + } +} + + + + + + + + + + + +// TODO: There should be a better to check if _block_k_4_ is already defined +#ifndef _block_k_4__func +#define _block_k_4__func +__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + 
indices.field_2)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_6_ is already defined +#ifndef _block_k_6__func +#define _block_k_6__func +__device__ int _block_k_6_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_1)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_8_ is already defined +#ifndef _block_k_8__func +#define _block_k_8__func +__device__ int _block_k_8_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_3)) % 1337); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_10_ is already defined +#ifndef _block_k_10__func +#define _block_k_10__func +__device__ int _block_k_10_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_0)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_12_ is already defined +#ifndef _block_k_12__func +#define _block_k_12__func +__device__ int _block_k_12_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_1)) % 1377); + } +} + +#endif + + +__global__ void kernel_150(environment_t *_env_, int _num_threads_, int *_result_, int *_array_152_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _block_k_12_(_env_, _block_k_10_(_env_, _block_k_8_(_env_, _block_k_6_(_env_, _block_k_4_(_env_, _array_152_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})); + } +} + + + + +__global__ void kernel_153(environment_t *_env_, int _num_threads_, int *_result_, int *_array_155_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _array_155_[_tid_]; + } +} + + + + + + + + + + + +// TODO: There should be a better to check if _block_k_4_ is already defined +#ifndef _block_k_4__func +#define _block_k_4__func +__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_2)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_6_ is already defined +#ifndef _block_k_6__func +#define _block_k_6__func +__device__ int _block_k_6_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_1)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_8_ is already defined +#ifndef _block_k_8__func +#define _block_k_8__func +__device__ int _block_k_8_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_3)) % 1337); + } +} + +#endif + + + +// TODO: There should be a better to check if 
_block_k_10_ is already defined +#ifndef _block_k_10__func +#define _block_k_10__func +__device__ int _block_k_10_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_0)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_12_ is already defined +#ifndef _block_k_12__func +#define _block_k_12__func +__device__ int _block_k_12_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_1)) % 1377); + } +} + +#endif + + +__global__ void kernel_156(environment_t *_env_, int _num_threads_, int *_result_, int *_array_158_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _block_k_12_(_env_, _block_k_10_(_env_, _block_k_8_(_env_, _block_k_6_(_env_, _block_k_4_(_env_, _array_158_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})); + } +} + + + + +__global__ void kernel_159(environment_t *_env_, int _num_threads_, int *_result_, int *_array_161_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _array_161_[_tid_]; + } +} + + + + + + + + + + + +// TODO: There should be a better to check if _block_k_4_ is already defined +#ifndef _block_k_4__func +#define _block_k_4__func +__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_2)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_6_ is already defined +#ifndef _block_k_6__func +#define _block_k_6__func +__device__ int _block_k_6_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_1)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_8_ is already defined +#ifndef _block_k_8__func +#define _block_k_8__func +__device__ int _block_k_8_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_3)) % 1337); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_10_ is already defined +#ifndef _block_k_10__func +#define _block_k_10__func +__device__ int _block_k_10_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_0)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_12_ is already defined +#ifndef _block_k_12__func +#define _block_k_12__func +__device__ int _block_k_12_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_1)) % 1377); + } +} + +#endif + + +__global__ void kernel_162(environment_t *_env_, int _num_threads_, int *_result_, int *_array_164_) +{ + int _tid_ = threadIdx.x + blockIdx.x * 
blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _block_k_12_(_env_, _block_k_10_(_env_, _block_k_8_(_env_, _block_k_6_(_env_, _block_k_4_(_env_, _array_164_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})); + } +} + + + + +__global__ void kernel_165(environment_t *_env_, int _num_threads_, int *_result_, int *_array_167_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _array_167_[_tid_]; + } +} + + + + + + + + + + + +// TODO: There should be a better to check if _block_k_4_ is already defined +#ifndef _block_k_4__func +#define _block_k_4__func +__device__ int _block_k_4_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_2)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_6_ is already defined +#ifndef _block_k_6__func +#define _block_k_6__func +__device__ int _block_k_6_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_1)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_8_ is already defined +#ifndef _block_k_8__func +#define _block_k_8__func +__device__ int _block_k_8_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_3)) % 1337); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_10_ is already defined +#ifndef _block_k_10__func +#define _block_k_10__func +__device__ int _block_k_10_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_0)) % 13377); + } +} + +#endif + + + +// TODO: There should be a better to check if _block_k_12_ is already defined +#ifndef _block_k_12__func +#define _block_k_12__func +__device__ int _block_k_12_(environment_t *_env_, int i, indexed_struct_4_lt_int_int_int_int_gt_t indices) +{ + + + + { + return (((i + indices.field_1)) % 1377); + } +} + +#endif + + +__global__ void kernel_168(environment_t *_env_, int _num_threads_, int *_result_, int *_array_170_) +{ + int _tid_ = threadIdx.x + blockIdx.x * blockDim.x; + + if (_tid_ < _num_threads_) + { + + + _result_[_tid_] = _block_k_12_(_env_, _block_k_10_(_env_, _block_k_8_(_env_, _block_k_6_(_env_, _block_k_4_(_env_, _array_170_[_tid_], ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, (_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})), ((indexed_struct_4_lt_int_int_int_int_gt_t) {_tid_ / 500000, 
(_tid_ / 1000) % 500, (_tid_ / 2) % 500, (_tid_ / 1) % 2})); + } +} + + +#undef checkErrorReturn +#define checkErrorReturn(result_var, expr) \ +if (result_var->last_error = expr) \ +{\ + cudaError_t error = cudaGetLastError();\ + printf("!!! Cuda Failure %s:%d (%i): '%s'\n", __FILE__, __LINE__, expr, cudaGetErrorString(error));\ + cudaDeviceReset();\ + return variable_size_array_t::error_return_value;\ +} + +variable_size_array_t _host_section__(environment_t *host_env, environment_t *dev_env, result_t *program_result) +{ + array_command_2 * x = new array_command_2(); + int r; + union_t _ssa_var_old_data_2; + array_command_12 * _ssa_var_y_8; + array_command_10 * _ssa_var_y_7; + array_command_8 * _ssa_var_y_6; + array_command_6 * _ssa_var_y_5; + array_command_4 * _ssa_var_y_4; + union_t _ssa_var_old_data_3; + union_t _ssa_var_y_1; + { + _ssa_var_y_1 = union_t(10, union_v_t::from_pointer((void *) new array_command_3(NULL, ({ + // [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] + + array_command_2 * cmd = x; + + if (cmd->result == 0) { + timeStartMeasure(); + int * _kernel_result_138; + checkErrorReturn(program_result, cudaMalloc(&_kernel_result_138, (sizeof(int) * 10000000))); + program_result->device_allocations->push_back(_kernel_result_138); + timeReportMeasure(program_result, allocate_memory); + timeStartMeasure(); + kernel_137<<<39063, 256>>>(dev_env, 10000000, _kernel_result_138); + checkErrorReturn(program_result, cudaPeekAtLastError()); + checkErrorReturn(program_result, cudaThreadSynchronize()); + timeReportMeasure(program_result, kernel); + cmd->result = _kernel_result_138; + + + } + + variable_size_array_t((void *) cmd->result, 10000000); + })))); + _ssa_var_old_data_2 = union_t(19, union_v_t::from_pointer((void *) x)); + for (r = 0; r <= (100 - 1); r++) + { + _ssa_var_old_data_3 = _ssa_var_y_1; + _ssa_var_y_4 = new array_command_4(NULL, new array_command_3(NULL, ({ + variable_size_array_t _polytemp_result_49; + { + union_t _polytemp_expr_50 = _ssa_var_y_1; + switch (_polytemp_expr_50.class_id) + { + case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_49 = ({ + // [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] + + array_command_3 * cmd = (array_command_3 *) _polytemp_expr_50.value.pointer; + + if (cmd->result == 0) { + timeStartMeasure(); + int * _kernel_result_142; + checkErrorReturn(program_result, cudaMalloc(&_kernel_result_142, (sizeof(int) * 10000000))); + program_result->device_allocations->push_back(_kernel_result_142); + timeReportMeasure(program_result, allocate_memory); + timeStartMeasure(); + kernel_141<<<39063, 256>>>(dev_env, 10000000, _kernel_result_142, ((int *) cmd->input_0.content)); + checkErrorReturn(program_result, cudaPeekAtLastError()); + checkErrorReturn(program_result, cudaThreadSynchronize()); + timeReportMeasure(program_result, kernel); + cmd->result = _kernel_result_142; + + + } + + variable_size_array_t((void *) cmd->result, 10000000); + }); break; + case 20: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_49 = ({ + // [Ikra::Symbolic::ArrayCombineCommand, size = 10000000]: [SendNode: [LVarReadNode: _ssa_var_y_7].pmap([HashNode: {<:with_index> => [BeginNode: {}]}])] + + array_command_12 * cmd = (array_command_12 *) _polytemp_expr_50.value.pointer; + + if (cmd->result == 0) { + timeStartMeasure(); + int * _kernel_result_145; + checkErrorReturn(program_result, 
cudaMalloc(&_kernel_result_145, (sizeof(int) * 10000000))); + program_result->device_allocations->push_back(_kernel_result_145); + timeReportMeasure(program_result, allocate_memory); + timeStartMeasure(); + kernel_144<<<39063, 256>>>(dev_env, 10000000, _kernel_result_145, ((int *) ((int *) ((int *) ((int *) ((int *) ((int *) cmd->input_0->input_0->input_0->input_0->input_0->input_0.content))))))); + checkErrorReturn(program_result, cudaPeekAtLastError()); + checkErrorReturn(program_result, cudaThreadSynchronize()); + timeReportMeasure(program_result, kernel); + cmd->result = _kernel_result_145; + + + } + + variable_size_array_t((void *) cmd->result, 10000000); + }); break; + } + } + _polytemp_result_49; + }))); + _ssa_var_y_5 = new array_command_6(NULL, _ssa_var_y_4); + _ssa_var_y_6 = new array_command_8(NULL, _ssa_var_y_5); + _ssa_var_y_7 = new array_command_10(NULL, _ssa_var_y_6); + _ssa_var_y_8 = new array_command_12(NULL, _ssa_var_y_7); + ({ + bool _polytemp_result_57; + { + union_t _polytemp_expr_58 = _ssa_var_old_data_3; + switch (_polytemp_expr_58.class_id) + { + case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_57 = ({ + array_command_3 * cmd_to_free = (array_command_3 *) _polytemp_expr_58.value.pointer; + + timeStartMeasure(); + bool freed_memory = false; + + if (cmd_to_free->result != 0) { + checkErrorReturn(program_result, cudaFree(cmd_to_free->result));; + + // Remove from list of allocations + program_result->device_allocations->erase( + std::remove( + program_result->device_allocations->begin(), + program_result->device_allocations->end(), + cmd_to_free->result), + program_result->device_allocations->end()); + + freed_memory = true; + } + + timeReportMeasure(program_result, free_memory); + + freed_memory; + }); break; + case 20: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_57 = ({ + array_command_12 * cmd_to_free = (array_command_12 *) _polytemp_expr_58.value.pointer; + + timeStartMeasure(); + bool freed_memory = false; + + if (cmd_to_free->result != 0) { + checkErrorReturn(program_result, cudaFree(cmd_to_free->result));; + + // Remove from list of allocations + program_result->device_allocations->erase( + std::remove( + program_result->device_allocations->begin(), + program_result->device_allocations->end(), + cmd_to_free->result), + program_result->device_allocations->end()); + + freed_memory = true; + } + + timeReportMeasure(program_result, free_memory); + + freed_memory; + }); break; + } + } + _polytemp_result_57; + }); + _ssa_var_y_1 = union_t(20, union_v_t::from_pointer((void *) _ssa_var_y_8)); + _ssa_var_old_data_2 = _ssa_var_old_data_3; + } + r--; + return ({ + variable_size_array_t _polytemp_result_59; + { + union_t _polytemp_expr_60 = _ssa_var_y_1; + switch (_polytemp_expr_60.class_id) + { + case 10: /* [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_59 = ({ + // [Ikra::Symbolic::FixedSizeArrayInHostSectionCommand, size = 10000000] + + array_command_3 * cmd = (array_command_3 *) _polytemp_expr_60.value.pointer; + + if (cmd->result == 0) { + timeStartMeasure(); + int * _kernel_result_166; + checkErrorReturn(program_result, cudaMalloc(&_kernel_result_166, (sizeof(int) * 10000000))); + program_result->device_allocations->push_back(_kernel_result_166); + timeReportMeasure(program_result, allocate_memory); + timeStartMeasure(); + kernel_165<<<39063, 
256>>>(dev_env, 10000000, _kernel_result_166, ((int *) cmd->input_0.content)); + checkErrorReturn(program_result, cudaPeekAtLastError()); + checkErrorReturn(program_result, cudaThreadSynchronize()); + timeReportMeasure(program_result, kernel); + cmd->result = _kernel_result_166; + + + } + + variable_size_array_t((void *) cmd->result, 10000000); + }); break; + case 20: /* [Ikra::Symbolic::ArrayCombineCommand, size = 10000000] (Ikra::Symbolic::ArrayCommand) */ _polytemp_result_59 = ({ + // [Ikra::Symbolic::ArrayCombineCommand, size = 10000000]: [SendNode: [LVarReadNode: _ssa_var_y_7].pmap([HashNode: {<:with_index> => [BeginNode: {}]}])] + + array_command_12 * cmd = (array_command_12 *) _polytemp_expr_60.value.pointer; + + if (cmd->result == 0) { + timeStartMeasure(); + int * _kernel_result_169; + checkErrorReturn(program_result, cudaMalloc(&_kernel_result_169, (sizeof(int) * 10000000))); + program_result->device_allocations->push_back(_kernel_result_169); + timeReportMeasure(program_result, allocate_memory); + timeStartMeasure(); + kernel_168<<<39063, 256>>>(dev_env, 10000000, _kernel_result_169, ((int *) ((int *) ((int *) ((int *) ((int *) ((int *) cmd->input_0->input_0->input_0->input_0->input_0->input_0.content))))))); + checkErrorReturn(program_result, cudaPeekAtLastError()); + checkErrorReturn(program_result, cudaThreadSynchronize()); + timeReportMeasure(program_result, kernel); + cmd->result = _kernel_result_169; + + + } + + variable_size_array_t((void *) cmd->result, 10000000); + }); break; + } + } + _polytemp_result_59; + }); + } +} + +#undef checkErrorReturn +#define checkErrorReturn(result_var, expr) \ +expr + +extern "C" EXPORT result_t *launch_kernel(environment_t *host_env) +{ + // CUDA Initialization + program_result = new result_t(); + program_result->device_allocations = new vector(); + + timeStartMeasure(); + + cudaError_t cudaStatus = cudaSetDevice(0); + + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?\n"); + program_result->last_error = -1; + return program_result; + } + + checkErrorReturn(program_result, cudaFree(0)); + + timeReportMeasure(program_result, setup_cuda); + + + /* Prepare environment */ + /* Allocate device environment and copy over struct */ + environment_t *dev_env; + + timeStartMeasure(); + checkErrorReturn(program_result, cudaMalloc(&dev_env, sizeof(environment_t))); + timeReportMeasure(program_result, allocate_memory); + + timeStartMeasure(); + checkErrorReturn(program_result, cudaMemcpy(dev_env, host_env, sizeof(environment_t), cudaMemcpyHostToDevice)); + timeReportMeasure(program_result, transfer_memory); + + + + /* Copy back memory and set pointer of result */ + program_result->result = ({ + variable_size_array_t device_array = _host_section__(host_env, dev_env, program_result); + int * tmp_result = (int *) malloc(sizeof(int) * device_array.size); + + timeStartMeasure(); + checkErrorReturn(program_result, cudaMemcpy(tmp_result, device_array.content, sizeof(int) * device_array.size, cudaMemcpyDeviceToHost)); + timeReportMeasure(program_result, transfer_memory); + + variable_size_array_t((void *) tmp_result, device_array.size); +}); + + /* Free device memory */ + timeStartMeasure(); + + for ( + auto device_ptr = program_result->device_allocations->begin(); + device_ptr < program_result->device_allocations->end(); + device_ptr++) + { + checkErrorReturn(program_result, cudaFree(*device_ptr)); + } + + delete program_result->device_allocations; + + timeReportMeasure(program_result, free_memory); + + return program_result; +} diff --git a/cuda_code/bfs_topo_thread_centric.cu b/cuda_code/bfs_topo_thread_centric.cu new file mode 100644 index 0000000000000000000000000000000000000000..ab12c92b43059189115d49eac4b20d7e7caf416a --- /dev/null +++ b/cuda_code/bfs_topo_thread_centric.cu @@ -0,0 +1,151 @@ +//=================================================================// +// CUDA BFS kernel +// Topological-Driven: one node per thread, thread_centric, +// no atomic instructions +// Reference: +// Sungpack Hong, et al. 
Accelerating CUDA graph algorithms +// at maximum warp +//=================================================================// +#include +#include +#include + +#include "cudaGraph.h" + +__global__ void initialize(uint32_t * d_graph_property, uint64_t num_vertex) +{ + size_t tid = blockIdx.x * blockDim.x + threadIdx.x; + if ( tid < num_vertex ) + { + d_graph_property[tid] = MY_INFINITY; + } +} + +__global__ +void kernel(uint32_t * vplist, cudaGraph graph, unsigned curr, bool *changed) { + uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x; + + if (tid >= graph.vertex_cnt) return; + if (vplist[tid]==curr) + { + uint64_t start, end; + start = graph.get_firstedge_index(tid); + end = graph.get_edge_index_end(tid); + for (uint64_t i=start; i devProp.maxThreadsPerBlock) + num_thread_per_block = devProp.maxThreadsPerBlock; + unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block ); + + // malloc of gpu side + cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) ); + cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) ); + + cudaEvent_t start_event, stop_event; + cudaErrCheck( cudaEventCreate(&start_event) ); + cudaErrCheck( cudaEventCreate(&stop_event) ); + + // initialization + initialize<<>>(device_vpl, vertex_cnt); + + // prepare graph struct + // one for host side, one for device side + cudaGraph h_graph, d_graph; + // here copy only the pointers + h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt); + + uint32_t zeronum=0; + // memcpy from host to device + cudaEventRecord(start_event, 0); + + // copy graph data to device + h_graph.cudaGraphCopy(&d_graph); + + cudaErrCheck( cudaMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t), + cudaMemcpyHostToDevice) ); + + cudaEventRecord(stop_event, 0); + cudaEventSynchronize(stop_event); + cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event); + + + // BFS traversal + bool stop; + cudaEventRecord(start_event, 0); + + int curr=0; + do + { + // Each iteration processes + // one level of BFS traversal + stop = false; + cudaErrCheck( cudaMemcpy(device_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ); + + kernel<<>>(device_vpl, d_graph, curr, device_over); + + cudaErrCheck( cudaMemcpy(&stop, device_over, sizeof(bool), cudaMemcpyDeviceToHost) ); + + curr++; + }while(stop); + + cudaEventRecord(stop_event, 0); + cudaEventSynchronize(stop_event); + cudaEventElapsedTime(&kernel_time, start_event, stop_event); + + + cudaEventRecord(start_event, 0); + + cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t), + cudaMemcpyDeviceToHost) ); + + cudaEventRecord(stop_event, 0); + cudaEventSynchronize(stop_event); + cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event); + + printf("== iteration #: %d\n", curr); +#ifndef ENABLE_VERIFY + printf("== host->device copy time: %f ms\n", h2d_copy_time); + printf("== device->host copy time: %f ms\n", d2h_copy_time); + printf("== kernel time: %f ms\n", kernel_time); +#endif + cudaEventDestroy(start_event); + cudaEventDestroy(stop_event); + + // free graph struct on device side + d_graph.cudaGraphFree(); + + cudaErrCheck( cudaFree(device_vpl) ); +} + diff --git a/cuda_code/bias_softmax_impl_11.cu b/cuda_code/bias_softmax_impl_11.cu new file mode 100644 index 0000000000000000000000000000000000000000..2338dfaec34f1c313cf41ec8f4042a691de393bc --- /dev/null +++ b/cuda_code/bias_softmax_impl_11.cu @@ -0,0 +1,337 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
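+// This file provides two paths for the fused bias + softmax used in transformer attention:
+// a warp-level kernel (BiasSoftmaxWarpForward) in which each cooperative group of up to 32
+// threads handles one or two rows of at most 1024 elements, and a cuDNN-based fallback for
+// larger rows (DispatchBiasSoftMaxForwardViaDnnLibraryImpl).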
+ +#include "contrib_ops/cuda/math/bias_softmax.h" + +#include +#include + +#include "core/providers/cuda/cuda_common.h" +#include "core/providers/cuda/cudnn_common.h" +#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh" +#include "core/providers/cuda/cu_inc/common.cuh" +#include "core/providers/cuda/math/binary_elementwise_ops_impl_functors.cuh" +#include "core/providers/cuda/math/softmax_impl.cuh" +#include "core/providers/cuda/shared_inc/accumulation_type.h" + +using namespace onnxruntime; +using namespace onnxruntime::cuda; + +namespace onnxruntime { +namespace contrib { +namespace cuda { + +// Duplicated softmax_impl.cu here +// So far attempt to use shared kernel with additional template resulted in lost performance + +// Note: The intended case for 'input_bias' is the input sequence mask for transformer models +// As an additive mask, it should be zero for preserved tokens and -infty for tokens to screen +// The mask will broadcast from [batch_size, 1, 1, seq_len] to input [batch_size, num_heads, seq_len, seq_len] +// Here element_count = seq_len and bias_broadcast_size_per_batch = num_heads * seq_len + +// The softmax + additive mask fusion follows NVIDIA apex's additive_masked_softmax_warp_forward +// see https://github.com/NVIDIA/apex/blob/4ef930c1c884fdca5f472ab2ce7cb9b505d26c1a/apex/contrib/csrc/multihead_attn/softmax.h + +template +__global__ void BiasSoftmaxWarpForward( + output_t* output, + const input_t* input, + const input_t* input_bias, + int element_count, + int batch_count, + int batch_stride, + int bias_broadcast_count_per_batch) { + // "WARP" refers to cooperative threads and might not equal 32 threads of GPU warp + // thread block is (WARP_SIZE, 128/WARP_SIZE) + constexpr int next_power_of_two = 1 << log2_elements; + constexpr int WARP_SIZE = next_power_of_two < GPU_WARP_SIZE ? next_power_of_two : GPU_WARP_SIZE; + constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE; + constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1; + + // each "WARP" (<=32) processes WARP_BATCH(one of {1,2}) batches + int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * WARP_BATCH; + + // last warp may have fewer batches + int local_batches = batch_count - first_batch; + if (local_batches > WARP_BATCH) + local_batches = WARP_BATCH; + + // thread will process elements (local_index + n * warp_size) within batch + int local_idx = threadIdx.x; + + // push input, input_bias output pointers to batch we need to process + input += first_batch * batch_stride + local_idx; + output += first_batch * batch_stride + local_idx; + + // load from global memory and apply bias (likely an additive mask) + acc_t elements[WARP_BATCH][WARP_ITERATIONS]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + // the bias has assumed shape [batch_size, element_count] + // .. and needs to broadcast to [batch_size, broadcast_size, element_count] + int bias_offset = (first_batch + i) / bias_broadcast_count_per_batch * batch_stride + local_idx; + + int batch_element_count = (i >= local_batches) ? 
0 : element_count; +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + int element_index = local_idx + it * WARP_SIZE; + if (element_index < batch_element_count) { + elements[i][it] = (acc_t)input[i * element_count + it * WARP_SIZE] + (acc_t)input_bias[bias_offset + it * WARP_SIZE]; + } else { + elements[i][it] = -std::numeric_limits::infinity(); + } + } + } + + // find maximum value within batch for numerical stability + acc_t max_value[WARP_BATCH]; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + max_value[i] = elements[i][0]; +#pragma unroll + for (int it = 1; it < WARP_ITERATIONS; ++it) { + max_value[i] = (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it]; + } + } + warp_reduce(max_value); + + // normalization factor Z = Sum[ exp(element_i), for element_i in batch ] + acc_t sum[WARP_BATCH]{acc_t(0.0)}; +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + elements[i][it] = std::exp((acc_t)(elements[i][it] - max_value[i])); + sum[i] += elements[i][it]; + } + } + warp_reduce(sum); + +// write back normalized value = exp(element_i)/Z to global memory +#pragma unroll + for (int i = 0; i < WARP_BATCH; ++i) { + if (i >= local_batches) + break; +#pragma unroll + for (int it = 0; it < WARP_ITERATIONS; ++it) { + int element_index = local_idx + it * WARP_SIZE; + if (element_index < element_count) { + output[i * element_count + it * WARP_SIZE] = elements[i][it] / sum[i]; + } else { + break; + } + } + } +} + +template +void DispatchBiasSoftmaxForwardImpl( + cudaStream_t stream, + Tensor* output_tensor, + const Tensor* input_tensor, + const Tensor* input_bias_tensor, + int element_count, + int batch_count, + int batch_stride, + int bias_broadcast_size_per_batch) { + typedef typename ToCudaType::MappedType CudaT; + typedef CudaT input_t; + typedef CudaT output_t; + typedef AccumulationType_t acc_t; + + const auto* input = reinterpret_cast(input_tensor->template Data()); + const auto* input_bias = reinterpret_cast(input_bias_tensor->template Data()); + auto* output = reinterpret_cast(output_tensor->template MutableData()); + + if (element_count == 0) + return; + + int log2_elements = log2_ceil(element_count); + const int next_power_of_two = 1 << log2_elements; + + // This value must match the WARP_SIZE constexpr value computed inside softmax_warp_forward. + int warp_size = std::min(next_power_of_two, GPU_WARP_SIZE); + + // This value must match the WARP_BATCH constexpr value computed inside softmax_warp_forward. + int batches_per_warp = (next_power_of_two <= 128) ? 
2 : 1; + + // use 128 threads per block to maximize gpu utilization + constexpr int threads_per_block = 128; + + int warps_per_block = (threads_per_block / warp_size); + int batches_per_block = warps_per_block * batches_per_warp; + int blocks = (batch_count + batches_per_block - 1) / batches_per_block; + dim3 threads(warp_size, warps_per_block, 1); + + // Launch code would be more elegant if C++ supported FOR CONSTEXPR + switch (log2_elements) { + case 0: // 1 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + case 1: // 2 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + case 2: // 4 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + case 3: // 8 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + case 4: // 16 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + case 5: // 32 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + case 6: // 64 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + case 7: // 128 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + case 8: // 256 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + case 9: // 512 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + case 10: // 1024 + BiasSoftmaxWarpForward + <<>>(output, input, input_bias, element_count, batch_count, batch_stride, bias_broadcast_size_per_batch); + break; + default: + break; + } +} + +#define SPECIALIZED_BIAS_SOFTMAX_IMPL(T) \ + template void DispatchBiasSoftmaxForwardImpl( \ + cudaStream_t stream, \ + Tensor * output_tensor, \ + const Tensor* input_tensor, \ + const Tensor* input_bias_tensor, \ + int element_count, \ + int batch_count, \ + int batch_stride, \ + int bias_broadcast_size_per_batch); + +SPECIALIZED_BIAS_SOFTMAX_IMPL(double) +SPECIALIZED_BIAS_SOFTMAX_IMPL(float) +SPECIALIZED_BIAS_SOFTMAX_IMPL(MLFloat16) + +// For large element count we fall back to explicit Add kernel + CUDA DNN library +// note: This is an unhappy path! There is no performance benefit for the fusion. 
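+//
+// Caller-side sketch (illustrative only, not code from this file): the warp kernel above
+// covers element_count up to 1024 (log2_elements <= 10); anything larger takes this
+// cuDNN-based path. The handle and shape variable names below are placeholders.
+//
+//   if (element_count <= 1024) {
+//     DispatchBiasSoftmaxForwardImpl<float>(stream, output_tensor, input_tensor, input_bias_tensor,
+//                                           element_count, batch_count, batch_stride,
+//                                           bias_broadcast_size_per_batch);
+//   } else {
+//     ORT_RETURN_IF_ERROR(DispatchBiasSoftMaxForwardViaDnnLibraryImpl<float>(
+//         stream, cudnn_handle, element_count, batch_count, broadcast_axis, softmax_axis,
+//         X_shape, X, B_shape, B, Y));
+//   }
+//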
+template +Status DispatchBiasSoftMaxForwardViaDnnLibraryImpl( + cudaStream_t stream, + cudnnHandle_t cudaDnnHandle, + int element_count, + int batch_count, + int broadcast_axis, + int softmax_axis, + const onnxruntime::TensorShape& X_shape, + const onnxruntime::Tensor* X, + const onnxruntime::TensorShape& B_shape, + const onnxruntime::Tensor* B, + onnxruntime::Tensor* Y) { + typedef typename ToCudaType::MappedType CudaT; + + const auto* X_data = reinterpret_cast(X->template Data()); + const auto* B_data = reinterpret_cast(B->template Data()); + auto* Y_data = reinterpret_cast(Y->template MutableData()); + + // binary elementise kernel requires input pitches + TArray lhs_padded_strides(static_cast(X_shape.NumDimensions())); + int64_t lhs_pitch = 1, rhs_pitch = 1; + for (int i = -1; i >= -(int)X_shape.NumDimensions(); i--) { + size_t positive_i = X_shape.NumDimensions() + i; + lhs_padded_strides[static_cast(positive_i)] = lhs_pitch; + lhs_pitch *= X_shape[positive_i]; + } + + // set pitches for bias so it broadcasts along relevant dimensions + TArray rhs_padded_strides(static_cast(X_shape.NumDimensions())); + for (int i = -1; i >= -(int)X_shape.NumDimensions(); i--) { + size_t positive_ix = X_shape.NumDimensions() + i; + size_t positive_ib = B_shape.NumDimensions() + i; + if (broadcast_axis <= positive_ix && positive_ix < softmax_axis) { + rhs_padded_strides[static_cast(positive_ix)] = 0; + continue; + } + rhs_padded_strides[static_cast(positive_ix)] = rhs_pitch; + rhs_pitch *= B_shape[positive_ib]; + } + + TArray fdm_output_strides(static_cast(X_shape.NumDimensions())); + //TODO: fast_divmod only supports int32 + for (int i = 0; i < fdm_output_strides.Size(); i++) + fdm_output_strides[i] = fast_divmod(static_cast(lhs_padded_strides[i])); + fast_divmod fdm_H, fdm_C; + + // invoke elementwise add with broadcast kernel + ::onnxruntime::cuda::BinaryElementWiseImpl( + stream, + (int32_t)X_shape.NumDimensions(), + &lhs_padded_strides, + X_data, + &rhs_padded_strides, + B_data, + &fdm_output_strides, + fdm_H, + fdm_C, + Y_data, + OP_Add(), + (size_t)X_shape.Size()); + + // invoke cuda DNN library for Y = softmax(X) + std::vector dims({batch_count, 1, 1, element_count}); + const auto alpha = Consts::One; + const auto beta = Consts::Zero; + CudnnTensor input_tensor, output_tensor; + ORT_RETURN_IF_ERROR(input_tensor.Set(dims, CudnnTensor::GetDataType())); + ORT_RETURN_IF_ERROR(output_tensor.Set(dims, CudnnTensor::GetDataType())); + cudnnSoftmaxForward( + cudaDnnHandle, + CUDNN_SOFTMAX_ACCURATE, + CUDNN_SOFTMAX_MODE_INSTANCE, + &alpha, + input_tensor, + Y_data, + &beta, + output_tensor, + Y_data); + + return Status::OK(); +} + +#define SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(T) \ + template Status DispatchBiasSoftMaxForwardViaDnnLibraryImpl( \ + cudaStream_t stream, \ + cudnnHandle_t cudaDnnHandle, \ + int element_count, \ + int batch_count, \ + int broadcast_axis, \ + int softmax_axis, \ + const onnxruntime::TensorShape& X_shape, \ + const Tensor* X_data, \ + const onnxruntime::TensorShape& B_shape, \ + const Tensor* B_data, \ + Tensor* Y_data); + +SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(double) +SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(float) +SPECIALIZED_BIAS_SOFTMAX_IMPL_VIA_DNN(MLFloat16) + +} // namespace cuda +} // namespace contrib +} // namespace onnxruntime diff --git a/cuda_code/bicg_reference_kernel.cu b/cuda_code/bicg_reference_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..894be090569ce1c6735ac96d8833ba803344197a --- /dev/null +++ 
b/cuda_code/bicg_reference_kernel.cu @@ -0,0 +1,95 @@ + // process BICG_BATCH elements in thread +#define BICG_BATCH 8 +#define BICG_STEP 32/BICG_BATCH + +typedef float DATA_TYPE; + +extern "C" __global__ void bicgKernel1( DATA_TYPE *A, DATA_TYPE *p, DATA_TYPE *q, int m, int n) +{ + int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i < n) + { + q[i] = 0.0; + + int j; + for (j = 0; j < m; j++) + { + q[i] += A[i * m + j] * p[j]; + } + } + +} + +extern "C" __global__ void bicgKernel2( DATA_TYPE *A, DATA_TYPE *r, DATA_TYPE *s, int m, int n) +{ + int j = blockDim.x*blockIdx.x + threadIdx.x; + + if (j < m) + { + s[j] = 0.0; + + int i; + for (i = 0; i < n; i++) + { + s[j] += A[i * m + j] * r[i]; + } + } + +} + +extern "C" __global__ void bicgFusedRef( float *A, float *x1, float *y1, float *x2, float *y2, int m, int n) +{ + int tx = threadIdx.x; + int ty = threadIdx.y; + int bx = blockIdx.x; + int by = blockIdx.y; + + __shared__ float s_A[32][33]; + __shared__ float s_x1[32]; + __shared__ float s_x2[32]; + + float l_sum = 0.0f; + + // load x2 + if (ty == 0) + s_x2[tx] = x2[bx * 32 + tx]; + for (int i = m*by; i < m*(by + 1); i += 32) { + // load x1 + if (ty == 1) + s_x1[tx] = x1[i + tx]; + __syncthreads(); + + for (int j = 0; j < 32; j += BICG_STEP) { + s_A[ty + j][tx] = A[(i + ty + j)*n + bx * 32 + tx]; + l_sum += s_A[ty + j][tx] * s_x1[ty + j]; + } + __syncthreads(); + float tmp = 0.0f; + + for (int j = 0; j < 32; j += BICG_STEP) + tmp += s_A[tx][ty + j] * s_x2[ty + j]; + s_A[tx][ty] = tmp; + __syncthreads(); + + if (ty < 2) + s_A[tx][ty] = tmp = tmp + s_A[tx][ty + 2]; + __syncthreads(); + + if (ty == 0) { + atomicAdd(y2 + i + tx, tmp + s_A[tx][1]); + } + } + + // compute total sum + __syncthreads(); + s_A[ty][tx] = l_sum; + __syncthreads(); + if (ty < 2) { + s_A[ty][tx] = l_sum = l_sum + s_A[ty + 2][tx]; + } + __syncthreads(); + if (ty == 0) { + atomicAdd(y1 + bx * 32 + tx, l_sum + s_A[1][tx]); + } +} diff --git a/cuda_code/binarize_1.cu b/cuda_code/binarize_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..cbcbad8a13960764c9b2d3cfc5946f6c062533cf --- /dev/null +++ b/cuda_code/binarize_1.cu @@ -0,0 +1,263 @@ +#include "binarize.cuh" +#include +#include +#include +#include +#include + +#include _CUB_INCLUDE(cub/block/block_radix_sort.cuh) + +namespace NKernel { + + __global__ void WriteCompressedIndexImpl(TCFeature feature, const ui8* bins, ui32 docCount, ui32* cindex) { + + cindex += feature.Offset; + ui32 i = blockIdx.x * blockDim.x + threadIdx.x; + while (i < docCount) { + const ui32 bin = (((ui32)bins[i]) & feature.Mask) << feature.Shift; + cindex[i] = cindex[i] | bin; + i += blockDim.x * gridDim.x; + } + } + + void WriteCompressedIndex(TCFeature feature, + const ui8* bins, ui32 docCount, + ui32* cindex, + TCudaStream stream) { + + const ui32 blockSize = 256; + const ui32 numBlocks = (docCount + blockSize - 1) / blockSize; + + WriteCompressedIndexImpl<< < numBlocks, blockSize, 0, stream >> > (feature, bins, docCount, cindex); + } + + + + template + __launch_bounds__(BLOCK_SIZE, 2) + __global__ void BinarizeFloatFeatureImpl(TCFeature feature, const float* values, ui32 docCount, + const float* borders, + const ui32* gatherIndex, ui32* dst) { + + const ui32 i = (blockIdx.x * BLOCK_SIZE * DOCS_PER_THREAD + threadIdx.x); + + __shared__ float sharedBorders[256]; + sharedBorders[0] = borders[0]; + __syncthreads(); + const int bordersCount = static_cast(sharedBorders[0]); + __syncthreads(); + dst += feature.Offset; + + if (threadIdx.x < bordersCount) { + 
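+            // borders[0] holds the number of borders (written by FastGpuBorders /
+            // ComputeQuantileBorders below), so the actual threshold values are
+            // read starting at borders[1].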
sharedBorders[threadIdx.x] = LdgWithFallback(borders, threadIdx.x + 1); + } + __syncthreads(); + + ui32 index[DOCS_PER_THREAD]; + float featureValues[DOCS_PER_THREAD]; + + #pragma unroll + for (int j = 0; j < DOCS_PER_THREAD; ++j) { + index[j] = 0; + const int idx = i + j * BLOCK_SIZE; + + if (idx < docCount) { + const ui32 readIdx = gatherIndex ? StreamLoad(gatherIndex + idx) : idx; + featureValues[j] = StreamLoad(values + readIdx); + } + } + + #pragma unroll + for (int border = 0; border < bordersCount; ++border) + { + const float borderValue = sharedBorders[border]; + #pragma unroll + for (int j = 0; j < DOCS_PER_THREAD; ++j) + { + if (featureValues[j] > borderValue) + { + ++index[j]; + } + } + } + + + #pragma unroll + for (int j = 0; j < DOCS_PER_THREAD; ++j) + { + const int idx = i + j * BLOCK_SIZE; + + if (idx < docCount) { + + if (ATOMIC_UPDATE) + { + atomicOr(dst + idx, (index[j] & feature.Mask) << feature.Shift); + } else { + ui32 bin = dst[idx]; + bin |= (index[j] & feature.Mask) << feature.Shift; + dst[idx] = bin; + } + } + } + } + + //smth like bootstrap for quantiles estimation + template + __global__ void FastGpuBordersImpl(const float* values, ui32 size, float* borders, ui32 bordersCount) { + + const int valuesPerThread = 2; + using BlockRadixSort = cub::BlockRadixSort; + const int tid = threadIdx.x; + float vals[valuesPerThread]; + + if (tid == 0 && blockIdx.x == 0) { + borders[0] = bordersCount; + } + + ui64 seed = (blockIdx.x * 6364136223846793005 + 1442695040888963407) + (1664525 * threadIdx.x + 1013904223) & 0xFFFFFF; + + for (int i = 0; i < valuesPerThread; ++i) { + const int idx = static_cast(AdvanceSeed(&seed) % size); + vals[i] = StreamLoad(values + idx); + } + + { + using TTempStorage = typename BlockRadixSort::TempStorage; + __shared__ TTempStorage temp; + BlockRadixSort(temp).Sort(vals); + } + + float sum = 0; + float weight = 0; + for (int i = 0; i < valuesPerThread; ++i) { + sum += vals[i]; + weight += 1.0f; + } + + __shared__ float localBorders[BLOCK_SIZE]; + localBorders[tid] = sum / weight; + __syncthreads(); + + if (tid < bordersCount) { + const ui32 offset = static_cast((tid + 1.0f) * BLOCK_SIZE / bordersCount - 1e-5f); + atomicAdd(borders + tid + 1, (localBorders[offset]) * 0.9999 / gridDim.x); + } + } + + __global__ void SortBordersImpl(float* borders, ui32 bordersCount) + { + + using BlockRadixSort = cub::BlockRadixSort; + ui32 tid = threadIdx.x; + float val[1]; + val[0] = tid < bordersCount ? 
borders[tid] : PositiveInfty(); + using TTempStorage = typename BlockRadixSort::TempStorage; + __shared__ TTempStorage temp; + BlockRadixSort(temp).Sort(val); + if (tid < bordersCount) { + borders[tid] = val[0]; + } + } + + void FastGpuBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { + FillBuffer(borders, 0.0f, bordersCount + 1, stream); + const ui32 blockSize = 1024; + const ui32 valuesPerBlock = 2 * blockSize; + const ui32 numBlocks = min(CeilDivide(size, valuesPerBlock), 15); + FastGpuBordersImpl<<>>(values, size, borders, bordersCount); + SortBordersImpl<<<1, 256, 0, stream>>>(borders + 1, bordersCount); + } + + __global__ void QuantileBordersImpl(const float* sortedValues, ui32 size, float* borders, ui32 bordersCount) { + const ui32 tid = threadIdx.x; + __shared__ float localBorders[256]; + + if (tid < bordersCount) { + const ui32 offset = static_cast((tid + 1.0) * size / (bordersCount + 1)); + localBorders[tid] = LdgWithFallback(sortedValues, offset); + } + __syncthreads(); + + if (tid <(bordersCount + 1)) { + borders[tid] = tid == 0 ? bordersCount : localBorders[tid - 1]; + } + } + + + __global__ void UniformBordersImpl(const float* values, ui32 size, float* borders, ui32 bordersCount) { + + const ui32 tid = threadIdx.x; + const int blockSize = 1024; + + __shared__ float localMin[blockSize]; + __shared__ float localMax[blockSize]; + + float minValue = PositiveInfty(); + float maxValue = NegativeInfty(); + + ui64 seed = (1664525 * threadIdx.x + 1013904223) & 0xFFFFFF; + + #pragma unroll 32 + for (int i = 0; i < 32; ++i) { + const int idx = static_cast(AdvanceSeed(&seed) % size); + float val = StreamLoad(values + idx); + minValue = val < minValue ? val : minValue; + maxValue = val > maxValue ? val : maxValue; + } + + localMin[tid] = minValue * 0.999; + localMax[tid] = maxValue * 1.001; + __syncthreads(); + + for (ui32 s = blockSize >> 1; s > 0; s >>= 1) { + if (tid < s) { + localMin[tid] = min(localMin[tid], localMin[tid + s]); + localMax[tid] = max(localMax[tid], localMax[tid + s]); + } + __syncthreads(); + } + minValue = localMin[0]; + maxValue = localMax[0]; + + if (tid < (bordersCount + 1)) { + const float borderIdx = tid * 1.0f / bordersCount; + //emulate ui8 rounding in cpu + const float val = (minValue + borderIdx * (maxValue - minValue)) * 0.9999; + borders[tid] = tid == 0 ? 
bordersCount : val; + } + } + + void ComputeQuantileBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { + QuantileBordersImpl<<< 1, 256, 0, stream >>> (values, size, borders, bordersCount); + } + + void ComputeUniformBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { + UniformBordersImpl<<< 1, 1024, 0, stream >>> (values, size, borders, bordersCount); + } + + void BinarizeFloatFeature(const float* values, ui32 docCount, + const float* borders, + TCFeature feature, + ui32* dst, + const ui32* gatherIndex, + bool atomicUpdate, + TCudaStream stream) { + + const ui32 blockSize = 1024; + const ui32 docsPerThread = 8; + const ui32 numBlocks = (docCount + docsPerThread * blockSize - 1) / (docsPerThread * blockSize); + + if (atomicUpdate) + { + BinarizeFloatFeatureImpl << < numBlocks, blockSize, 0, stream >> > (feature, values, docCount, + borders, gatherIndex, + dst); + } else { + BinarizeFloatFeatureImpl << < numBlocks, blockSize, 0, stream >> > (feature, values, docCount, + borders, gatherIndex, + dst); + } + } + + +} diff --git a/cuda_code/binarize_2.cu b/cuda_code/binarize_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..fe62c9070a0f07914c62e21efe58db88fba1de63 --- /dev/null +++ b/cuda_code/binarize_2.cu @@ -0,0 +1,240 @@ +#include "binarize.cuh" +#include +#include +#include +#include +#include + +namespace NKernel { + + + template + __launch_bounds__(BLOCK_SIZE, 2) + __global__ void BinarizeFloatFeatureImpl(TCFeature feature, const float* values, ui32 docCount, + const float* borders, + const ui32* gatherIndex, ui32* dst) { + + const ui32 i = (blockIdx.x * BLOCK_SIZE * DOCS_PER_THREAD + threadIdx.x); + + __shared__ float sharedBorders[256]; + sharedBorders[0] = borders[0]; + __syncthreads(); + const int bordersCount = static_cast(sharedBorders[0]); + __syncthreads(); + dst += feature.Offset * (ui64) docCount; + + if (threadIdx.x < bordersCount) { + sharedBorders[threadIdx.x] = LdgWithFallback(borders, threadIdx.x + 1); + } + __syncthreads(); + + ui32 index[DOCS_PER_THREAD]; + float featureValues[DOCS_PER_THREAD]; + + #pragma unroll + for (int j = 0; j < DOCS_PER_THREAD; ++j) { + index[j] = 0; + const int idx = i + j * BLOCK_SIZE; + + if (idx < docCount) { + const ui32 readIdx = gatherIndex ? 
StreamLoad(gatherIndex + idx) : idx; + featureValues[j] = StreamLoad(values + readIdx); + } + } + + #pragma unroll + for (int border = 0; border < bordersCount; ++border) + { + const float borderValue = sharedBorders[border]; + #pragma unroll + for (int j = 0; j < DOCS_PER_THREAD; ++j) + { + if (featureValues[j] > borderValue) + { + ++index[j]; + } + } + } + + + #pragma unroll + for (int j = 0; j < DOCS_PER_THREAD; ++j) + { + const int idx = i + j * BLOCK_SIZE; + + if (idx < docCount) + { + + if (ATOMIC_UPDATE) + { + atomicOr(dst + idx, (index[j] & feature.Mask) << feature.Shift); + } else + { + ui32 bin = dst[idx]; + bin |= (index[j] & feature.Mask) << feature.Shift; + dst[idx] = bin; + } + } + } + } + + //smth like bootstrap for quantiles estimation + template + __global__ void FastGpuBordersImpl(const float* values, ui32 size, float* borders, ui32 bordersCount) { + + const int valuesPerThread = 2; + using BlockRadixSort = cub::BlockRadixSort; + const int tid = threadIdx.x; + float vals[valuesPerThread]; + + if (tid == 0 && blockIdx.x == 0) { + borders[0] = bordersCount; + } + + ui64 seed = (blockIdx.x * 6364136223846793005 + 1442695040888963407) + (1664525 * threadIdx.x + 1013904223) & 0xFFFFFF; + + for (int i = 0; i < valuesPerThread; ++i) { + const int idx = static_cast(AdvanceSeed(&seed) % size); + vals[i] = StreamLoad(values + idx); + } + + { + using TTempStorage = typename BlockRadixSort::TempStorage; + __shared__ TTempStorage temp; + BlockRadixSort(temp).Sort(vals); + } + + float sum = 0; + float weight = 0; + for (int i = 0; i < valuesPerThread; ++i) { + sum += vals[i]; + weight += 1.0f; + } + + __shared__ float localBorders[BLOCK_SIZE]; + localBorders[tid] = sum / weight; + __syncthreads(); + + if (tid < bordersCount) { + const ui32 offset = static_cast((tid + 1.0f) * BLOCK_SIZE / bordersCount - 1e-5f); + atomicAdd(borders + tid + 1, (localBorders[offset]) * 0.9999 / gridDim.x); + } + } + + __global__ void SortBordersImpl(float* borders, ui32 bordersCount) + { + + using BlockRadixSort = cub::BlockRadixSort; + ui32 tid = threadIdx.x; + float val[1]; + val[0] = tid < bordersCount ? borders[tid] : PositiveInfty(); + using TTempStorage = typename BlockRadixSort::TempStorage; + __shared__ TTempStorage temp; + BlockRadixSort(temp).Sort(val); + if (tid < bordersCount) { + borders[tid] = val[0]; + } + } + + void FastGpuBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { + FillBuffer(borders, 0.0f, bordersCount + 1, stream); + const ui32 blockSize = 1024; + const ui32 valuesPerBlock = 2 * blockSize; + const ui32 numBlocks = min(CeilDivide(size, valuesPerBlock), 15); + FastGpuBordersImpl<<>>(values, size, borders, bordersCount); + SortBordersImpl<<<1, 256, 0, stream>>>(borders + 1, bordersCount); + } + + __global__ void QuantileBordersImpl(const float* sortedValues, ui32 size, float* borders, ui32 bordersCount) { + const ui32 tid = threadIdx.x; + __shared__ float localBorders[256]; + + if (tid < bordersCount) { + const ui32 offset = static_cast((tid + 1.0) * size / (bordersCount + 1)); + localBorders[tid] = LdgWithFallback(sortedValues, offset); + } + __syncthreads(); + + if (tid <(bordersCount + 1)) { + borders[tid] = tid == 0 ? 
bordersCount : localBorders[tid - 1]; + } + } + + + __global__ void UniformBordersImpl(const float* values, ui32 size, float* borders, ui32 bordersCount) { + + const ui32 tid = threadIdx.x; + const int blockSize = 1024; + + __shared__ float localMin[blockSize]; + __shared__ float localMax[blockSize]; + + float minValue = PositiveInfty(); + float maxValue = NegativeInfty(); + + ui64 seed = (1664525 * threadIdx.x + 1013904223) & 0xFFFFFF; + + #pragma unroll 32 + for (int i = 0; i < 32; ++i) { + const int idx = static_cast(AdvanceSeed(&seed) % size); + float val = StreamLoad(values + idx); + minValue = val < minValue ? val : minValue; + maxValue = val > maxValue ? val : maxValue; + } + + localMin[tid] = minValue * 0.999; + localMax[tid] = maxValue * 1.001; + __syncthreads(); + + for (ui32 s = blockSize >> 1; s > 0; s >>= 1) { + if (tid < s) { + localMin[tid] = min(localMin[tid], localMin[tid + s]); + localMax[tid] = max(localMax[tid], localMax[tid + s]); + } + __syncthreads(); + } + minValue = localMin[0]; + maxValue = localMax[0]; + + if (tid < (bordersCount + 1)) { + const float borderIdx = tid * 1.0f / bordersCount; + //emulate ui8 rounding in cpu + const float val = (minValue + borderIdx * (maxValue - minValue)) * 0.9999; + borders[tid] = tid == 0 ? bordersCount : val; + } + } + + void ComputeQuantileBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { + QuantileBordersImpl<<< 1, 256, 0, stream >>> (values, size, borders, bordersCount); + } + + void ComputeUniformBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { + UniformBordersImpl<<< 1, 1024, 0, stream >>> (values, size, borders, bordersCount); + } + + void BinarizeFloatFeature(const float* values, ui32 docCount, + const float* borders, + TCFeature feature, + ui32* dst, + const ui32* gatherIndex, + bool atomicUpdate, + TCudaStream stream) { + + const ui32 blockSize = 1024; + const ui32 docsPerThread = 8; + const ui32 numBlocks = (docCount + docsPerThread * blockSize - 1) / (docsPerThread * blockSize); + + if (atomicUpdate) + { + BinarizeFloatFeatureImpl << < numBlocks, blockSize, 0, stream >> > (feature, values, docCount, + borders, gatherIndex, + dst); + } else { + BinarizeFloatFeatureImpl << < numBlocks, blockSize, 0, stream >> > (feature, values, docCount, + borders, gatherIndex, + dst); + } + } + + +} diff --git a/cuda_code/binary_elementwise_ops_impl_12.cu b/cuda_code/binary_elementwise_ops_impl_12.cu new file mode 100644 index 0000000000000000000000000000000000000000..9d10d9bb76acb64db163f51a9562d14d121dbb9d --- /dev/null +++ b/cuda_code/binary_elementwise_ops_impl_12.cu @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
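+// This translation unit instantiates the templated BinaryElementWiseImpl launcher once per
+// (operator, element type) pair via the SPECIALIZED_* macros below; for example,
+// SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(PRelu) emits explicit instantiations of Impl_PRelu
+// for half, float and double.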
+ +#include +#include "binary_elementwise_ops_impl.h" +#include "core/providers/cuda/cu_inc/common.cuh" +#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh" +#include "core/providers/cuda/math/binary_elementwise_ops_impl_functors.cuh" + +namespace onnxruntime { +namespace cuda { + +#define BINARY_ELEMENTWISE_IMPL(name) \ + BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \ + BinaryElementWiseImpl(output_rank_or_simple_broadcast, \ + lhs_padded_strides, \ + lhs_data, \ + rhs_padded_strides, \ + rhs_data, \ + fdm_output_strides, \ + fdm_H, \ + fdm_C, \ + output_data, \ + OP_##name(), \ + count); \ + } + +#define BINARY_ELEMENTWISE_IMPL_T1(name) \ + BINARY_ELEMENTWISE_IMPL_DECLARATION_T1(name) { \ + BinaryElementWiseImpl(output_rank_or_simple_broadcast, \ + lhs_padded_strides, \ + lhs_data, \ + rhs_padded_strides, \ + rhs_data, \ + fdm_output_strides, \ + fdm_H, \ + fdm_C, \ + output_data, \ + OP_##name(), \ + count); \ + } + +#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, T) \ + template void Impl_##x(int32_t output_rank, \ + const TArray* lhs_padded_strides, const T* lhs_data, \ + const TArray* rhs_padded_strides, const T* rhs_data, \ + const TArray* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count); + +#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, T1) \ + template void ImplT1_##x(int32_t output_rank, \ + const TArray* lhs_padded_strides, const T* lhs_data, \ + const TArray* rhs_padded_strides, const T1* rhs_data, \ + const TArray* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count); + +#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(x) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint32_t) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint64_t) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double) + +#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_OIL(x) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, bool) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) + +#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \ + SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double) + +// create declarations for impl +#define BINARY_OP_NAME_EXPR(name, expr) \ + BINARY_ELEMENTWISE_IMPL(name) + +BINARY_OPS() +#undef BINARY_OP_NAME_EXPR + +// create specialized impl +// the postfix of means the types supported by the op: +// B: uint8_t +// W: uint16_t +// U: uint32_t +// Z: uint64_t +// C: int8_t +// S: int16_t +// I: int32_t +// L: int64_t +// H: float16 +// F: float +// D: double +// O: bool + +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Add) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Add, bool) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Sub) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Mul) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Div) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(Pow_7) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL(And, bool) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Or, bool) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Xor, bool) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(PRelu) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Greater) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_OIL(Equal) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Max) 
+SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Min) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Less) + +// create declarations for impl for Pow +BINARY_ELEMENTWISE_IMPL_T1(Pow) + +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, int32_t, int32_t) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, int32_t, int64_t) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, int32_t, float) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, int32_t, double) + +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, int64_t, int32_t) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, int64_t, int64_t) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, int64_t, float) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, int64_t, double) + +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, float, int32_t) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, float, int64_t) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, float, float) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, float, double) + +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, double, int32_t) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, double, int64_t) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, double, float) +SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(Pow, double, double) + +} // namespace cuda +} // namespace onnxruntime diff --git a/cuda_code/bisect_util_2.cu b/cuda_code/bisect_util_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..a3b5d443c7fd81688be3a3125c5d7cfd5754268d --- /dev/null +++ b/cuda_code/bisect_util_2.cu @@ -0,0 +1,632 @@ +/* + * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. + * + * Please refer to the NVIDIA end user license agreement (EULA) associated + * with this source code for terms and conditions that govern your use of + * this software. Any use, reproduction, disclosure, or distribution of + * this software and related documentation outside the terms of the EULA + * is strictly prohibited. + * + */ + +/* Utility / shared functionality for bisection kernels */ + +#ifndef _BISECT_UTIL_H_ +#define _BISECT_UTIL_H_ + +#include + +namespace cg = cooperative_groups; + +// includes, project +#include "config.h" +#include "util.h" + +//////////////////////////////////////////////////////////////////////////////// +//! Compute the next lower power of two of n +//! @param n number for which next higher power of two is sought +//////////////////////////////////////////////////////////////////////////////// +__device__ +inline int +floorPow2(int n) +{ + + // early out if already power of two + if (0 == (n & (n-1))) + { + return n; + } + + int exp; + frexp((float)n, &exp); + return (1 << (exp - 1)); +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute the next higher power of two of n +//! @param n number for which next higher power of two is sought +//////////////////////////////////////////////////////////////////////////////// +__device__ +inline int +ceilPow2(int n) +{ + + // early out if already power of two + if (0 == (n & (n-1))) + { + return n; + } + + int exp; + frexp((float)n, &exp); + return (1 << exp); +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute midpoint of interval [\a left, \a right] avoiding overflow if +//! possible +//! @param left left / lower limit of interval +//! 
@param right right / upper limit of interval +//////////////////////////////////////////////////////////////////////////////// +__device__ +inline float +computeMidpoint(const float left, const float right) +{ + + float mid; + + if (sign_f(left) == sign_f(right)) + { + mid = left + (right - left) * 0.5f; + } + else + { + mid = (left + right) * 0.5f; + } + + return mid; +} + +//////////////////////////////////////////////////////////////////////////////// +//! Check if interval converged and store appropriately +//! @param addr address where to store the information of the interval +//! @param s_left shared memory storage for left interval limits +//! @param s_right shared memory storage for right interval limits +//! @param s_left_count shared memory storage for number of eigenvalues less +//! than left interval limits +//! @param s_right_count shared memory storage for number of eigenvalues less +//! than right interval limits +//! @param left lower limit of interval +//! @param right upper limit of interval +//! @param left_count eigenvalues less than \a left +//! @param right_count eigenvalues less than \a right +//! @param precision desired precision for eigenvalues +//////////////////////////////////////////////////////////////////////////////// +template +__device__ +void +storeInterval(unsigned int addr, + float *s_left, float *s_right, + T *s_left_count, T *s_right_count, + float left, float right, + S left_count, S right_count, + float precision) +{ + s_left_count[addr] = left_count; + s_right_count[addr] = right_count; + + // check if interval converged + float t0 = abs(right - left); + float t1 = max(abs(left), abs(right)) * precision; + + if (t0 <= max(MIN_ABS_INTERVAL, t1)) + { + + // compute mid point + float lambda = computeMidpoint(left, right); + + // mark as converged + s_left[addr] = lambda; + s_right[addr] = lambda; + } + else + { + + // store current limits + s_left[addr] = left; + s_right[addr] = right; + } +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute number of eigenvalues that are smaller than x given a symmetric, +//! real, and tridiagonal matrix +//! @param g_d diagonal elements stored in global memory +//! @param g_s superdiagonal elements stored in global memory +//! @param n size of matrix +//! @param x value for which the number of eigenvalues that are smaller is +//! seeked +//! @param tid thread identified (e.g. threadIdx.x or gtid) +//! @param num_intervals_active number of active intervals / threads that +//! currently process an interval +//! @param s_d scratch space to store diagonal entries of the tridiagonal +//! matrix in shared memory +//! @param s_s scratch space to store superdiagonal entries of the tridiagonal +//! matrix in shared memory +//! @param converged flag if the current thread is already converged (that +//! 
is count does not have to be computed) +//////////////////////////////////////////////////////////////////////////////// +__device__ +inline unsigned int +computeNumSmallerEigenvals(float *g_d, float *g_s, const unsigned int n, + const float x, + const unsigned int tid, + const unsigned int num_intervals_active, + float *s_d, float *s_s, + unsigned int converged, + cg::thread_block cta + ) +{ + + float delta = 1.0f; + unsigned int count = 0; + + cg::sync(cta); + + // read data into shared memory + if (threadIdx.x < n) + { + s_d[threadIdx.x] = *(g_d + threadIdx.x); + s_s[threadIdx.x] = *(g_s + threadIdx.x - 1); + } + + cg::sync(cta); + + // perform loop only for active threads + if ((tid < num_intervals_active) && (0 == converged)) + { + + // perform (optimized) Gaussian elimination to determine the number + // of eigenvalues that are smaller than n + for (unsigned int k = 0; k < n; ++k) + { + delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta; + count += (delta < 0) ? 1 : 0; + } + + } // end if thread currently processing an interval + + return count; +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute number of eigenvalues that are smaller than x given a symmetric, +//! real, and tridiagonal matrix +//! @param g_d diagonal elements stored in global memory +//! @param g_s superdiagonal elements stored in global memory +//! @param n size of matrix +//! @param x value for which the number of eigenvalues that are smaller is +//! seeked +//! @param tid thread identified (e.g. threadIdx.x or gtid) +//! @param num_intervals_active number of active intervals / threads that +//! currently process an interval +//! @param s_d scratch space to store diagonal entries of the tridiagonal +//! matrix in shared memory +//! @param s_s scratch space to store superdiagonal entries of the tridiagonal +//! matrix in shared memory +//! @param converged flag if the current thread is already converged (that +//! is count does not have to be computed) +//////////////////////////////////////////////////////////////////////////////// +__device__ +inline unsigned int +computeNumSmallerEigenvalsLarge(float *g_d, float *g_s, const unsigned int n, + const float x, + const unsigned int tid, + const unsigned int num_intervals_active, + float *s_d, float *s_s, + unsigned int converged, + cg::thread_block cta + ) +{ + float delta = 1.0f; + unsigned int count = 0; + + unsigned int rem = n; + + // do until whole diagonal and superdiagonal has been loaded and processed + for (unsigned int i = 0; i < n; i += blockDim.x) + { + + cg::sync(cta); + + // read new chunk of data into shared memory + if ((i + threadIdx.x) < n) + { + + s_d[threadIdx.x] = *(g_d + i + threadIdx.x); + s_s[threadIdx.x] = *(g_s + i + threadIdx.x - 1); + } + + cg::sync(cta); + + + if (tid < num_intervals_active) + { + + // perform (optimized) Gaussian elimination to determine the number + // of eigenvalues that are smaller than n + for (unsigned int k = 0; k < min(rem,blockDim.x); ++k) + { + delta = s_d[k] - x - (s_s[k] * s_s[k]) / delta; + // delta = (abs( delta) < (1.0e-10)) ? -(1.0e-10) : delta; + count += (delta < 0) ? 1 : 0; + } + + } // end if thread currently processing an interval + + rem -= blockDim.x; + } + + return count; +} + +//////////////////////////////////////////////////////////////////////////////// +//! Store all non-empty intervals resulting from the subdivision of the interval +//! currently processed by the thread +//! @param addr base address for storing intervals +//! 
@param num_threads_active number of threads / intervals in current sweep +//! @param s_left shared memory storage for left interval limits +//! @param s_right shared memory storage for right interval limits +//! @param s_left_count shared memory storage for number of eigenvalues less +//! than left interval limits +//! @param s_right_count shared memory storage for number of eigenvalues less +//! than right interval limits +//! @param left lower limit of interval +//! @param mid midpoint of interval +//! @param right upper limit of interval +//! @param left_count eigenvalues less than \a left +//! @param mid_count eigenvalues less than \a mid +//! @param right_count eigenvalues less than \a right +//! @param precision desired precision for eigenvalues +//! @param compact_second_chunk shared mem flag if second chunk is used and +//! ergo requires compaction +//! @param s_compaction_list_exc helper array for stream compaction, +//! s_compaction_list_exc[tid] = 1 when the +//! thread generated two child intervals +//! @is_active_interval mark is thread has a second non-empty child interval +//////////////////////////////////////////////////////////////////////////////// +template +__device__ +void +storeNonEmptyIntervals(unsigned int addr, + const unsigned int num_threads_active, + float *s_left, float *s_right, + T *s_left_count, T *s_right_count, + float left, float mid, float right, + const S left_count, + const S mid_count, + const S right_count, + float precision, + unsigned int &compact_second_chunk, + T *s_compaction_list_exc, + unsigned int &is_active_second) +{ + // check if both child intervals are valid + if ((left_count != mid_count) && (mid_count != right_count)) + { + + // store the left interval + storeInterval(addr, s_left, s_right, s_left_count, s_right_count, + left, mid, left_count, mid_count, precision); + + // mark that a second interval has been generated, only stored after + // stream compaction of second chunk + is_active_second = 1; + s_compaction_list_exc[threadIdx.x] = 1; + atomicExch(&compact_second_chunk, 1); + } + else + { + + // only one non-empty child interval + + // mark that no second child + is_active_second = 0; + s_compaction_list_exc[threadIdx.x] = 0; + + // store the one valid child interval + if (left_count != mid_count) + { + storeInterval(addr, s_left, s_right, s_left_count, s_right_count, + left, mid, left_count, mid_count, precision); + } + else + { + storeInterval(addr, s_left, s_right, s_left_count, s_right_count, + mid, right, mid_count, right_count, precision); + } + + } +} +//////////////////////////////////////////////////////////////////////////////// +//! Create indices for compaction, that is process \a s_compaction_list_exc +//! which is 1 for intervals that generated a second child and 0 otherwise +//! and create for each of the non-zero elements the index where the new +//! interval belongs to in a compact representation of all generated second +//! childs +//! @param s_compaction_list_exc list containing the flags which threads +//! generated two children +//! 
@param num_threads_compaction number of threads to employ for compaction +//////////////////////////////////////////////////////////////////////////////// +template +__device__ +void +createIndicesCompaction(T *s_compaction_list_exc, + unsigned int num_threads_compaction, cg::thread_block cta) +{ + + unsigned int offset = 1; + const unsigned int tid = threadIdx.x; + + // higher levels of scan tree + for (int d = (num_threads_compaction >> 1); d > 0; d >>= 1) + { + + cg::sync(cta); + + if (tid < d) + { + + unsigned int ai = offset*(2*tid+1)-1; + unsigned int bi = offset*(2*tid+2)-1; + + s_compaction_list_exc[bi] = s_compaction_list_exc[bi] + + s_compaction_list_exc[ai]; + } + + offset <<= 1; + } + + // traverse down tree: first down to level 2 across + for (int d = 2; d < num_threads_compaction; d <<= 1) + { + + offset >>= 1; + cg::sync(cta); + + if (tid < (d-1)) + { + + unsigned int ai = offset*(tid+1) - 1; + unsigned int bi = ai + (offset >> 1); + + s_compaction_list_exc[bi] = s_compaction_list_exc[bi] + + s_compaction_list_exc[ai]; + } + } + + cg::sync(cta); + +} + +/////////////////////////////////////////////////////////////////////////////// +//! Perform stream compaction for second child intervals +//! @param s_left shared +//! @param s_left shared memory storage for left interval limits +//! @param s_right shared memory storage for right interval limits +//! @param s_left_count shared memory storage for number of eigenvalues less +//! than left interval limits +//! @param s_right_count shared memory storage for number of eigenvalues less +//! than right interval limits +//! @param mid midpoint of current interval (left of new interval) +//! @param right upper limit of interval +//! @param mid_count eigenvalues less than \a mid +//! @param s_compaction_list list containing the indices where the data has +//! to be stored +//! @param num_threads_active number of active threads / intervals +//! @is_active_interval mark is thread has a second non-empty child interval +/////////////////////////////////////////////////////////////////////////////// +template +__device__ +void +compactIntervals(float *s_left, float *s_right, + T *s_left_count, T *s_right_count, + float mid, float right, + unsigned int mid_count, unsigned int right_count, + T *s_compaction_list, + unsigned int num_threads_active, + unsigned int is_active_second) +{ + const unsigned int tid = threadIdx.x; + + // perform compaction / copy data for all threads where the second + // child is not dead + if ((tid < num_threads_active) && (1 == is_active_second)) + { + unsigned int addr_w = num_threads_active + s_compaction_list[tid]; + + s_left[addr_w] = mid; + s_right[addr_w] = right; + s_left_count[addr_w] = mid_count; + s_right_count[addr_w] = right_count; + } +} + +/////////////////////////////////////////////////////////////////////////////// +//! Store intervals that have already converged (w.r.t. the desired precision), +//! duplicating intervals that contain multiple eigenvalues +//! @param s_left shared memory storage for left interval limits +//! @param s_right shared memory storage for right interval limits +//! @param s_left_count shared memory storage for number of eigenvalues less +//! than left interval limits +//! @param s_right_count shared memory storage for number of eigenvalues less +//! than right interval limits +//! @param left lower limit of interval +//! @param mid midpoint of interval (updated if split is necessary) +//! @param right upper limit of interval +//! 
@param left_count eigenvalues less than \a left +//! @param mid_count eigenvalues less than \a mid +//! @param right_count eigenvalues less than \a right +//! @param s_compaction_list_exc helper array for stream compaction, updated +//! at tid if split is necessary +//! @param compact_second_chunk shared mem flag if second chunk is used and +//! ergo requires compaction +//! @param num_threads_active number of active threads / intervals +/////////////////////////////////////////////////////////////////////////////// +template +__device__ +void +storeIntervalConverged(float *s_left, float *s_right, + T *s_left_count, T *s_right_count, + float &left, float &mid, float &right, + S &left_count, S &mid_count, S &right_count, + T *s_compaction_list_exc, + unsigned int &compact_second_chunk, + const unsigned int num_threads_active) +{ + const unsigned int tid = threadIdx.x; + const unsigned int multiplicity = right_count - left_count; + + // check multiplicity of eigenvalue + if (1 == multiplicity) + { + + // just re-store intervals, simple eigenvalue + s_left[tid] = left; + s_right[tid] = right; + s_left_count[tid] = left_count; + s_right_count[tid] = right_count; + + // mark that no second child / clear + s_right_count[tid + num_threads_active] = 0; + s_compaction_list_exc[tid] = 0; + } + else + { + + // number of eigenvalues after the split less than mid + mid_count = left_count + (multiplicity >> 1); + + // store left interval + s_left[tid] = left; + s_right[tid] = right; + s_left_count[tid] = left_count; + s_right_count[tid] = mid_count; + + mid = left; + + // mark that second child interval exists + s_right_count[tid + num_threads_active] = right_count; + s_compaction_list_exc[tid] = 1; + compact_second_chunk = 1; + } +} + +template +__device__ +void +storeIntervalConverged(float *s_left, float *s_right, + T *s_left_count, T *s_right_count, + float &left, float &mid, float &right, + S &left_count, S &mid_count, S &right_count, + T *s_compaction_list_exc, + unsigned int &compact_second_chunk, + const unsigned int num_threads_active, + unsigned int &is_active_second) +{ + const unsigned int tid = threadIdx.x; + const unsigned int multiplicity = right_count - left_count; + + // check multiplicity of eigenvalue + if (1 == multiplicity) + { + + // just re-store intervals, simple eigenvalue + s_left[tid] = left; + s_right[tid] = right; + s_left_count[tid] = left_count; + s_right_count[tid] = right_count; + + // mark that no second child / clear + is_active_second = 0; + s_compaction_list_exc[tid] = 0; + } + else + { + + // number of eigenvalues after the split less than mid + mid_count = left_count + (multiplicity >> 1); + + // store left interval + s_left[tid] = left; + s_right[tid] = right; + s_left_count[tid] = left_count; + s_right_count[tid] = mid_count; + + mid = left; + + // mark that second child interval exists + is_active_second = 1; + s_compaction_list_exc[tid] = 1; + compact_second_chunk = 1; + } +} + +/////////////////////////////////////////////////////////////////////////////// +//! Subdivide interval if active and not already converged +//! @param tid id of thread +//! @param s_left shared memory storage for left interval limits +//! @param s_right shared memory storage for right interval limits +//! @param s_left_count shared memory storage for number of eigenvalues less +//! than left interval limits +//! @param s_right_count shared memory storage for number of eigenvalues less +//! than right interval limits +//! @param num_threads_active number of active threads in warp +//! 
@param left lower limit of interval +//! @param right upper limit of interval +//! @param left_count eigenvalues less than \a left +//! @param right_count eigenvalues less than \a right +//! @param all_threads_converged shared memory flag if all threads are +//! converged +/////////////////////////////////////////////////////////////////////////////// +template +__device__ +void +subdivideActiveInterval(const unsigned int tid, + float *s_left, float *s_right, + T *s_left_count, T *s_right_count, + const unsigned int num_threads_active, + float &left, float &right, + unsigned int &left_count, unsigned int &right_count, + float &mid, unsigned int &all_threads_converged) +{ + // for all active threads + if (tid < num_threads_active) + { + + left = s_left[tid]; + right = s_right[tid]; + left_count = s_left_count[tid]; + right_count = s_right_count[tid]; + + // check if thread already converged + if (left != right) + { + + mid = computeMidpoint(left, right); + atomicExch(&all_threads_converged, 0); + } + else if ((right_count - left_count) > 1) + { + // mark as not converged if multiple eigenvalues enclosed + // duplicate interval in storeIntervalsConverged() + atomicExch(&all_threads_converged, 0); + } + + } // end for all active threads +} + + +#endif // #ifndef _BISECT_UTIL_H_ + + diff --git a/cuda_code/bitcoin.cu b/cuda_code/bitcoin.cu new file mode 100644 index 0000000000000000000000000000000000000000..4eabd83b3f79d725956ffd2c3f688bcf97ee5fc8 --- /dev/null +++ b/cuda_code/bitcoin.cu @@ -0,0 +1,191 @@ +#include "miner.h" +#include "cuda_helper.h" + +static uint32_t *h_nounce[MAX_GPUS]; + +extern void bitcoin_cpu_init(int thr_id); +extern void bitcoin_cpu_hash(int thr_id, uint32_t threads, uint32_t startNounce, const uint32_t *const ms, uint32_t merkle, uint32_t time, uint32_t compacttarget, uint32_t *const h_nounce); +extern void bitcoin_midstate(const uint32_t *data, uint32_t *midstate); + +uint32_t rrot(uint32_t x, unsigned int n) +{ + return (x >> n) | (x << (32 - n)); +} + +void bitcoin_hash(uint32_t *output, const uint32_t *data, uint32_t nonce, const uint32_t *midstate) +{ + int i; + uint32_t s0, s1, t1, t2, maj, ch, a, b, c, d, e, f, g, h; + uint32_t w[64]; + + const uint32_t k[64] = { + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 + }; + const uint32_t hc[8] = { + 0x6a09e667U, 0xbb67ae85U, 0x3c6ef372U, 0xa54ff53aU, + 0x510e527fU, 0x9b05688cU, 0x1f83d9abU, 0x5be0cd19U + }; + + for (i = 0; i <= 15; i++) + { + w[i] = data[i + 16]; + } + w[3] = nonce; + for (i = 16; i <= 63; i++) + { + s0 = rrot(w[i - 15], 7) ^ rrot(w[i - 15], 18) ^ (w[i - 15] >> 3); + s1 = rrot(w[i - 2], 17) ^ rrot(w[i - 2], 19) ^ (w[i - 2] >> 10); + w[i] = w[i - 16] + s0 + w[i - 7] + s1; + } + a = midstate[0]; + b = midstate[1]; + c = midstate[2]; + d = midstate[3]; + e = midstate[4]; + f = 
midstate[5]; + g = midstate[6]; + h = midstate[7]; + for (i = 0; i <= 63; i++) + { + s0 = rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22); + maj = (a & b) ^ (a & c) ^ (b & c); + t2 = s0 + maj; + s1 = rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25); + ch = (e & f) ^ ((~e) & g); + t1 = h + s1 + ch + k[i] + w[i]; + h = g; + g = f; + f = e; + e = d + t1; + d = c; + c = b; + b = a; + a = t1 + t2; + } + w[0] = a + midstate[0]; + w[1] = b + midstate[1]; + w[2] = c + midstate[2]; + w[3] = d + midstate[3]; + w[4] = e + midstate[4]; + w[5] = f + midstate[5]; + w[6] = g + midstate[6]; + w[7] = h + midstate[7]; + w[8] = 0x80000000U; + for (i = 9; i <= 14; i++) + w[i] = 0U; + w[15] = 0x100U; + for (i = 16; i <= 63; i++) + { + s0 = rrot(w[i - 15], 7) ^ rrot(w[i - 15], 18) ^ (w[i - 15] >> 3); + s1 = rrot(w[i - 2], 17) ^ rrot(w[i - 2], 19) ^ (w[i - 2] >> 10); + w[i] = w[i - 16] + s0 + w[i - 7] + s1; + } + a = hc[0]; + b = hc[1]; + c = hc[2]; + d = hc[3]; + e = hc[4]; + f = hc[5]; + g = hc[6]; + h = hc[7]; + for (i = 0; i <= 63; i++) + { + s0 = rrot(a, 2) ^ rrot(a, 13) ^ rrot(a, 22); + maj = (a & b) ^ (a & c) ^ (b & c); + t2 = s0 + maj; + s1 = rrot(e, 6) ^ rrot(e, 11) ^ rrot(e, 25); + ch = (e & f) ^ ((~e) & g); + t1 = h + s1 + ch + k[i] + w[i]; + h = g; + g = f; + f = e; + e = d + t1; + d = c; + c = b; + b = a; + a = t1 + t2; + } + be32enc(&output[0], a + hc[0]); + be32enc(&output[1], b + hc[1]); + be32enc(&output[2], c + hc[2]); + be32enc(&output[3], d + hc[3]); + be32enc(&output[4], e + hc[4]); + be32enc(&output[5], f + hc[5]); + be32enc(&output[6], g + hc[6]); + be32enc(&output[7], h + hc[7]); +} + +static bool init[MAX_GPUS] = { 0 }; + +int scanhash_bitcoin(int thr_id, uint32_t *pdata, + const uint32_t *ptarget, uint32_t max_nonce, + unsigned long *hashes_done) +{ + const uint32_t first_nonce = pdata[19]; + uint32_t throughput = device_intensity(thr_id, __func__, 1U << 28); + throughput = min(throughput, (max_nonce - first_nonce)); + + if (opt_benchmark) + ((uint32_t*)ptarget)[7] = 0x0005; + + if (!init[thr_id]) + { + cudaSetDevice(device_map[thr_id]); + cudaDeviceReset(); + cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); + cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); + + bitcoin_cpu_init(thr_id); + CUDA_SAFE_CALL(cudaMallocHost(&h_nounce[thr_id], 2 * sizeof(uint32_t))); + init[thr_id] = true; + } + + uint32_t ms[8]; + bitcoin_midstate(pdata, ms); + + do + { + bitcoin_cpu_hash(thr_id, (int)throughput, pdata[19], ms, pdata[16], pdata[17], pdata[18], h_nounce[thr_id]); + if (h_nounce[thr_id][0] != UINT32_MAX) + { + uint32_t vhash64[8]; + bitcoin_hash(vhash64, pdata, h_nounce[thr_id][0], ms); + if (vhash64[7] == 0 && fulltest(vhash64, ptarget)) + { + int res = 1; + // check if there was some other ones... 
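+				// bitcoin_cpu_hash() can return up to two candidate nonces in the
+				// pinned buffer h_nounce[thr_id][0..1]; when the second slot is not
+				// 0xffffffff it is reported back through pdata[21] and res is
+				// incremented so the caller can submit both shares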
+ *hashes_done = pdata[19] - first_nonce + throughput; + if (h_nounce[thr_id][1] != 0xffffffff) + { + pdata[21] = h_nounce[thr_id][1]; + res++; + if (opt_benchmark) + applog(LOG_INFO, "GPU #%d Found second nounce %08x", thr_id, h_nounce[thr_id][1]); + } + pdata[19] = h_nounce[thr_id][0]; + if (opt_benchmark) + applog(LOG_INFO, "GPU #%d Found nounce %08x", thr_id, h_nounce[thr_id][0]); + return res; + } + else + { + if (vhash64[7] > 0) + { + applog(LOG_INFO, "GPU #%d: result for %08x does not validate on CPU!", thr_id, h_nounce[thr_id][0]); + } + } + } + + pdata[19] += throughput; + } while (!work_restart[thr_id].restart && ((uint64_t)max_nonce > ((uint64_t)(pdata[19]) + (uint64_t)throughput))); + + *hashes_done = pdata[19] - first_nonce; + return 0; +} diff --git a/cuda_code/bmw_7.cu b/cuda_code/bmw_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..2bef9caab6f8e3cb950b87fa62b055bca7137f68 --- /dev/null +++ b/cuda_code/bmw_7.cu @@ -0,0 +1,120 @@ +/** + * bmw-256 MDT + * tpruvot - 2015 + */ +extern "C" { +#include "sph/sph_bmw.h" +} + +#include +#include + +static uint32_t *d_hash[MAX_GPUS]; + +extern void bmw256_midstate_init(int thr_id, uint32_t threads); +extern void bmw256_midstate_free(int thr_id); +extern void bmw256_setBlock_80(int thr_id, void *pdata); +extern void bmw256_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_outputHash, int swap); + +extern uint32_t cuda_check_hash_32(int thr_id, uint32_t threads, uint32_t startNounce, uint32_t *d_inputHash); + +// CPU Hash +extern "C" void bmw_hash(void *state, const void *input) +{ + uint32_t _ALIGN(64) hash[16]; + sph_bmw256_context ctx; + + sph_bmw256_init(&ctx); + sph_bmw256(&ctx, input, 80); + sph_bmw256_close(&ctx, (void*) hash); + + memcpy(state, hash, 32); +} + +static bool init[MAX_GPUS] = { 0 }; + +static __inline uint32_t swab32_if(uint32_t val, bool iftrue) { + return iftrue ? 
swab32(val) : val; +} + +extern "C" int scanhash_bmw(int thr_id, struct work* work, uint32_t max_nonce, unsigned long *hashes_done) +{ + uint32_t _ALIGN(64) endiandata[20]; + uint32_t *pdata = work->data; + uint32_t *ptarget = work->target; + const uint32_t first_nonce = pdata[19]; + bool swapnonce = true; + uint32_t throughput = cuda_default_throughput(thr_id, 1U << 21); + if (init[thr_id]) throughput = min(throughput, max_nonce - first_nonce); + + if (opt_benchmark) + ptarget[7] = 0x0005; + + if (!init[thr_id]) { + cudaSetDevice(device_map[thr_id]); + + cuda_check_cpu_init(thr_id, throughput); + bmw256_midstate_init(thr_id, throughput); + + CUDA_SAFE_CALL(cudaMalloc(&d_hash[thr_id], (size_t)32 * throughput)); + + init[thr_id] = true; + } + + for (int k=0; k < 20; k++) { + be32enc(&endiandata[k], ((uint32_t*)pdata)[k]); + } + + cudaGetLastError(); + bmw256_setBlock_80(thr_id, (void*)endiandata); + + cuda_check_cpu_setTarget(ptarget); + + do { + bmw256_cpu_hash_80(thr_id, (int) throughput, pdata[19], d_hash[thr_id], (int) swapnonce); + uint32_t foundNonce = cuda_check_hash_32(thr_id, throughput, pdata[19], d_hash[thr_id]); + if (foundNonce != UINT32_MAX) + { + uint32_t _ALIGN(64) vhash64[8]; + endiandata[19] = swab32_if(foundNonce, swapnonce); + bmw_hash(vhash64, endiandata); + + if (vhash64[7] <= ptarget[7] && fulltest(vhash64, ptarget)) { + *hashes_done = foundNonce - first_nonce + 1; + pdata[19] = swab32_if(foundNonce,!swapnonce); + work_set_target_ratio(work, vhash64); + return 1; + } + else { + gpulog(LOG_WARNING, thr_id, "result for %08x does not validate on CPU!", foundNonce); + } + } + + if ((uint64_t) throughput + pdata[19] >= max_nonce) { + pdata[19] = max_nonce; + break; + } + + pdata[19] += throughput; + + } while (!work_restart[thr_id].restart); + + *hashes_done = pdata[19] - first_nonce; + return 0; +} + +// cleanup +extern "C" void free_bmw(int thr_id) +{ + if (!init[thr_id]) + return; + + cudaThreadSynchronize(); + + cudaFree(d_hash[thr_id]); + bmw256_midstate_free(thr_id); + cuda_check_cpu_free(thr_id); + + cudaDeviceSynchronize(); + init[thr_id] = false; +} diff --git a/cuda_code/bn_layer_6.cu b/cuda_code/bn_layer_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..993069ca5720b36eead8b1751c080158ba95c4fe --- /dev/null +++ b/cuda_code/bn_layer_6.cu @@ -0,0 +1,307 @@ +#include +#include + +#include "caffe/filler.hpp" +#include "caffe/layers/bn_layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + + template + void BNLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* const_bottom_data = bottom[0]->gpu_data(); + const Dtype* const_top_data = top[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + + const Dtype* scale_data = this->blobs_[0]->gpu_data(); + const Dtype* shift_data = this->blobs_[1]->gpu_data(); + + // ---------- mean subtraction ---------- // + // statistic across spatial + caffe_gpu_gemv(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1. / (height_ * width_)), const_bottom_data, + spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); + // statistic across batch + caffe_gpu_gemv(CblasTrans, num_, channels_, Dtype(1. 
/ num_), spatial_statistic_.gpu_data(), + batch_sum_multiplier_.gpu_data(), Dtype(0), ex_.mutable_gpu_data()); + if (this->phase_ == TRAIN) { + // sync statistics + if ( sync_forward_ ){ + // first, sync EX + caffe_copy(channels_, ex_.gpu_data(), statistics_all_.mutable_gpu_data()); + P2PSync* p2p = this->callbacks()[ 0 ]->callbacks()[ 0 ]->p2p()[ 0 ]; + Blob statistics_child(1, channels_, 1, 1); + for ( int i = 0; i < p2p->children().size(); ++i ){ +#ifdef _WIN64 + Blob* s_c_ogpu = p2p->dataQueue().pop(); +#else + Blob* s_c_ogpu = NULL; + while ( !p2p->dataQueue().try_pop(&s_c_ogpu) ) + ; +#endif + //Blob* s_c_ogpu = p2p->dataQueue().pop(); + CUDA_CHECK(cudaMemcpyAsync(statistics_child.mutable_gpu_data(), s_c_ogpu->gpu_data(), channels_*sizeof( Dtype ), cudaMemcpyDeviceToDevice, cudaStreamDefault)); + CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); + caffe_gpu_add(channels_, statistics_child.gpu_data(), statistics_all_.gpu_data(), statistics_all_.mutable_gpu_data()); + } + if ( p2p->parent() ){ + p2p->parent()->dataQueue().push(&statistics_all_); +#ifdef _WIN64 + Blob* statistics_final = p2p->dataQueue().pop(); +#else + Blob* statistics_final = NULL; + while ( !p2p->dataQueue().try_pop(&statistics_final) ) + ; +#endif + //Blob* statistics_final = p2p->dataQueue().pop(); + CUDA_CHECK(cudaMemcpyAsync(ex_.mutable_gpu_data(), statistics_final->gpu_data(), channels_*sizeof( Dtype ), cudaMemcpyDeviceToDevice, cudaStreamDefault)); + CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); + } + else { + caffe_gpu_scal(channels_, Dtype(1.0 / Caffe::solver_count()), statistics_all_.mutable_gpu_data()); + caffe_copy(channels_, statistics_all_.gpu_data(), ex_.mutable_gpu_data()); + } + for ( int i = 0; i < p2p->children().size(); ++i ){ + p2p->children()[ i ]->dataQueue().push(&ex_); + } + } + // save history mean + caffe_gpu_axpby(ex_.count(), Dtype(1) - decay_, ex_.gpu_data(), decay_, + this->blobs_[2]->mutable_gpu_data()); + } + if (this->phase_ == TEST && moving_average_) { + // use moving average mean + caffe_copy(ex_.count(), this->blobs_[ 2 ]->gpu_data(), ex_.mutable_gpu_data()); + } + + // put mean blob into buffer_blob_ + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), + batch_sum_multiplier_.gpu_data(), ex_.gpu_data(), Dtype(0), + spatial_statistic_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(-1), + spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), + buffer_blob_.mutable_gpu_data()); + // substract mean + caffe_gpu_add(buffer_blob_.count(), const_bottom_data, buffer_blob_.gpu_data(), top_data); + + // ---------- variance normalization ---------- // + // add by yu liu + // calculate EX2 + caffe_gpu_powx(bottom[ 0 ]->count(), const_bottom_data, Dtype(2), buffer_blob_.mutable_gpu_data()); + // statistic across spatial + caffe_gpu_gemv(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1. / ( height_ * width_ )), buffer_blob_.gpu_data(), + spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); + // statistic across batch + caffe_gpu_gemv(CblasTrans, num_, channels_, Dtype(1. 
/ num_), spatial_statistic_.gpu_data(), + batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data()); + + + // original dx + //// put the squares of X - mean into buffer_blob_ + //caffe_gpu_powx(buffer_blob_.count(), const_top_data, Dtype(2), buffer_blob_.mutable_gpu_data()); + //// statistic across spatial + //caffe_gpu_gemv(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1. / (height_ * width_)), buffer_blob_.gpu_data(), + // spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); + //// statistic across batch + //caffe_gpu_gemv(CblasTrans, num_, channels_, Dtype(1. / num_), spatial_statistic_.gpu_data(), + // batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data()); + if (this->phase_ == TRAIN) { + if ( sync_forward_ ){ + // second, sync EX2 + caffe_copy(channels_, batch_statistic_.gpu_data(), statistics_all_.mutable_gpu_data()); + P2PSync* p2p = this->callbacks()[ 0 ]->callbacks()[ 0 ]->p2p()[ 0 ]; + Blob statistics_child(1, channels_, 1, 1); + for ( int i = 0; i < p2p->children().size(); ++i ){ +#ifdef _WIN64 + Blob* s_c_ogpu = p2p->dataQueue().pop(); +#else // Linux support + Blob* s_c_ogpu = NULL; + while ( !p2p->dataQueue().try_pop(&s_c_ogpu) ) + ; +#endif + //Blob* s_c_ogpu = p2p->dataQueue().pop(); + CUDA_CHECK(cudaMemcpyAsync(statistics_child.mutable_gpu_data(), s_c_ogpu->gpu_data(), channels_*sizeof( Dtype ), cudaMemcpyDeviceToDevice, cudaStreamDefault)); + CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); + caffe_gpu_add(channels_, statistics_child.gpu_data(), statistics_all_.gpu_data(), statistics_all_.mutable_gpu_data()); + } + if ( p2p->parent() ){ + p2p->parent()->dataQueue().push(&statistics_all_); +#ifdef _WIN64 + Blob* statistics_final = p2p->dataQueue().pop(); +#else // Linux support + Blob* statistics_final = NULL; + while ( !p2p->dataQueue().try_pop(&statistics_final) ) + ; +#endif + //Blob* statistics_final = p2p->dataQueue().pop(); + CUDA_CHECK(cudaMemcpyAsync(dx_.mutable_gpu_data(), statistics_final->gpu_data(), channels_*sizeof( Dtype ), cudaMemcpyDeviceToDevice, cudaStreamDefault)); + CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault)); + } + else { + caffe_gpu_scal(channels_, Dtype(1.0 / Caffe::solver_count()), statistics_all_.mutable_gpu_data()); + Blob e2x_(1, channels_, 1, 1); + caffe_gpu_powx(ex_.count(), ex_.gpu_data(), Dtype(2), e2x_.mutable_gpu_data()); + caffe_gpu_sub(ex_.count(), statistics_all_.gpu_data(), e2x_.gpu_data(), dx_.mutable_gpu_data()); + } + for ( int i = 0; i < p2p->children().size(); ++i ){ + p2p->children()[ i ]->dataQueue().push(&dx_); + } + } + else{ + Blob e2x_(1, channels_, 1, 1); + caffe_gpu_powx(ex_.count(), ex_.gpu_data(), Dtype(2), e2x_.mutable_gpu_data()); + caffe_gpu_sub(ex_.count(), batch_statistic_.gpu_data(), e2x_.gpu_data(), dx_.mutable_gpu_data()); + } + + + // save history variance + caffe_gpu_axpby(dx_.count(), Dtype(1) - decay_, dx_.gpu_data(), decay_, + this->blobs_[3]->mutable_gpu_data()); + } + if (this->phase_ == TEST ) { + // use moving average variance + if ( moving_average_ ) + caffe_copy(dx_.count(), this->blobs_[ 3 ]->gpu_data(), dx_.mutable_gpu_data()); + else{ + Blob e2x_(1, channels_, 1, 1); + caffe_gpu_powx(ex_.count(), ex_.gpu_data(), Dtype(2), e2x_.mutable_gpu_data()); + caffe_gpu_sub(ex_.count(), batch_statistic_.gpu_data(), e2x_.gpu_data(), dx_.mutable_gpu_data()); + } + } + + // Until now, dx_ should be calculated + + // add eps + caffe_gpu_add_scalar(dx_.count(), var_eps_, dx_.mutable_gpu_data()); + // std + 
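+    // at this point dx_ holds the per-channel variance E[X^2] - (E[X])^2
+    // (summed and averaged across GPUs when sync_forward_ is set) plus
+    // var_eps_; the powx(0.5) below converts it to the standard deviation
+    // used for x_norm = (x - E[X]) / sqrt(Var[X] + eps) before scale/shift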
caffe_gpu_powx(dx_.count(), dx_.gpu_data(), Dtype(0.5), + batch_statistic_.mutable_gpu_data()); + // put std blob into buffer_blob_ + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), + batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0), + spatial_statistic_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), + spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), + buffer_blob_.mutable_gpu_data()); + // variance normalization + caffe_gpu_div(buffer_blob_.count(), const_top_data, buffer_blob_.gpu_data(), top_data); + + // ---------- save x_norm and x_std ---------- // + caffe_copy(buffer_blob_.count(), const_top_data, x_norm_.mutable_gpu_data()); + caffe_copy(batch_statistic_.count(), batch_statistic_.gpu_data(), x_std_.mutable_gpu_data()); + + // ---------- scale ---------- // + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), + batch_sum_multiplier_.gpu_data(), scale_data, Dtype(0), + spatial_statistic_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), + spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), + buffer_blob_.mutable_gpu_data()); + caffe_gpu_mul(buffer_blob_.count(), const_top_data, buffer_blob_.gpu_data(), top_data); + + // ---------- shift ---------- // + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), + batch_sum_multiplier_.gpu_data(), shift_data, Dtype(0), + spatial_statistic_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), + spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), + buffer_blob_.mutable_gpu_data()); + caffe_gpu_add(buffer_blob_.count(), const_top_data, buffer_blob_.gpu_data(), top_data); + + } + + template + void BNLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* const_bottom_diff = bottom[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const Dtype* const_top_diff = top[0]->gpu_diff(); + + Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff(); + Dtype* shift_diff = this->blobs_[1]->mutable_gpu_diff(); + const Dtype* scale_data = this->blobs_[0]->gpu_data(); + + // ---------- gradient w.r.t. scale ---------- // + caffe_gpu_mul(buffer_blob_.count(), x_norm_.gpu_data(), const_top_diff, buffer_blob_.mutable_gpu_data()); + // statistic across spatial + caffe_gpu_gemv(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), buffer_blob_.gpu_data(), + spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); + // statistic across batch + caffe_gpu_gemv(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(), + batch_sum_multiplier_.gpu_data(), Dtype(0), scale_diff); + + // ---------- gradient w.r.t. shift ---------- // + // statistic across spatial + caffe_gpu_gemv(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), const_top_diff, + spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); + // statistic across batch + caffe_gpu_gemv(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(), + batch_sum_multiplier_.gpu_data(), Dtype(0), shift_diff); + + // ---------- gradient w.r.t. 
to bottom blob ---------- // + // put scale * top_diff to buffer_blob_ + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), + batch_sum_multiplier_.gpu_data(), scale_data, Dtype(0), + spatial_statistic_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), + spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), + buffer_blob_.mutable_gpu_data()); + caffe_gpu_mul(buffer_blob_.count(), const_top_diff, buffer_blob_.gpu_data(), buffer_blob_.mutable_gpu_data()); + + if (this->phase_ == TRAIN) { + // use new top diff for computation + caffe_gpu_mul(buffer_blob_.count(), x_norm_.gpu_data(), buffer_blob_.gpu_data(), bottom_diff); + // statistic across spatial + caffe_gpu_gemv(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), const_bottom_diff, + spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); + // statistic across batch + caffe_gpu_gemv(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(), + batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data()); + + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), + batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0), + spatial_statistic_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), + spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), + bottom_diff); + + caffe_gpu_mul(buffer_blob_.count(), x_norm_.gpu_data(), const_bottom_diff, bottom_diff); + + // statistic across spatial + caffe_gpu_gemv(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), buffer_blob_.gpu_data(), + spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); + // statistic across batch + caffe_gpu_gemv(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(), + batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data()); + + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), + batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0), + spatial_statistic_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), + spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(1), + bottom_diff); + + caffe_gpu_axpby(buffer_blob_.count(), Dtype(1), buffer_blob_.gpu_data(), Dtype(-1. 
/ (num_ * height_ * width_)), + bottom_diff); + } + if (this->phase_ == TEST && moving_average_) { + // use moving average variance + caffe_copy(buffer_blob_.count(), buffer_blob_.gpu_data(), bottom_diff); + } + + // variance normalization + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), + batch_sum_multiplier_.gpu_data(), x_std_.gpu_data(), Dtype(0), + spatial_statistic_.mutable_gpu_data()); + caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), + spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), + buffer_blob_.mutable_gpu_data()); + + caffe_gpu_div(buffer_blob_.count(), const_bottom_diff, buffer_blob_.gpu_data(), bottom_diff); + + } + + INSTANTIATE_LAYER_GPU_FUNCS(BNLayer); + +} // namespace caffe diff --git a/cuda_code/bnll_layer_10.cu b/cuda_code/bnll_layer_10.cu new file mode 100644 index 0000000000000000000000000000000000000000..768a92bba26ad914299c2fcb58078bb427273469 --- /dev/null +++ b/cuda_code/bnll_layer_10.cu @@ -0,0 +1,59 @@ +#include +#include + +#include "caffe/layers/bnll_layer.hpp" + +namespace caffe { + +__constant__ float kBNLL_THRESHOLD = 50.; + +template +__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : + log(1. + exp(in[index])); + } +} + +template +void BNLLLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + BNLLForward<<>>( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; +} + +template +__global__ void BNLLBackward(const int n, const Dtype* in_diff, + const Dtype* in_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} + +template +void BNLLLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + BNLLBackward<<>>( + count, top_diff, bottom_data, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer); + + +} // namespace caffe diff --git a/cuda_code/bnll_layer_39.cu b/cuda_code/bnll_layer_39.cu new file mode 100644 index 0000000000000000000000000000000000000000..ac8fc2476e0e90d3450709a87b719ffd7db01231 --- /dev/null +++ b/cuda_code/bnll_layer_39.cu @@ -0,0 +1,59 @@ +#include +#include + +#include "caffe/layers/bnll_layer.hpp" + +namespace caffe { + +__constant__ float kBNLL_THRESHOLD = 50.; + +template +__global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { + CUDA_KERNEL_LOOP(index, n) { + out[index] = in[index] > 0 ? + in[index] + log(1. + exp(-in[index])) : + log(1. 
+ exp(in[index])); + } +} + +template +void BNLLLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + BNLLForward<<>>( + count, bottom_data, top_data); + CUDA_POST_KERNEL_CHECK; +} + +template +__global__ void BNLLBackward(const int n, const Dtype* in_diff, + const Dtype* in_data, Dtype* out_diff) { + CUDA_KERNEL_LOOP(index, n) { + Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); + out_diff[index] = in_diff[index] * expval / (expval + 1.); + } +} + +template +void BNLLLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* top_diff = top[0]->gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + // NOLINT_NEXT_LINE(whitespace/operators) + BNLLBackward<<>>( + count, top_diff, bottom_data, bottom_diff); + CUDA_POST_KERNEL_CHECK; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer); + + +} // namespace caffe diff --git a/cuda_code/bondsKernelsCpu.cu b/cuda_code/bondsKernelsCpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..d3dc4e5e5d5f3a004a5a446847b0716f2aa29283 --- /dev/null +++ b/cuda_code/bondsKernelsCpu.cu @@ -0,0 +1,833 @@ +// bondsKernelsCpu.cu +// Scott Grauer-Gray +// Bonds kernels to run on the CPU + +#include "bondsKernelsCpu.cuh" + +int monthLengthKernelCpu(int month, bool leapYear) { + int MonthLength[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + + int MonthLeapLength[] = {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + + return (leapYear ? MonthLeapLength[month - 1] : MonthLength[month - 1]); +} + +int monthOffsetKernelCpu(int m, bool leapYear) { + int MonthOffset[] = { + 0, 31, 59, 90, 120, 151, // Jan - Jun + 181, 212, 243, 273, 304, 334, // Jun - Dec + 365 // used in dayOfMonth to bracket day + }; + + int MonthLeapOffset[] = { + 0, 31, 60, 91, 121, 152, // Jan - Jun + 182, 213, 244, 274, 305, 335, // Jun - Dec + 366 // used in dayOfMonth to bracket day + }; + + return (leapYear ? MonthLeapOffset[m - 1] : MonthOffset[m - 1]); +} + +int yearOffsetKernelCpu(int y) { + // the list of all December 31st in the preceding year + // e.g. 
for 1901 yearOffset[1] is 366, that is, December 31 1900 + int YearOffset[] = { + // 1900-1909 + 0, 366, 731, 1096, 1461, 1827, 2192, 2557, 2922, 3288, + // 1910-1919 + 3653, 4018, 4383, 4749, 5114, 5479, 5844, 6210, 6575, 6940, + // 1920-1929 + 7305, 7671, 8036, 8401, 8766, 9132, 9497, 9862, 10227, 10593, + // 1930-1939 + 10958, 11323, 11688, 12054, 12419, 12784, 13149, 13515, 13880, 14245, + // 1940-1949 + 14610, 14976, 15341, 15706, 16071, 16437, 16802, 17167, 17532, 17898, + // 1950-1959 + 18263, 18628, 18993, 19359, 19724, 20089, 20454, 20820, 21185, 21550, + // 1960-1969 + 21915, 22281, 22646, 23011, 23376, 23742, 24107, 24472, 24837, 25203, + // 1970-1979 + 25568, 25933, 26298, 26664, 27029, 27394, 27759, 28125, 28490, 28855, + // 1980-1989 + 29220, 29586, 29951, 30316, 30681, 31047, 31412, 31777, 32142, 32508, + // 1990-1999 + 32873, 33238, 33603, 33969, 34334, 34699, 35064, 35430, 35795, 36160, + // 2000-2009 + 36525, 36891, 37256, 37621, 37986, 38352, 38717, 39082, 39447, 39813, + // 2010-2019 + 40178, 40543, 40908, 41274, 41639, 42004, 42369, 42735, 43100, 43465, + // 2020-2029 + 43830, 44196, 44561, 44926, 45291, 45657, 46022, 46387, 46752, 47118, + // 2030-2039 + 47483, 47848, 48213, 48579, 48944, 49309, 49674, 50040, 50405, 50770, + // 2040-2049 + 51135, 51501, 51866, 52231, 52596, 52962, 53327, 53692, 54057, 54423, + // 2050-2059 + 54788, 55153, 55518, 55884, 56249, 56614, 56979, 57345, 57710, 58075, + // 2060-2069 + 58440, 58806, 59171, 59536, 59901, 60267, 60632, 60997, 61362, 61728, + // 2070-2079 + 62093, 62458, 62823, 63189, 63554, 63919, 64284, 64650, 65015, 65380, + // 2080-2089 + 65745, 66111, 66476, 66841, 67206, 67572, 67937, 68302, 68667, 69033, + // 2090-2099 + 69398, 69763, 70128, 70494, 70859, 71224, 71589, 71955, 72320, 72685, + // 2100-2109 + 73050, 73415, 73780, 74145, 74510, 74876, 75241, 75606, 75971, 76337, + // 2110-2119 + 76702, 77067, 77432, 77798, 78163, 78528, 78893, 79259, 79624, 79989, + // 2120-2129 + 80354, 80720, 81085, 81450, 81815, 82181, 82546, 82911, 83276, 83642, + // 2130-2139 + 84007, 84372, 84737, 85103, 85468, 85833, 86198, 86564, 86929, 87294, + // 2140-2149 + 87659, 88025, 88390, 88755, 89120, 89486, 89851, 90216, 90581, 90947, + // 2150-2159 + 91312, 91677, 92042, 92408, 92773, 93138, 93503, 93869, 94234, 94599, + // 2160-2169 + 94964, 95330, 95695, 96060, 96425, 96791, 97156, 97521, 97886, 98252, + // 2170-2179 + 98617, 98982, 99347, 99713, 100078, 100443, 100808, 101174, 101539, + 101904, + // 2180-2189 + 102269, 102635, 103000, 103365, 103730, 104096, 104461, 104826, 105191, + 105557, + // 2190-2199 + 105922, 106287, 106652, 107018, 107383, 107748, 108113, 108479, 108844, + 109209, + // 2200 + 109574}; + + return YearOffset[y - 1900]; +} + +bool isLeapKernelCpu(int y) { + bool YearIsLeap[] = { + // 1900 is leap in agreement with Excel's bug + // 1900 is out of valid date range anyway + // 1900-1909 + true, false, false, false, true, false, false, false, true, false, + // 1910-1919 + false, false, true, false, false, false, true, false, false, false, + // 1920-1929 + true, false, false, false, true, false, false, false, true, false, + // 1930-1939 + false, false, true, false, false, false, true, false, false, false, + // 1940-1949 + true, false, false, false, true, false, false, false, true, false, + // 1950-1959 + false, false, true, false, false, false, true, false, false, false, + // 1960-1969 + true, false, false, false, true, false, false, false, true, false, + // 1970-1979 + false, false, true, false, false, false, true, 
false, false, false, + // 1980-1989 + true, false, false, false, true, false, false, false, true, false, + // 1990-1999 + false, false, true, false, false, false, true, false, false, false, + // 2000-2009 + true, false, false, false, true, false, false, false, true, false, + // 2010-2019 + false, false, true, false, false, false, true, false, false, false, + // 2020-2029 + true, false, false, false, true, false, false, false, true, false, + // 2030-2039 + false, false, true, false, false, false, true, false, false, false, + // 2040-2049 + true, false, false, false, true, false, false, false, true, false, + // 2050-2059 + false, false, true, false, false, false, true, false, false, false, + // 2060-2069 + true, false, false, false, true, false, false, false, true, false, + // 2070-2079 + false, false, true, false, false, false, true, false, false, false, + // 2080-2089 + true, false, false, false, true, false, false, false, true, false, + // 2090-2099 + false, false, true, false, false, false, true, false, false, false, + // 2100-2109 + false, false, false, false, true, false, false, false, true, false, + // 2110-2119 + false, false, true, false, false, false, true, false, false, false, + // 2120-2129 + true, false, false, false, true, false, false, false, true, false, + // 2130-2139 + false, false, true, false, false, false, true, false, false, false, + // 2140-2149 + true, false, false, false, true, false, false, false, true, false, + // 2150-2159 + false, false, true, false, false, false, true, false, false, false, + // 2160-2169 + true, false, false, false, true, false, false, false, true, false, + // 2170-2179 + false, false, true, false, false, false, true, false, false, false, + // 2180-2189 + true, false, false, false, true, false, false, false, true, false, + // 2190-2199 + false, false, true, false, false, false, true, false, false, false, + // 2200 + false}; + + return YearIsLeap[y - 1900]; +} + +bondsDateStruct intializeDateKernelCpu(int d, int m, int y) { + bondsDateStruct currDate; + + currDate.day = d; + currDate.month = m; + currDate.year = y; + + bool leap = isLeapKernelCpu(y); + int offset = monthOffsetKernelCpu(m, leap); + + currDate.dateSerialNum = d + offset + yearOffsetKernelCpu(y); + + return currDate; +} + +dataType yearFractionCpu(bondsDateStruct d1, bondsDateStruct d2, + int dayCounter) { + return dayCountCpu(d1, d2, dayCounter) / 360.0; +} + +int dayCountCpu(bondsDateStruct d1, bondsDateStruct d2, int dayCounter) { + if (dayCounter == USE_EXACT_DAY) { + int dd1 = d1.day, dd2 = d2.day; + int mm1 = d1.month, mm2 = d2.month; + int yy1 = d1.year, yy2 = d2.year; + + if (dd2 == 31 && dd1 < 30) { + dd2 = 1; + mm2++; + } + + return 360 * (yy2 - yy1) + 30 * (mm2 - mm1 - 1) + MAX(0, 30 - dd1) + + MIN(30, dd2); + } + + else { + return (d2.dateSerialNum - d1.dateSerialNum); + } +} + +dataType couponNotionalCpu() { return 100.0; } + +dataType bondNotionalCpu() { return 100.0; } + +dataType fixedRateCouponNominalCpu() { return 100.0; } + +bool eventHasOccurredCpu(bondsDateStruct currDate, bondsDateStruct eventDate) { + return eventDate.dateSerialNum > currDate.dateSerialNum; +} + +bool cashFlowHasOccurredCpu(bondsDateStruct refDate, + bondsDateStruct eventDate) { + return eventHasOccurredCpu(refDate, eventDate); +} + +bondsDateStruct advanceDateCpu(bondsDateStruct date, int numMonthsAdvance) { + int d = date.day; + int m = date.month + numMonthsAdvance; + int y = date.year; + + while (m > 12) { + m -= 12; + y += 1; + } + + while (m < 1) { + m += 12; + y -= 1; + } + + int length 
= monthLengthKernelCpu(m, isLeapKernelCpu(y)); + if (d > length) + d = length; + + bondsDateStruct newDate = intializeDateKernelCpu(d, m, y); + + return newDate; +} + +int getNumCashFlowsCpu(inArgsStruct inArgs, int bondNum) { + int numCashFlows = 0; + + // bondsDateStruct endDate = inArgs.bond[bondNum].maturityDate; + bondsDateStruct currCashflowDate = inArgs.bond[bondNum].maturityDate; + + while (currCashflowDate.dateSerialNum > + inArgs.bond[bondNum].startDate.dateSerialNum) { + numCashFlows++; + currCashflowDate = advanceDateCpu(currCashflowDate, -6); + } + + return numCashFlows + 1; +} + +void getBondsResultsCpu(inArgsStruct inArgs, resultsStruct results, + int totNumRuns) { + for (int bondNum = 0; bondNum < totNumRuns; bondNum++) { + int numLegs = getNumCashFlowsCpu(inArgs, bondNum); + cashFlowsStruct cashFlows; + cashFlows.legs = (couponStruct *)malloc(numLegs * sizeof(couponStruct)); + + cashFlows.intRate.dayCounter = USE_EXACT_DAY; + cashFlows.intRate.rate = inArgs.bond[bondNum].rate; + cashFlows.intRate.freq = ANNUAL_FREQ; + cashFlows.intRate.comp = SIMPLE_INTEREST; + cashFlows.dayCounter = USE_EXACT_DAY; + cashFlows.nominal = 100.0; + + // bondsDateStruct currPaymentDate; + bondsDateStruct currStartDate = + advanceDateCpu(inArgs.bond[bondNum].maturityDate, (numLegs - 1) * -6); + ; + bondsDateStruct currEndDate = advanceDateCpu(currStartDate, 6); + + for (int cashFlowNum = 0; cashFlowNum < numLegs - 1; cashFlowNum++) { + cashFlows.legs[cashFlowNum].paymentDate = currEndDate; + + cashFlows.legs[cashFlowNum].accrualStartDate = currStartDate; + cashFlows.legs[cashFlowNum].accrualEndDate = currEndDate; + + cashFlows.legs[cashFlowNum].amount = COMPUTE_AMOUNT; + + currStartDate = currEndDate; + currEndDate = advanceDateCpu(currEndDate, 6); + } + + cashFlows.legs[numLegs - 1].paymentDate = inArgs.bond[bondNum].maturityDate; + cashFlows.legs[numLegs - 1].accrualStartDate = inArgs.currDate[bondNum]; + cashFlows.legs[numLegs - 1].accrualEndDate = inArgs.currDate[bondNum]; + cashFlows.legs[numLegs - 1].amount = 100.0; + + results.bondForwardVal[bondNum] = + getBondYieldCpu(inArgs.bondCleanPrice[bondNum], USE_EXACT_DAY, + COMPOUNDED_INTEREST, 2.0, inArgs.currDate[bondNum], + ACCURACY, 100, inArgs, bondNum, cashFlows, numLegs); + inArgs.discountCurve[bondNum].forward = results.bondForwardVal[bondNum]; + results.dirtyPrice[bondNum] = + getDirtyPriceCpu(inArgs, bondNum, cashFlows, numLegs); + results.accruedAmountCurrDate[bondNum] = getAccruedAmountCpu( + inArgs, inArgs.currDate[bondNum], bondNum, cashFlows, numLegs); + results.cleanPrice[bondNum] = + results.dirtyPrice[bondNum] - results.accruedAmountCurrDate[bondNum]; + + free(cashFlows.legs); + } +} + +dataType getDirtyPriceCpu(inArgsStruct inArgs, int bondNum, + cashFlowsStruct cashFlows, int numLegs) { + dataType currentNotional = bondNotionalCpu(); + return discountingBondEngineCalculateSettlementValueCpu(inArgs, bondNum, + cashFlows, numLegs) * + 100.0 / currentNotional; +} + +dataType getAccruedAmountCpu(inArgsStruct inArgs, bondsDateStruct date, + int bondNum, cashFlowsStruct cashFlows, + int numLegs) { + return bondAccruedAmountCpu(inArgs, date, bondNum, cashFlows, numLegs); +} + +dataType discountingBondEngineCalculateSettlementValueCpu( + inArgsStruct inArgs, int bondNum, cashFlowsStruct cashFlows, int numLegs) { + bondsDateStruct currDate = inArgs.currDate[bondNum]; + + if (currDate.dateSerialNum < inArgs.bond[bondNum].startDate.dateSerialNum) { + currDate = inArgs.bond[bondNum].startDate; + } + + // a bond's cashflow on settlement 
date is never taken into account + return cashFlowsNpvCpu(cashFlows, inArgs.discountCurve[bondNum], false, + currDate, currDate, numLegs); +} + +dataType bondAccruedAmountCpu(inArgsStruct inArgs, bondsDateStruct date, + int bondNum, cashFlowsStruct cashFlows, + int numLegs) { + dataType currentNotional = bondNotionalCpu(); + if (currentNotional == 0.0) + return 0.0; + + return bondFunctionsAccruedAmountCpu(inArgs, date, bondNum, cashFlows, + numLegs); +} + +dataType bondFunctionsAccruedAmountCpu(inArgsStruct inArgs, + bondsDateStruct date, int bondNum, + cashFlowsStruct cashFlows, int numLegs) { + return cashFlowsAccruedAmountCpu(cashFlows, false, date, numLegs, inArgs, + bondNum) * + 100.0 / bondNotionalCpu(); +} + +dataType cashFlowsAccruedAmountCpu(cashFlowsStruct cashFlows, + bool includecurrDateFlows, + bondsDateStruct currDate, int numLegs, + inArgsStruct inArgs, int bondNum) { + int legComputeNum = cashFlowsNextCashFlowNumCpu(cashFlows, currDate, numLegs); + + dataType result = 0.0; + + for (int i = legComputeNum; i < (numLegs); ++i) { + result += fixedRateCouponAccruedAmountCpu(cashFlows, i, currDate, inArgs, + bondNum); + } + + return result; +} + +dataType fixedRateCouponAccruedAmountCpu(cashFlowsStruct cashFlows, int numLeg, + bondsDateStruct d, inArgsStruct inArgs, + int bondNum) { + if (d.dateSerialNum <= + cashFlows.legs[numLeg].accrualStartDate.dateSerialNum || + d.dateSerialNum > inArgs.maturityDate[bondNum].dateSerialNum) { + return 0.0; + } else { + bondsDateStruct endDate = cashFlows.legs[numLeg].accrualEndDate; + if (d.dateSerialNum < cashFlows.legs[numLeg].accrualEndDate.dateSerialNum) { + endDate = d; + } + + return fixedRateCouponNominalCpu() * + (interestRateCompoundFactorCpu( + cashFlows.intRate, cashFlows.legs[numLeg].accrualStartDate, + endDate, cashFlows.dayCounter) - + 1.0); + } +} + +dataType cashFlowsNpvCpu(cashFlowsStruct cashFlows, + bondsYieldTermStruct discountCurve, + bool includecurrDateFlows, bondsDateStruct currDate, + bondsDateStruct npvDate, int numLegs) { + npvDate = currDate; + + dataType totalNPV = 0.0; + + for (int i = 0; i < numLegs; ++i) { + + if (!(cashFlowHasOccurredCpu(cashFlows.legs[i].paymentDate, currDate))) + totalNPV += fixedRateCouponAmountCpu(cashFlows, i) * + bondsYieldTermStructureDiscountCpu( + discountCurve, cashFlows.legs[i].paymentDate); + } + + return totalNPV / bondsYieldTermStructureDiscountCpu(discountCurve, npvDate); +} + +dataType bondsYieldTermStructureDiscountCpu(bondsYieldTermStruct ytStruct, + bondsDateStruct t) { + ytStruct.intRate.rate = ytStruct.forward; + ytStruct.intRate.freq = ytStruct.frequency; + ytStruct.intRate.comp = ytStruct.compounding; + return flatForwardDiscountImplCpu( + ytStruct.intRate, + yearFractionCpu(ytStruct.refDate, t, ytStruct.dayCounter)); +} + +dataType flatForwardDiscountImplCpu(intRateStruct intRate, dataType t) { + return interestRateDiscountFactorCpu(intRate, t); +} + +dataType interestRateDiscountFactorCpu(intRateStruct intRate, dataType t) { + return 1.0 / interestRateCompoundFactorCpu(intRate, t); +} + +dataType interestRateCompoundFactorCpu(intRateStruct intRate, dataType t) { + switch (intRate.comp) { + case SIMPLE_INTEREST: + return 1.0 + intRate.rate * t; + case COMPOUNDED_INTEREST: + return pow(1.0f + intRate.rate / intRate.freq, intRate.freq * t); + case CONTINUOUS_INTEREST: + return exp(intRate.rate * t); + // case SimpleThenCompounded: + // if (t<=1.0/Real(freq_)) + // return 1.0 + intRate.rate*t; + // else + // return pow(1.0+r_/freq_, freq_*t); + } + + return 0.0f; +} + 
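+// Illustrative sketch only (not part of the original bonds kernels; the
+// helper name compoundFactorRoundTripCpu is invented here): for the
+// compounding conventions handled above (simple, compounded, continuous)
+// the discount factor is the reciprocal of the compound factor, so the
+// product below should be 1.0 up to floating-point error, e.g. for
+// rate = 0.05, freq = 2, comp = COMPOUNDED_INTEREST and t = 1.5 years.
+dataType compoundFactorRoundTripCpu(intRateStruct intRate, dataType t) {
+  return interestRateCompoundFactorCpu(intRate, t) *
+         interestRateDiscountFactorCpu(intRate, t);
+}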
+dataType fixedRateCouponAmountCpu(cashFlowsStruct cashFlows, int numLeg) { + if (cashFlows.legs[numLeg].amount == COMPUTE_AMOUNT) { + return fixedRateCouponNominalCpu() * + (interestRateCompoundFactorCpu( + cashFlows.intRate, cashFlows.legs[numLeg].accrualStartDate, + cashFlows.legs[numLeg].accrualEndDate, cashFlows.dayCounter) - + 1.0); + } else { + return cashFlows.legs[numLeg].amount; + } +} + +dataType interestRateCompoundFactorCpu(intRateStruct intRate, + bondsDateStruct d1, bondsDateStruct d2, + int dayCounter) { + dataType t = yearFractionCpu(d1, d2, dayCounter); + return interestRateCompoundFactorCpu(intRate, t); +} + +dataType interestRateImpliedRateCpu(dataType compound, int comp, dataType freq, + dataType t) { + dataType r = 0.0f; + if (compound == 1.0) { + r = 0.0; + } else { + switch (comp) { + case SIMPLE_INTEREST: + r = (compound - 1.0) / t; + break; + case COMPOUNDED_INTEREST: + r = (pow((dataType)compound, 1.0f / ((freq)*t)) - 1.0f) * (freq); + break; + } + } + + return r; +} + +dataType getMarketRepoRateCpu(bondsDateStruct d, int comp, dataType freq, + bondsDateStruct referenceDate, + inArgsStruct inArgs, int bondNum) { + dataType compound = + 1.0 / bondsYieldTermStructureDiscountCpu(inArgs.repoCurve[bondNum], d); + return interestRateImpliedRateCpu( + compound, comp, freq, + yearFractionCpu(referenceDate, d, inArgs.repoCurve[bondNum].dayCounter)); +} + +couponStruct cashFlowsNextCashFlowCpu(cashFlowsStruct cashFlows, + bondsDateStruct currDate, int numLegs) { + for (int i = 0; i < numLegs; ++i) { + + if (!(cashFlowHasOccurredCpu(cashFlows.legs[i].paymentDate, currDate))) + return cashFlows.legs[i]; + } + return cashFlows.legs[numLegs - 1]; +} + +int cashFlowsNextCashFlowNumCpu(cashFlowsStruct cashFlows, + bondsDateStruct currDate, int numLegs) { + for (int i = 0; i < numLegs; ++i) { + + if (!(cashFlowHasOccurredCpu(cashFlows.legs[i].paymentDate, currDate))) + return i; + } + + return (numLegs - 1); +} + +dataType getBondYieldCpu(dataType cleanPrice, int dc, int comp, dataType freq, + bondsDateStruct settlement, dataType accuracy, + int maxEvaluations, inArgsStruct currInArgs, + int bondNum, cashFlowsStruct cashFlows, int numLegs) { + dataType currentNotional = bondNotionalCpu(); + + if (currentNotional == 0.0) + return 0.0; + + if (currInArgs.bond[bondNum].startDate.dateSerialNum > + settlement.dateSerialNum) { + settlement = currInArgs.bond[bondNum].startDate; + } + + return getBondFunctionsYieldCpu(cleanPrice, dc, comp, freq, settlement, + accuracy, maxEvaluations, currInArgs, bondNum, + cashFlows, numLegs); +} + +dataType getBondFunctionsYieldCpu(dataType cleanPrice, int dc, int comp, + dataType freq, bondsDateStruct settlement, + dataType accuracy, int maxEvaluations, + inArgsStruct currInArgs, int bondNum, + cashFlowsStruct cashFlows, int numLegs) { + dataType dirtyPrice = + cleanPrice + bondFunctionsAccruedAmountCpu(currInArgs, settlement, + bondNum, cashFlows, numLegs); + dirtyPrice /= 100.0 / bondNotionalCpu(); + + return getCashFlowsYieldCpu(cashFlows, dirtyPrice, dc, comp, freq, false, + settlement, settlement, numLegs, accuracy, + maxEvaluations); +} + +dataType getCashFlowsYieldCpu(cashFlowsStruct leg, dataType npv, int dayCounter, + int compounding, dataType frequency, + bool includecurrDateFlows, + bondsDateStruct currDate, bondsDateStruct npvDate, + int numLegs, dataType accuracy, int maxIterations, + dataType guess) { + // Brent solver; + solverStruct solver; + solver.maxEvaluations_ = maxIterations; + irrFinderStruct objFunction; + + objFunction.npv = 
npv; + objFunction.dayCounter = dayCounter; + objFunction.comp = compounding; + objFunction.freq = frequency; + objFunction.includecurrDateFlows = includecurrDateFlows; + objFunction.currDate = currDate; + objFunction.npvDate = npvDate; + + return solverSolveCpu(solver, objFunction, accuracy, guess, guess / 10.0, leg, + numLegs); +} + +dataType solverSolveCpu(solverStruct solver, irrFinderStruct f, + dataType accuracy, dataType guess, dataType step, + cashFlowsStruct cashFlows, int numLegs) { + // check whether we really want to use epsilon + accuracy = MAX(accuracy, QL_EPSILON_GPU); + + dataType growthFactor = 1.6; + int flipflop = -1; + + solver.root_ = guess; + solver.fxMax_ = fOpCpu(f, solver.root_, cashFlows, numLegs); + + // monotonically crescent bias, as in optionValue(volatility) + if (closeCpu(solver.fxMax_, 0.0)) { + return solver.root_; + } else if (closeCpu(solver.fxMax_, 0.0)) { + solver.xMin_ = /*enforceBounds*/ (solver.root_ - step); + solver.fxMin_ = fOpCpu(f, solver.xMin_, cashFlows, numLegs); + solver.xMax_ = solver.root_; + } else { + solver.xMin_ = solver.root_; + solver.fxMin_ = solver.fxMax_; + solver.xMax_ = /*enforceBounds*/ (solver.root_ + step); + solver.fxMax_ = fOpCpu(f, solver.xMax_, cashFlows, numLegs); + } + + solver.evaluationNumber_ = 2; + while (solver.evaluationNumber_ <= solver.maxEvaluations_) { + if (solver.fxMin_ * solver.fxMax_ <= 0.0) { + if (closeCpu(solver.fxMin_, 0.0)) + return solver.xMin_; + if (closeCpu(solver.fxMax_, 0.0)) + return solver.xMax_; + solver.root_ = (solver.xMax_ + solver.xMin_) / 2.0; + return solveImplCpu(solver, f, accuracy, cashFlows, numLegs); + } + if (fabs(solver.fxMin_) < fabs(solver.fxMax_)) { + solver.xMin_ = /*enforceBounds*/ ( + solver.xMin_ + growthFactor * (solver.xMin_ - solver.xMax_)); + solver.fxMin_ = fOpCpu(f, solver.xMin_, cashFlows, numLegs); + } else if (fabs(solver.fxMin_) > fabs(solver.fxMax_)) { + solver.xMax_ = /*enforceBounds*/ ( + solver.xMax_ + growthFactor * (solver.xMax_ - solver.xMin_)); + solver.fxMax_ = fOpCpu(f, solver.xMax_, cashFlows, numLegs); + } else if (flipflop == -1) { + solver.xMin_ = /*enforceBounds*/ ( + solver.xMin_ + growthFactor * (solver.xMin_ - solver.xMax_)); + solver.fxMin_ = fOpCpu(f, solver.xMin_, cashFlows, numLegs); + solver.evaluationNumber_++; + flipflop = 1; + } else if (flipflop == 1) { + solver.xMax_ = /*enforceBounds*/ ( + solver.xMax_ + growthFactor * (solver.xMax_ - solver.xMin_)); + solver.fxMax_ = fOpCpu(f, solver.xMax_, cashFlows, numLegs); + flipflop = -1; + } + solver.evaluationNumber_++; + } + + return 0.0f; +} + +dataType cashFlowsNpvYieldCpu(cashFlowsStruct cashFlows, intRateStruct y, + bool includecurrDateFlows, + bondsDateStruct currDate, bondsDateStruct npvDate, + int numLegs) { + + dataType npv = 0.0; + dataType discount = 1.0; + bondsDateStruct lastDate; + bool first = true; + + for (int i = 0; i < numLegs; ++i) { + + if (cashFlowHasOccurredCpu(cashFlows.legs[i].paymentDate, currDate)) + continue; + + bondsDateStruct couponDate = cashFlows.legs[i].paymentDate; + dataType amount = fixedRateCouponAmountCpu(cashFlows, i); + if (first) { + first = false; + + if (i > 0) { + lastDate = advanceDateCpu(cashFlows.legs[i].paymentDate, -1 * 6); + } else { + lastDate = cashFlows.legs[i].accrualStartDate; + } + discount *= interestRateDiscountFactorCpu( + y, yearFractionCpu(npvDate, couponDate, y.dayCounter)); + + } else { + discount *= interestRateDiscountFactorCpu( + y, yearFractionCpu(lastDate, couponDate, y.dayCounter)); + } + + lastDate = couponDate; + + npv 
+= amount * discount; + } + + return npv; +} + +dataType fOpCpu(irrFinderStruct f, dataType y, cashFlowsStruct cashFlows, + int numLegs) { + intRateStruct yield; + + yield.rate = y; + yield.comp = f.comp; + yield.freq = f.freq; + yield.dayCounter = f.dayCounter; + + dataType NPV = cashFlowsNpvYieldCpu(cashFlows, yield, false, f.currDate, + f.npvDate, numLegs); + + return (f.npv - NPV); +} + +dataType fDerivativeCpu(irrFinderStruct f, dataType y, + cashFlowsStruct cashFlows, int numLegs) { + intRateStruct yield; + yield.rate = y; + yield.dayCounter = f.dayCounter; + yield.comp = f.comp; + yield.freq = f.freq; + + return modifiedDurationCpu(cashFlows, yield, f.includecurrDateFlows, + f.currDate, f.npvDate, numLegs); +} + +bool closeCpu(dataType x, dataType y) { return closeCpu(x, y, 42); } + +bool closeCpu(dataType x, dataType y, int n) { + dataType diff = fabs(x - y); + dataType tolerance = n * QL_EPSILON_GPU; + + return diff <= tolerance * fabs(x) && diff <= tolerance * fabs(y); +} + +dataType enforceBoundsCpu(dataType x) { + /*if Cpu(lowerBoundEnforced_ && x < lowerBound_) + return lowerBound_; + if (upperBoundEnforced_ && x > upperBound_) + return upperBound_;*/ + return x; +} + +dataType solveImplCpu(solverStruct solver, irrFinderStruct f, + dataType xAccuracy, cashFlowsStruct cashFlows, + int numLegs) { + dataType froot, dfroot, dx, dxold; + dataType xh, xl; + + // Orient the search so that f(xl) < 0 + if (solver.fxMin_ < 0.0) { + xl = solver.xMin_; + xh = solver.xMax_; + } else { + xh = solver.xMin_; + xl = solver.xMax_; + } + + // the "stepsize before last" + dxold = solver.xMax_ - solver.xMin_; + // it was dxold=std::fabs(xMax_-xMin_); in Numerical Recipes + // here (xMax_-xMin_ > 0) is verified in the constructor + + // and the last step + dx = dxold; + + froot = fOpCpu(f, solver.root_, cashFlows, numLegs); + dfroot = fDerivativeCpu(f, solver.root_, cashFlows, numLegs); + + ++solver.evaluationNumber_; + + while (solver.evaluationNumber_ <= solver.maxEvaluations_) { + // Bisect if (out of range || not decreasing fast enough) + if ((((solver.root_ - xh) * dfroot - froot) * + ((solver.root_ - xl) * dfroot - froot) > + 0.0) || + (fabs(2.0 * froot) > fabs(dxold * dfroot))) { + dxold = dx; + dx = (xh - xl) / 2.0; + solver.root_ = xl + dx; + } else { + dxold = dx; + dx = froot / dfroot; + solver.root_ -= dx; + } + + // Convergence criterion + if (fabs(dx) < xAccuracy) + return solver.root_; + froot = fOpCpu(f, solver.root_, cashFlows, numLegs); + dfroot = fDerivativeCpu(f, solver.root_, cashFlows, numLegs); + ++solver.evaluationNumber_; + if (froot < 0.0) + xl = solver.root_; + else + xh = solver.root_; + } + + return solver.root_; +} + +dataType modifiedDurationCpu(cashFlowsStruct cashFlows, intRateStruct y, + bool includecurrDateFlows, + bondsDateStruct currDate, bondsDateStruct npvDate, + int numLegs) { + dataType P = 0.0; + dataType dPdy = 0.0; + dataType r = y.rate; + dataType N = y.freq; + int dc = y.dayCounter; + + for (int i = 0; i < numLegs; ++i) { + + if (!cashFlowHasOccurredCpu(cashFlows.legs[i].paymentDate, currDate)) { + dataType t = yearFractionCpu(npvDate, cashFlows.legs[i].paymentDate, dc); + dataType c = fixedRateCouponAmountCpu(cashFlows, i); + dataType B = interestRateDiscountFactorCpu(y, t); + + P += c * B; + switch (y.comp) { + case SIMPLE_INTEREST: + dPdy -= c * B * B * t; + break; + case COMPOUNDED_INTEREST: + dPdy -= c * t * B / (1 + r / N); + break; + case CONTINUOUS_INTEREST: + dPdy -= c * B * t; + break; + case SIMPLE_THEN_COMPOUNDED_INTEREST: + if (t <= 1.0 / 
N) + dPdy -= c * B * B * t; + else + dPdy -= c * t * B / (1 + r / N); + break; + } + } + } + + if (P == 0.0) // no cashflows + { + return 0.0; + } + return (-1 * dPdy) / P; // reverse derivative sign +} diff --git a/cuda_code/bonus.cu b/cuda_code/bonus.cu new file mode 100644 index 0000000000000000000000000000000000000000..86513f2864d3b83df18e41c4d69a958395f3b339 --- /dev/null +++ b/cuda_code/bonus.cu @@ -0,0 +1,204 @@ +#include +#include +#include + +// Define constants and data types +#define PAGE_SIZE 32 +#define PHYSICAL_MEM_SIZE 32768 +#define MEMMORY_SEGMENT 32768 +#define STORAGE_SIZE 131072 +#define DATAFILE "./data.bin" +#define OUTPUTFILE "./snapshot.bin" +typedef unsigned char uchar; +typedef uint32_t u32; +const uint32_t VALID = 0 | 1; +const uint32_t INVALID = 0; +const uint32_t PAGENUMBERMASK = 0x00003FFE; +const uint32_t LASTTIMEMASK = 0xFFFFC000; +const uint32_t PIDMASK = 0xC0000000; +const uint32_t DNE = 0xFFFFFFFF; + +// Lock & Unlock +#define __LOCK(); for(int p = 0; p < 4; p++) {if(threadIdx.x == p) { +#define __UNLOCK(); } __syncthreads(); } +#define __GET_BASE() p*MEMMORY_SEGMENT +// Declare variables +__device__ __managed__ int PAGE_ENTRIES = 0; +__device__ __managed__ int PAGEFAULT = 0; +__device__ __managed__ int CURRENTTIME = 0; +__device__ __managed__ uchar storage[STORAGE_SIZE]; +__device__ __managed__ uchar results[STORAGE_SIZE]; +__device__ __managed__ uchar input[STORAGE_SIZE]; +extern __shared__ u32 pageTable[]; + +// Function +// ****************************************************************** +// Initialize +__device__ void initPageTable(int entries) { + for (int i = 0; i < entries; i++) { + pageTable[i] = INVALID; + } +} +// ****************************************************************** + +// ****************************************************************** +// File I/O +int loadBinaryFile(char *fileName, uchar *input, int storageSize) { + FILE *fptr = fopen(fileName, "rb"); + // Get size + fseek(fptr, 0, SEEK_END); + int size = ftell(fptr); + rewind(fptr); + // Read data from input file + fread(input, sizeof(unsigned char), size, fptr); + if (storageSize < size) { + printf("ERROR: Storage size is too small to store input data!\n"); + } + fclose(fptr); + return size; +} + +void writeBinaryFile(char *fileName, uchar *input, int storageSize) { + FILE *fptr = fopen(fileName, "wb"); + // Read data from input file + fwrite(input, sizeof(unsigned char), storageSize, fptr); + fclose(fptr); +} +// ****************************************************************** + +// ****************************************************************** +// Read/Write +__device__ u32 getPid(u32 PTE) { + return (PTE & PIDMASK) >> 30; +} +__device__ u32 isValid(u32 PTE) { + return PTE & VALID; +} +__device__ u32 getPageNumber(u32 PTE) { + return (PTE & PAGENUMBERMASK) >> 1; +} +__device__ u32 getLastUsedTime(u32 PTE) { + return (PTE & LASTTIMEMASK) >> 14; +} +__device__ u32 makePTE(u32 pid, u32 time, u32 pageNumber, u32 validbit) { + return (pid << 30) | (time << 14) | (pageNumber << 1) | validbit; +} +__device__ u32 paging(uchar *memory, u32 pageNumber, u32 pageOffset) { + // ******************************************************************** // + // How I store infomation in a PTE: // + // |--|----------------|-------------|-| // + // |33|2222222222111111|1111-8-6-4-2-|0| // + // |10|9876543210987654|32109-7-5-3-1|-| // + // |--|----------------|-------------|-| // + // | |Last used time | Page Number | | <-- last one bit is valid bit // + // 
|--|----------------|-------------|-| // + // ******************************************************************** // + + CURRENTTIME++; + // Find if the target page exists + for (u32 i = 0; i < PAGE_ENTRIES; i++) { + if (isValid(pageTable[i]) + && threadIdx.x == getPid(pageTable[i]) + && pageNumber == getPageNumber(pageTable[i])) { + // Update time + pageTable[i] = makePTE(threadIdx.x, CURRENTTIME, pageNumber, VALID); + return i * PAGE_SIZE + pageOffset; + } + } + + // Find if there is a empty entry to place + for (u32 i = 0; i < PAGE_ENTRIES; i++) { + if (isValid(pageTable[i]) == 0) { + // Because of a empty hole, it must be a pagefault + PAGEFAULT++; + // Update PTE + pageTable[i] = makePTE(threadIdx.x, CURRENTTIME, pageNumber, VALID); + return i * PAGE_SIZE + pageOffset; + } + } + + // Find a place for swaping in by the RULE of LRU + u32 leastEntry = DNE; + u32 leastTime = DNE; + for (u32 i = 0; i < PAGE_ENTRIES; i++) { + if (leastTime > getLastUsedTime(pageTable[i]) && threadIdx.x == getPid(pageTable[i])) { + leastTime = getLastUsedTime(pageTable[i]); + leastEntry = i; + } + } + // Replace & update infos + PAGEFAULT++; + for (u32 j = 0; + j < PAGE_SIZE; + j++) { + u32 memoryAddress = leastEntry * PAGE_SIZE + j; + u32 storageAddress = pageNumber * PAGE_SIZE + j; + u32 toStorageAddress = getPageNumber(pageTable[leastEntry]) * PAGE_SIZE + j; + storage[toStorageAddress] = memory[memoryAddress]; + memory[memoryAddress] = storage[storageAddress]; + } + pageTable[leastEntry] = makePTE(threadIdx.x, CURRENTTIME, pageNumber, VALID); + return leastEntry * PAGE_SIZE + pageOffset; +} + +__device__ uchar Gread(uchar *memory, u32 address) { + u32 pageNumber = address/PAGE_SIZE; + u32 pageOffset = address%PAGE_SIZE; + + u32 reMappingAddress = paging(memory, pageNumber, pageOffset); + return memory[reMappingAddress]; +} + +__device__ void Gwrite(uchar *memory, u32 address, uchar writeValue) { + u32 pageNumber = address/PAGE_SIZE; + u32 pageOffset = address%PAGE_SIZE; + + u32 reMappingAddress = paging(memory, pageNumber, pageOffset); + memory[reMappingAddress] = writeValue; +} + +__device__ void snapshot(uchar *result, uchar *memory, int offset, int input_size) { + for (int i = 0; i < input_size; i++) { + result[i] = Gread(memory, i+offset); + } +} +// ****************************************************************** + +// ****************************************************************** +// Kernel function +__global__ void mykernel(int input_size) { + __shared__ uchar data[PHYSICAL_MEM_SIZE]; + PAGE_ENTRIES = PHYSICAL_MEM_SIZE/PAGE_SIZE; + if (threadIdx.x == 0) + initPageTable(PAGE_ENTRIES); + + //##Gwrite / Gread code section start### + __LOCK(); + for(int i = 0; i < input_size; i++) { + Gwrite(data, i+__GET_BASE(), input[i+__GET_BASE()]); + } + __UNLOCK(); + for(int i = input_size - 1; i >= input_size - 10; i--) { + __LOCK(); + int value = Gread(data, i+__GET_BASE()); + __UNLOCK(); + } + //the last line of Gwrite/Gread code section should be snapshot () + __LOCK(); + snapshot(results+__GET_BASE(), data, __GET_BASE(), input_size); + __UNLOCK(); + //###Gwrite/Gread code section end### +} +// ****************************************************************** + +int main() { + int input_size = loadBinaryFile(DATAFILE, input, STORAGE_SIZE); + cudaSetDevice(2); + mykernel<<<1, 4, 16384>>>(input_size/4); + cudaDeviceSynchronize(); + cudaDeviceReset(); + + writeBinaryFile(OUTPUTFILE, results, input_size); + printf("pagefault times = %d\n", PAGEFAULT); + return 0; +} diff --git 
a/cuda_code/bounding_box_15.cu b/cuda_code/bounding_box_15.cu new file mode 100644 index 0000000000000000000000000000000000000000..ca1140194f42a3b604847416f6b064c0ec991f99 --- /dev/null +++ b/cuda_code/bounding_box_15.cu @@ -0,0 +1,746 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + /*! + * Copyright (c) 2017 by Contributors + * \file bounding_box.cu + * \brief Bounding box util functions and operators + * \author Joshua Zhang + */ + +#include + +#include "./bounding_box-inl.cuh" +#include "./bounding_box-inl.h" +#include "../elemwise_op_common.h" + +namespace mxnet { +namespace op { + +namespace { + +using mshadow::Tensor; +using mshadow::Stream; + +template +struct TempWorkspace { + size_t scores_temp_space; + DType* scores; + size_t scratch_space; + uint8_t* scratch; + size_t buffer_space; + DType* buffer; + size_t nms_scratch_space; + uint32_t* nms_scratch; + size_t indices_temp_spaces; + index_t* indices; +}; + +inline size_t ceil_div(size_t x, size_t y) { + return (x + y - 1) / y; +} + +inline size_t align(size_t x, size_t alignment) { + return ceil_div(x, alignment) * alignment; +} + +template +__global__ void FilterAndPrepareAuxDataKernel(const DType* data, DType* out, DType* scores, + index_t num_elements_per_batch, + const index_t element_width, + const index_t N, + const float threshold, + const int id_index, const int score_index, + const int background_id) { + index_t tid = blockIdx.x * blockDim.x + threadIdx.x; + bool first_in_element = (tid % element_width == 0); + index_t start_of_my_element = tid - (tid % element_width); + + if (tid < N) { + DType my_score = data[start_of_my_element + score_index]; + bool filtered_out = my_score <= threshold; + if (id_index != -1 && background_id != -1) { + DType my_id = data[start_of_my_element + id_index]; + filtered_out = filtered_out || (my_id == background_id); + } + if (!filtered_out) { + out[tid] = data[tid]; + } else { + out[tid] = -1; + my_score = -1; + } + + if (first_in_element) { + index_t offset = tid / element_width; + scores[offset] = my_score; + } + } +} + +template +void FilterAndPrepareAuxData(const Tensor& data, + Tensor* out, + const TempWorkspace& workspace, + const BoxNMSParam& param, + Stream* s) { + const int n_threads = 512; + index_t N = data.shape_.Size(); + const auto blocks = ceil_div(N, n_threads); + FilterAndPrepareAuxDataKernel<<::GetStream(s)>>>( + data.dptr_, out->dptr_, workspace.scores, + data.shape_[1], data.shape_[2], N, + param.valid_thresh, param.id_index, + param.score_index, param.background_id); +} + +template +__global__ void CompactDataKernel(const index_t* indices, const DType* source, + DType* destination, const index_t topk, + const index_t element_width, + const index_t num_elements_per_batch, + const int score_index, + const 
index_t N) { + const index_t tid_start = blockIdx.x * blockDim.x + threadIdx.x; + for (index_t tid = tid_start; tid < N; tid += blockDim.x * gridDim.x) { + const index_t my_element = tid / element_width; + const index_t my_element_in_batch = my_element % num_elements_per_batch; + if (check_topk && my_element_in_batch >= topk) { + destination[tid] = -1; + } else { + DType ret; + const index_t source_element = indices[my_element]; + DType score = 0; + if (check_score) { + score = source[source_element * element_width + score_index]; + } + if (score >= 0) { + ret = source[source_element * element_width + tid % element_width]; + } else { + ret = -1; + } + destination[tid] = ret; + } + } +} + +template +void CompactData(const Tensor& indices, + const Tensor& source, + Tensor* destination, + const index_t topk, + const int score_index, + Stream* s) { + const int n_threads = 512; + const size_t max_blocks = 320; + index_t N = source.shape_.Size(); + const auto blocks = std::min(ceil_div(N, n_threads), max_blocks); + if (topk > 0) { + CompactDataKernel<<::GetStream(s)>>>( + indices.dptr_, source.dptr_, + destination->dptr_, topk, + source.shape_[2], source.shape_[1], + score_index, N); + } else { + CompactDataKernel<<::GetStream(s)>>>( + indices.dptr_, source.dptr_, + destination->dptr_, topk, + source.shape_[2], source.shape_[1], + score_index, N); + } +} + +template +void WorkspaceForSort(const index_t num_elem, + const index_t topk, + const int alignment, + TempWorkspace* workspace) { + const size_t sort_scores_temp_space = + mxnet::op::SortByKeyWorkspaceSize(num_elem, false, false); + const size_t sort_topk_scores_temp_space = + mxnet::op::SortByKeyWorkspaceSize(topk, 1, false, false); + workspace->scratch_space = align(std::max(sort_scores_temp_space, sort_topk_scores_temp_space), + alignment); +} + +template +__global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result, + const index_t current_start, + const index_t num_elems, + const index_t num_batches, + const index_t num_blocks_per_row_batch, + const index_t num_blocks_per_row, + const index_t topk, + const index_t element_width, + const index_t num_elements_per_batch, + const int coord_index, + const int class_index, + const int score_index, + const float threshold); + +template +__global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results, + DType * data, + const index_t score_index, + const index_t element_width, + const index_t num_batches, + const index_t num_elems, + const index_t start_index, + const index_t topk); + +template +__global__ void ReduceNMSResultRestKernel(DType* data, + const uint32_t* nms_results, + const index_t score_index, + const index_t element_width, + const index_t num_batches, + const index_t num_elements_per_batch, + const index_t start_index, + const index_t topk, + const index_t num_blocks_per_batch); + +template +struct NMS { + static constexpr int THRESHOLD = 512; + + void operator()(Tensor* data, + Tensor* scratch, + const index_t topk, + const BoxNMSParam& param, + Stream* s) { + const int n_threads = 512; + const index_t num_batches = data->shape_[0]; + const index_t num_elements_per_batch = data->shape_[1]; + const index_t element_width = data->shape_[2]; + for (index_t current_start = 0; current_start < topk; current_start += THRESHOLD) { + const index_t n_elems = topk - current_start; + const index_t num_blocks_per_row_batch = ceil_div(n_elems, n_threads); + const index_t num_blocks_per_row = num_blocks_per_row_batch * num_batches; + const index_t n_blocks = THRESHOLD 
/ (sizeof(uint32_t) * 8) * num_blocks_per_row; + if (param.in_format == box_common_enum::kCorner) { + CalculateGreedyNMSResultsKernel + <<::GetStream(s)>>>( + data->dptr_, scratch->dptr_, current_start, n_elems, num_batches, + num_blocks_per_row_batch, num_blocks_per_row, topk, element_width, + num_elements_per_batch, param.coord_start, + param.force_suppress ? -1 : param.id_index, + param.score_index, param.overlap_thresh); + } else { + CalculateGreedyNMSResultsKernel + <<::GetStream(s)>>>( + data->dptr_, scratch->dptr_, current_start, n_elems, num_batches, + num_blocks_per_row_batch, num_blocks_per_row, topk, element_width, + num_elements_per_batch, param.coord_start, + param.force_suppress ? -1 : param.id_index, + param.score_index, param.overlap_thresh); + } + ReduceNMSResultTriangleKernel<<::GetStream(s)>>>( + scratch->dptr_, data->dptr_, param.score_index, + element_width, num_batches, num_elements_per_batch, + current_start, topk); + const index_t n_rest_elems = n_elems - THRESHOLD; + const index_t num_rest_blocks_per_batch = ceil_div(n_rest_elems, n_threads); + const index_t num_rest_blocks = num_rest_blocks_per_batch * num_batches; + if (n_rest_elems > 0) { + ReduceNMSResultRestKernel<<::GetStream(s)>>>( + data->dptr_, scratch->dptr_, param.score_index, element_width, + num_batches, num_elements_per_batch, current_start, topk, + num_rest_blocks_per_batch); + } + } + } +}; + +template +__device__ __forceinline__ DType calculate_area(const DType b0, const DType b1, + const DType b2, const DType b3) { + DType width = b2; + DType height = b3; + if (encode == box_common_enum::kCorner) { + width -= b0; + height -= b1; + } + if (width < 0 || height < 0) return 0; + return width * height; +} + +template +__device__ __forceinline__ DType calculate_intersection(const DType a0, const DType a1, + const DType a2, const DType a3, + const DType b0, const DType b1, + const DType b2, const DType b3) { + DType wx, wy; + if (encode == box_common_enum::kCorner) { + const DType left = a0 > b0 ? a0 : b0; + const DType bottom = a1 > b1 ? a1 : b1; + const DType right = a2 < b2 ? a2 : b2; + const DType top = a3 < b3 ? a3 : b3; + wx = right - left; + wy = top - bottom; + } else { + const DType al = 2 * a0 - a2; + const DType ar = 2 * a0 + a2; + const DType bl = 2 * b0 - b2; + const DType br = 2 * b0 + b2; + const DType left = bl > al ? bl : al; + const DType right = br < ar ? br : ar; + wx = right - left; + const DType ab = 2 * a1 - a3; + const DType at = 2 * a1 + a3; + const DType bb = 2 * b1 - b3; + const DType bt = 2 * b1 + b3; + const DType bottom = bb > ab ? bb : ab; + const DType top = bt < at ? 
bt : at; + wy = top - bottom; + wy = wy / 4; // To compensate for both wx and wy being 2x too large + } + if (wx <= 0 || wy <= 0) { + return 0; + } else { + return (wx * wy); + } +} + +template +__launch_bounds__(512) +__global__ void CalculateGreedyNMSResultsKernel(const DType* data, uint32_t* result, + const index_t current_start, + const index_t num_elems, + const index_t num_batches, + const index_t num_blocks_per_row_batch, + const index_t num_blocks_per_row, + const index_t topk, + const index_t element_width, + const index_t num_elements_per_batch, + const int coord_index, + const int class_index, + const int score_index, + const float threshold) { + constexpr int max_elem_width = 20; + constexpr int num_other_boxes = sizeof(uint32_t) * 8; + __shared__ DType other_boxes[max_elem_width * num_other_boxes]; + __shared__ DType other_boxes_areas[num_other_boxes]; + const index_t my_row = blockIdx.x / num_blocks_per_row; + const index_t my_block_offset_in_row = blockIdx.x % num_blocks_per_row; + const index_t my_block_offset_in_batch = my_block_offset_in_row % num_blocks_per_row_batch; + const index_t my_batch = (my_block_offset_in_row) / num_blocks_per_row_batch; + const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x + + current_start + threadIdx.x; + + // Load other boxes + const index_t offset = (my_batch * num_elements_per_batch + + current_start + my_row * num_other_boxes) * + element_width; + for (int i = threadIdx.x; i < element_width * num_other_boxes; i += blockDim.x) { + other_boxes[i] = data[offset + i]; + } + __syncthreads(); + + if (threadIdx.x < num_other_boxes) { + const int other_boxes_offset = element_width * threadIdx.x; + const DType their_area = calculate_area( + other_boxes[other_boxes_offset + coord_index + 0], + other_boxes[other_boxes_offset + coord_index + 1], + other_boxes[other_boxes_offset + coord_index + 2], + other_boxes[other_boxes_offset + coord_index + 3]); + other_boxes_areas[threadIdx.x] = their_area; + } + __syncthreads(); + + if (my_element_in_batch >= topk) return; + + DType my_box[4]; + DType my_class = -1; + DType my_score = -1; + const index_t my_offset = (my_batch * num_elements_per_batch + my_element_in_batch) * + element_width; + my_score = data[my_offset + score_index]; +#pragma unroll + for (int i = 0; i < 4; ++i) { + my_box[i] = data[my_offset + coord_index + i]; + } + if (class_index != -1) { + my_class = data[my_offset + class_index]; + } + DType my_area = calculate_area(my_box[0], my_box[1], my_box[2], my_box[3]); + + uint32_t ret = 0; + if (my_score != -1) { +#pragma unroll + for (int i = 0; i < num_other_boxes; ++i) { + const int other_boxes_offset = element_width * i; + if ((class_index == -1 || my_class == other_boxes[other_boxes_offset + class_index]) && + other_boxes[other_boxes_offset + score_index] != -1) { + const DType their_area = other_boxes_areas[i]; + + const DType intersect = calculate_intersection( + my_box[0], my_box[1], my_box[2], my_box[3], + other_boxes[other_boxes_offset + coord_index + 0], + other_boxes[other_boxes_offset + coord_index + 1], + other_boxes[other_boxes_offset + coord_index + 2], + other_boxes[other_boxes_offset + coord_index + 3]); + if (intersect > threshold * (my_area + their_area - intersect)) { + ret = ret | (1u << i); + } + } + } + } + result[(my_row * num_batches + my_batch) * topk + my_element_in_batch] = ~ret; +} + +template +__launch_bounds__(NMS::THRESHOLD) +__global__ void ReduceNMSResultTriangleKernel(uint32_t* nms_results, + DType * data, + const index_t score_index, + 
const index_t element_width, + const index_t num_batches, + const index_t num_elements_per_batch, + const index_t start_index, + const index_t topk) { + constexpr int n_threads = NMS::THRESHOLD; + constexpr int warp_size = 32; + const index_t my_batch = blockIdx.x; + const index_t my_element_in_batch = threadIdx.x + start_index; + const index_t my_element = my_batch * topk + my_element_in_batch; + const int my_warp = threadIdx.x / warp_size; + const int my_lane = threadIdx.x % warp_size; + + __shared__ uint32_t current_valid_boxes[n_threads / warp_size]; + const uint32_t full_mask = 0xFFFFFFFF; + const uint32_t my_lane_mask = 1 << my_lane; + const uint32_t earlier_threads_mask = (1 << (my_lane + 1)) - 1; + uint32_t valid = my_lane_mask; + uint32_t valid_boxes = full_mask; + + uint32_t my_next_mask = my_element_in_batch < topk ? + nms_results[my_element]: + full_mask; +#pragma unroll + for (int i = 0; i < n_threads / warp_size; ++i) { + uint32_t my_mask = my_next_mask; + my_next_mask = (((i + 1) < n_threads / warp_size) && + (my_element_in_batch < topk)) ? + nms_results[(i + 1) * topk * num_batches + my_element]: + full_mask; + if (my_warp == i && !__all_sync(full_mask, my_mask == full_mask)) { + my_mask = my_mask | earlier_threads_mask; + // Loop over warp_size - 1 because the last + // thread does not contribute to the mask anyway +#pragma unroll + for (int j = 0; j < warp_size - 1; ++j) { + const uint32_t mask = __shfl_sync(full_mask, valid ? my_mask : full_mask, j); + valid = valid & mask; + } + valid_boxes = __ballot_sync(full_mask, valid); + } + if (my_lane == 0 && my_warp == i) { + current_valid_boxes[i] = valid_boxes; + } + __syncthreads(); + if ((my_warp > i) && (((~my_mask) & current_valid_boxes[i]) != 0)) { + valid = 0; + } + } + if (my_lane == 0) { + nms_results[my_element] = valid_boxes; + } + if (valid == 0) { + data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width + + score_index] = -1; + } +} + +template +__launch_bounds__(512) +__global__ void ReduceNMSResultRestKernel(DType* data, + const uint32_t* nms_results, + const index_t score_index, + const index_t element_width, + const index_t num_batches, + const index_t num_elements_per_batch, + const index_t start_index, + const index_t topk, + const index_t num_blocks_per_batch) { + constexpr int num_other_boxes = sizeof(uint32_t) * 8; + constexpr int num_iterations = NMS::THRESHOLD / num_other_boxes; + constexpr int warp_size = 32; + const index_t my_block_offset_in_batch = blockIdx.x % num_blocks_per_batch; + const index_t my_batch = blockIdx.x / num_blocks_per_batch; + const index_t my_element_in_batch = my_block_offset_in_batch * blockDim.x + + start_index + NMS::THRESHOLD + threadIdx.x; + const index_t my_element = my_batch * topk + my_element_in_batch; + + if (my_element_in_batch >= topk) return; + + bool valid = true; + +#pragma unroll + for (int i = 0; i < num_iterations; ++i) { + const uint32_t my_mask = nms_results[i * topk * num_batches + my_element]; + const uint32_t valid_boxes = nms_results[my_batch * topk + i * warp_size + start_index]; + + const bool no_hit = (valid_boxes & (~my_mask)) == 0; + valid = valid && no_hit; + } + + if (!valid) { + data[(my_batch * num_elements_per_batch + my_element_in_batch) * element_width + + score_index] = -1; + } +} + +template +TempWorkspace GetWorkspace(const index_t num_batch, + const index_t num_elem, + const int width_elem, + const index_t topk, + const OpContext& ctx) { + TempWorkspace workspace; + Stream *s = ctx.get_stream(); + const int 
alignment = 128; + + // Get the workspace size + workspace.scores_temp_space = 2 * align(num_batch * num_elem * sizeof(DType), alignment); + workspace.indices_temp_spaces = 2 * align(num_batch * num_elem * sizeof(index_t), alignment); + WorkspaceForSort(num_elem, topk, alignment, &workspace); + // Place for a buffer + workspace.buffer_space = align(num_batch * num_elem * width_elem * sizeof(DType), alignment); + workspace.nms_scratch_space = align(NMS::THRESHOLD / (sizeof(uint32_t) * 8) * + num_batch * topk * sizeof(uint32_t), alignment); + + const size_t workspace_size = workspace.scores_temp_space + + workspace.scratch_space + + workspace.buffer_space + + workspace.nms_scratch_space + + workspace.indices_temp_spaces; + + // Obtain the memory for workspace + Tensor scratch_memory = ctx.requested[box_nms_enum::kTempSpace] + .get_space_typed(mshadow::Shape1(ceil_div(workspace_size, sizeof(double))), s); + + // Populate workspace pointers + workspace.scores = reinterpret_cast(scratch_memory.dptr_); + workspace.scratch = reinterpret_cast(workspace.scores) + + workspace.scores_temp_space; + workspace.buffer = reinterpret_cast(workspace.scratch + + workspace.scratch_space); + workspace.nms_scratch = reinterpret_cast( + reinterpret_cast(workspace.buffer) + + workspace.buffer_space); + workspace.indices = reinterpret_cast( + reinterpret_cast(workspace.nms_scratch) + + workspace.nms_scratch_space); + return workspace; +} + +template +__global__ void ExtractScoresKernel(const DType* data, DType* scores, + const index_t N, const int element_width, + const int score_index) { + const index_t tid = blockIdx.x * blockDim.x + threadIdx.x; + if (tid < N) { + scores[tid] = data[tid * element_width + score_index]; + } +} + +template +void CompactNMSResults(const Tensor& data, + Tensor* out, + Tensor* indices, + Tensor* scores, + Tensor* sorted_indices, + Tensor* sorted_scores, + Tensor* scratch, + const int score_index, + const index_t topk, + Stream* s) { + using mshadow::Shape1; + constexpr int n_threads = 512; + const index_t num_elements = scores->shape_.Size(); + const index_t num_elements_per_batch = data.shape_[1]; + const index_t num_batches = data.shape_[0]; + const int element_width = data.shape_[2]; + const index_t n_blocks = ceil_div(num_elements, n_threads); + ExtractScoresKernel<<::GetStream(s)>>>( + data.dptr_, scores->dptr_, num_elements, element_width, score_index); + *indices = mshadow::expr::range(0, num_elements); + for (index_t i = 0; i < num_batches; ++i) { + // Sort each batch separately + Tensor scores_batch(scores->dptr_ + i * num_elements_per_batch, + Shape1(topk), + s); + Tensor indices_batch(indices->dptr_ + i * num_elements_per_batch, + Shape1(topk), + s); + Tensor sorted_scores_batch(sorted_scores->dptr_ + i * num_elements_per_batch, + Shape1(topk), + s); + Tensor sorted_indices_batch(sorted_indices->dptr_ + i * num_elements_per_batch, + Shape1(topk), + s); + mxnet::op::SortByKey(scores_batch, indices_batch, false, scratch, + 0, 8 * sizeof(DType), 1, &sorted_scores_batch, + &sorted_indices_batch); + } + CompactData(*sorted_indices, data, out, topk, score_index, s); +} + +} // namespace + +void BoxNMSForwardGPU_notemp(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using mshadow::Shape1; + using mshadow::Shape2; + using mshadow::Shape3; + CHECK_NE(req[0], kAddTo) << "BoxNMS does not support kAddTo"; + CHECK_NE(req[0], kWriteInplace) << "BoxNMS does not support in place computation"; + 
CHECK_EQ(inputs.size(), 1U); + CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]"; + const BoxNMSParam& param = nnvm::get(attrs.parsed); + Stream *s = ctx.get_stream(); + mxnet::TShape in_shape = inputs[box_nms_enum::kData].shape_; + int indim = in_shape.ndim(); + int num_batch = indim <= 2? 1 : in_shape.ProdShape(0, indim - 2); + int num_elem = in_shape[indim - 2]; + int width_elem = in_shape[indim - 1]; + + MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { + Tensor data = inputs[box_nms_enum::kData] + .get_with_shape(Shape3(num_batch, num_elem, width_elem), s); + Tensor out = outputs[box_nms_enum::kOut] + .get_with_shape(Shape3(num_batch, num_elem, width_elem), s); + + // Special case for topk == 0 + if (param.topk == 0) { + if (req[0] != kNullOp && + req[0] != kWriteInplace) { + out = mshadow::expr::F(data); + } + return; + } + + index_t topk = param.topk > 0 ? std::min(param.topk, num_elem) : num_elem; + const auto& workspace = GetWorkspace(num_batch, num_elem, + width_elem, topk, ctx); + + FilterAndPrepareAuxData(data, &out, workspace, param, s); + Tensor scores(workspace.scores, Shape1(num_batch * num_elem), s); + Tensor sorted_scores(workspace.scores + scores.MSize(), + Shape1(num_batch * num_elem), s); + Tensor indices(workspace.indices, Shape1(num_batch * num_elem), s); + Tensor sorted_indices(workspace.indices + indices.MSize(), + Shape1(num_batch * num_elem), s); + Tensor scratch(reinterpret_cast(workspace.scratch), + Shape1(workspace.scratch_space), s); + Tensor buffer(workspace.buffer, + Shape3(num_batch, num_elem, width_elem), s); + Tensor nms_scratch(workspace.nms_scratch, + Shape2(NMS::THRESHOLD / (sizeof(uint32_t) * 8), + topk * num_batch), + s); + indices = mshadow::expr::range(0, num_batch * num_elem); + for (index_t i = 0; i < num_batch; ++i) { + // Sort each batch separately + Tensor scores_batch(scores.dptr_ + i * num_elem, + Shape1(num_elem), + s); + Tensor indices_batch(indices.dptr_ + i * num_elem, + Shape1(num_elem), + s); + Tensor sorted_scores_batch(sorted_scores.dptr_ + i * num_elem, + Shape1(num_elem), + s); + Tensor sorted_indices_batch(sorted_indices.dptr_ + i * num_elem, + Shape1(num_elem), + s); + mxnet::op::SortByKey(scores_batch, indices_batch, false, &scratch, 0, + 8 * sizeof(DType), 1, &sorted_scores_batch, + &sorted_indices_batch); + } + CompactData(sorted_indices, out, &buffer, topk, -1, s); + NMS nms; + nms(&buffer, &nms_scratch, topk, param, s); + CompactNMSResults(buffer, &out, &indices, &scores, &sorted_indices, + &sorted_scores, &scratch, param.score_index, topk, s); + + // convert encoding + if (param.in_format != param.out_format) { + if (box_common_enum::kCenter == param.out_format) { + mxnet::op::mxnet_op::Kernel::Launch(s, num_batch * num_elem, + out.dptr_ + param.coord_start, width_elem); + } else { + mxnet::op::mxnet_op::Kernel::Launch(s, num_batch * num_elem, + out.dptr_ + param.coord_start, width_elem); + } + } + }); +} + +void BoxNMSForwardGPU(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using namespace mshadow; + using namespace mshadow::expr; + using namespace mxnet_op; + CHECK_EQ(inputs.size(), 1U); + CHECK_EQ(outputs.size(), 2U) << "BoxNMS output: [output, temp]"; + if (req[1] == kNullOp) { + BoxNMSForwardGPU_notemp(attrs, ctx, inputs, req, outputs); + return; + } + BoxNMSForward(attrs, ctx, inputs, req, outputs); +} + + +NNVM_REGISTER_OP(_contrib_box_nms) +.set_attr("FCompute", BoxNMSForwardGPU); + 
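+// Hedged sketch (added commentary, not taken from the MXNet sources; the
+// helper name below is hypothetical): the greedy NMS kernels above evaluate
+// the usual IoU criterion one pair at a time.  A scalar reference of the
+// per-pair test is
+//
+//   inline bool suppresses(float inter, float area_i, float area_j,
+//                          float overlap_thresh) {
+//     // IoU > overlap_thresh  <=>  inter > overlap_thresh * union
+//     return inter > overlap_thresh * (area_i + area_j - inter);
+//   }
+//
+// CalculateGreedyNMSResultsKernel packs one such decision per bit (32 "other"
+// boxes per uint32_t word), and the ReduceNMSResult* kernels then AND together
+// the masks of the boxes that remain valid, 512 candidates per pass.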
+NNVM_REGISTER_OP(_backward_contrib_box_nms) +.set_attr("FCompute", BoxNMSBackward); + +NNVM_REGISTER_OP(_contrib_box_iou) +.set_attr("FCompute", BoxOverlapForward); + +NNVM_REGISTER_OP(_backward_contrib_box_iou) +.set_attr("FCompute", BoxOverlapBackward); + +NNVM_REGISTER_OP(_contrib_bipartite_matching) +.set_attr("FCompute", BipartiteMatchingForward); + +NNVM_REGISTER_OP(_backward_contrib_bipartite_matching) +.set_attr("FCompute", BipartiteMatchingBackward); + +NNVM_REGISTER_OP(_contrib_box_encode) +.set_attr("FCompute", BoxEncodeForward); + +NNVM_REGISTER_OP(_contrib_box_decode) +.set_attr("FCompute", BoxDecodeForward); + +} // namespace op +} // namespace mxnet diff --git a/cuda_code/box2d4r-128-1-128_kernel_1.cu b/cuda_code/box2d4r-128-1-128_kernel_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..c7a1326aa6caac6f449ae38f35ab3a6b577a5325 --- /dev/null +++ b/cuda_code/box2d4r-128-1-128_kernel_1.cu @@ -0,0 +1,540 @@ +#include "box2d4r-128-1-128_kernel.hu" +__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } + +__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) +{ +#ifndef AN5D_TYPE +#define AN5D_TYPE unsigned +#endif + const AN5D_TYPE __c0Len = (timestep - 0); + const AN5D_TYPE __c0Pad = (0); + #define __c0 c0 + const AN5D_TYPE __c1Len = (dimsize - 4 - 4); + const AN5D_TYPE __c1Pad = (4); + #define __c1 c1 + const AN5D_TYPE __c2Len = (dimsize - 4 - 4); + const AN5D_TYPE __c2Pad = (4); + #define __c2 c2 + const AN5D_TYPE __halo1 = 4; + const AN5D_TYPE __halo2 = 4; + const AN5D_TYPE __side0Len = 1; + const AN5D_TYPE __side1Len = 128; + const AN5D_TYPE __side2Len = 120; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; + const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; + const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; + const AN5D_TYPE __local_c2 = __tid; + const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; + const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; + float __reg_0; + float __reg_1_0; + float __reg_1_1; + float __reg_1_2; + float __reg_1_3; + float __reg_1_4; + float __reg_1_5; + float __reg_1_6; + float __reg_1_7; + float __reg_1_8; + __shared__ float __a_sb_double[__blockSize * 2]; + float *__a_sb = __a_sb_double; + const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; + const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; + const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); + const AN5D_TYPE __storeValid = __writeValid1; + AN5D_TYPE __c1; + AN5D_TYPE __h; + const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; + #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) + #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) + #define __REGREF(reg, i2) reg + #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) + #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00930f * (__SBREF(__a_sb, -4))) + 
(0.00931f * (__SBREF(__a_sb, -3)))) + (0.00932f * (__SBREF(__a_sb, -2)))) + (0.00933f * (__SBREF(__a_sb, -1)))) + (0.00934f * (__REGREF(__a, 0)))) + (0.00935f * (__SBREF(__a_sb, 1)))) + (0.00936f * (__SBREF(__a_sb, 2)))) + (0.00937f * (__SBREF(__a_sb, 3)))) + (0.00938f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); + #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) + #define __REGREF(reg, i2) reg + #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) + #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00939f * (__SBREF(__a_sb, -4)))) + (0.00940f * (__SBREF(__a_sb, -3)))) + (0.00941f * (__SBREF(__a_sb, -2)))) + (0.00942f * (__SBREF(__a_sb, -1)))) + (0.00943f * (__REGREF(__a, 0)))) + (0.00944f * (__SBREF(__a_sb, 1)))) + (0.00945f * (__SBREF(__a_sb, 2)))) + (0.00946f * (__SBREF(__a_sb, 3)))) + (0.00947f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); + #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) + #define __REGREF(reg, i2) reg + #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) + #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00948f * (__SBREF(__a_sb, -4)))) + (0.00949f * (__SBREF(__a_sb, -3)))) + (0.00950f * (__SBREF(__a_sb, -2)))) + (0.00951f * (__SBREF(__a_sb, -1)))) + (0.00952f * (__REGREF(__a, 0)))) + (0.00953f * (__SBREF(__a_sb, 1)))) + (0.00954f * (__SBREF(__a_sb, 2)))) + (0.00955f * (__SBREF(__a_sb, 3)))) + (0.00956f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); + #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) + #define __REGREF(reg, i2) reg + #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) + #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((0.00957f * (__SBREF(__a_sb, -4)))) + (0.00958f * (__SBREF(__a_sb, -3)))) + (0.00959f * (__SBREF(__a_sb, -2)))) + (0.00960f * (__SBREF(__a_sb, -1)))) + (0.00961f * (__REGREF(__a, 0)))) + (0.00962f * (__SBREF(__a_sb, 1)))) + (0.00963f * (__SBREF(__a_sb, 2)))) + (0.00964f * (__SBREF(__a_sb, 3)))) + (0.00965f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); + #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) + #define __REGREF(reg, i2) reg + #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) + #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((0.00966f * (__SBREF(__a_sb, -4)))) + (0.00967f * (__SBREF(__a_sb, -3)))) + (0.00968f * (__SBREF(__a_sb, -2)))) + (0.00969f * (__SBREF(__a_sb, -1)))) + (0.22400f * (__REGREF(__a, 0)))) + (0.00971f * (__SBREF(__a_sb, 1)))) + (0.00972f * (__SBREF(__a_sb, 2)))) + (0.00973f * (__SBREF(__a_sb, 3)))) + (0.00974f * (__SBREF(__a_sb, 4)))))))))))))))))))))))))))))))))))))))); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); + #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) + #define __REGREF(reg, i2) reg + #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) + #define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((0.00975f * (__SBREF(__a_sb, -4)))) + (0.00976f * (__SBREF(__a_sb, -3)))) + (0.00977f * (__SBREF(__a_sb, -2)))) + (0.00978f * (__SBREF(__a_sb, -1)))) + (0.00979f * (__REGREF(__a, 0)))) + (0.00980f * (__SBREF(__a_sb, 1)))) + (0.00981f * (__SBREF(__a_sb, 2)))) + (0.00982f * (__SBREF(__a_sb, 3)))) + (0.00983f * (__SBREF(__a_sb, 4))))))))))))))))))))))))))))))); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_5(out, a) do { float etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0); + #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) + #define __REGREF(reg, i2) reg + #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) + #define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((0.00984f * (__SBREF(__a_sb, -4)))) + (0.00985f * (__SBREF(__a_sb, -3)))) + (0.00986f * (__SBREF(__a_sb, -2)))) + (0.00987f * (__SBREF(__a_sb, -1)))) + (0.00988f * (__REGREF(__a, 0)))) + (0.00989f * (__SBREF(__a_sb, 1)))) + (0.00990f * (__SBREF(__a_sb, 2)))) + (0.00991f * (__SBREF(__a_sb, 3)))) + (0.00992f * (__SBREF(__a_sb, 4)))))))))))))))))))))); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_6(out, a) do { float etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0); + #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) + #define __REGREF(reg, i2) reg + #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) + #define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.00993f * (__SBREF(__a_sb, -4)))) + (0.00994f * (__SBREF(__a_sb, -3)))) + (0.00995f * (__SBREF(__a_sb, -2)))) + (0.00996f * (__SBREF(__a_sb, -1)))) + (0.00997f * (__REGREF(__a, 0)))) + (0.00998f * (__SBREF(__a_sb, 1)))) + (0.00999f * (__SBREF(__a_sb, 2)))) + (0.01000f * (__SBREF(__a_sb, 3)))) + (0.01001f * (__SBREF(__a_sb, 4))))))))))))); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_7(out, a) do { float etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0); + #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) + #define __REGREF(reg, i2) reg + #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) + #define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = ((((((((((0.01002f * (__SBREF(__a_sb, -4)))) + (0.01003f * (__SBREF(__a_sb, -3)))) + (0.01004f * (__SBREF(__a_sb, -2)))) + (0.01005f * (__SBREF(__a_sb, -1)))) + (0.01006f * (__REGREF(__a, 0)))) + (0.01007f * (__SBREF(__a_sb, 1)))) + (0.01008f * (__SBREF(__a_sb, 2)))) + (0.01009f * (__SBREF(__a_sb, 3)))) + (0.01010f * (__SBREF(__a_sb, 4)))); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_8(out, a) do { float etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0); + #define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0); + #define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0) + #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) + if (__c1Id == 0) + { + __LOAD(__reg_0, 0); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __LOAD(__reg_0, 1); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __LOAD(__reg_0, 2); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __LOAD(__reg_0, 3); + __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __LOAD(__reg_0, 4); + __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __LOAD(__reg_0, 5); + __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); + __LOAD(__reg_0, 6); + 
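+    // Added commentary (inferred from the generated macros, not part of the
+    // original AN5D output): each __LOAD brings one input row into __reg_0,
+    // and __CALC1 adds that row's weighted terms to nine rotating accumulators
+    // (__reg_1_0 .. __reg_1_8), one per in-flight output row of the radius-4
+    // box stencil.  Once nine rows have been folded in, the completed
+    // accumulator is flushed with __STORE(h - 4, ...), so stores trail loads
+    // by the halo of 4 rows.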
__CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); + __LOAD(__reg_0, 7); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); + __LOAD(__reg_0, 8); + __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); + __STORE(4, __reg_1_4); + } + else + { + __LOAD(__reg_0, 0); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __LOAD(__reg_0, 1); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __LOAD(__reg_0, 2); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __LOAD(__reg_0, 3); + __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __LOAD(__reg_0, 4); + __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __LOAD(__reg_0, 5); + __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); + __LOAD(__reg_0, 6); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); + __LOAD(__reg_0, 7); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); + __LOAD(__reg_0, 8); + __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); + __STORE(4, __reg_1_4); + } + __a_sb = __a_sb_double + __blockSize * 1; + if (__c1Id == __side1Num - 1) + { + for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;) + { + __LOAD(__reg_0, __h); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 4, __reg_1_6); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 4, __reg_1_7); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 4, __reg_1_8); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h - 4, __reg_1_0); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h - 4, __reg_1_1); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); + __STORE(__h - 4, __reg_1_2); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); + __STORE(__h - 4, __reg_1_3); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); + __STORE(__h - 4, __reg_1_4); + __h++; + 
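+      // Added commentary (presumed intent, inferred from the code): __a_sb
+      // points into __a_sb_double, which holds two shared-memory tiles of
+      // __blockSize floats each.  __DB_SWITCH() (also issued inside every
+      // __CALCSETUP) flips the working tile, so a newly loaded row is written
+      // into the buffer that is not currently being read, and the trailing
+      // __syncthreads() realigns the block before the next unrolled iteration.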
__DB_SWITCH(); __syncthreads(); + } + if (0) {} + else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 3, __reg_1_6); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 2, __reg_1_7); + __LOAD(__reg_0, __h + 3); + __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0); + __STORE(__h - 1, __reg_1_8); + } + else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 3, __reg_1_6); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 2, __reg_1_7); + __LOAD(__reg_0, __h + 3); + __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 1, __reg_1_8); + __LOAD(__reg_0, __h + 4); + __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0); + __STORE(__h + 0, __reg_1_0); + } + else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 3, __reg_1_6); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 2, __reg_1_7); + __LOAD(__reg_0, __h + 3); + __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 1, __reg_1_8); + __LOAD(__reg_0, __h + 4); + __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h + 0, __reg_1_0); + __LOAD(__reg_0, __h + 5); + __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); + __STORE(__h + 1, __reg_1_1); + } + else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 3, __reg_1_6); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 2, __reg_1_7); + 
__LOAD(__reg_0, __h + 3); + __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 1, __reg_1_8); + __LOAD(__reg_0, __h + 4); + __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h + 0, __reg_1_0); + __LOAD(__reg_0, __h + 5); + __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h + 1, __reg_1_1); + __LOAD(__reg_0, __h + 6); + __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0); + __STORE(__h + 2, __reg_1_2); + } + else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 3, __reg_1_6); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 2, __reg_1_7); + __LOAD(__reg_0, __h + 3); + __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 1, __reg_1_8); + __LOAD(__reg_0, __h + 4); + __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h + 0, __reg_1_0); + __LOAD(__reg_0, __h + 5); + __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h + 1, __reg_1_1); + __LOAD(__reg_0, __h + 6); + __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); + __STORE(__h + 2, __reg_1_2); + __LOAD(__reg_0, __h + 7); + __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); + __STORE(__h + 3, __reg_1_3); + } + else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 3, __reg_1_6); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 2, __reg_1_7); + __LOAD(__reg_0, __h + 3); + __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 1, __reg_1_8); + __LOAD(__reg_0, __h + 4); + __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h + 0, __reg_1_0); + __LOAD(__reg_0, __h + 5); + __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h + 1, __reg_1_1); + __LOAD(__reg_0, __h + 6); + __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); + __STORE(__h + 2, __reg_1_2); + __LOAD(__reg_0, __h + 
7); + __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); + __STORE(__h + 3, __reg_1_3); + __LOAD(__reg_0, __h + 8); + __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); + __STORE(__h + 4, __reg_1_4); + } + else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 3, __reg_1_6); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 2, __reg_1_7); + __LOAD(__reg_0, __h + 3); + __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 1, __reg_1_8); + __LOAD(__reg_0, __h + 4); + __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h + 0, __reg_1_0); + __LOAD(__reg_0, __h + 5); + __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h + 1, __reg_1_1); + __LOAD(__reg_0, __h + 6); + __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); + __STORE(__h + 2, __reg_1_2); + __LOAD(__reg_0, __h + 7); + __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); + __STORE(__h + 3, __reg_1_3); + __LOAD(__reg_0, __h + 8); + __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0); + __STORE(__h + 4, __reg_1_4); + __LOAD(__reg_0, __h + 9); + __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0); + __STORE(__h + 5, __reg_1_5); + } + else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 3, __reg_1_6); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 2, __reg_1_7); + __LOAD(__reg_0, __h + 3); + __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 1, __reg_1_8); + __LOAD(__reg_0, __h + 4); + __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h + 0, __reg_1_0); + __LOAD(__reg_0, __h + 5); + __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h + 1, __reg_1_1); + __LOAD(__reg_0, __h + 6); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); + __STORE(__h + 2, __reg_1_2); + __LOAD(__reg_0, __h + 7); + 
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); + __STORE(__h + 3, __reg_1_3); + __LOAD(__reg_0, __h + 8); + __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); + __STORE(__h + 4, __reg_1_4); + __LOAD(__reg_0, __h + 9); + __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h + 5, __reg_1_5); + __LOAD(__reg_0, __h + 10); + __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0); + __STORE(__h + 6, __reg_1_6); + } + else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 3, __reg_1_6); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 2, __reg_1_7); + __LOAD(__reg_0, __h + 3); + __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 1, __reg_1_8); + __LOAD(__reg_0, __h + 4); + __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h + 0, __reg_1_0); + __LOAD(__reg_0, __h + 5); + __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h + 1, __reg_1_1); + __LOAD(__reg_0, __h + 6); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); + __STORE(__h + 2, __reg_1_2); + __LOAD(__reg_0, __h + 7); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); + __STORE(__h + 3, __reg_1_3); + __LOAD(__reg_0, __h + 8); + __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); + __STORE(__h + 4, __reg_1_4); + __LOAD(__reg_0, __h + 9); + __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h + 5, __reg_1_5); + __LOAD(__reg_0, __h + 10); + __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h + 6, __reg_1_6); + __LOAD(__reg_0, __h + 11); + __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0); + __STORE(__h + 7, __reg_1_7); + } + } + else + { + for (__h = 9; __h <= __side1LenOl - 9;) + { + __LOAD(__reg_0, __h); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 4, __reg_1_6); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 4, __reg_1_7); + __h++; + __LOAD(__reg_0, __h); + 
__CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 4, __reg_1_8); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h - 4, __reg_1_0); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h - 4, __reg_1_1); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); + __STORE(__h - 4, __reg_1_2); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); + __STORE(__h - 4, __reg_1_3); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); + __STORE(__h - 4, __reg_1_4); + __h++; + __DB_SWITCH(); __syncthreads(); + } + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); + __STORE(__h - 4, __reg_1_5); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); + __STORE(__h - 4, __reg_1_6); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); + __STORE(__h - 4, __reg_1_7); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); + __STORE(__h - 4, __reg_1_8); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h - 4, __reg_1_0); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h - 4, __reg_1_1); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); + __STORE(__h - 4, __reg_1_2); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); + __STORE(__h - 4, __reg_1_3); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); + __STORE(__h - 4, __reg_1_4); + __h++; + } +} diff --git a/cuda_code/box2d4r-512-4-512_host.cu b/cuda_code/box2d4r-512-4-512_host.cu new file mode 100644 index 0000000000000000000000000000000000000000..80ca8a77f389c7f2980d9694ad0f4f64ffcaf18c --- /dev/null +++ b/cuda_code/box2d4r-512-4-512_host.cu @@ -0,0 +1,383 @@ +#include +#include +#include "box2d4r-512-4-512_kernel.hu" +#define BENCH_DIM 2 +#define BENCH_FPP 161 +#define BENCH_RAD 4 + +#include "common.h" + +double kernel_stencil(SB_TYPE *A1, int compsize, int 
timestep, bool scop) +{ + double start_time = sb_time(), end_time = 0.0; + int dimsize = compsize + BENCH_RAD * 2; + SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; + + if (scop) { + if (dimsize >= 9 && timestep >= 1) { +#define cudaCheckReturn(ret) \ + do { \ + cudaError_t cudaCheckReturn_e = (ret); \ + if (cudaCheckReturn_e != cudaSuccess) { \ + fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ + fflush(stderr); \ + } \ + assert(cudaCheckReturn_e == cudaSuccess); \ + } while(0) +#define cudaCheckKernel() \ + do { \ + cudaCheckReturn(cudaGetLastError()); \ + } while(0) + + double *dev_A; + + cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double))); + +{ + cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice)); +#ifdef STENCILBENCH +cudaDeviceSynchronize(); +SB_START_INSTRUMENTS; +#endif +} + { +#ifndef AN5D_TYPE +#define AN5D_TYPE unsigned +#endif + const AN5D_TYPE __c0Len = (timestep - 0); + const AN5D_TYPE __c0Pad = (0); + #define __c0 c0 + const AN5D_TYPE __c1Len = (dimsize - 4 - 4); + const AN5D_TYPE __c1Pad = (4); + #define __c1 c1 + const AN5D_TYPE __c2Len = (dimsize - 4 - 4); + const AN5D_TYPE __c2Pad = (4); + #define __c2 c2 + const AN5D_TYPE __halo1 = 4; + const AN5D_TYPE __halo2 = 4; + AN5D_TYPE c0; + AN5D_TYPE __side0LenMax; + { + const AN5D_TYPE __side0Len = 4; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 480; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; + __side0LenMax = __side0Len; + for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) + { + kernel0_4<<>> (dev_A, dimsize, timestep, c0); + } + } + if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) + { + if (__c0Len % __side0LenMax == 0) + { + { + const AN5D_TYPE __side0Len = 2; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 496; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_2<<>> (dev_A, dimsize, timestep, c0); + } + c0 += 1; + { + const AN5D_TYPE __side0Len = 2; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 496; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_2<<>> (dev_A, dimsize, timestep, c0); + } + } + else if (__c0Len % __side0LenMax == 1) + { + { + const AN5D_TYPE __side0Len = 3; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 488; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_3<<>> (dev_A, dimsize, timestep, c0); + } + c0 += 1; + { + const AN5D_TYPE __side0Len = 1; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 504; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_1<<>> (dev_A, dimsize, timestep, c0); + } + c0 += 1; + { + const AN5D_TYPE __side0Len = 1; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 
504; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_1<<>> (dev_A, dimsize, timestep, c0); + } + } + else if (__c0Len % __side0LenMax == 2) + { + { + const AN5D_TYPE __side0Len = 1; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 504; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_1<<>> (dev_A, dimsize, timestep, c0); + } + c0 += 1; + { + const AN5D_TYPE __side0Len = 1; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 504; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_1<<>> (dev_A, dimsize, timestep, c0); + } + } + else if (__c0Len % __side0LenMax == 3) + { + { + const AN5D_TYPE __side0Len = 2; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 496; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_2<<>> (dev_A, dimsize, timestep, c0); + } + c0 += 1; + { + const AN5D_TYPE __side0Len = 1; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 504; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 
|| __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_1<<>> (dev_A, dimsize, timestep, c0); + } + } + } + else if (__c0Len % __side0LenMax) + { + if (__c0Len % __side0LenMax == 1) + { + const AN5D_TYPE __side0Len = 1; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 504; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_1<<>> (dev_A, dimsize, timestep, c0); + } + else if (__c0Len % __side0LenMax == 2) + { + const AN5D_TYPE __side0Len = 2; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 496; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_2<<>> (dev_A, dimsize, timestep, c0); + } + else if (__c0Len % __side0LenMax == 3) + { + const AN5D_TYPE __side0Len = 3; + const AN5D_TYPE __side1Len = 512; + const AN5D_TYPE __side2Len = 488; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __blockSize = 1 * __side2LenOl; + assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); + dim3 k0_dimBlock(__blockSize, 1, 1); + dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); + kernel0_3<<>> (dev_A, dimsize, timestep, c0); + } + } + } + cudaCheckKernel(); +{ +#ifdef STENCILBENCH +cudaDeviceSynchronize(); +SB_STOP_INSTRUMENTS; +#endif + cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost)); +} + cudaCheckReturn(cudaFree(dev_A)); + } + } + else { + for (int t = 0; t < timestep; t++) +#pragma omp parallel for + for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) + for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) + A[(t+1)%2][i][j] = + 0.00930f * A[t%2][i-4][j-4] + + 0.00931f * A[t%2][i-4][j-3] + + 0.00932f * A[t%2][i-4][j-2] + + 0.00933f * A[t%2][i-4][j-1] + + 0.00934f * A[t%2][i-4][j] + + 0.00935f * A[t%2][i-4][j+1] + + 0.00936f * A[t%2][i-4][j+2] + + 0.00937f * A[t%2][i-4][j+3] + + 0.00938f * A[t%2][i-4][j+4] + + + 
0.00939f * A[t%2][i-3][j-4] + + 0.00940f * A[t%2][i-3][j-3] + + 0.00941f * A[t%2][i-3][j-2] + + 0.00942f * A[t%2][i-3][j-1] + + 0.00943f * A[t%2][i-3][j] + + 0.00944f * A[t%2][i-3][j+1] + + 0.00945f * A[t%2][i-3][j+2] + + 0.00946f * A[t%2][i-3][j+3] + + 0.00947f * A[t%2][i-3][j+4] + + + 0.00948f * A[t%2][i-2][j-4] + + 0.00949f * A[t%2][i-2][j-3] + + 0.00950f * A[t%2][i-2][j-2] + + 0.00951f * A[t%2][i-2][j-1] + + 0.00952f * A[t%2][i-2][j] + + 0.00953f * A[t%2][i-2][j+1] + + 0.00954f * A[t%2][i-2][j+2] + + 0.00955f * A[t%2][i-2][j+3] + + 0.00956f * A[t%2][i-2][j+4] + + + 0.00957f * A[t%2][i-1][j-4] + + 0.00958f * A[t%2][i-1][j-3] + + 0.00959f * A[t%2][i-1][j-2] + + 0.00960f * A[t%2][i-1][j-1] + + 0.00961f * A[t%2][i-1][j] + + 0.00962f * A[t%2][i-1][j+1] + + 0.00963f * A[t%2][i-1][j+2] + + 0.00964f * A[t%2][i-1][j+3] + + 0.00965f * A[t%2][i-1][j+4] + + + 0.00966f * A[t%2][i][j-4] + + 0.00967f * A[t%2][i][j-3] + + 0.00968f * A[t%2][i][j-2] + + 0.00969f * A[t%2][i][j-1] + + 0.22400f * A[t%2][i][j] + + 0.00971f * A[t%2][i][j+1] + + 0.00972f * A[t%2][i][j+2] + + 0.00973f * A[t%2][i][j+3] + + 0.00974f * A[t%2][i][j+4] + + + 0.00975f * A[t%2][i+1][j-4] + + 0.00976f * A[t%2][i+1][j-3] + + 0.00977f * A[t%2][i+1][j-2] + + 0.00978f * A[t%2][i+1][j-1] + + 0.00979f * A[t%2][i+1][j] + + 0.00980f * A[t%2][i+1][j+1] + + 0.00981f * A[t%2][i+1][j+2] + + 0.00982f * A[t%2][i+1][j+3] + + 0.00983f * A[t%2][i+1][j+4] + + + 0.00984f * A[t%2][i+2][j-4] + + 0.00985f * A[t%2][i+2][j-3] + + 0.00986f * A[t%2][i+2][j-2] + + 0.00987f * A[t%2][i+2][j-1] + + 0.00988f * A[t%2][i+2][j] + + 0.00989f * A[t%2][i+2][j+1] + + 0.00990f * A[t%2][i+2][j+2] + + 0.00991f * A[t%2][i+2][j+3] + + 0.00992f * A[t%2][i+2][j+4] + + + 0.00993f * A[t%2][i+3][j-4] + + 0.00994f * A[t%2][i+3][j-3] + + 0.00995f * A[t%2][i+3][j-2] + + 0.00996f * A[t%2][i+3][j-1] + + 0.00997f * A[t%2][i+3][j] + + 0.00998f * A[t%2][i+3][j+1] + + 0.00999f * A[t%2][i+3][j+2] + + 0.01000f * A[t%2][i+3][j+3] + + 0.01001f * A[t%2][i+3][j+4] + + + 0.01002f * A[t%2][i+4][j-4] + + 0.01003f * A[t%2][i+4][j-3] + + 0.01004f * A[t%2][i+4][j-2] + + 0.01005f * A[t%2][i+4][j-1] + + 0.01006f * A[t%2][i+4][j] + + 0.01007f * A[t%2][i+4][j+1] + + 0.01008f * A[t%2][i+4][j+2] + + 0.01009f * A[t%2][i+4][j+3] + + 0.01010f * A[t%2][i+4][j+4]; + } + + return (((end_time != 0.0) ? end_time : sb_time()) - start_time); +} diff --git a/cuda_code/box_encoder_15.cu b/cuda_code/box_encoder_15.cu new file mode 100644 index 0000000000000000000000000000000000000000..59186d54ee2bd7393b99b5759d45a1599779a970 --- /dev/null +++ b/cuda_code/box_encoder_15.cu @@ -0,0 +1,323 @@ +// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
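+ +// Rough flow, as the kernels below suggest: Encode() matches every ground-truth box against +// every anchor by IoU (CalculateIou), reduces to the best anchor per box with the block-wide +// FindBestMatch, and WriteMatchesToOutput then writes either center/width/height boxes or, +// when offset is requested, offsets normalised by the supplied means, stds and scale.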
+ +#include "dali/pipeline/operators/detection/box_encoder.cuh" +#include +#include +#include + +namespace dali { +__host__ __device__ inline float4 ToCenterWidthHeight(const float4 &box) { + return { + 0.5f * (box.x + box.z), + 0.5f * (box.y + box.w), + box.z - box.x, + box.w - box.y}; +} + +void BoxEncoder::PrepareAnchors(const vector &anchors) { + DALI_ENFORCE( + (anchors.size() % BoundingBox::kSize) == 0, + "Anchors size must be divisible by 4, actual value = " + std::to_string(anchors.size())); + + anchors_count_ = anchors.size() / BoundingBox::kSize; + anchors_.Resize({anchors_count_, BoundingBox::kSize}); + anchors_as_center_wh_.Resize({anchors_count_, BoundingBox::kSize}); + + auto anchors_data_cpu = reinterpret_cast(anchors.data()); + + vector anchors_as_center_wh(anchors_count_); + for (unsigned int anchor = 0; anchor < anchors_count_; ++anchor) + anchors_as_center_wh[anchor] = ToCenterWidthHeight(anchors_data_cpu[anchor]); + + auto anchors_data = anchors_.mutable_data(); + auto anchors_as_center_wh_data = anchors_as_center_wh_.mutable_data(); + MemCopy(anchors_data, anchors.data(), anchors_count_ * BoundingBox::kSize * sizeof(float)); + MemCopy( + anchors_as_center_wh_data, + anchors_as_center_wh.data(), + anchors_count_ * BoundingBox::kSize * sizeof(float)); +} + +__device__ __forceinline__ float CalculateIou(const float4 &b1, const float4 &b2) { + float l = max(b1.x, b2.x); + float t = max(b1.y, b2.y); + float r = min(b1.z, b2.z); + float b = min(b1.w, b2.w); + float first = max(r - l, 0.0f); + float second = max(b - t, 0.0f); + volatile float intersection = first * second; + volatile float area1 = (b1.w - b1.y) * (b1.z - b1.x); + volatile float area2 = (b2.w - b2.y) * (b2.z - b2.x); + + return intersection / (area1 + area2 - intersection); +} + +__device__ inline void FindBestMatch(const int N, volatile float *vals, volatile int *idx) { + for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) { + if (threadIdx.x < stride) { + if (vals[threadIdx.x] <= vals[threadIdx.x + stride]) { + if (vals[threadIdx.x] == vals[threadIdx.x + stride]) { + idx[threadIdx.x] = max(idx[threadIdx.x], idx[threadIdx.x + stride]); + } else { + vals[threadIdx.x] = vals[threadIdx.x + stride]; + idx[threadIdx.x] = idx[threadIdx.x + stride]; + } + } + } + __syncthreads(); + } +} + +__device__ float4 MatchOffsets( + float4 box, float4 anchor, const float *means, const float *stds, float scale) { + box.x *= scale; box.y *= scale; box.z *= scale; box.w *= scale; + anchor.x *= scale; anchor.y *= scale; anchor.z *= scale; anchor.w *= scale; + + float x = ((box.x - anchor.x) / anchor.z - means[0]) / stds[0]; + float y = ((box.y - anchor.y) / anchor.w - means[1]) / stds[1]; + float z = (log(box.z / anchor.z) - means[2]) / stds[2]; + float w = (log(box.w / anchor.w) - means[3]) / stds[3]; + + return {x, y, z, w}; +} + +__device__ void WriteMatchesToOutput( + int anchors_count, float criteria, int *labels_out, const int *labels_in, + float4 *boxes_out, const float4 *boxes_in, + volatile int *best_box_idx, volatile float *best_box_iou, bool offset, + const float* means, const float* stds, float scale, const float4 *anchors_as_cwh) { + for (unsigned int anchor = threadIdx.x; anchor < anchors_count; anchor += blockDim.x) { + if (best_box_iou[anchor] > criteria) { + int box_idx = best_box_idx[anchor]; + labels_out[anchor] = labels_in[box_idx]; + float4 box = boxes_in[box_idx]; + + if (!offset) + boxes_out[anchor] = ToCenterWidthHeight(box); + else + boxes_out[anchor] = MatchOffsets( + 
ToCenterWidthHeight(box), anchors_as_cwh[anchor], means, stds, scale); + } + } +} + +__device__ void MatchBoxWithAnchors( + const float4 &box, const int box_idx, unsigned int anchors_count, const float4 *anchors, + volatile int *best_anchor_idx_tmp, volatile float *best_anchor_iou_tmp, + volatile int *best_box_idx, volatile float *best_box_iou) { + float best_anchor_iou = -1.0f; + int best_anchor_idx = -1; + + for (unsigned int anchor = threadIdx.x; anchor < anchors_count; anchor += blockDim.x) { + float new_val = CalculateIou(box, anchors[anchor]); + + if (new_val >= best_anchor_iou) { + best_anchor_iou = new_val; + best_anchor_idx = anchor; + } + + if (new_val >= best_box_iou[anchor]) { + best_box_iou[anchor] = new_val; + best_box_idx[anchor] = box_idx; + } + } + + best_anchor_iou_tmp[threadIdx.x] = best_anchor_iou; + best_anchor_idx_tmp[threadIdx.x] = best_anchor_idx; +} + +template +__global__ void Encode( + const float4 *boxes_in, const int *labels_in, const int *offsets, const int anchors_count, + const float4 *anchors, const float criteria, float4 *boxes_out, int *labels_out, + int *box_idx_buffer, float *box_iou_buffer, bool offset, const float* means, + const float* stds, float scale, const float4 *anchors_as_cwh) { + const int sample = blockIdx.x; + + // Remark: This algorithm is very fragile to floating point arithmetic effects. + // For now, excessive use of volatile in this code, + // makes it conform to reference solution in terms of resulting encoding. + + __shared__ volatile int best_anchor_idx_tmp[BLOCK_SIZE]; + __shared__ volatile float best_anchor_iou_tmp[BLOCK_SIZE]; + + volatile int *best_box_idx = box_idx_buffer + sample * anchors_count; + volatile float *best_box_iou = box_iou_buffer + sample * anchors_count; + + int box_idx = 0; + for (int box_global_idx = offsets[sample]; box_global_idx < offsets[sample+1]; ++box_global_idx) { + MatchBoxWithAnchors( + boxes_in[box_global_idx], + box_idx, + anchors_count, + anchors, + best_anchor_idx_tmp, + best_anchor_iou_tmp, + best_box_idx, + best_box_iou); + + __syncthreads(); + + FindBestMatch(blockDim.x, best_anchor_iou_tmp, best_anchor_idx_tmp); + __syncthreads(); + + if (threadIdx.x == 0) { + int idx = best_anchor_idx_tmp[0]; + best_box_idx[idx] = box_idx; + best_box_iou[idx] = 2.f; + } + __syncthreads(); + + box_idx++; + } + __syncthreads(); + + WriteMatchesToOutput( + anchors_count, + criteria, + labels_out + sample * anchors_count, + labels_in + offsets[sample], + boxes_out + sample * anchors_count, + boxes_in + offsets[sample], + best_box_idx, + best_box_iou, + offset, + means, + stds, + scale, + anchors_as_cwh); +} + +std::pair BoxEncoder::ClearBuffers(const cudaStream_t &stream) { + auto best_box_idx_data = best_box_idx_.mutable_data(); + auto best_box_iou_data = best_box_iou_.mutable_data(); + + CUDA_CALL(cudaMemsetAsync( + best_box_idx_data, 0, batch_size_ * anchors_count_ * sizeof(int), stream)); + CUDA_CALL(cudaMemsetAsync( + best_box_iou_data, 0, batch_size_ * anchors_count_ * sizeof(float), stream)); + + return {best_box_idx_data, best_box_iou_data}; +} + +void BoxEncoder::WriteAnchorsToOutput( + float4 *boxes_out_data, int *labels_out_data, const cudaStream_t &stream) { + CUDA_CALL(cudaMemsetAsync( + labels_out_data, + 0, + batch_size_ * anchors_count_ * sizeof(int), stream)); + + for (int sample = 0; sample < batch_size_; ++sample) + MemCopy( + boxes_out_data + sample * anchors_count_, + anchors_as_center_wh_.data(), + anchors_count_ * BoundingBox::kSize * sizeof(float), + stream); +} + +void 
BoxEncoder::ClearOutput( + float4 *boxes_out_data, int *labels_out_data, const cudaStream_t &stream) { + CUDA_CALL(cudaMemsetAsync( + labels_out_data, + 0, + batch_size_ * anchors_count_ * sizeof(int), + stream)); + + for (int sample = 0; sample < batch_size_; ++sample) + CUDA_CALL(cudaMemsetAsync( + boxes_out_data + sample * anchors_count_, + 0, + anchors_count_ * BoundingBox::kSize * sizeof(float), + stream)); +} + +std::pair, vector> BoxEncoder::CalculateDims( + const TensorList &boxes_input) { + vector boxes_output_dim; + vector labels_output_dim; + for (const auto &sample_boxes_shape : boxes_input.shape()) { + boxes_output_dim.push_back({anchors_count_, BoundingBox::kSize}); + labels_output_dim.push_back({anchors_count_}); + } + + return {boxes_output_dim, labels_output_dim}; +} + +int *BoxEncoder::CalculateBoxesOffsets( + const TensorList &boxes_input, const cudaStream_t &stream) { + vector offsets {0}; + for (const auto &sample_boxes_shape : boxes_input.shape()) + offsets.push_back(sample_boxes_shape[0] + offsets.back()); + + auto offsets_data = boxes_offsets_.mutable_data(); + MemCopy(offsets_data, offsets.data(), (batch_size_ + 1) * sizeof(int), stream); + + return offsets_data; +} + +void BoxEncoder::RunImpl(Workspace *ws, const int idx) { + const auto &boxes_input = ws->Input(0); + const auto &labels_input = ws->Input(1); + + const auto anchors_data = reinterpret_cast(anchors_.data()); + const auto anchors_as_cwh_data = + reinterpret_cast(anchors_as_center_wh_.data()); + const auto boxes_data = reinterpret_cast(boxes_input.data()); + const auto labels_data = labels_input.data(); + + const auto buffers = ClearBuffers(ws->stream()); + + auto boxes_offsets_data = CalculateBoxesOffsets(boxes_input, ws->stream()); + auto dims = CalculateDims(boxes_input); + + auto &boxes_output = ws->Output(0); + boxes_output.set_type(boxes_input.type()); + boxes_output.Resize(dims.first); + auto boxes_out_data = reinterpret_cast(boxes_output.mutable_data()); + + auto &labels_output = ws->Output(1); + labels_output.set_type(labels_input.type()); + labels_output.Resize(dims.second); + auto labels_out_data = labels_output.mutable_data(); + + const auto means_data = means_.data(); + const auto stds_data = stds_.data(); + + if (!offset_) + WriteAnchorsToOutput(boxes_out_data, labels_out_data, ws->stream()); + else + ClearOutput(boxes_out_data, labels_out_data, ws->stream()); + + Encode<<stream()>>>( + boxes_data, + labels_data, + boxes_offsets_data, + anchors_count_, + anchors_data, + criteria_, + boxes_out_data, + labels_out_data, + buffers.first, + buffers.second, + offset_, + means_data, + stds_data, + scale_, + anchors_as_cwh_data); +} + +DALI_REGISTER_OPERATOR(BoxEncoder, BoxEncoder, GPU); + +} // namespace dali diff --git a/cuda_code/brightness_contrast_gpu_test_1.cu b/cuda_code/brightness_contrast_gpu_test_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..6dae8901df16c8abac369bcada6c701766b8ee89 --- /dev/null +++ b/cuda_code/brightness_contrast_gpu_test_1.cu @@ -0,0 +1,199 @@ +// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include "dali/kernels/scratch.h" +#include "dali/kernels/tensor_shape.h" +#include "dali/kernels/common/copy.h" +#include "dali/kernels/test/tensor_test_utils.h" +#include "dali/kernels/test/kernel_test_utils.h" +#include "dali/kernels/imgproc/color_manipulation/brightness_contrast_gpu.h" + +namespace dali { +namespace kernels { +namespace brightness_contrast { +namespace test { + +namespace { + +constexpr size_t kNdims = 3; + + +/** + * Rounding to nearest even (like GPU does it) + */ +template +std::enable_if_t::value, Out> custom_round(float val) { + return static_cast(std::nearbyint(val)); +} + + +template +std::enable_if_t::value, Out> custom_round(float val) { + return val; +} + + +} // namespace + +template +class BrightnessContrastGpuTest : public ::testing::Test { + using In = typename InputOutputTypes::In; + using Out = typename InputOutputTypes::Out; + + public: + BrightnessContrastGpuTest() { + input_host_.resize(dataset_size()); + } + + + void SetUp() final { + std::mt19937_64 rng; + UniformRandomFill(input_host_, rng, 0., 10.); + calc_output(0); + CUDA_CALL(cudaMalloc(&input_device_, sizeof(In) * dataset_size())); + CUDA_CALL(cudaMemcpy(input_device_, input_host_.data(), input_host_.size() * sizeof(In), + cudaMemcpyDefault)); + CUDA_CALL(cudaMalloc(&output_, dataset_size() * sizeof(Out))); + cudaDeviceSynchronize(); + + verify_test(); + } + + + In *input_device_; + Out *output_; + std::vector input_host_; + std::vector ref_output_; + std::vector> shapes_ = {{2, 4, 3}}; + std::vector brightness_ = {4}; + std::vector contrast_ = {3}; + + + void verify_test() { + assert(shapes_.size() == brightness_.size()); + assert(brightness_.size() == contrast_.size()); + assert(dataset_size() == input_host_.size()); + assert(dataset_size() == ref_output_.size()); + } + + + void calc_output(int idx) { + for (auto in : input_host_) { + ref_output_.push_back(custom_round(in * contrast_[idx] + brightness_[idx])); + } + } + + + size_t dataset_size() { + int ret = 0; + for (auto sh : shapes_) { + ret += volume(sh); + } + return ret; + } +}; + +using TestTypes = std::tuple; +/* Cause the line below takes RIDICULOUSLY long time to compile */ +// using TestTypes = std::tuple; + +INPUT_OUTPUT_TYPED_TEST_SUITE(BrightnessContrastGpuTest, TestTypes); + +namespace { + +template +using TheKernel = BrightnessContrastGpu + ; + +} // namespace + + +TYPED_TEST(BrightnessContrastGpuTest, check_kernel) { + check_kernel>(); +} + + +TYPED_TEST(BrightnessContrastGpuTest, setup_test) { + TheKernel kernel; + KernelContext ctx; + InListGPU in(this->input_device_, this->shapes_); + auto reqs = kernel.Setup(ctx, in, this->brightness_, this->contrast_); + ASSERT_EQ(this->shapes_.size(), static_cast(reqs.output_shapes[0].num_samples())) + << "Kernel::Setup provides incorrect shape"; + for (size_t i = 0; i < this->shapes_.size(); i++) { + EXPECT_EQ(this->shapes_[i], reqs.output_shapes[0][i]) + << "Kernel::Setup provides incorrect shape"; + } +} + + +TYPED_TEST(BrightnessContrastGpuTest, run_test) { + TheKernel kernel; + KernelContext c; + InListGPU in(this->input_device_, 
this->shapes_); + OutListGPU out(this->output_, + TensorListShape(this->shapes_)); + + auto reqs = kernel.Setup(c, in, this->brightness_, this->contrast_); + + ScratchpadAllocator sa; + sa.Reserve(reqs.scratch_sizes); + auto scratchpad = sa.GetScratchpad(); + c.scratchpad = &scratchpad; + kernel.Run(c, out, in, this->brightness_, this->contrast_); + cudaDeviceSynchronize(); + + auto res = copy(out[0]); + ASSERT_EQ(static_cast(this->ref_output_.size()), res.first.num_elements()); + for (size_t i = 0; i < this->ref_output_.size(); i++) { + EXPECT_EQ(this->ref_output_[i], res.second.get()[i]) << "Failed for index " << i; + } +} + + +TYPED_TEST(BrightnessContrastGpuTest, sample_descriptors) { + { + InListGPU in(this->input_device_, this->shapes_); + OutListGPU out(this->output_, + TensorListShape<3>(this->shapes_)); + auto res = CreateSampleDescriptors(out, in, this->brightness_, this->contrast_); + EXPECT_EQ(this->input_device_, res[0].in); + EXPECT_EQ(this->output_, res[0].out); + ivec ref_pitch = {2, 12}; + EXPECT_EQ(ref_pitch, res[0].in_pitch); + EXPECT_EQ(ref_pitch, res[0].out_pitch); + EXPECT_EQ(this->brightness_[0], res[0].brightness); + EXPECT_EQ(this->contrast_[0], res[0].contrast); + } + + { + constexpr int ndims = 7; + std::vector> vts = {{7, 2, 4, 6, 1, 8, 4}}; + TensorListShape tls(vts); + InListGPU in(this->input_device_, tls); + OutListGPU out(this->output_, tls); + auto res = CreateSampleDescriptors(out, in, this->brightness_, this->contrast_); + ivec ref = {7, 2, 4, 6, 1, 32}; + EXPECT_EQ(ref, res[0].in_pitch); + } +} + + +} // namespace test +} // namespace brightness_contrast +} // namespace kernels +} // namespace dali diff --git a/cuda_code/broadcast_impl_14.cu b/cuda_code/broadcast_impl_14.cu new file mode 100644 index 0000000000000000000000000000000000000000..1675976004730d9842f992f50c9aea68668ddfe7 --- /dev/null +++ b/cuda_code/broadcast_impl_14.cu @@ -0,0 +1,566 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "backend/kernel_compiler/gpu/cuda_impl/broadcast_impl.cuh" +#include "runtime/device/gpu/cuda_common.h" + +// Basic function +template +struct GreaterFunc { + __device__ __host__ __forceinline__ bool operator()(const T &lhs, const T &rhs) { return lhs > rhs ? true : false; } +}; + +template +struct LessFunc { + __device__ __host__ __forceinline__ bool operator()(const T &lhs, const T &rhs) { return lhs < rhs ? true : false; } +}; + +template +struct EqualFunc { + __device__ __host__ __forceinline__ bool operator()(const T &lhs, const T &rhs) { return lhs == rhs ? true : false; } +}; + +template <> +struct EqualFunc { + __device__ __host__ __forceinline__ bool operator()(const half &lhs, const half &rhs) { + return std::abs(__half2float(lhs) - __half2float(rhs)) < 1e-9 ? true : false; + } +}; + +template <> +struct EqualFunc { + __device__ __host__ __forceinline__ bool operator()(const float &lhs, const float &rhs) { + return std::abs(lhs - rhs) < 1e-9 ? 
true : false; + } +}; + +template +struct MinimumFunc { + __device__ __host__ __forceinline__ T operator()(const T &lhs, const T &rhs) { return lhs < rhs ? lhs : rhs; } +}; + +template +struct MaximumFunc { + __device__ __host__ __forceinline__ T operator()(const T &lhs, const T &rhs) { return lhs > rhs ? lhs : rhs; } +}; + +template +struct PowerFunc { + __device__ __host__ __forceinline__ T operator()(const T &lhs, const T &rhs) { return pow(lhs, rhs); } +}; + +template <> +struct PowerFunc { + __device__ __host__ __forceinline__ half operator()(const half &lhs, const half &rhs) { + return __float2half(pow(__half2float(lhs), __half2float(rhs))); + } +}; + +template <> +struct PowerFunc { + __device__ __host__ __forceinline__ half2 operator()(const half2 &lhs, const half2 &rhs) { + float2 base = __half22float2(lhs); + float2 index = __half22float2(rhs); + base.x = pow(base.x, index.x); + base.y = pow(base.y, index.y); + return __float22half2_rn(base); + } +}; + +template +struct RealDivFunc { + __device__ __host__ __forceinline__ T operator()(const T &lhs, const T &rhs) { return (lhs / rhs); } +}; + +template +struct DivFunc { + __device__ __host__ __forceinline__ T operator()(const T &lhs, const T &rhs) { return (lhs / rhs); } +}; + +template +struct MulFunc { + __device__ __host__ __forceinline__ T operator()(const T &lhs, const T &rhs) { return (lhs * rhs); } +}; + +template +struct SubFunc { + __device__ __host__ __forceinline__ T operator()(const T &lhs, const T &rhs) { return (lhs - rhs); } +}; + +template +struct AddFunc { + __device__ __host__ __forceinline__ T operator()(const T &lhs, const T &rhs) { return (lhs + rhs); } +}; + +// DivNoNan check if rhs is less than epsilon +template +struct DivNoNanFunc { + // default T is float + __device__ __host__ __forceinline__ T operator()(const T &lhs, const T &rhs) { + return rhs < kFloatEplison && rhs > -kFloatEplison ? 0.0 : (lhs / rhs); + } +}; + +template <> +struct DivNoNanFunc { + __device__ __host__ __forceinline__ int operator()(const int &lhs, const int &rhs) { + return rhs == 0 ? 
0 : (lhs / rhs); + } +}; + +template <> +struct DivNoNanFunc { + __device__ __host__ __forceinline__ half operator()(const half &lhs, const half &rhs) { + if (__half2float(rhs) < (0.00007) && __half2float(rhs) > -0.00007) { + return static_cast(0.0); + } + return __float2half_rn(__half2float(lhs) / __half2float(rhs)); + } +}; + +template <> +struct DivNoNanFunc { + __device__ __host__ __forceinline__ half2 operator()(const half2 &lhs, const half2 &rhs) { + float2 l = __half22float2(lhs); + float2 r = __half22float2(rhs); + if ((r.x < kFloatEplison && r.x > -kFloatEplison) || (r.y < kFloatEplison && r.y > -kFloatEplison)) { + l.x = 0.0; + l.y = 0.0; + } else { + l.x = l.x / r.x; + l.y = l.y / r.y; + } + return __float22half2_rn(l); + } +}; + +// convert to float to fix accuracy issue +template +struct FloorDivFunc { + __device__ __host__ __forceinline__ T operator()(const T &lhs, const T &rhs) { + return floorf(static_cast(lhs) / static_cast(rhs)); + } +}; + +template <> +struct FloorDivFunc { + __device__ __host__ __forceinline__ half operator()(const half &lhs, const half &rhs) { + return floorf(__half2float(lhs) / __half2float(rhs)); + } +}; + +template <> +struct FloorDivFunc { + __device__ __host__ __forceinline__ half2 operator()(const half2 &lhs, const half2 &rhs) { + float2 l = __half22float2(lhs); + float2 r = __half22float2(rhs); + l.x = floorf(l.x / r.x); + l.y = floorf(l.y / r.y); + return __float22half2_rn(l); + } +}; + +template +struct AbsGradFunc { + __device__ __forceinline__ T operator()(const T &lhs, const T &rhs) { + T zero = 0.0; + return lhs < zero ? -rhs : lhs > zero ? rhs : zero; + } +}; + +template <> +struct AbsGradFunc { + __device__ __forceinline__ half2 operator()(const half2 &lhs, const half2 &rhs) { + half2 zero(0.0, 0.0); + return lhs < zero ? -rhs : lhs > zero ? 
rhs : zero; + } +}; + +template +struct SquaredDifferenceFunc { + __device__ __forceinline__ T operator()(const T &lhs, const T &rhs) { + T diff = lhs - rhs; + return diff * diff; + } +}; + +// Element-wise Comparison +template +__global__ void ElewiseCmpKernel(const int nums, const T *x0, const T *x1, bool *y) { + for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < nums; pos += blockDim.x * gridDim.x) { + y[pos] = Func()(x0[pos], x1[pos]); + } +} + +template +void ElewiseCmp(const int &nums, enum BroadcastOpType op, const T *x0, const T *x1, bool *y, cudaStream_t stream) { + switch (op) { + case BROADCAST_TYPE_GREATER: + return ElewiseCmpKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_LESS: + return ElewiseCmpKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_EQUAL: + return ElewiseCmpKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + default: + break; + } +} + +template void ElewiseCmp(const int &nums, enum BroadcastOpType op, const double *x0, const double *x1, bool *y, + cudaStream_t stream); +template void ElewiseCmp(const int &nums, enum BroadcastOpType op, const float *x0, const float *x1, bool *y, + cudaStream_t stream); +template void ElewiseCmp(const int &nums, enum BroadcastOpType op, const half *x0, const half *x1, bool *y, + cudaStream_t stream); +template void ElewiseCmp(const int &nums, enum BroadcastOpType op, const int *x0, const int *x1, bool *y, + cudaStream_t stream); +template void ElewiseCmp(const int &nums, enum BroadcastOpType op, const int8_t *x0, const int8_t *x1, bool *y, + cudaStream_t stream); +template void ElewiseCmp(const int &nums, enum BroadcastOpType op, const uint8_t *x0, const uint8_t *x1, bool *y, + cudaStream_t stream); +template void ElewiseCmp(const int &nums, enum BroadcastOpType op, const int64_t *x0, const int64_t *x1, bool *y, + cudaStream_t stream); + +// Element-wise ArithMetic +template +__global__ void ElewiseArithKernel(const int nums, const T *x0, const T *x1, T *y) { + for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < nums; pos += blockDim.x * gridDim.x) { + y[pos] = Func()(x0[pos], x1[pos]); + } +} + +template +void ElewiseArithKernel(const int &nums, enum BroadcastOpType op, const T *x0, const T *x1, T *y, cudaStream_t stream) { + switch (op) { + case BROADCAST_TYPE_MINIMUM: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_MAXIMUM: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_POWER: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_REALDIV: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_MUL: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_SUB: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_ADD: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_FLOORDIV: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_ABSGRAD: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_DIV: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_DIVNONAN: + 
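// DivNoNanFunc (defined above) returns 0 instead of Inf/NaN when the divisor is zero, + // or within a small epsilon of zero for the floating-point specialisations. + 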
return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + case BROADCAST_TYPE_SQUARED_DIFFERENCE: + return ElewiseArithKernel><<<(nums + 255) / 256, 256, 0, stream>>>(nums, x0, x1, y); + default: + break; + } +} + +template +void ElewiseArith(const int &nums, enum BroadcastOpType op, const T *x0, const T *x1, T *y, cudaStream_t stream) { + return ElewiseArithKernel(nums, op, x0, x1, y, stream); +} + +template <> +void ElewiseArith(const int &nums, enum BroadcastOpType op, const half *x0, const half *x1, half *y, + cudaStream_t stream) { + // `>` return true iff both half result are true. fallback to half + if (nums % 2 == 0 && op != BROADCAST_TYPE_MINIMUM && op != BROADCAST_TYPE_MAXIMUM && op != BROADCAST_TYPE_ABSGRAD) { + ElewiseArithKernel(nums / 2, op, reinterpret_cast(x0), reinterpret_cast(x1), + reinterpret_cast(y), stream); + } else { + return ElewiseArithKernel(nums, op, x0, x1, y, stream); + } +} + +template void ElewiseArith(const int &nums, enum BroadcastOpType op, const double *x0, const double *x1, double *y, + cudaStream_t stream); +template void ElewiseArith(const int &nums, enum BroadcastOpType op, const float *x0, const float *x1, float *y, + cudaStream_t stream); +template void ElewiseArith(const int &nums, enum BroadcastOpType op, const half *x0, const half *x1, half *y, + cudaStream_t stream); +template void ElewiseArith(const int &nums, enum BroadcastOpType op, const int *x0, const int *x1, int *y, + cudaStream_t stream); +template void ElewiseArith(const int &nums, enum BroadcastOpType op, const int8_t *x0, const int8_t *x1, int8_t *y, + cudaStream_t stream); +template void ElewiseArith(const int &nums, enum BroadcastOpType op, const uint8_t *x0, const uint8_t *x1, uint8_t *y, + cudaStream_t stream); +template void ElewiseArith(const int &nums, enum BroadcastOpType op, const int64_t *x0, const int64_t *x1, int64_t *y, + cudaStream_t stream); + +// Broadcast comparison +__device__ __forceinline__ size_t Index(const size_t &index, const size_t &dim) { return dim == 1 ? 
0 : index; } + +template +__global__ void BroadcastCmpKernel(const size_t l0, const size_t l1, const size_t l2, const size_t l3, const size_t l4, + const size_t l5, const size_t l6, const size_t r0, const size_t r1, const size_t r2, + const size_t r3, const size_t r4, const size_t r5, const size_t r6, const size_t d0, + const size_t d1, const size_t d2, const size_t d3, const size_t d4, const size_t d5, + const size_t d6, const T *x0, const T *x1, bool *y) { + for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < d0 * d1 * d2 * d3 * d4 * d5 * d6; + pos += blockDim.x * gridDim.x) { + size_t i = pos / (d1 * d2 * d3 * d4 * d5 * d6) % d0; + size_t j = pos / (d2 * d3 * d4 * d5 * d6) % d1; + size_t k = pos / (d3 * d4 * d5 * d6) % d2; + size_t l = pos / (d4 * d5 * d6) % d3; + size_t m = pos / (d5 * d6) % d4; + size_t n = pos / d6 % d5; + size_t o = pos % d6; + + size_t l_index = Index(i, l0) * l1 * l2 * l3 * l4 * l5 * l6; + l_index += Index(j, l1) * l2 * l3 * l4 * l5 * l6; + l_index += Index(k, l2) * l3 * l4 * l5 * l6; + l_index += Index(l, l3) * l4 * l5 * l6; + l_index += Index(m, l4) * l5 * l6; + l_index += Index(n, l5) * l6; + l_index += Index(o, l6); + size_t r_index = Index(i, r0) * r1 * r2 * r3 * r4 * r5 * r6; + r_index += Index(j, r1) * r2 * r3 * r4 * r5 * r6; + r_index += Index(k, r2) * r3 * r4 * r5 * r6; + r_index += Index(l, r3) * r4 * r5 * r6; + r_index += Index(m, r4) * r5 * r6; + r_index += Index(n, r5) * r6; + r_index += Index(o, r6); + y[pos] = Func()(x0[l_index], x1[r_index]); + } +} + +template +void BroadcastCmp(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const T *x0, const T *x1, bool *y, + cudaStream_t stream) { + size_t size = 1; + for (auto d : y_dims) { + size *= d; + } + + switch (op) { + case BROADCAST_TYPE_GREATER: + return BroadcastCmpKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_LESS: + return BroadcastCmpKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_EQUAL: + return BroadcastCmpKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + default: + break; + } +} + +template void BroadcastCmp(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const double *x0, + const double *x1, bool *y, cudaStream_t stream); +template void BroadcastCmp(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const float *x0, const float *x1, + bool *y, cudaStream_t stream); +template void BroadcastCmp(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const half *x0, const half *x1, + bool *y, cudaStream_t stream); +template void 
BroadcastCmp(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const int *x0, const int *x1, + bool *y, cudaStream_t stream); +template void BroadcastCmp(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const int8_t *x0, + const int8_t *x1, bool *y, cudaStream_t stream); +template void BroadcastCmp(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const uint8_t *x0, + const uint8_t *x1, bool *y, cudaStream_t stream); +template void BroadcastCmp(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const int64_t *x0, + const int64_t *x1, bool *y, cudaStream_t stream); + +// Broadcast Arithmetic +template +__global__ void BroadcastArithKernel(const size_t l0, const size_t l1, const size_t l2, const size_t l3, + const size_t l4, const size_t l5, const size_t l6, const size_t r0, + const size_t r1, const size_t r2, const size_t r3, const size_t r4, + const size_t r5, const size_t r6, const size_t d0, const size_t d1, + const size_t d2, const size_t d3, const size_t d4, const size_t d5, + const size_t d6, const T *x0, const T *x1, T *y) { + for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < d0 * d1 * d2 * d3 * d4 * d5 * d6; + pos += blockDim.x * gridDim.x) { + size_t i = pos / (d1 * d2 * d3 * d4 * d5 * d6) % d0; + size_t j = pos / (d2 * d3 * d4 * d5 * d6) % d1; + size_t k = pos / (d3 * d4 * d5 * d6) % d2; + size_t l = pos / (d4 * d5 * d6) % d3; + size_t m = pos / (d5 * d6) % d4; + size_t n = pos / d6 % d5; + size_t o = pos % d6; + + size_t l_index = Index(i, l0) * l1 * l2 * l3 * l4 * l5 * l6; + l_index += Index(j, l1) * l2 * l3 * l4 * l5 * l6; + l_index += Index(k, l2) * l3 * l4 * l5 * l6; + l_index += Index(l, l3) * l4 * l5 * l6; + l_index += Index(m, l4) * l5 * l6; + l_index += Index(n, l5) * l6; + l_index += Index(o, l6); + size_t r_index = Index(i, r0) * r1 * r2 * r3 * r4 * r5 * r6; + r_index += Index(j, r1) * r2 * r3 * r4 * r5 * r6; + r_index += Index(k, r2) * r3 * r4 * r5 * r6; + r_index += Index(l, r3) * r4 * r5 * r6; + r_index += Index(m, r4) * r5 * r6; + r_index += Index(n, r5) * r6; + r_index += Index(o, r6); + y[pos] = Func()(x0[l_index], x1[r_index]); + } +} + +template +void BroadcastArith(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const T *x0, const T *x1, T *y, + cudaStream_t stream) { + size_t size = 1; + for (auto d : y_dims) { + size *= d; + } + switch (op) { + case BROADCAST_TYPE_MAXIMUM: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_MINIMUM: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_POWER: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], 
x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_REALDIV: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_MUL: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_SUB: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_ADD: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_FLOORDIV: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_ABSGRAD: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_DIV: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_DIVNONAN: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + case BROADCAST_TYPE_SQUARED_DIFFERENCE: + return BroadcastArithKernel><<<(size + 255) / 256, 256, 0, stream>>>( + x0_dims[0], x0_dims[1], x0_dims[2], x0_dims[3], x0_dims[4], x0_dims[5], x0_dims[6], x1_dims[0], x1_dims[1], + x1_dims[2], x1_dims[3], x1_dims[4], x1_dims[5], x1_dims[6], y_dims[0], y_dims[1], y_dims[2], y_dims[3], + y_dims[4], y_dims[5], y_dims[6], x0, x1, y); + default: + break; + } +} + +template void BroadcastArith(const std::vector &x0_dims, 
const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const double *x0, + const double *x1, double *y, cudaStream_t stream); +template void BroadcastArith(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const float *x0, + const float *x1, float *y, cudaStream_t stream); +template void BroadcastArith(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const half *x0, const half *x1, + half *y, cudaStream_t stream); +template void BroadcastArith(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const int *x0, const int *x1, + int *y, cudaStream_t stream); +template void BroadcastArith(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const int8_t *x0, + const int8_t *x1, int8_t *y, cudaStream_t stream); +template void BroadcastArith(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const uint8_t *x0, + const uint8_t *x1, uint8_t *y, cudaStream_t stream); +template void BroadcastArith(const std::vector &x0_dims, const std::vector &x1_dims, + const std::vector &y_dims, enum BroadcastOpType op, const int64_t *x0, + const int64_t *x1, int64_t *y, cudaStream_t stream); + +// BroadcastTo +template +__global__ void BroadcastToKernel(const size_t i0, const size_t i1, const size_t i2, const size_t i3, const size_t o0, + const size_t o1, const size_t o2, const size_t o3, const T *input_addr, + T *output_addr) { + for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < o0 * o1 * o2 * o3; pos += blockDim.x * gridDim.x) { + size_t i = pos / (o1 * o2 * o3) % o0; + size_t j = pos / (o2 * o3) % o1; + size_t k = pos / o3 % o2; + size_t l = pos % o3; + + size_t input_idx = Index(i, i0) * i1 * i2 * i3 + Index(j, i1) * i2 * i3 + Index(k, i2) * i3 + Index(l, i3); + output_addr[pos] = input_addr[input_idx]; + } +} + +template +void BroadcastTo(const size_t &i0, const size_t &i1, const size_t &i2, const size_t &i3, const size_t &o0, + const size_t &o1, const size_t &o2, const size_t &o3, const T *input_addr, T *output_addr, + cudaStream_t stream) { + size_t nums = o0 * o1 * o2 * o3; + BroadcastToKernel<<>>(i0, i1, i2, i3, o0, o1, o2, o3, input_addr, + output_addr); +} + +template void BroadcastTo(const size_t &i0, const size_t &i1, const size_t &i2, const size_t &i3, const size_t &o0, + const size_t &o1, const size_t &o2, const size_t &o3, const float *input_addr, + float *output_addr, cudaStream_t stream); +template void BroadcastTo(const size_t &i0, const size_t &i1, const size_t &i2, const size_t &i3, const size_t &o0, + const size_t &o1, const size_t &o2, const size_t &o3, const half *input_addr, + half *output_addr, cudaStream_t stream); +template void BroadcastTo(const size_t &i0, const size_t &i1, const size_t &i2, const size_t &i3, const size_t &o0, + const size_t &o1, const size_t &o2, const size_t &o3, const int64_t *input_addr, + int64_t *output_addr, cudaStream_t stream); diff --git a/cuda_code/broadcasting_14.cu b/cuda_code/broadcasting_14.cu new file mode 100644 index 0000000000000000000000000000000000000000..d5836b71970cce67e450240df43b80ef16f0410f --- /dev/null +++ b/cuda_code/broadcasting_14.cu @@ -0,0 +1,185 @@ +// +// @author raver119@gmail.com +// + +#include +#include +#include + + +template +__device__ void broadcastSimpleGeneric( + T *x, + Nd4jLong *xShapeInfo, + 
T *y, + Nd4jLong *yShapeInfo, + T *result, + Nd4jLong *resultShapeInfo, + int *dimension, + int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) { + + + functions::broadcast::Broadcast::template transformCuda( + x, + xShapeInfo, + y, + yShapeInfo, + result, + resultShapeInfo, + dimension, + dimensionLength, + NULL, + tadOnlyShapeInfo, + tadOffsets, + tadOnlyShapeInfoZ, + tadOffsetsZ); +} + +// broadcast kernel call +DISPATCH_KERNEL_SIMPLE(broadcastSimple_, broadcastSimpleGeneric, float, INPUT(float *x, Nd4jLong *xShapeInfo, float *y, Nd4jLong *yShapeInfo, float *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ), PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_OPS)) +DISPATCH_KERNEL_SIMPLE(broadcastSimple_, broadcastSimpleGeneric, double, INPUT(double *x, Nd4jLong *xShapeInfo, double *y, Nd4jLong *yShapeInfo, double *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ), PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_OPS)) +DISPATCH_KERNEL_SIMPLE(broadcastSimple_, broadcastSimpleGeneric, float16, INPUT(float16 *x, Nd4jLong *xShapeInfo, float16 *y, Nd4jLong *yShapeInfo, float16 *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ), PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_OPS)) + + +namespace functions { + namespace broadcast { + + template <> + __host__ void Broadcast::executeBroadcast(dim3 launchDims, cudaStream_t *stream, int opNum, float *x, Nd4jLong *xShapeInfo, float *y, Nd4jLong *yShapeInfo, float *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) { + DISPATCH_SIMPLE(broadcastSimple, float, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_OPS)) + + DEBUG_KERNEL(stream, opNum); + } + + template <> + __host__ void Broadcast::executeBroadcast(dim3 launchDims, cudaStream_t *stream, int opNum, float16 *x, Nd4jLong *xShapeInfo, float16 *y, Nd4jLong *yShapeInfo, float16 *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) { + DISPATCH_SIMPLE(broadcastSimple, float16, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_OPS)) + + DEBUG_KERNEL(stream, opNum); + } + + template <> + __host__ void Broadcast::executeBroadcast(dim3 launchDims, cudaStream_t *stream, int opNum, double *x, Nd4jLong *xShapeInfo, double *y, Nd4jLong *yShapeInfo, double *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Nd4jLong 
*tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) { + DISPATCH_SIMPLE(broadcastSimple, double, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_OPS)) + + DEBUG_KERNEL(stream, opNum); + } + + + template + template + __device__ void Broadcast::transformCuda( + T *x, + Nd4jLong *xShapeInfo, + T *y, + Nd4jLong *yShapeInfo, + T *result, + Nd4jLong *resultShapeInfo, + int *dimension, + int dimensionLength, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadOnlyShapeInfoZ, Nd4jLong *tadOffsetsZ) { + + //decompose in to several sub tads after + //moving all dimensions (in sorted order) + //to the back. + //permuted version of the x shape info for setting up the tad problem + __shared__ Nd4jLong tadLength; + __shared__ Nd4jLong tadEWS; + __shared__ int tadRank; + __shared__ int numTads; + __shared__ Nd4jLong *tadShape; + __shared__ Nd4jLong *tadStride; + __shared__ Nd4jLong yEWS; + __shared__ Nd4jLong zEWS; + __shared__ int zRank; + __shared__ Nd4jLong *zShape; + __shared__ Nd4jLong *zStride; + __shared__ int yRank; + __shared__ Nd4jLong *yShape; + __shared__ Nd4jLong *yStride; + if (threadIdx.x == 0) { + if (tadOnlyShapeInfoZ == nullptr) { + tadOnlyShapeInfoZ = tadOnlyShapeInfo; + tadOffsetsZ = tadOffsets; + } + + tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength); + tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); + numTads = shape::length(xShapeInfo) / tadLength; + yEWS = shape::elementWiseStride(yShapeInfo); + zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ); + + if (tadEWS < 1 || zEWS < 1 || yEWS < 1 || dimensionLength > 1) { + tadRank = shape::rank(tadOnlyShapeInfo); + tadShape = shape::shapeOf(tadOnlyShapeInfo); + tadStride = shape::stride(tadOnlyShapeInfo); + zRank = shape::rank(tadOnlyShapeInfoZ); + zShape = shape::shapeOf(tadOnlyShapeInfoZ); + zStride = shape::stride(tadOnlyShapeInfoZ); + yRank = shape::rank(yShapeInfo); + yShape = shape::shapeOf(yShapeInfo); + yStride = shape::stride(yShapeInfo); + } + } + __syncthreads(); + + for (int r = blockIdx.x; r < numTads; r += gridDim.x) { + + + __shared__ Nd4jLong tadOffsetForBlock; + __shared__ Nd4jLong tadOffsetForBlockZ; + __shared__ T *rR; + __shared__ T *rX; + if (threadIdx.x == 0) { + tadOffsetForBlockZ = tadOffsetsZ[r]; + if (result != x) + tadOffsetForBlock = tadOffsets[r]; + else + tadOffsetForBlock = tadOffsetForBlockZ; + + rR = result + tadOffsetForBlockZ; + rX = x + tadOffsetForBlock; + } + __syncthreads(); + + + if(tadEWS > 0 && zEWS > 0 && yEWS > 0 && dimensionLength == 1) { + if (tadEWS == 1 && yEWS == 1 && zEWS == 1) { + for (int i = threadIdx.x; i < tadLength; i+= blockDim.x) { + rR[i] = OpType::op(rX[i], y[i]); + } + } else { + for (int i = threadIdx.x; i < tadLength; i+= blockDim.x) { + rR[i * zEWS] = OpType::op(rX[i * tadEWS], y[i * yEWS]); + } + } + } + else { + Nd4jLong xCoord[MAX_RANK]; + Nd4jLong yCoord[MAX_RANK]; + Nd4jLong zCoord[MAX_RANK]; + + for (Nd4jLong i = threadIdx.x; i < tadLength; i+= blockDim.x) { + + if (shape::order(tadOnlyShapeInfo) == 'c') { + shape::ind2subC(tadRank,tadShape, i, tadLength, xCoord); + shape::ind2subC(yRank, yShape, i, tadLength, yCoord); + } else { + shape::ind2sub(tadRank,tadShape, i, tadLength, xCoord); + shape::ind2sub(yRank, yShape, i, tadLength, yCoord); + } + + if (shape::order(tadOnlyShapeInfoZ) == 'c') + shape::ind2subC(zRank,zShape, i, tadLength, zCoord); 
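+                        // Shape-aware path: 'c' picks the row-major (C-order) unravel of the
+                        // linear element index i into per-dimension coordinates, otherwise the
+                        // Fortran-order variant is used.  getOffset() below then folds each
+                        // coordinate set back into a strided offset, roughly
+                        //   offset = baseOffset + sum_d coord[d] * stride[d],
+                        // which lets x, y and the result z use independently strided buffers.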
+ else + shape::ind2sub(zRank,zShape, i, tadLength, zCoord); + + auto xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank); + auto zOffset = shape::getOffset(tadOffsetForBlockZ, zShape, zStride, zCoord, zRank); + auto yOffset = shape::getOffset(0, yShape, yStride, yCoord, yRank); + result[zOffset] = OpType::op(x[xOffset], y[yOffset]); + } + } + } + } + } +} \ No newline at end of file diff --git a/cuda_code/btGpuDemo2dCudaFunc_1.cu b/cuda_code/btGpuDemo2dCudaFunc_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..620602ec5cf0e984acaa78d87ac58c4ee92197d9 --- /dev/null +++ b/cuda_code/btGpuDemo2dCudaFunc_1.cu @@ -0,0 +1,42 @@ +/* +Impulse based Rigid body simulation using CUDA +Copyright (c) 2007 Takahiro Harada http://www.iii.u-tokyo.ac.jp/~takahiroharada/projects/impulseCUDA.html + +This software is provided 'as-is', without any express or implied warranty. +In no event will the authors be held liable for any damages arising from the use of this software. +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it freely, +subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. +2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. +3. This notice may not be removed or altered from any source distribution. +*/ + +#include +#include +#include + +#include "cutil_math.h" +#include "math_constants.h" + +#include + + + +#include "btCudaDefines.h" + + + +#include "../../src/BulletMultiThreaded/btGpuUtilsSharedDefs.h" +#include "../../Demos/Gpu2dDemo/btGpuDemo2dSharedTypes.h" +#include "../../Demos/Gpu2dDemo/btGpuDemo2dSharedDefs.h" + + + +texture posTex; + + + +#include "../../Demos/Gpu2dDemo/btGpuDemo2dSharedCode.h" + diff --git a/cuda_code/btree.cu b/cuda_code/btree.cu new file mode 100644 index 0000000000000000000000000000000000000000..2f395acec397f17dfdeed7afad1b4bbb00bc064e --- /dev/null +++ b/cuda_code/btree.cu @@ -0,0 +1,37 @@ +//#include +// +//#include "btree.cuh" +// +//#include "concurrent-xfasttrie-fixture.cu" +// +//using BTREE = gpu::BTree; +//using BTreeInsertionFixture = XTrieInsertionFixture; +//using BTreeGetThreadFixture = XTrieGetThreadFixture; +//using BTreeGetWarpFixture = XTrieGetWarpFixture; +//using BTreePredecessorFixture = XTriePredecessorFixture; +//using BTreeSuccessorFixture = XTrieSuccessorFixture; +// +//BENCHMARK_F(BTreeInsertionFixture, InsertionBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) +//{ +// insert(); +//} +///* +//BENCHMARK_F(BTreeGetThreadFixture, GetThreadBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) +//{ +// get_thread(); +//} +// +//BENCHMARK_F(BTreeGetWarpFixture, GetWarpBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) +//{ +// get_warp(); +//} +// +//BENCHMARK_F(BTreePredecessorFixture, PredecessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) +//{ +// predecessor(); +//}*/ +///* +//BENCHMARK_F(BTreeSuccessorFixture, SuccessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) +//{ +// successor(); +//}*/ diff --git a/cuda_code/build_info_1.cu b/cuda_code/build_info_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..d1016b5b7032c41d892d2c37451fe721a2de96db --- /dev/null +++ b/cuda_code/build_info_1.cu @@ -0,0 
+1,62 @@ +/******************************************************************************* + * Copyright (c) 2019 Konduit K.K. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + * + * SPDX-License-Identifier: Apache-2.0 + ******************************************************************************/ + +#include +#include + +const char* buildInfo() { + return "" +#if defined(__clang__) + "Clang: " TOSTRING(__clang_version__) +#elif defined(_MSC_VER) + "MSVC: " TOSTRING(_MSC_FULL_VER) +#else + "GCC: " TOSTRING(__VERSION__) +#endif +#if defined(_MSC_VER) && defined(_MSVC_LANG) + "\nSTD version: " TOSTRING(_MSVC_LANG) +#elif defined(__cplusplus) + "\nSTD version: " TOSTRING(__cplusplus) +#endif + +#if defined(__CUDACC__) + "\nCUDA: " TOSTRING(__CUDACC_VER_MAJOR__) + "." TOSTRING(__CUDACC_VER_MINOR__) + "." TOSTRING(__CUDACC_VER_BUILD__) +#endif +#if defined(DEFAULT_ENGINE) + "\nDEFAULT_ENGINE: " TOSTRING(DEFAULT_ENGINE) +#endif +#if defined(HAVE_FLATBUFFERS) + "\nHAVE_FLATBUFFERS" +#endif +#if defined(HAVE_MKLDNN) + "\nHAVE_MKLDNN" +#endif +#if defined(__EXTERNAL_BLAS__) + "\nHAVE_EXTERNAL_BLAS" +#endif +#if defined(HAVE_OPENBLAS) + "\nHAVE_OPENBLAS" +#endif +#if defined(HAVE_CUDNN) + "\nHAVE_CUDNN" +#endif +#if defined(HAVE_ARMCOMPUTE) + "\nHAVE_ARMCOMPUTE" +#endif + ; +} diff --git a/cuda_code/build_tree.cu b/cuda_code/build_tree.cu new file mode 100644 index 0000000000000000000000000000000000000000..ceef0dd245ca21b2c78bb94da1bcc998920dd2d1 --- /dev/null +++ b/cuda_code/build_tree.cu @@ -0,0 +1,868 @@ +// //#include "/home/jbedorf/papers/GBPZ2010/codes/jb/build_tree/CUDA/support_kernels.cu" +#include "support_kernels.cu" + +#include + +////////////////////////////// +////////////////////////////// +////////////////////////////// +#define LEVEL_MIN 3 + +extern "C" __global__ void boundaryReduction(const int n_particles, + real4 *positions, + float3 *output_min, + float3 *output_max) +{ + const uint bid = blockIdx.y * gridDim.x + blockIdx.x; + const uint tid = threadIdx.x; + //const uint idx = bid * blockDim.x + tid; + + volatile __shared__ float3 shmem[512]; + float3 r_min = (float3){+1e10f, +1e10f, +1e10f}; + float3 r_max = (float3){-1e10f, -1e10f, -1e10f}; + + volatile float3 *sh_rmin = (float3*)&shmem [ 0]; + volatile float3 *sh_rmax = (float3*)&shmem[256]; + sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z; + sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z; + + // perform first level of reduction, + // reading from global memory, writing to shared memory + const int blockSize = blockDim.x; +// unsigned int tid = threadIdx.x; + unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; + unsigned int gridSize = blockSize*2*gridDim.x; + + real4 pos; + // we reduce multiple elements per thread. The number is determined by the + // number of active thread blocks (via gridSize). 
More blocks will result + // in a larger gridSize and therefore fewer elements per thread + //based on reduce6 example + while (i < n_particles) { + if (i < n_particles) + { + pos = positions[i]; + r_min.x = fminf(pos.x, r_min.x); + r_min.y = fminf(pos.y, r_min.y); + r_min.z = fminf(pos.z, r_min.z); + r_max.x = fmaxf(pos.x, r_max.x); + r_max.y = fmaxf(pos.y, r_max.y); + r_max.z = fmaxf(pos.z, r_max.z); + } + if (i + blockSize < n_particles) + { + pos = positions[i + blockSize]; + r_min.x = fminf(pos.x, r_min.x); + r_min.y = fminf(pos.y, r_min.y); + r_min.z = fminf(pos.z, r_min.z); + r_max.x = fmaxf(pos.x, r_max.x); + r_max.y = fmaxf(pos.y, r_max.y); + r_max.z = fmaxf(pos.z, r_max.z); + } + i += gridSize; + } + + sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z; + sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z; + + __syncthreads(); + // do reduction in shared mem + if(blockDim.x >= 512) if (tid < 256) {sh_MinMax(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads(); + if(blockDim.x >= 256) if (tid < 128) {sh_MinMax(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads(); + if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads(); + + if (tid < 32) + { + sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin,sh_rmax); + sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin,sh_rmax); + sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin,sh_rmax); + sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin,sh_rmax); + sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin,sh_rmax); + sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin,sh_rmax); + } + + // write result for this block to global mem + if (tid == 0) + { + //Compiler doesnt allow: volatile float3 = float3 + output_min[bid].x = sh_rmin[0].x; output_min[bid].y = sh_rmin[0].y; output_min[bid].z = sh_rmin[0].z; + output_max[bid].x = sh_rmax[0].x; output_max[bid].y = sh_rmax[0].y; output_max[bid].z = sh_rmax[0].z; + } + +} + + +//Get the domain size, by taking into account the group size +extern "C" __global__ void boundaryReductionGroups(const int n_groups, + real4 *positions, + real4 *sizes, + float3 *output_min, + float3 *output_max) +{ + const uint bid = blockIdx.y * gridDim.x + blockIdx.x; + const uint tid = threadIdx.x; + //const uint idx = bid * blockDim.x + tid; + + volatile __shared__ float3 shmem[512]; + float3 r_min = (float3){+1e10f, +1e10f, +1e10f}; + float3 r_max = (float3){-1e10f, -1e10f, -1e10f}; + + volatile float3 *sh_rmin = (float3*)&shmem [ 0]; + volatile float3 *sh_rmax = (float3*)&shmem[256]; + sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z; + sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z; + + // perform first level of reduction, + // reading from global memory, writing to shared memory + const int blockSize = blockDim.x; +// unsigned int tid = threadIdx.x; + unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; + unsigned int gridSize = blockSize*2*gridDim.x; + + real4 pos; + real4 size; + // we reduce multiple elements per thread. The number is determined by the + // number of active thread blocks (via gridSize). 
More blocks will result + // in a larger gridSize and therefore fewer elements per thread + //based on reduce6 example + while (i < n_groups) { + if (i < n_groups) + { + pos = positions[i]; + size = sizes[i]; + r_min.x = fminf(pos.x-size.x, r_min.x); + r_min.y = fminf(pos.y-size.y, r_min.y); + r_min.z = fminf(pos.z-size.z, r_min.z); + r_max.x = fmaxf(pos.x+size.x, r_max.x); + r_max.y = fmaxf(pos.y+size.y, r_max.y); + r_max.z = fmaxf(pos.z+size.z, r_max.z); + } + if (i + blockSize < n_groups) + { + pos = positions[i + blockSize]; + size = sizes[i + blockSize]; + r_min.x = fminf(pos.x-size.x, r_min.x); + r_min.y = fminf(pos.y-size.y, r_min.y); + r_min.z = fminf(pos.z-size.z, r_min.z); + r_max.x = fmaxf(pos.x+size.x, r_max.x); + r_max.y = fmaxf(pos.y+size.y, r_max.y); + r_max.z = fmaxf(pos.z+size.z, r_max.z); + } + i += gridSize; + } + + sh_rmin[tid].x = r_min.x; sh_rmin[tid].y = r_min.y; sh_rmin[tid].z = r_min.z; + sh_rmax[tid].x = r_max.x; sh_rmax[tid].y = r_max.y; sh_rmax[tid].z = r_max.z; + + __syncthreads(); + // do reduction in shared mem + if(blockDim.x >= 512) if (tid < 256) {sh_MinMax(tid, tid + 256, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads(); + if(blockDim.x >= 256) if (tid < 128) {sh_MinMax(tid, tid + 128, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads(); + if(blockDim.x >= 128) if (tid < 64) {sh_MinMax(tid, tid + 64, &r_min, &r_max, sh_rmin, sh_rmax);} __syncthreads(); + + if (tid < 32) + { + sh_MinMax(tid, tid + 32, &r_min, &r_max, sh_rmin,sh_rmax); + sh_MinMax(tid, tid + 16, &r_min, &r_max, sh_rmin,sh_rmax); + sh_MinMax(tid, tid + 8, &r_min, &r_max, sh_rmin,sh_rmax); + sh_MinMax(tid, tid + 4, &r_min, &r_max, sh_rmin,sh_rmax); + sh_MinMax(tid, tid + 2, &r_min, &r_max, sh_rmin,sh_rmax); + sh_MinMax(tid, tid + 1, &r_min, &r_max, sh_rmin,sh_rmax); + } + + // write result for this block to global mem + if (tid == 0) + { + //Compiler doesnt allow: volatile float3 = float3 + output_min[bid].x = sh_rmin[0].x; output_min[bid].y = sh_rmin[0].y; output_min[bid].z = sh_rmin[0].z; + output_max[bid].x = sh_rmax[0].x; output_max[bid].y = sh_rmax[0].y; output_max[bid].z = sh_rmax[0].z; + } + +} + +//#define EXACT_KEY + + +#if 0 +extern "C" __global__ void cl_build_key_list(uint2 *body_key, + real4 *body_pos, + int n_bodies, + real4 corner) { + + uint bid = blockIdx.y * gridDim.x + blockIdx.x; + uint tid = threadIdx.x; + uint id = bid * blockDim.x + tid; + + if (id > n_bodies) return; + + real4 pos = body_pos[id]; + int4 crd; + + real domain_fac = corner.w; + + #ifndef EXACT_KEY + crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac)); + crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac)); + crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac)); + #else + crd.x = (int)((pos.x - corner.x) / domain_fac); + crd.y = (int)((pos.y - corner.y) / domain_fac); + crd.z = (int)((pos.z - corner.z) / domain_fac); + #endif + +// crd.x = (int)((pos.x - corner.x) / domain_fac + 0.5); +// crd.y = (int)((pos.y - corner.y) / domain_fac + 0.5); +// crd.z = (int)((pos.z - corner.z) / domain_fac + 0.5); + + uint2 key = get_key(crd); + + + if (id == n_bodies) key = (uint2){0xFFFFFFFF, 0xFFFFFFFF}; + + body_key[id] = key; + +} + +#endif + +extern "C" __global__ void cl_build_key_list(uint4 *body_key, + real4 *body_pos, + int n_bodies, + real4 corner) { + + uint bid = blockIdx.y * gridDim.x + blockIdx.x; + uint tid = threadIdx.x; + uint id = bid * blockDim.x + tid; + + if (id > n_bodies) return; + + real4 pos = body_pos[id]; + int4 crd; + + real domain_fac = corner.w; + + 
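+  // corner.xyz appears to hold the minimum corner of the global bounding box and
+  // corner.w the grid scale factor (domain_fac), so the position is quantised to
+  // integer grid coordinates before key generation, roughly
+  //   crd = round((pos - corner.xyz) / corner.w)
+  // Defining EXACT_KEY switches from rounding to plain truncation; get_key() then
+  // turns crd into the space-filling-curve key stored per body.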
#ifndef EXACT_KEY + crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac)); + crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac)); + crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac)); + #else + crd.x = (int)((pos.x - corner.x) / domain_fac); + crd.y = (int)((pos.y - corner.y) / domain_fac); + crd.z = (int)((pos.z - corner.z) / domain_fac); + #endif + +// crd.x = (int)((pos.x - corner.x) / domain_fac + 0.5); +// crd.y = (int)((pos.y - corner.y) / domain_fac + 0.5); +// crd.z = (int)((pos.z - corner.z) / domain_fac + 0.5); + + uint4 key = get_key(crd); + + + if (id == n_bodies) key = (uint4){0xFFFFFFFF, 0xFFFFFFFF, 0, 0}; + + body_key[id] = key; + +} + + +#if 1 +extern "C" __global__ void build_phkey_list(uint4 *body_key, + real4 *body_pos, + int n_bodies, + real4 corner, + uint *reorder) { + + uint bid = blockIdx.y * gridDim.x + blockIdx.x; + uint tid = threadIdx.x; + uint id = bid * blockDim.x + tid; + + if (id > n_bodies) return; + + real4 pos = body_pos[id]; +// real4 pos = body_pos[reorder[id]]; + int4 crd; + + real domain_fac = corner.w; + + //Get the integer position, will be used for the key calculation + #if 1 + crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac)); + crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac)); + crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac)); + #else + + crd.x = (int)((pos.x - corner.x) / domain_fac); + crd.y = (int)((pos.y - corner.y) / domain_fac); + crd.z = (int)((pos.z - corner.z) / domain_fac); + #endif + + + uint4 key_new = get_key(crd); + + if (id == n_bodies) key_new = (uint4){0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; + + body_key[id] = key_new; + +} +#else + +extern "C" __global__ void build_phkey_list(uint2 *body_key, + real4 *body_pos, + int n_bodies, + real4 corner) { + + uint bid = blockIdx.y * gridDim.x + blockIdx.x; + uint tid = threadIdx.x; + uint id = bid * blockDim.x + tid; + + if (id > n_bodies) return; + + real4 pos = body_pos[id]; + int4 crd; + + real domain_fac = corner.w; + + //Get the integer position, will be used for the key calculation + #ifndef EXACT_KEY + crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac)); + crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac)); + crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac)); + #else + crd.x = (int)((pos.x - corner.x) / domain_fac); + crd.y = (int)((pos.y - corner.y) / domain_fac); + crd.z = (int)((pos.z - corner.z) / domain_fac); + #endif + + + const int bits = 18; + int i,xi, yi, zi; + int mask; + long key; + + //0= 000, 1=001, 2=011, 3=010, 4=110, 5=111, 6=101, 7=100 + //000=0=0, 001=1=1, 011=3=2, 010=2=3, 110=6=4, 111=7=5, 101=5=6, 100=4=7 + const int C[8] = {0, 1, 7, 6, 3, 2, 4, 5}; + + int temp; + + mask = 1 << (bits - 1); + key = 0; + + + for(i = 0; i < bits; i++, mask >>= 1) + { + xi = (crd.x & mask) ? 1 : 0; + yi = (crd.y & mask) ? 1 : 0; + zi = (crd.z & mask) ? 
1 : 0; + + if(xi == 0 && yi == 0 && zi == 0) + { + temp = crd.z; crd.z = crd.y; crd.y = temp; + } + else if(xi == 0 && yi == 0 && zi == 1) + { + temp = crd.x; crd.x = crd.y; crd.y = temp; + } + else if(xi == 1 && yi == 0 && zi == 1) + { + temp = crd.x; crd.x = crd.y; crd.y = temp; + } + else if(xi == 1 && yi == 0 && zi == 0) + { + crd.x = (crd.x) ^ (-1); + crd.z = (crd.z) ^ (-1); + } + else if(xi == 1 && yi == 1 && zi == 0) + { + crd.x = (crd.x) ^ (-1); + crd.z = (crd.z) ^ (-1); + } + else if(xi == 1 && yi == 1 && zi == 1) + { + temp = (crd.x) ^ (-1); + crd.x = (crd.y) ^ (-1); + crd.y = temp; + } + else if(xi == 0 && yi == 1 && zi == 1) + { + temp = (crd.x) ^ (-1); + crd.x = (crd.y) ^ (-1); + crd.y = temp; + } + else + { + temp = (crd.z) ^ (-1); + crd.z = (crd.y) ^ (-1); + crd.y = temp; + } + + int index = (xi << 2) + (yi << 1) + zi; + key = (key << 3) + C[index]; + } + + uint2 key_new; + key_new.x = key & 0xFFFFFFFF; + key_new.y = (key >> 32) & 0xFFFFFFFF; + + if (id == n_bodies) key_new = (uint2){0xFFFFFFFF, 0xFFFFFFFF}; + + body_key[id] = key_new; + +} + + +#endif + + +extern "C" __global__ void cl_build_valid_list(int n_bodies, + int level, + uint4 *body_key, + uint *valid_list){ +// uint2 *test_key_data) { + + const uint bid = blockIdx.y * gridDim.x + blockIdx.x; + const uint tid = threadIdx.x; + const uint id = bid * blockDim.x + tid; + const uint4 key_F = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; + const uint4 key_B = {0xFFFFFFF1, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //A border, valid0 will become 1 + const uint4 key_I = {0xFFFFFFF2, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //Ignore + const uint4 key_E = {0xFFFFFFF3, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //End + const uint4 key_A = {0xFFFFFFF4, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; //Start and End +// const uint2 key_TEST = {0x0, 0x0}; //Start and End + +//TODO clean this if we dont use it + + if (id >= n_bodies) return; // >= since the last particle is extra boudnary particle + + uint4 mask = get_mask(level); + mask.x = mask.x | ((uint)1 << 30) | ((uint)1 << 31); + + uint4 key_m; + + uint4 key_c = body_key[id]; + + + uint4 key_p; + if (id == 0) + { + key_m = key_F; + } + else + { + key_m = body_key[id-1]; + } + + if((id+1) < n_bodies) //The last particle gets a different key to compare with + { + key_p = body_key[id+1]; + } + else + key_p = (uint4){0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}; + + + int valid0 = 0; + int valid1 = 0; + + if (cmp_uint4(key_c, key_A) == 0) { + valid0 = 1; //Set a border + valid1 = 1; //Set a border + } + else if (cmp_uint4(key_c, key_B) == 0) { + valid0 = 1; //Set a border + } + else if (cmp_uint4(key_c, key_E) == 0) { + valid1 = 1; //Set a border + } + else if (cmp_uint4(key_c, key_I) == 0) { + //Do nothing + } + else if (cmp_uint4(key_c, key_F) != 0) { + key_c.x = key_c.x & mask.x; + key_c.y = key_c.y & mask.y; + key_c.z = key_c.z & mask.z; + + key_p.x = key_p.x & mask.x; + key_p.y = key_p.y & mask.y; + key_p.z = key_p.z & mask.z; + + key_m.x = key_m.x & mask.x; + key_m.y = key_m.y & mask.y; + key_m.z = key_m.z & mask.z; + + valid0 = abs(cmp_uint4(key_c, key_m)); + valid1 = abs(cmp_uint4(key_c, key_p)); + } + + valid_list[id*2] = id | ((valid0) << 31); + valid_list[id*2+1] = id | ((valid1) << 31); + +} + + +////////////////////////////// +////////////////////////////// +////////////////////////////// + + +extern "C" __global__ void cl_build_nodes(uint level, + uint compact_list_len, + uint offset, + uint *compact_list, +// uint *compact_list_end, + uint4 *bodies_key, + uint4 *node_key, + uint 
*n_children, + uint2 *node_bodies){ +// uint *testValidList) { + + uint bid = blockIdx.y * gridDim.x + blockIdx.x; + uint tid = threadIdx.x; + uint id = bid * blockDim.x + tid; + + if (id >= compact_list_len) return; + + uint bi = compact_list[id*2]; + uint bj = compact_list[id*2+1] + 1; + + + uint4 key = bodies_key[bi]; + uint4 mask = get_mask(level); + key = (uint4){key.x & mask.x, key.y & mask.y, key.z & mask.z, 0}; + + + node_bodies[offset+id] = (uint2){bi | (level << BITLEVELS), bj}; + node_key [offset+id] = key; + n_children [offset+id] = 0; + + if ((int)level > (int)(LEVEL_MIN - 1)) + if (bj - bi <= NLEAF) //Leaf can only have NLEAF particles, if its more there will be a split + for (int i = bi; i < bj; i++) + bodies_key[i] = (uint4){0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF}; //sets the key to FF to indicate the body is used + +} + +////////////////////////////// +////////////////////////////// +////////////////////////////// + + +extern "C" __global__ void cl_link_tree(int n_nodes, + uint *n_children, + uint2 *node_bodies, + real4 *bodies_pos, + real4 corner, + uint2 *level_list, //TODO could make this constant if it proves usefull +// uint* parent_id_list, + uint* valid_list, + uint4 *node_keys, + uint4 *bodies_key, + int maxLevel) { + + const uint bid = blockIdx.y * gridDim.x + blockIdx.x; + const uint tid = threadIdx.x; + uint id = bid * blockDim.x + tid; + + if (id >= n_nodes) return; + + uint2 bij = node_bodies[id]; + uint level = (bij.x & LEVELMASK) >> BITLEVELS; + uint bi = bij.x & ILEVELMASK; + uint bj = bij.y; + + real4 pos = bodies_pos[bi]; + int4 crd; + real domain_fac = corner.w; + + #ifndef EXACT_KEY + crd.x = (int)roundf(__fdividef((pos.x - corner.x), domain_fac)); + crd.y = (int)roundf(__fdividef((pos.y - corner.y) , domain_fac)); + crd.z = (int)roundf(__fdividef((pos.z - corner.z) , domain_fac)); + #else + crd.x = (int)((pos.x - corner.x) / domain_fac); + crd.y = (int)((pos.y - corner.y) / domain_fac); + crd.z = (int)((pos.z - corner.z) / domain_fac); + #endif + + + uint4 key = get_key(crd); + + + /********* accumulate children *****/ + + uint4 mask = get_mask(level - 1); + key = (uint4){key.x & mask.x, key.y & mask.y, key.z & mask.z, 0}; + + uint2 cij; + + + if(id > 0) + cij = level_list[level-1]; + + int ci; + //Jeroen, modified this since we dont use textures in find_key, + //the function will fail because out of bound memory access when id==0 + if(id > 0) + ci = find_key(key, cij, node_keys); + else + ci = 0; + + //ci now points to the node that is the parent, was used in previous group method +// parent_id_list[id] = ci; + + mask = get_imask(mask); + key = (uint4) {key.x | mask.x, key.y | mask.y, key.z | mask.z, 0 }; + if (id > 0) + atomicAdd(&n_children[ci], (1 << 28)); + + key = get_key(crd); + mask = get_mask(level); + key = (uint4) {key.x & mask.x, key.y & mask.y, key.z & mask.z, 0}; + + /********* store the 1st child *****/ + + cij = level_list[level+1]; + int cj = -1; + + cj = find_key(key, cij, node_keys); + + atomicOr(&n_children[id], cj); //Atomic since multiple threads can work on this + + uint valid = id | (uint)(0 << 31); + + + if ((int)level > (int)(LEVEL_MIN - 1)) + if ((bj - bi) <= NLEAF) + valid = id | (uint)(1 << 31); //Distinguish leaves and nodes + + valid_list[id] = valid; +} + +//Determines which level of node starts at which offset +extern "C" __global__ void build_level_list(const int n_nodes, + const int n_leafs, + uint *leafsIdxs, + uint2 *node_bodies, + uint* valid_list) +{ + + const uint bid = blockIdx.y * gridDim.x + blockIdx.x; + 
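+  // One thread per non-leaf node: leafsIdxs stores the n_leafs leaves first, so the
+  // non-leaves start at offset n_leafs.  Each thread compares its node's tree level
+  // with the previous and the next non-leaf; a level change sets bit 31 of the
+  // emitted entry, marking where a level begins/ends so the per-level offsets
+  // (level_list) can be compacted out afterwards.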
const uint tid = threadIdx.x; + const uint id = bid * blockDim.x + tid; + + if (id >= n_nodes-n_leafs) return; + + const int nodeID = leafsIdxs[id+n_leafs]; //Get the idx into the node_bodies array + + int level_c, level_m, level_p; + + + uint2 bij = node_bodies[leafsIdxs[id+n_leafs]]; //current non-leaf + level_c = (bij.x & LEVELMASK) >> BITLEVELS; + + if((id+1) < (n_nodes-n_leafs)) //The last node gets a default lvl + { + bij = node_bodies[leafsIdxs[id+1+n_leafs]]; //next non-leaf + level_p = (bij.x & LEVELMASK) >> BITLEVELS; + } + else + level_p = MAXLEVELS+5; //Last is always an end + + //Compare level with the node before and node after + if(nodeID == 0) + { + level_m = -1; + } + else + { + bij = node_bodies[ leafsIdxs[id-1+n_leafs]]; //Get info of previous non-leaf node + level_m = (bij.x & LEVELMASK) >> BITLEVELS; + } + + int valid0 = 0; + int valid1 = 0; + + valid0 = (level_c != level_m) << 31 | (id+n_leafs); + valid1 = (level_c != level_p) << 31 | (id+n_leafs); + + valid_list[id*2] = valid0; + valid_list[id*2+1] = valid1; + +} //end build_level_list + + +//Finds nodes/leafs that will become groups +//After executions valid_list contains the +//valid nodes/leafs that form groups +extern "C" __global__ void build_group_list(int n_nodes, + uint* parent_id_list, + uint2 *node_bodies, + uint* valid_list) +{ + + uint bid = blockIdx.y * gridDim.x + blockIdx.x; + uint tid = threadIdx.x; + uint id = bid * blockDim.x + tid; + + if (id >= n_nodes) return; + + uint2 bij = node_bodies[id]; + int ownChildren = bij.y - (bij.x & ILEVELMASK); + + + bij = node_bodies[parent_id_list[id]]; + int parentChildren = bij.y - (bij.x & ILEVELMASK); + + + //group if nchild <= NCRIT AND parent_nchild > NCRIT + //if((ownChildren <= NCRIT) && (parentChildren > NCRIT)) + if((ownChildren <= NCRIT) && (parentChildren > NCRIT)) + valid_list[id] = id | (uint)(1 << 31); //Group + else + valid_list[id] = id | (0 << 31); //Not a group +} + +//Finds nodes/leafs that will become groups +//After executions valid_list contains the +//valid nodes/leafs that form groups +extern "C" __global__ void build_group_list2(int n_particles, + uint *validList, + real4 *bodies_pos, + const float DIST) +{ + uint bid = blockIdx.y * gridDim.x + blockIdx.x; + uint tid = threadIdx.x; + uint idx = bid * blockDim.x + tid; + + //TODO use shared mem ffor the positions +//since we use them multiple times? 
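+    // Grouping rule used below: particle idx opens a group whenever idx is a multiple
+    // of NCRIT and closes one whenever (idx+1) is; an extra split is forced where the
+    // squared distance to the previous/next particle exceeds DIST, so spatially
+    // stretched runs do not end up in a single group.  The two flags are packed into
+    // bit 31 of validList[2*idx] and validList[2*idx+1].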
+ + + //Note that we do not include the final particle + //Since there is no reason to check it + if (idx >= n_particles) return; + + //Get the current + float4 curPos, nexPos, prevPos; + + curPos = bodies_pos[idx]; + + //Have to check the first and last to prevent out of bound access + if(idx+1 == n_particles) + nexPos = curPos; + else + nexPos = bodies_pos[idx+1]; + + if(idx == 0) + prevPos = curPos; + else + prevPos = bodies_pos[idx-1]; + + //Compute geometrical distance + float dsPlus = ((curPos.x-nexPos.x)*(curPos.x-nexPos.x)) + + ((curPos.y-nexPos.y)*(curPos.y-nexPos.y)) + + ((curPos.z-nexPos.z)*(curPos.z-nexPos.z)); + + float dsMin = ((curPos.x-prevPos.x)*(curPos.x-prevPos.x)) + + ((curPos.y-prevPos.y)*(curPos.y-prevPos.y)) + + ((curPos.z-prevPos.z)*(curPos.z-prevPos.z)); + + //Multiples of the preferred group size are _always_ valid + int validStart = ((idx % NCRIT) == 0); + int validEnd = (((idx+1) % NCRIT) == 0); + +// const int DIST = 1; +// const float DIST = 44; + + //The extra possible split(s) if the distance between two particles is too large + if(dsPlus > DIST) validEnd = 1; + if(dsMin > DIST) validStart = 1; + + //Last particle is always the end, n_particles dont have to be a multiple of NCRIT + //so this is required + if(idx+1 == n_particles) validEnd = 1; + + //Set valid + validList[2*idx + 0] = (idx) | (uint)(validStart << 31); + validList[2*idx + 1] = (idx+1) | (uint)(validEnd << 31); +} + + +extern "C" __global__ void store_group_list(int n_particles, + int n_groups, + uint *validList, + uint *body2group_list, + uint2 *group_list) +{ + uint bid = blockIdx.y * gridDim.x + blockIdx.x; + uint tid = threadIdx.x; +// uint idx = bid * blockDim.x + tid; + + if(bid >= n_groups) return; + + int start = validList[2*bid]; + int end = validList[2*bid+1]; + + if((start + tid) < end) + { + body2group_list[start + tid] = bid; + } + + if(tid == 0) + { + group_list[bid] = (uint2){start,end}; + } +} + +extern "C" __global__ void expandLeafList(int n_leafs, + uint *leaf2NodeIdx, + uint2 *node_bodies, + uint *leafPart2Body) +{ + uint bid = blockIdx.y * gridDim.x + blockIdx.x; + uint tid = threadIdx.x; + uint idx = bid * blockDim.x + tid; + + + if(bid >= n_leafs) return; + + uint2 bij = node_bodies[leaf2NodeIdx[bid]]; + uint bi = bij.x & ILEVELMASK; + uint bj = bij.y; + + //Write the particle id at the correct location, only if we are + //below the end particle id + if(bi+tid < bj) + { + leafPart2Body[idx] = idx; + } +} + + +//Assign a grp id to each particle of that grp to +//create particle -> group relation using the +//group -> particle relation +extern "C" __global__ void build_body2group_list(const int n_groups, + uint *group_list, + uint2 *node_bodies, + uint *body2group_list) +{ + const int bid = gridDim.x * blockIdx.y + blockIdx.x; + const int tid = threadIdx.x; + + if (bid >= n_groups) return; + + const int nodeID = group_list[bid]; + + uint2 bij = node_bodies[nodeID]; + + const uint firstChild = bij.x & ILEVELMASK; + const uint nChildren = bij.y - (bij.x & ILEVELMASK); + + int idx = firstChild+tid; + + //Save the group id for this particle + if (tid < nChildren) + body2group_list[idx] = bid; +} + + diff --git a/cuda_code/c_manager.cu b/cuda_code/c_manager.cu new file mode 100644 index 0000000000000000000000000000000000000000..65acf2c6b38847c4d2ef37cb32af5d8b002134d3 --- /dev/null +++ b/cuda_code/c_manager.cu @@ -0,0 +1,1262 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include //inet_addr +#include //hostent 
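+// Wire protocol used throughout this manager: every socket message is framed in
+// fixed MSGSIZE-byte chunks.  The first chunk is an ASCII header of whitespace-
+// separated tokens terminated by "END" (e.g. "Start 3 END" or
+// "SEND <frames> <bytes> <type> END") announcing how many payload frames follow;
+// msg_parser() below tokenises such headers.  The dispatch loop in main() handles,
+// roughly, buffer upload (SEND), concatenation (SEQ), availability checks (CHECK),
+// result download (RECV), peer transfers (Transfer) and a CUDA compile-and-launch
+// request, with buffers tracked under randomly generated "Task_..." keys.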
+#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "builtin_types.h" +#include "device_launch_parameters.h" + +#define SOCK_REUSE 0 +#define MSGSIZE (1024) +#define CHUNKSIZE (1024*1024*5) +#define PORT 4949 + +using namespace std; +using namespace chrono; + +typedef unsigned int uint; +typedef unsigned char uchar; + +template class vispark_data; + +typedef pair*> vpair; + + +#define RUN_LOG 1 + +#if defined(RUN_LOG) && RUN_LOG > 0 + #define log_print(fmt, args...) fprintf(stderr, "[%s] %s():%04d - " fmt, \ + host_name,__func__, __LINE__, ##args) +#else + #define log_print(fmt, args...) /* Don't do anything in release builds */ +#endif + +#define NUM_THREADS 2 + +void omp_memcpy(char *dst, char *src, size_t len) +{ + #pragma omp parallel + { + int tid = omp_get_thread_num(); + int num_threads = omp_get_num_threads(); + + size_t start = (len/num_threads)*tid; + size_t end = (len/num_threads)*(tid+1); + + if (tid == num_threads - 1) + end = len; + + memcpy(dst + start , src + start , end-start); + } +} + + +void call_memcpy(char *dst, char *src, int start, int end) +{ + memcpy(dst + start , src + start , end-start); +} + +void mt_memcpy(char *dst, char *src, size_t len) +{ + auto th1 = thread(call_memcpy,dst,src,len*0.00,len*0.25); + auto th2 = thread(call_memcpy,dst,src,len*0.25,len*0.50); + auto th3 = thread(call_memcpy,dst,src,len*0.50,len*0.75); + auto th4 = thread(call_memcpy,dst,src,len*0.75,len*1.00); + + th1.join(); + th2.join(); + th3.join(); + th4.join(); +} + +void error(const char *msg) +{ + perror(msg); + exit(0); +} + +void msg_print(vector msg) +{ + for (auto n : msg) + cout << n << " "; + cout << endl; +} + +vector msg_parser(const char* msg_buffer) +{ + const string s(msg_buffer); + istringstream ist(s); + + vector tmp,ss; + copy(istream_iterator(ist), istream_iterator(), + back_inserter(tmp)); + + for (auto n : tmp) { + if (strcmp(n.c_str(),"END") == 0) + break; + ss.push_back(n); + } + + return ss; +} + +string RandomString(const char * prefix, int len, int type) +{ +// if (type > 0) + string str = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; + string newstr(prefix); + int pos; + while(newstr.size() != len) { + pos = ((rand() % (str.size() - 1))); + newstr += str.substr(pos,1); + } + //newstr += '\n'; + return newstr; +} + +class msg_create +{ + char *msg; + size_t msg_size; + int lenn; + + public: + msg_create(uint num_msg = 1){ + + uint size = (num_msg + 1)*MSGSIZE; + this->msg_size = sizeof(char)*size; + + msg = new char[size]; + bzero(msg,size); + } + + void set_head(string s){ + + memcpy(msg,s.c_str(),s.size()); + } + + void set_msg(string s){ + memcpy(msg + MSGSIZE, s.c_str(),s.size()); + } + + char *ptr(){ + return msg; + } + + size_t size(){ + return msg_size; + } + + void print(){ + for (int i =0 ; i < msg_size ; i++) + printf("%c",msg[i]); + printf("\n"); + } + +}; + +template +class vispark_data +{ + T* host_ptr = nullptr; + T* dev_ptr = nullptr; + size_t malloc_size = 0; + size_t data_size = 0; + string data_type = "char"; + string data_key = ""; + bool in_mem_flag = false; + cudaStream_t *stream_list; + int stream_num; + + + public: + vispark_data(uint data_size){ + // printf("Constructor \n"); + this->malloc_size = data_size*sizeof(T) + MSGSIZE; + this->data_size = data_size*sizeof(T); + cudaHostAlloc((void**)&host_ptr, malloc_size, cudaHostAllocDefault); + //host_ptr = new T[malloc_size]; + cudaMalloc((void**)&dev_ptr, data_size); + + int stream_num = 
data_size % CHUNKSIZE > 0 ? data_size/CHUNKSIZE + 1 : data_size/CHUNKSIZE; + stream_list = new cudaStream_t[stream_num]; + + for (int i = 0 ; i < stream_num ; i++) + cudaStreamCreate(&(stream_list[i])); + } + + ~vispark_data() + { + // printf("Distructor \n"); + cudaFreeHost(host_ptr); + cudaFree(dev_ptr); + } + + + vispark_data(const vispark_data &A) + { + // printf("Copy \n"); +/* + this->malloc_size = A.malloc_size; + this->data_size = A.data_size; + cudaHostAlloc((void**)&host_ptr, malloc_size, cudaHostAllocDefault); + // host_ptr = new T[malloc_size]; + cudaMalloc((void**)&dev_ptr, malloc_size); + + memcpy(this->host_ptr,A.host_ptr,malloc_size); +*/ + } + + + //void htod(cudaStream_t stream){ + void htod(){ + //cudaMemsetAsync(dev_ptr,0,malloc_size,stream); + cudaMemcpy(dev_ptr,host_ptr,data_size,cudaMemcpyHostToDevice); + in_mem_flag = true; + } + + void dtoh(){ + cudaMemcpy(host_ptr,dev_ptr,data_size,cudaMemcpyDeviceToHost); + //cudaMemsetAsync(dev_ptr,0,malloc_size,stream); + } + + vector dtoh_stream(){ + vector stream_list; + + int offset =0; + while(offset < data_size){ + cudaStream_t *stream = new cudaStream_t; + cudaStreamCreate(stream); + + + cudaMemcpyAsync(host_ptr + offset,dev_ptr+offset,CHUNKSIZE,cudaMemcpyDeviceToHost,*stream); + + offset += CHUNKSIZE; + stream_list.push_back(stream); + } + + return stream_list; + } + + + size_t getMallocSize(){ + return malloc_size; + } + + uint getDataSize(){ + return data_size; + } + + T* getHostPtr(){ + return host_ptr; + } + + T* getDevPtr(){ + return dev_ptr; + } + + void setDataKey(string data_key){ + this->data_key = data_key; + } + + string getDataKey(){ + return data_key; + } + + bool inGPU(){ + return in_mem_flag; + } + + void setInGPU(bool flag){ + in_mem_flag = flag; + } + +}; + + +// -*- -*- -*- + +CUcontext context; +CUdevice device; +CUfunction kernelfunc; +CUmodule module; + +// -*- -*- -*- + +CUresult kernel_call(vispark_data* out_data, vispark_data* in_data, vector< tuple >* args) { + CUdeviceptr devInArr1, devOutArr1; + CUresult err; + + devInArr1 = (CUdeviceptr) in_data->getDevPtr(); + devOutArr1 = (CUdeviceptr) out_data->getDevPtr(); + + vector kernelParams; + kernelParams.push_back(&devOutArr1); + kernelParams.push_back(&devInArr1); + + for (auto n : *args){ + + string type = get<0>(n); + int len = get<1>(n); + char * data = get<2>(n); + + if (strcmp(type.c_str(),"int") == 0){ + int *data_ptr = (int *) data; + if (len == 1) + kernelParams.push_back(const_cast(data_ptr)); + else{ + + CUdeviceptr* local_arr = new CUdeviceptr; + cuMemAlloc(local_arr, sizeof(int) * len); + cuMemcpyHtoD(*local_arr, data_ptr, sizeof(int) * len); + kernelParams.push_back(local_arr); + } + } + + if (strcmp(type.c_str(),"double") == 0){ + double *data_ptr = (double *) data; + if (len == 1) + kernelParams.push_back(const_cast(data_ptr)); + else{ + + CUdeviceptr* local_arr = new CUdeviceptr; + cuMemAlloc(local_arr, sizeof(double) * len); + cuMemcpyHtoD(*local_arr, data_ptr, sizeof(double) * len); + kernelParams.push_back(local_arr); + } + + } + + if (strcmp(type.c_str(),"float") == 0){ + float *data_ptr = (float *) data; + if (len == 1) + kernelParams.push_back(const_cast(data_ptr)); + else{ + + CUdeviceptr* local_arr = new CUdeviceptr; + cuMemAlloc(local_arr, sizeof(float) * len); + cuMemcpyHtoD(*local_arr, data_ptr, sizeof(float) * len); + kernelParams.push_back(local_arr); + } + + } + + } + + //in_data->htod(); + //auto host_ptr = in_data->getHostPtr(); + //memset(host_ptr,0,512*512*3*sizeof(char)); + /* + vector stream_list; + for (int i = 
0 ; i < 16 ; i++){ + cudaStream_t* stream = new cudaStream_t; + cudaStreamCreate(stream); + stream_list.push_back(stream); + } +*/ +// for (int i = 0 ; i < 16 ; i++){ + err = cuLaunchKernel(kernelfunc, 256,256,1, 16, 16, 1, 0, 0, &kernelParams[0], 0); + if (err != CUDA_SUCCESS) return err; + + out_data->setInGPU(true); + //out_data->dtoh(); + //in_data->dtoh(); + + return err; +}; + +// map*> data_dict; +// map>*> args_dict; + +int GPU_TEST(const char *ptxfile, const char* func_name, vispark_data* out_data, vispark_data* in_data, vector>* args) { + CUresult err; + + /* + int deviceCount = 0; + + + //err = cuInit(0); + if (err != CUDA_SUCCESS) { printf("cuInit error... .\n"); return err; } + err = cuDeviceGetCount(&deviceCount); + if (err != CUDA_SUCCESS) { printf("cuDeviceGetCount error... .\n"); return err; } + if (deviceCount == 0) { printf("No CUDA-capable devices... .\n"); return err; } + err = cuDeviceGet(&device, 0); + if (err != CUDA_SUCCESS) { printf("cuDeviceGet error... .\n"); return err; } + err = cuCtxCreate(&context, 0, device); + if (err != CUDA_SUCCESS) { printf("cuCtxCreate error... .\n"); return err; } + */ + err = cuModuleLoad(&module, ptxfile); + if (err != CUDA_SUCCESS) { printf("cuModuleLoad error... .\n"); return err; } + err = cuModuleGetFunction(&kernelfunc, module, func_name); + if (err != CUDA_SUCCESS) { printf("cuModuleGetFunction error... .\n"); return err; } + + err = kernel_call(out_data,in_data,args); + if (err != CUDA_SUCCESS) { printf("Kernel invocation failed... .\n"); return err; } + // for (int i = 0; i < 10; ++i) printf("%d + %d = %d\n", inArr1[i], inArr2[i], outArr1[i]); + //cuCtxSynchronize(); + //cuCtxDetach(context); + return 0; +} + + +vector workers; +int num_workers; +char *host_name; +int w_idx; + +int main(int argc, char* argv[]) +{ + srand(getpid()); + + //omp_set_num_threads(NUM_THREADS); + + //Argument + if (argc > 3){ + + char *slave_name = argv[1]; + host_name = argv[2]; + char *pid_name = argv[3]; + + string line; + ifstream slave_file(slave_name); + while (getline(slave_file,line)) + { + line.erase(std::remove(line.begin(), line.end(), ' '), line.end()); + line = line.substr(0,line.find("#")); + + if (line.size() > 0) + workers.push_back(line); + } + + num_workers = workers.size(); + + for (int i = 0 ; i < workers.size() ; i++){ + auto n = workers[i]; + if (strcmp(n.c_str(),host_name) == 0){ + w_idx = i; + break; + } + } + + ofstream pid_file(pid_name,ios::trunc); + pid_file << getpid(); + pid_file.close(); + + log_print("Launch Process among %d/%d (%d)\n",w_idx,num_workers,getpid()); + } + + //Dict + map*> data_dict; + map>*> args_dict; + map code_dict; + + //Variable + int sockfd, newsockfd, portno; + socklen_t clilen; + struct sockaddr_in serv_addr, cli_addr; + int n; + //int lenn; + + //Timer + time_point start, end; + double etime; + int throughput; + + + //CUDA + CUresult err; + int deviceCount = 0; + + err = cuInit(0); + if (err != CUDA_SUCCESS) { printf("cuInit error... .\n"); return err; } + err = cuDeviceGetCount(&deviceCount); + if (err != CUDA_SUCCESS) { printf("cuDeviceGetCount error... .\n"); return err; } + if (deviceCount == 0) { printf("No CUDA-capable devices... .\n"); return err; } + err = cuDeviceGet(&device, 0); + if (err != CUDA_SUCCESS) { printf("cuDeviceGet error... .\n"); return err; } + err = cuCtxCreate(&context, 0, device); + if (err != CUDA_SUCCESS) { printf("cuCtxCreate error... 
.\n"); return err; } + + //Socket Binding + sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (sockfd < 0) + error("ERROR opening socket"); + + +#if SOCK_REUSE + //add reuse + int enable = 1; + if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(int)) < 0) + error("setsockopt(SO_REUSEADDR) failed"); +#endif + + bzero((char *) &serv_addr, sizeof(serv_addr)); + portno = PORT; + serv_addr.sin_family = AF_INET; + serv_addr.sin_addr.s_addr = INADDR_ANY; + serv_addr.sin_port = htons(portno); + if (bind(sockfd, (struct sockaddr *) &serv_addr, + sizeof(serv_addr)) < 0) + error("ERROR on binding"); + listen(sockfd,64); + + //printf("Port %d Open \n",portno); + + // const char* test = "SEND 8 9 uchar END 00000000000"; + + + char *buffer, *send_buf; + buffer = new char[MSGSIZE]; + send_buf= new char[MSGSIZE]; + memset(buffer,0,sizeof(char)*MSGSIZE); + memset(send_buf,0,sizeof(char)*MSGSIZE); + + + while(true) + { + vector log_msg; + clilen = sizeof(cli_addr); + newsockfd = accept(sockfd, (struct sockaddr *) &cli_addr, &clilen); + + start = system_clock::now(); + + if (newsockfd < 0) + error("ERROR on accept"); + + + string data_key = RandomString("Task_",24,0); + + + n = read(newsockfd,buffer,MSGSIZE); + if (n < 0) error("ERROR reading from socket"); + + auto msg = msg_parser(buffer); + //msg_print(msg); + vector::iterator msg_iter= msg.begin(); + int total_lenn = stoi(msg_iter[1]); + int total_recv = 0; + + //log_print("Start %s (%d) \n",data_key.c_str(),total_lenn); + + char *data_ptr = new char[total_lenn * MSGSIZE]; + + while(true){ + n = read(newsockfd,data_ptr+total_recv,MSGSIZE); + total_recv += n ; + //if (n < 0) error("ERROR reading from socket"); + //else if (n < MSGSIZE) printf("Reading %d packet \n",n); + if (n == 0) break; + } + + + if (total_lenn*MSGSIZE != total_recv){ + log_print("%d != %d \n",total_lenn*MSGSIZE,total_recv); + } + assert(total_lenn*MSGSIZE == total_recv); + + + end = system_clock::now(); + + duration elapsed = end-start; + throughput = (total_lenn*MSGSIZE)/(1024*1024); + etime = elapsed.count(); + log_print("[MSGRECV] %f MB/s (%d / %f)\n",throughput/etime,total_lenn*MSGSIZE,etime); + + //while(false) + //for (int read_ptr = 0 ; read_ptr < total_recv ; read_ptr += MSGSIZE) + //start = system_clock::now(); + int read_ptr =0; + + while (read_ptr < total_recv) + { + start = system_clock::now(); + + memcpy(buffer,data_ptr+read_ptr,sizeof(char)*MSGSIZE); + read_ptr += MSGSIZE; + auto msg = msg_parser(buffer); + + + vector::iterator iter = msg.begin(); + auto cmd = *iter; + + + if (strcmp(cmd.c_str(),"SEND")==0){ + + int lenn = stoi(iter[1]); + int data_len = stoi(iter[2]); + string data_type = iter[3]; + + //cout << lenn<< " " << data_len << " "<(data_len); + + char* dest_ptr = data->getHostPtr(); + + memcpy(dest_ptr,data_ptr + read_ptr,lenn*MSGSIZE*sizeof(char)); + read_ptr += lenn*MSGSIZE; + /* + for (int i = 0 ; i < lenn; i++){ + n = read(newsockfd,data_ptr+i*MSGSIZE,MSGSIZE); + if (n < 0) error("ERROR reading from socket"); + else if (n < MSGSIZE) printf("Reading %d packet \n",n); + } + */ + /* + while (true){ + n = read(newsockfd,data_ptr+i*MSGSIZE,MSGSIZE); + if (n < 0) error("ERROR reading from socket"); + else if (n < MSGSIZE) printf("Reading %d packet \n",n); + if (n == 0) break; + } */ + + data->htod(); + data_dict.insert(vpair(data_key,data)); + + } + else if (strcmp(cmd.c_str(),"SEQ")==0){ + + int lenn = stoi(iter[1]); + int data_len = stoi(iter[2]); + string data_type = iter[3]; + + + vector target_list; + + for (int i = 0 ; i < lenn; i++){ + 
memcpy(buffer,data_ptr+read_ptr,sizeof(char)*MSGSIZE); + read_ptr += MSGSIZE; + auto local_msg= msg_parser(buffer); + auto local_iter = local_msg.begin(); + target_list.push_back(local_iter[1]); + } + + //for ( auto n : target_list) + // cout<second; + + data_len += struct_ptr->getDataSize(); + } + + auto data = new vispark_data(data_len); + //char* dest_ptr = data->getHostPtr(); + char* dest_ptr = data->getDevPtr(); + char* source_ptr; + + int copy_off = 0; + for ( auto target_key : target_list){ + auto struct_ptr = data_dict.find(target_key)->second; + auto source_size = struct_ptr->getDataSize(); + + if (struct_ptr->inGPU() == true){ + source_ptr = struct_ptr->getDevPtr(); + cudaMemcpy(dest_ptr + copy_off, source_ptr,source_size*sizeof(char),cudaMemcpyDeviceToDevice); + }else { + source_ptr = struct_ptr->getHostPtr(); + cudaMemcpy(dest_ptr + copy_off, source_ptr,source_size*sizeof(char),cudaMemcpyHostToDevice); + } + copy_off += source_size; + } + + string result_key = RandomString("Task_",24,0); + data ->setDataKey(result_key); + data -> setInGPU(true); + + data_key = result_key; + + //data->htod(); + data_dict.insert(vpair(result_key,data)); + + //memcpy(dest_ptr,data_ptr + read_ptr,lenn*MSGSIZE*sizeof(char)); + //read_ptr += lenn*MSGSIZE; + /* + for (int i = 0 ; i < lenn; i++){ + n = read(newsockfd,data_ptr+i*MSGSIZE,MSGSIZE); + if (n < 0) error("ERROR reading from socket"); + else if (n < MSGSIZE) printf("Reading %d packet \n",n); + } + */ + /* + while (true){ + n = read(newsockfd,data_ptr+i*MSGSIZE,MSGSIZE); + if (n < 0) error("ERROR reading from socket"); + else if (n < MSGSIZE) printf("Reading %d packet \n",n); + if (n == 0) break; + } */ + + + + } + else if (strcmp(cmd.c_str(),"CHECK")==0){ + + int lenn = stoi(iter[1]); + int data_len = stoi(iter[2]); + string data_type = iter[3]; + + + vector target_list; + + for (int i = 0 ; i < lenn; i++){ + memcpy(buffer,data_ptr+read_ptr,sizeof(char)*MSGSIZE); + read_ptr += MSGSIZE; + auto local_msg= msg_parser(buffer); + auto local_iter = local_msg.begin(); + target_list.push_back(local_iter[1]); + } + + //cout<<"REQUIRED"< target_list; + + for (int i = 0 ; i < lenn; i++){ + memcpy(buffer,data_ptr+read_ptr,sizeof(char)*MSGSIZE); + read_ptr += MSGSIZE; + auto local_msg= msg_parser(buffer); + auto local_iter = local_msg.begin(); + target_list.push_back(local_iter[1]); + } + + //cout<<"REQUIRED"< missing_list; + + for ( auto target_key : target_list){ + auto struct_iter = data_dict.find(target_key); + if (struct_iter == data_dict.end()) + missing_list.push_back(target_key); + } + + //cout<<"MISSING"< 0){ + + auto send_obj =msg_create(); + string head = "Start 1 END "; + string cont = "REQUEST "; + + for (auto n : missing_list) + cont = cont + n + " "; + + cont += "END "; + + //while (head.size() < MSGSIZE) + // head += '0'; + + //while (cont.size() < MSGSIZE) + // cont += '0'; + + + //string send_obj = head+cont; + + //cout<h_addr_list; + + for(int i = 0; addr_list[i] != NULL; i++) + { + //strcpy(ip , inet_ntoa(*addr_list[i]) ); + other_addr.sin_addr = *addr_list[i]; + + //cout<= 0) + { + //log_print("Connected %s and %s \n",host_name,address.c_str()); + + for (uint offset = 0 ; offset < send_len; offset += MSGSIZE) + { + n = write(send_sock,send_ptr + offset,MSGSIZE); + //if (n < MSGSIZE) error("ERROR reading from socket 1"); + } + shutdown(send_sock,SHUT_WR); + + } + else + ; + //log_print("Fail to Connected %s and %s \n",host_name,address.c_str()); + } + + } + } + else if (strcmp(cmd.c_str(),"RECV")==0){ + + // string data_key = iter[1]; + 
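+            // RECV streams a stored buffer back to the requesting client: the data is
+            // copied from the device into pinned host memory first (if it is resident
+            // on the GPU), written out in MSGSIZE-sized chunks, and shutdown(SHUT_WR)
+            // signals end-of-stream to the reader.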
//cout<<"RECV KEY "<< data_key <second; + if (struct_ptr->inGPU() == true) + struct_ptr->dtoh(); + char* data_ptr = struct_ptr->getHostPtr(); + //uint send_len = struct_ptr->getMallocSize(); + uint data_len = struct_ptr->getDataSize(); + // uint lenn = send_len / MSGSIZE; + + + for (uint offset = 0 ; offset < data_len ; offset += MSGSIZE) + { + uint send_size = min(MSGSIZE,data_len-offset); + n = write(newsockfd,data_ptr + offset,send_size); + //if (n < 0) error("ERROR reading from socket 1"); + //else if (n < MSGSIZE) printf("Sending %d packet \n",n); + } + shutdown(newsockfd,SHUT_WR); + + //cout<<"Finish Task : "<second; + if (struct_ptr->inGPU() == true) + struct_ptr->dtoh(); + char* data_ptr = struct_ptr->getHostPtr(); + //uint send_len = struct_ptr->getMallocSize(); + uint data_len = struct_ptr->getDataSize(); + // uint lenn = send_len / MSGSIZE; + + struct sockaddr_in other_addr; + auto send_sock = socket(AF_INET , SOCK_STREAM , 0); + bzero((char *) &other_addr, sizeof(other_addr)); + + other_addr.sin_addr.s_addr = inet_addr("192.168.1.11"); + other_addr.sin_family = AF_INET; + other_addr.sin_port = htons( loc_port ); + + if (connect(send_sock , (struct sockaddr *)&other_addr , sizeof(other_addr)) >= 0) + { + for (uint offset = 0 ; offset < data_len; offset += MSGSIZE){ + uint send_size = min(MSGSIZE,data_len-offset); + n = write(send_sock,data_ptr + offset,send_size); + } + shutdown(send_sock,SHUT_WR); + } + else + ; + + n = write(newsockfd,data_key.c_str(),data_key.size()); + shutdown(newsockfd,SHUT_WR); + + + + //cout<<"Finish Task : "<>; + + for (auto arg_iter = iter+7 ; arg_iter != msg.end() ; arg_iter++) + { + string elem_type = *arg_iter; + + int elem_len = 1; + //cout<< elem_type<push_back(make_pair(n.first,string(data_read))); + args_data->push_back(make_tuple(elem_type,elem_num,data_read)); + offset += elem_num*elem_len; + + } + + args_dict.insert(make_pair(data_key,args_data)); + + + /***************************************/ + /* CUDA compile */ + /***************************************/ + string filename = RandomString("/tmp/cuda_",16,code_len); + string cudafile = filename + ".ptx"; + //string ptxfile = filename + ".ptx"; + //cout<(result_len); + string result_key = RandomString("Task_",24,0); + result_data->setDataKey(result_key); + + auto data_elem = data_dict.find(data_key)->second; + auto args_elem = args_dict.find(data_key)->second; + + n = GPU_TEST(cudafile.c_str(),func_name.c_str(),result_data,data_elem,args_elem); + + data_dict.insert(vpair(result_key,result_data)); + + data_key = result_key; + + } + else if (strcmp(cmd.c_str(),"HIT")==0){ + data_key = iter[1]; + //cout<<"HIT KEY "<< data_key < recv_list; + + for (auto n = iter+1; n != msg.end(); n++){ + + auto struct_iter = data_dict.find(*n); + if (struct_iter != data_dict.end()) + recv_list.push_back(*n); + } + + //cout<second; + //struct_ptr->dtoh(); + //auto stream_iter = struct_ptr->dtoh_stream().begin(); + auto stream_list = struct_ptr->dtoh_stream(); + auto stream_iter = stream_list.begin(); + //auto stream_end = struct_ptr->dtoh_stream().end(); + int proc_size = 0; +// cudaDeviceSynchronize(); + + /* + for (auto stream_iter : struct_ptr->dtoh_stream()){ + auto err = cudaStreamSynchronize(*(stream_iter)); + if (err != CUDA_SUCCESS) { log_print("Stream error... 
.\n"); } + proc_size += CHUNKSIZE; + log_print("PROCESSED %d \n",proc_size); + } + */ + + //log_print("NUM STREAM %d \n",stream_list.size()); + int host_len = struct_ptr->getDataSize(); + char* host_ptr = struct_ptr->getHostPtr(); + int lenn = host_len%MSGSIZE == 0 ? host_len/MSGSIZE : host_len/MSGSIZE + 1; + +// log_print("Send %d/%d data \n",lenn,host_len); + + auto send_obj =msg_create(); + string head = "Start "+ to_string(lenn+1) + " END"; + string cont = "Transfer " + n + " " + to_string(lenn) + " " + + to_string(host_len) + " END"; + + send_obj.set_head(head); + send_obj.set_msg(cont); + + struct sockaddr_in other_addr; + auto send_sock = socket(AF_INET , SOCK_STREAM , 0); + bzero((char *) &other_addr, sizeof(other_addr)); + + other_addr.sin_addr.s_addr = cli_addr.sin_addr.s_addr; + other_addr.sin_family = AF_INET; + other_addr.sin_port = htons( portno ); + + if (connect(send_sock , (struct sockaddr *)&other_addr , sizeof(other_addr)) >= 0) + { + char *send_ptr = send_obj.ptr(); + for (uint offset = 0 ; offset < 2*MSGSIZE; offset += MSGSIZE) + n = write(send_sock,send_ptr + offset,MSGSIZE); + + for (uint offset = 0 ; offset < lenn*MSGSIZE; offset += MSGSIZE){ + if (offset >= proc_size){ + auto err = cudaStreamSynchronize(*(*stream_iter)); + if (err != CUDA_SUCCESS) { log_print("Stream error...[%d]\n",err); } + proc_size += CHUNKSIZE; + //log_print("PROCESSED %d \n",proc_size); + stream_iter++; + } + n = write(send_sock,host_ptr + offset,MSGSIZE); + } + shutdown(send_sock,SHUT_WR); + } + else + ; + // log_print("Fail to Connected \n"); + } + + } + else if (strcmp(cmd.c_str(),"Transfer")==0){ + + string recv_key = iter[1]; + int lenn = stoi(iter[2]); + int data_len = stoi(iter[3]); + //string data_type = iter[3]; + + //cout << lenn<< " " << data_len << " "<(data_len); + + char* dest_ptr = data->getHostPtr(); + + //memcpy(dest_ptr,data_ptr + read_ptr,lenn*MSGSIZE*sizeof(char)); + //adv_memcpy(dest_ptr,data_ptr + read_ptr,lenn*MSGSIZE*sizeof(char)); + mt_memcpy(dest_ptr,data_ptr + read_ptr,lenn*MSGSIZE*sizeof(char)); + //memmove(dest_ptr,data_ptr + read_ptr,lenn*MSGSIZE*sizeof(char)); + read_ptr += lenn*MSGSIZE; + + //log_print("GET %s (%d) \n",recv_key.c_str(),lenn); + /* + for (int i = 0 ; i < lenn; i++){ + n = read(newsockfd,data_ptr+i*MSGSIZE,MSGSIZE); + if (n < 0) error("ERROR reading from socket"); + else if (n < MSGSIZE) printf("Reading %d packet \n",n); + } + */ + /* + while (true){ + n = read(newsockfd,data_ptr+i*MSGSIZE,MSGSIZE); + if (n < 0) error("ERROR reading from socket"); + else if (n < MSGSIZE) printf("Reading %d packet \n",n); + if (n == 0) break; + } */ + + + data_dict.insert(vpair(recv_key,data)); + + } + else { + log_print("ERROR %s \n",cmd.c_str()); + // break; + } + +// fflush(stdout); + // cout.flush(); + //log_print("[%s] Recv : %f MB/s (%d / %f)\n",cmd.c_str(),throughput/etime,total_lenn*MSGSIZE,etime); + end = system_clock::now(); + elapsed = end-start; + etime = elapsed.count(); + log_print("[%s] %f MB/s (%d / %f)\n",cmd.c_str(),throughput/etime,total_lenn*MSGSIZE,etime); + } + + //log_print("EXEC Bandwidth : %f MB/s (%d / %f)\n",throughput/etime,total_lenn*MSGSIZE,etime); + + /* + GetTimeDiff(0); + //Data transfer + for (int i = 0 ; i < lenn ;i++){ + int offset = i*MSGSIZE; + + n = read(newsockfd,buffer + offset,MSGSIZE); + if (n < 0) error("ERROR reading from socket"); + } + n = write(newsockfd,"I got your message",18); + if (n < 0) error("ERROR writing to socket"); + + clock_gettime(CLOCK_REALTIME, &spec); + s = spec.tv_sec; + ms = round(spec.tv_nsec / 
1.0e6); // Convert nanoseconds to milliseconds + + printf("%ld.%03ld\n", (intmax_t)s, ms); + */ + } + return 0; + +} + diff --git a/cuda_code/c_string.cu b/cuda_code/c_string.cu new file mode 100644 index 0000000000000000000000000000000000000000..d434fb098a9a1d50c81d8f7b1bd34e24ae19c89f --- /dev/null +++ b/cuda_code/c_string.cu @@ -0,0 +1,544 @@ +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN +#include "common.cuh" +#include + +// TODO: Pass expected values to the device, and results back to the host for +// comparison. At the moment, errors only tell you which check failed, not +// what the actual and expected values were, nor what arguments the tested +// function was invoked with. + +// Note: +// Testcases are adapted from those used in the Public-Domain C Library. See: +// https://rootdirectory.ddns.net/dokuwiki/doku.php?id=pdclib:start + +constexpr const std::size_t max_num_checks_per_test { 100 }; + +namespace kernels { + +__global__ void test_strcmp(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* s1, const char* s2, bool (*predicate)(int) ) { + *(result++) = predicate(kat::c_std_lib::strcmp(s1, s2)); + }; + + const auto abcde = "abcde"; + const auto abcdx = "abcdx"; + const auto cmpabcde = "abcde"; + const auto cmpabcd_ = "abcd\xfc"; + const auto empty = ""; + + auto is_negative = [](int i) { return i < 0; }; + auto is_positive = [](int i) { return i > 0; }; + auto is_zero = [](int i) { return i == 0; }; + + constexpr int line_before_first_check = __LINE__; + single_check(abcde, cmpabcde, is_zero ); + single_check(abcde, abcdx, is_negative ); + single_check(abcdx, abcde, is_positive ); + single_check(empty, abcde, is_negative ); + single_check(abcde, empty, is_positive ); + single_check(abcde, cmpabcd_, is_negative ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strncmp(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* s1, const char* s2, std::size_t n, bool (*predicate)(int) ) { + *(result++) = predicate(kat::c_std_lib::strncmp(s1, s2, n)); + }; + + const auto abcde = "abcde"; + const auto abcdx = "abcdx"; + const auto cmpabcde = "abcde"; + const auto cmpabcd_ = "abcd\xfc"; + const auto empty = ""; + const auto x = "x"; + + auto is_negative = [](int i) { return i < 0; }; + auto is_positive = [](int i) { return i > 0; }; + auto is_zero = [](int i) { return i == 0; }; + + constexpr int line_before_first_check = __LINE__; + single_check(abcde, cmpabcde, 5, is_zero); + single_check(abcde, cmpabcde, 10, is_zero); + single_check(abcde, abcdx, 5, is_negative); + single_check(abcdx, abcde, 5, is_positive); + single_check(empty, abcde, 5, is_negative); + single_check(abcde, empty, 5, is_positive); + single_check(abcde, abcdx, 4, is_zero); + single_check(abcde, x, 0, is_zero); + single_check(abcde, x, 1, is_negative); + single_check(abcde, cmpabcd_, 10, is_negative); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_memcmp(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const void* s1, const void* s2, std::size_t n, bool (*predicate)(int) ) { + *(result++) = predicate(kat::c_std_lib::memcmp(s1, s2, n)); + }; + + const auto abcde = "abcde"; + const auto abcdx = "abcdx"; + const auto xxxxx = "xxxxx"; + + auto is_negative = [](int i) { 
return i < 0; }; + auto is_positive = [](int i) { return i > 0; }; + auto is_zero = [](int i) { return i == 0; }; + + constexpr int line_before_first_check = __LINE__; + single_check(abcde, abcdx, 5, is_negative); + single_check(abcde, abcdx, 4, is_zero); + single_check(abcdx, xxxxx, 0, is_zero); + single_check(xxxxx, abcde, 1, is_positive); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strcpy(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check_invocation = [&](char* dest, const char* src ) { + auto ret = kat::c_std_lib::strcpy(dest, src); + *(result++) = (ret == dest); + }; + auto single_check_char_value = [&](const char* strcpy_dest, std::size_t pos, char expected_value) { + *(result++) = (strcpy_dest[pos] == expected_value); + }; + + const auto abcde = "abcde"; + + char s[] = "xxxxx"; + + constexpr int line_before_first_check = __LINE__; + single_check_invocation(s, "" ); + single_check_char_value(s, 0, '\0' ); + single_check_char_value(s, 1, 'x' ); + single_check_invocation(s, abcde); + single_check_char_value(s, 0, 'a' ); + single_check_char_value(s, 4, 'e' ); + single_check_char_value(s, 5, '\0' ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strncpy(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check_invocation = [&](char* dest, const char* src, std::size_t n ) { + auto ret = kat::c_std_lib::strncpy(dest, src, n); + *(result++) = (ret == dest); + }; + auto single_check_char_value = [&](const char* strncpy_dest, std::size_t pos, char expected_value) { + *(result++) = (strncpy_dest[pos] == expected_value); + }; + + const auto abcde = "abcde"; + + char s[] = "xxxxxxx"; + + constexpr int line_before_first_check = __LINE__; + single_check_invocation( s, "", 1 ); + single_check_char_value( s, 0, '\0' ); + single_check_char_value( s, 1, 'x' ); + single_check_invocation( s, abcde, 6 ); + single_check_char_value( s, 0, 'a' ); + single_check_char_value( s, 4, 'e' ); + single_check_char_value( s, 5, '\0' ); + single_check_char_value( s, 6, 'x' ); + single_check_invocation( s, abcde, 7 ); + single_check_char_value( s, 6, '\0' ); + single_check_invocation( s, "xxxx", 3 ); + single_check_char_value( s, 0, 'x' ); + single_check_char_value( s, 2, 'x' ); + single_check_char_value( s, 3, 'd' ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strlen(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* s, std::size_t expected) { + *(result++) = (kat::c_std_lib::strlen(s) == expected); + }; + + constexpr int line_before_first_check = __LINE__; + single_check( "abcde", 5 ); + single_check( "", 0 ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strcat(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check_invocation = [&](char* dest, const char* src ) { + auto ret = kat::c_std_lib::strcat(dest, src); + *(result++) = (ret == dest); + }; + auto single_check_char_value = [&](const char* strcat_dest, std::size_t pos, char expected_value) { + *(result++) = (strcat_dest[pos] == expected_value); + }; + + const auto abcde = "abcde"; + const auto abcdx = 
"abcdx"; + + char s[] = "xx\0xxxxxx"; + + constexpr int line_before_first_check = __LINE__; + single_check_invocation(s, abcde); + single_check_char_value(s, 2, 'a' ); + single_check_char_value(s, 6, 'e' ); + single_check_char_value(s, 7, '\0' ); + single_check_char_value(s, 8, 'x' ); + s[0] = '\0'; single_check_invocation(s, abcdx); + single_check_char_value(s, 4, 'x' ); + single_check_char_value(s, 5, '\0' ); + single_check_invocation(s, "\0"); + single_check_char_value(s, 5, '\0' ); + single_check_char_value(s, 6, 'e' ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strncat(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check_invocation = [&](char* dest, const char* src, std::size_t n ) { + auto ret = kat::c_std_lib::strncat(dest, src, n); + *(result++) = (ret == dest); + }; + auto single_check_char_value = [&](const char* strncat_dest, std::size_t pos, char expected_value) { + *(result++) = (strncat_dest[pos] == expected_value); + }; + + const auto abcde = "abcde"; + const auto abcdx = "abcdx"; + + char s[] = "xx\0xxxxxx"; + + constexpr int line_before_first_check = __LINE__; + single_check_invocation(s, abcde, 10); + single_check_char_value(s, 2, 'a' ); + single_check_char_value(s, 6, 'e' ); + single_check_char_value(s, 7, '\0' ); + single_check_char_value(s, 8, '\0' ); // Additional nulls must have been written, even beyond the end of the concatenation string + s[0] = '\0'; single_check_invocation(s, abcdx, 10); + single_check_char_value(s, 4, 'x' ); + single_check_char_value(s, 5, '\0' ); + single_check_invocation(s, "\0", 10); + single_check_char_value(s, 5, '\0' ); + single_check_char_value(s, 6, '\0' ); // Additional nulls must have been written, even beyond the end of the concatenation string + single_check_invocation(s, abcde, 0); + single_check_char_value(s, 4, 'x' ); + single_check_char_value(s, 5, '\0' ); + single_check_char_value(s, 6, '\0' ); // Additional nulls must have been written, even beyond the end of the concatenation string + single_check_invocation(s, abcde, 3); + single_check_char_value(s, 5, 'a' ); + single_check_char_value(s, 7, 'c' ); + single_check_char_value(s, 8, '\0' ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + + +__global__ void test_memcpy(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check_invocation = [&](char* dest, const char* src, std::size_t n ) { + auto ret = kat::c_std_lib::memcpy(dest, src, n); + *(result++) = (ret == dest); + }; + auto single_check_char_value = [&](const char* memcpy_dest, std::size_t pos, char expected_value) { + *(result++) = (memcpy_dest[pos] == expected_value); + }; + + const auto abcde = "abcde"; + + char s[] = "xxxxxxxxxxx"; + + constexpr int line_before_first_check = __LINE__; + single_check_invocation(s, abcde, 6); + single_check_char_value(s, 4, 'e' ); + single_check_char_value(s, 5, '\0' ); + single_check_char_value(s, 6, 'x' ); + single_check_invocation(s + 5, abcde, 5); + single_check_char_value(s, 9, 'e' ); + single_check_char_value(s, 10, 'x' ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_memset(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check_invocation = [&](void* s, int c, std::size_t n ) { + auto ret = 
kat::c_std_lib::memset(s, c, n); + *(result++) = (ret == s); + }; + auto single_check_char_value = [&](const char* memset_dest, std::size_t pos, char expected_value) { + *(result++) = (memset_dest[pos] == expected_value); + }; + + char s[] = "xxxxxxxxx"; + + constexpr int line_before_first_check = __LINE__; + single_check_invocation(s, 'o', 10); + single_check_char_value(s, 0, 'o' ); + single_check_char_value(s, 9, 'o' ); + single_check_invocation(s, '_', 0); + single_check_char_value(s, 0, 'o' ); + single_check_invocation(s, '_', 1); + single_check_char_value(s, 0, '_' ); + single_check_invocation(s, '\xfd', 3); + single_check_char_value(s, 2, '\xfd' ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_memchr(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* s, int c, std::size_t n, const char* expected ) { + *(result++) = (kat::c_std_lib::memchr(s, c, n) == expected); + }; + + const auto abcde = "abcde"; + + constexpr int line_before_first_check = __LINE__; + single_check(abcde, 'c', 5, &abcde[2] ); + single_check(abcde, 'a', 1, &abcde[0] ); + single_check(abcde, 'a', 0, nullptr ); + single_check(abcde, '\0', 5, nullptr ); + single_check(abcde, '\0', 6, &abcde[5] ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strchr(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* s, int c, const char* expected ) { + *(result++) = (kat::c_std_lib::strchr(s, c) == expected); + }; + + const auto abccd = "abccd"; + + constexpr int line_before_first_check = __LINE__; + single_check(abccd, 'x', nullptr ); + single_check(abccd, 'a', &abccd[0] ); + single_check(abccd, 'd', &abccd[4] ); + single_check(abccd, '\0', &abccd[5] ); + single_check(abccd, 'c', &abccd[2] ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strrchr(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* s, int c, const char* expected ) { + *(result++) = (kat::c_std_lib::strrchr(s, c) == expected); + }; + + const auto abcde = "abcde"; + const auto abccd = "abccd"; + + constexpr int line_before_first_check = __LINE__; + single_check(abcde, '\0', &abcde[5] ); + single_check(abcde, 'e', &abcde[4] ); + single_check(abcde, 'a', &abcde[0] ); + single_check(abccd, 'c', &abccd[3] ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strpbrk(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* s, const char* accept, const char* expected ) { + *(result++) = (kat::c_std_lib::strpbrk(s, accept) == expected); + }; + + const auto abcde = "abcde"; + const auto abcdx = "abcdx"; + + constexpr int line_before_first_check = __LINE__; + single_check(abcde, "x", nullptr ); + single_check(abcde, "xyz", nullptr ); + single_check(abcdx, "x", &abcdx[4] ); + single_check(abcdx, "xyz", &abcdx[4] ); + single_check(abcdx, "zyx", &abcdx[4] ); + single_check(abcde, "a", &abcde[0] ); + single_check(abcde, "abc", &abcde[0] ); + single_check(abcde, "cba", &abcde[0] ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - 
line_before_first_check - 1; +} + +__global__ void test_strspn(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* s, const char* accept, std::size_t expected ) { + *(result++) = (kat::c_std_lib::strspn(s, accept) == expected); + }; + + const auto abcde = "abcde"; + + constexpr int line_before_first_check = __LINE__; + single_check(abcde, "abc", 3 ); + single_check(abcde, "b", 0 ); + single_check(abcde, abcde, 5 ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strcspn(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* s, const char* reject, std::size_t expected ) { + *(result++) = (kat::c_std_lib::strcspn(s, reject) == expected); + }; + + const auto abcde = "abcde"; + const auto abcdx = "abcdx"; + + constexpr int line_before_first_check = __LINE__; + single_check(abcde, "x", 5 ); + single_check(abcde, "xyz", 5 ); + single_check(abcde, "zyx", 5 ); + single_check(abcdx, "x", 4 ); + single_check(abcdx, "xyz", 4 ); + single_check(abcdx, "zyx", 4 ); + single_check(abcde, "a", 0 ); + single_check(abcde, "abc", 0 ); + single_check(abcde, "cba", 0 ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + + +__global__ void test_strstr(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* haystack, const char* needle, const char* expected ) { +// printf("Haystack: %s , Needle: %s , strstr pos: %d\n", haystack, needle, kat::c_std_lib::strstr(haystack, needle) == nullptr ? -1 : kat::c_std_lib::strstr(haystack, needle) - haystack); + *(result++) = (kat::c_std_lib::strstr(haystack, needle) == expected); + }; + + char s[] = "abcabcabcdabcde"; + + constexpr int line_before_first_check = __LINE__; + single_check(s, "x", nullptr ); + single_check(s, "xyz", nullptr ); + single_check(s, "a", &s[0] ); + single_check(s, "abc", &s[0] ); + single_check(s, "abcd", &s[6] ); + single_check(s, "abcde", &s[10] ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + +__global__ void test_strrstr(bool* results, std::size_t* num_checks) +{ + bool* result = results; + + auto single_check = [&](const char* haystack, const char* needle, const char* expected ) { + *(result++) = (kat::c_std_lib::strrstr(haystack, needle) == expected); + }; + + const auto s = "abcabcabcdabcde"; + + constexpr int line_before_first_check = __LINE__; + single_check(s, "x", nullptr ); + single_check(s, "xyz", nullptr ); + single_check(s, "a", &s[10] ); + single_check(s, "abc", &s[10] ); + single_check(s, "abca", &s[3] ); + single_check(s, "abcab", &s[3] ); + single_check(s, "abcabca", &s[0] ); + constexpr int line_after_last_check = __LINE__; + *num_checks = line_after_last_check - line_before_first_check - 1; +} + + +} // namespace kernels + +TEST_SUITE("c_string") { + +using kernel_type = void (*)(bool*, std::size_t*); + +void conduct_test(kernel_type kernel, const char* kernel_name) +{ + cuda::device_t device { cuda::device::current::get() }; + auto block_size { 1 }; + auto num_grid_blocks { 1 }; + auto launch_config { cuda::make_launch_config(block_size, num_grid_blocks) }; + auto device_side_results { cuda::memory::device::make_unique(device, max_num_checks_per_test) }; + auto device_side_num_checks { 
cuda::memory::device::make_unique(device) }; + bool host_side_results[max_num_checks_per_test]; + std::size_t host_side_num_checks; + + cuda::launch( + kernel, + launch_config, + device_side_results.get(), device_side_num_checks.get() + ); + + cuda::memory::copy(host_side_results, device_side_results.get(), sizeof(bool) * max_num_checks_per_test); + cuda::memory::copy_single(&host_side_num_checks, device_side_num_checks.get()); + + for(std::size_t i = 0; i < host_side_num_checks; i++) { + CHECK(host_side_results[i] == true); + if (not host_side_results[i]) { + auto width_4 { std::setw(4) }; + auto i_plus_1 { i+1 }; + CHECK_MESSAGE(false, kernel_name << " check " << width_4 << i_plus_1 << " (1-based) of " << host_side_num_checks << " failed."); + } + } +} + +TEST_CASE("strcmp" ) { conduct_test(kernels::test_strcmp, "strcmp"); } +TEST_CASE("strncmp") { conduct_test(kernels::test_strncmp, "strncmp"); } +TEST_CASE("memcmp" ) { conduct_test(kernels::test_memcmp, "memcmp"); } +TEST_CASE("strcpy" ) { conduct_test(kernels::test_strcpy, "strcpy"); } +TEST_CASE("strncpy") { conduct_test(kernels::test_strncpy, "strncpy"); } +TEST_CASE("strlen" ) { conduct_test(kernels::test_strlen, "strlen"); } +TEST_CASE("strcat" ) { conduct_test(kernels::test_strcat, "strcat"); } +TEST_CASE("strncat") { conduct_test(kernels::test_strncat, "strncat"); } +TEST_CASE("memcpy" ) { conduct_test(kernels::test_memcpy, "memcpy"); } +TEST_CASE("memset" ) { conduct_test(kernels::test_memset, "memset"); } +TEST_CASE("memchr" ) { conduct_test(kernels::test_memchr, "memchr"); } +TEST_CASE("strchr" ) { conduct_test(kernels::test_strchr, "strchr"); } +TEST_CASE("strrchr") { conduct_test(kernels::test_strrchr, "strrchr"); } +TEST_CASE("strpbrk") { conduct_test(kernels::test_strpbrk, "strpbrk"); } +TEST_CASE("strspn" ) { conduct_test(kernels::test_strspn, "strspn"); } +TEST_CASE("strcspn") { conduct_test(kernels::test_strcspn, "strcspn"); } +TEST_CASE("strstr" ) { conduct_test(kernels::test_strstr, "strstr"); } +TEST_CASE("strrstr") { conduct_test(kernels::test_strrstr, "strrstr"); } + + +} // TEST_SUITE("c_string") diff --git a/cuda_code/categorical_accuracy_5.cu b/cuda_code/categorical_accuracy_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..6079f02f8edc8d32a48584d80d7eeb14d232eacb --- /dev/null +++ b/cuda_code/categorical_accuracy_5.cu @@ -0,0 +1,392 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. 
+//////////////////////////////////////////////////////////////////////////////// + +#define LBANN_CATEGORICAL_ACCURACY_LAYER_INSTANTIATE +#include "lbann/layers/loss/categorical_accuracy.hpp" +#include "lbann/utils/cuda.hpp" + +namespace lbann { + +namespace { + +/** Fill matrix with corresponding indices. + * Indices are equivalent to the global row indices of the input + * matrix. + */ +__global__ void fill_indices_kernel(El::Int local_height, + El::Int local_width, + El::Int col_shift, + El::Int col_stride, + El::Int* __restrict__ indices) { + const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; + const El::Int nthreads = blockDim.x * gridDim.x; + const El::Int size = local_height * local_width; + for (El::Int pos = gid; pos < size; pos += nthreads) { + const auto& row = pos % local_height; + const auto& col = pos / local_height; + indices[row + col*local_height] = col_shift + row * col_stride; + } +} + +/** Find largest entry within each CUDA block. + * Each block is assigned several entries from the same mini-batch + * sample and it finds the largest entry. Results are output to + * nblocksx x width matrices. + */ +template +__global__ void reduce_max_entries_kernel(El::Int height, El::Int width, + const DataType* __restrict__ values, + El::Int values_row_stride, + El::Int values_col_stride, + const El::Int* __restrict__ indices, + El::Int indices_row_stride, + El::Int indices_col_stride, + DataType* __restrict__ max_values, + El::Int* __restrict__ max_indices) { + + // Indices + const El::Int tid = threadIdx.x; + const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; + const El::Int bidx = blockIdx.x; + const El::Int bidy = blockIdx.y; + const El::Int nthreadsx = blockDim.x * gridDim.x; + const El::Int nblocksx = gridDim.x; + + // Reduce each matrix column independently + for (El::Int col = bidy; col < width; col += gridDim.y) { + + // Find largest entry for each thread + DataType private_max_val = -cuda::infinity(); + El::Int private_max_ind = cuda::max(); + for (El::Int row = gidx; row < height; row += nthreadsx) { + const auto& val = values[row * values_row_stride + + col * values_col_stride]; + const auto& ind = indices[row * indices_row_stride + + col * indices_col_stride]; + if (val > private_max_val + || (val == private_max_val && ind < private_max_ind)) { + private_max_val = val; + private_max_ind = ind; + } + } + + // Shared memory reduction to get largest entry for each block + __shared__ DataType shared_max_vals[block_size]; + __shared__ El::Int shared_max_inds[block_size]; + shared_max_vals[tid] = private_max_val; + shared_max_inds[tid] = private_max_ind; + for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { + __syncthreads(); + if (tid < stride) { + const auto& val = shared_max_vals[tid + stride]; + const auto& ind = shared_max_inds[tid + stride]; + if (val > shared_max_vals[tid] + || (val == shared_max_vals[tid] && ind < shared_max_inds[tid])) { + shared_max_vals[tid] = val; + shared_max_inds[tid] = ind; + } + } + } + if (tid == 0) { + max_values[bidx + col*nblocksx] = shared_max_vals[0]; + max_indices[bidx + col*nblocksx] = shared_max_inds[0]; + } + + } + +} + +/** Compute sample-wise categorical accuracy. + * Outputs one if the prediction and label indices match and + * otherwise outputs zero. 
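 * A prediction index equal to the sentinel value (the maximum El::Int, as
 * produced by the reduction when no valid entry is found) marks an invalid
 * entry, so such samples are scored as zero.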
+ */ +__global__ void compute_accuracy_kernel(El::Int local_width, + const El::Int* __restrict__ prediction_indices, + const El::Int* __restrict__ label_indices, + DataType* __restrict__ loss, + El::Int loss_ldim) { + const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x; + const El::Int nthreads = blockDim.x * gridDim.x; + constexpr El::Int max_ind = cuda::max(); + for (El::Int col = gid; col < local_width; col += nthreads) { + const auto& prediction = prediction_indices[col]; + const auto& label = label_indices[col]; + loss[col*loss_ldim] = (prediction == label && prediction < max_ind ? + DataType(1) : DataType(0)); + } +} + +/** GPU implementation of categorical accuracy layer forward prop. */ +void fp_gpu(lbann_comm& comm, + const AbsDistMat& predictions, + const AbsDistMat& labels, + AbsDistMat& loss) { + + // Local matrices + const auto& local_predictions = predictions.LockedMatrix(); + const auto& local_labels = labels.LockedMatrix(); + auto& local_loss = loss.Matrix(); + + // Dimensions + const auto& height = predictions.Height(); + const auto& local_height = local_predictions.Height(); + const auto& local_width = local_predictions.Width(); + if (local_width < 1) { return; } + + // Column communicator + auto&& col_comm = predictions.ColComm(); + const auto& col_comm_rank = El::mpi::Rank(col_comm); + const auto& col_comm_size = El::mpi::Size(col_comm); + const auto& col_comm_root = loss.RowOwner(0); + + // GPU objects + auto&& stream = El::GPUManager::Stream(); + auto&& event = El::GPUManager::Event(); + El::SyncInfo sync_info{stream, event}; + + // Initialize CUDA threads/blocks for reduction kernel + // Note: reduce_max_entries_kernel uses a 2D thread distribution + // with a 256 x 1 block and nblocksx x local_width grid. + constexpr El::Int block_size = 256; + dim3 block_dims, grid_dims; + block_dims.x = block_size; + grid_dims.y = local_width; + + // Get indices for all input entries + cuda::thrust::vector full_inds(local_height * local_width); + if (full_inds.size() > 0) { + const El::Int grid_size = (full_inds.size() + block_size - 1) / block_size; + fill_indices_kernel<<>>( + local_height, local_width, + predictions.ColShift(), predictions.ColStride(), + full_inds.data().get()); + } + + // Find largest prediction entries in local data + grid_dims.x = (local_height + block_size - 1) / block_size; + if (grid_dims.x < 1) { grid_dims.x = 1; } + cuda::thrust::vector prediction_vals(grid_dims.x * local_width); + cuda::thrust::vector prediction_inds(grid_dims.x * local_width); + reduce_max_entries_kernel + <<>>( + local_height, local_width, + local_predictions.LockedBuffer(), 1, local_predictions.LDim(), + full_inds.data().get(), 1, local_height, + prediction_vals.data().get(), + prediction_inds.data().get()); + while (grid_dims.x > 1) { + const El::Int prev_height = grid_dims.x; + grid_dims.x = (prev_height + block_size - 1) / block_size; + cuda::thrust::vector prev_vals(std::move(prediction_vals)); + cuda::thrust::vector prev_inds(std::move(prediction_inds)); + prediction_vals.resize(grid_dims.x * local_width); + prediction_inds.resize(grid_dims.x * local_width); + reduce_max_entries_kernel + <<>>( + prev_height, local_width, + prev_vals.data().get(), 1, prev_height, + prev_inds.data().get(), 1, prev_height, + prediction_vals.data().get(), + prediction_inds.data().get()); + } + + // Gather large prediction entries + /// @todo Non-blocking gather + Al::request prediction_vals_req, prediction_inds_req; + cuda::thrust::vector gathered_prediction_vals; + cuda::thrust::vector 
gathered_prediction_inds; + if (col_comm_size > 1) { + if (col_comm_rank != col_comm_root) { + comm.gather(prediction_vals.data().get(), prediction_vals.size(), + col_comm_root, col_comm, sync_info); + comm.gather(prediction_inds.data().get(), prediction_inds.size(), + col_comm_root, col_comm, sync_info); + } else { + gathered_prediction_vals.resize(prediction_vals.size() * col_comm_size); + gathered_prediction_inds.resize(prediction_inds.size() * col_comm_size); + comm.gather(prediction_vals.data().get(), prediction_vals.size(), + gathered_prediction_vals.data().get(), + col_comm, sync_info); + comm.gather(prediction_inds.data().get(), prediction_inds.size(), + gathered_prediction_inds.data().get(), + col_comm, sync_info); + } + } + + // Find largest label entries in local data + grid_dims.x = (local_height + block_size - 1) / block_size; + if (grid_dims.x < 1) { grid_dims.x = 1; } + cuda::thrust::vector label_vals(grid_dims.x * local_width); + cuda::thrust::vector label_inds(grid_dims.x * local_width); + reduce_max_entries_kernel + <<>>( + local_height, local_width, + local_labels.LockedBuffer(), 1, local_labels.LDim(), + full_inds.data().get(), 1, local_height, + label_vals.data().get(), + label_inds.data().get()); + while (grid_dims.x > 1) { + const El::Int prev_height = grid_dims.x; + grid_dims.x = (prev_height + block_size - 1) / block_size; + cuda::thrust::vector prev_vals(std::move(label_vals)); + cuda::thrust::vector prev_inds(std::move(label_inds)); + label_vals.resize(grid_dims.x * local_width); + label_inds.resize(grid_dims.x * local_width); + reduce_max_entries_kernel + <<>>( + prev_height, local_width, + prev_vals.data().get(), 1, prev_height, + prev_inds.data().get(), 1, prev_height, + label_vals.data().get(), + label_inds.data().get()); + } + + // Gather large label entries + /// @todo Non-blocking gather + Al::request label_vals_req, label_inds_req; + cuda::thrust::vector gathered_label_vals; + cuda::thrust::vector gathered_label_inds; + if (col_comm_size > 1) { + if (col_comm_rank != col_comm_root) { + comm.gather(label_vals.data().get(), label_vals.size(), + col_comm_root, col_comm, sync_info); + comm.gather(label_inds.data().get(), label_inds.size(), + col_comm_root, col_comm, sync_info); + } else { + gathered_label_vals.resize(label_vals.size() * col_comm_size); + gathered_label_inds.resize(label_inds.size() * col_comm_size); + comm.gather(label_vals.data().get(), label_vals.size(), + gathered_label_vals.data().get(), + col_comm, sync_info); + comm.gather(label_inds.data().get(), label_inds.size(), + gathered_label_inds.data().get(), + col_comm, sync_info); + } + } + + // Clean up temporary arrays + full_inds.clear(); + + // Find largest prediction entry in global data + comm.wait(prediction_vals_req); + comm.wait(prediction_inds_req); + if (col_comm_size > 1 && col_comm_rank == col_comm_root) { + grid_dims.x = (col_comm_size + block_size - 1) / block_size; + if (grid_dims.x < 1) { grid_dims.x = 1; } + prediction_vals.resize(grid_dims.x * local_width); + prediction_inds.resize(grid_dims.x * local_width); + reduce_max_entries_kernel + <<>>( + col_comm_size, local_width, + gathered_prediction_vals.data().get(), col_comm_size, 1, + gathered_prediction_inds.data().get(), col_comm_size, 1, + prediction_vals.data().get(), + prediction_inds.data().get()); + while (grid_dims.x > 1) { + const El::Int prev_height = grid_dims.x; + grid_dims.x = (prev_height + block_size - 1) / block_size; + cuda::thrust::vector prev_vals(std::move(prediction_vals)); + cuda::thrust::vector 
prev_inds(std::move(prediction_inds)); + prediction_vals.resize(grid_dims.x * local_width); + prediction_inds.resize(grid_dims.x * local_width); + reduce_max_entries_kernel + <<>>( + prev_height, local_width, + prev_vals.data().get(), 1, prev_height, + prev_inds.data().get(), 1, prev_height, + prediction_vals.data().get(), + prediction_inds.data().get()); + } + } + + // Find largest label entry in global data + comm.wait(label_vals_req); + comm.wait(label_inds_req); + if (col_comm_size > 1 && col_comm_rank == col_comm_root) { + grid_dims.x = (col_comm_size + block_size - 1) / block_size; + if (grid_dims.x < 1) { grid_dims.x = 1; } + label_vals.resize(grid_dims.x * local_width); + label_inds.resize(grid_dims.x * local_width); + reduce_max_entries_kernel + <<>>( + col_comm_size, local_width, + gathered_label_vals.data().get(), col_comm_size, 1, + gathered_label_inds.data().get(), col_comm_size, 1, + label_vals.data().get(), + label_inds.data().get()); + while (grid_dims.x > 1) { + const El::Int prev_height = grid_dims.x; + grid_dims.x = (prev_height + block_size - 1) / block_size; + cuda::thrust::vector prev_vals(std::move(label_vals)); + cuda::thrust::vector prev_inds(std::move(label_inds)); + label_vals.resize(grid_dims.x * local_width); + label_inds.resize(grid_dims.x * local_width); + reduce_max_entries_kernel + <<>>( + prev_height, local_width, + prev_vals.data().get(), 1, prev_height, + prev_inds.data().get(), 1, prev_height, + label_vals.data().get(), + label_inds.data().get()); + } + } + + // Compute categorical accuracy + if (col_comm_rank == col_comm_root) { + const El::Int grid_size = (local_width + block_size - 1) / block_size; + compute_accuracy_kernel<<>>( + local_width, + prediction_inds.data().get(), label_inds.data().get(), + local_loss.Buffer(), local_loss.LDim()); + } + +} + +} // namespace + +template <> +void categorical_accuracy_layer + ::fp_compute() { + fp_gpu(*get_comm(), + get_prev_activations(0), + get_prev_activations(1), + get_activations()); +} +template <> +void categorical_accuracy_layer + ::fp_compute() { + fp_gpu(*get_comm(), + get_prev_activations(0), + get_prev_activations(1), + get_activations()); +} + +template class categorical_accuracy_layer< + data_layout::DATA_PARALLEL, El::Device::GPU>; +template class categorical_accuracy_layer< + data_layout::MODEL_PARALLEL, El::Device::GPU>; + +} // namespace lbann diff --git a/cuda_code/ccl_np.cu b/cuda_code/ccl_np.cu new file mode 100644 index 0000000000000000000000000000000000000000..22700c9295da34a5b5c2c5d4488d58dde362529d --- /dev/null +++ b/cuda_code/ccl_np.cu @@ -0,0 +1,197 @@ +// Marathon Match - CCL - Neighbour Propagation + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define NOMINMAX + +#ifdef _MSC_VER +#include +inline double get_time() +{ + return static_cast(std::clock()) / CLOCKS_PER_SEC; +} +#else +#include +inline double get_time() +{ + timeval tv; + gettimeofday(&tv, 0); + return tv.tv_sec + 1e-6 * tv.tv_usec; +} +#endif + +using namespace std; + +//const int BLOCK = 128; +const int BLOCK = 256; + +__global__ void init_CCL(int L[], int N) +{ + int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; + if (id >= N) return; + + L[id] = id; +} + +__device__ int diff(int d1, int d2) +{ + return abs(((d1>>16) & 0xff) - ((d2>>16) & 0xff)) + abs(((d1>>8) & 0xff) - ((d2>>8) & 0xff)) + abs((d1 & 0xff) - (d2 & 0xff)); +} + +__global__ void 
kernel(int D[], int L[], bool* m, int N, int W, int th) +{ + int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; + if (id >= N) return; + + int Did = D[id]; + int label = N; + if (id - W >= 0 && diff(Did, D[id-W]) <= th) label = min(label, L[id-W]); + if (id + W < N && diff(Did, D[id+W]) <= th) label = min(label, L[id+W]); + int r = id % W; + if (r && diff(Did, D[id-1]) <= th) label = min(label, L[id-1]); + if (r + 1 != W && diff(Did, D[id+1]) <= th) label = min(label, L[id+1]); + + if (label < L[id]) { + //atomicMin(&R[L[id]], label); + L[id] = label; + *m = true; + } +} + +__global__ void kernel8(int D[], int L[], bool* m, int N, int W, int th) +{ + int id = blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x + threadIdx.x; + if (id >= N) return; + + int Did = D[id]; + int label = N; + if (id - W >= 0 && diff(Did, D[id-W]) <= th) label = min(label, L[id-W]); + if (id + W < N && diff(Did, D[id+W]) <= th) label = min(label, L[id+W]); + int r = id % W; + if (r) { + if (diff(Did, D[id-1]) <= th) label = min(label, L[id-1]); + if (id - W - 1 >= 0 && diff(Did, D[id-W-1]) <= th) label = min(label, L[id-W-1]); + if (id + W - 1 < N && diff(Did, D[id+W-1]) <= th) label = min(label, L[id+W-1]); + } + if (r + 1 != W) { + if (diff(Did, D[id+1]) <= th) label = min(label, L[id+1]); + if (id - W + 1 >= 0 && diff(Did, D[id-W+1]) <= th) label = min(label, L[id-W+1]); + if (id + W + 1 < N && diff(Did, D[id+W+1]) <= th) label = min(label, L[id+W+1]); + } + + if (label < L[id]) { + //atomicMin(&R[L[id]], label); + L[id] = label; + *m = true; + } +} + +class CCL { +private: + int* Dd; + int* Ld; + +public: + vector cuda_ccl(vector& image, int W, int degree_of_connectivity, int threshold); +}; + +vector CCL::cuda_ccl(vector& image, int W, int degree_of_connectivity, int threshold) +{ + vector result; + int* D = static_cast(&image[0]); + int N = image.size(); + + cudaMalloc((void**)&Ld, sizeof(int) * N); + cudaMalloc((void**)&Dd, sizeof(int) * N); + cudaMemcpy(Dd, D, sizeof(int) * N, cudaMemcpyHostToDevice); + + bool* md; + cudaMalloc((void**)&md, sizeof(bool)); + + int width = static_cast(sqrt(static_cast(N) / BLOCK)) + 1; + dim3 grid(width, width, 1); + dim3 threads(BLOCK, 1, 1); + + init_CCL<<>>(Ld, N); + + for (;;) { + bool m = false; + cudaMemcpy(md, &m, sizeof(bool), cudaMemcpyHostToDevice); + if (degree_of_connectivity == 4) kernel<<>>(Dd, Ld, md, N, W, threshold); + else kernel8<<>>(Dd, Ld,md, N, W, threshold); + cudaMemcpy(&m, md, sizeof(bool), cudaMemcpyDeviceToHost); + if (!m) break; + } + + cudaMemcpy(D, Ld, sizeof(int) * N, cudaMemcpyDeviceToHost); + + cudaFree(Dd); + cudaFree(Ld); + + result.swap(image); + return result; +} + +void read_data(const string filename, vector& image, int& W, int& degree_of_connectivity, int& threshold) +{ + fstream fs(filename.c_str(), ios_base::in); + string line; + stringstream ss; + int data; + + getline(fs, line); + ss.str(line); + ss >> W >> degree_of_connectivity >> threshold; + getline(fs, line); + ss.str(""); ss.clear(); + for (ss.str(line); ss >> data; image.push_back(data)); +} + +int main(int argc, char* argv[]) +{ + ios_base::sync_with_stdio(false); + + if (argc < 2) { + cerr << "Usage: " << argv[0] << " input_file" << endl; + exit(1); + } + + cudaSetDevice(cutGetMaxGflopsDeviceId()); + + vector image; + int W, degree_of_connectivity, threshold; + read_data(argv[1], image, W, degree_of_connectivity, threshold); + + CCL ccl; + + double start = get_time(); + vector result(ccl.cuda_ccl(image, W, 
degree_of_connectivity, threshold)); + double end = get_time(); + cerr << "Time: " << end - start << endl; + + cout << result.size() << endl; /// number of pixels + cout << W << endl; /// width + for (int i = 0; i < static_cast(result.size()) / W; i++) { + for (int j = 0; j < W; j++) cout << result[i*W+j] << " "; + cout << endl; + } + + return 0; +} diff --git a/cuda_code/cd_mg_10.cu b/cuda_code/cd_mg_10.cu new file mode 100644 index 0000000000000000000000000000000000000000..f8fbb713f8e4821a1a19b56448e721a81a2b740a --- /dev/null +++ b/cuda_code/cd_mg_10.cu @@ -0,0 +1,510 @@ +/* + * Copyright (c) 2020-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "shuffle.h" + +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace MLCommon; + +namespace ML { +namespace CD { +namespace opg { + +template +void fit_impl(raft::handle_t& handle, + std::vector*>& input_data, + Matrix::PartDescriptor& input_desc, + std::vector*>& labels, + T* coef, + T* intercept, + bool fit_intercept, + bool normalize, + int epochs, + T alpha, + T l1_ratio, + bool shuffle, + T tol, + cudaStream_t* streams, + int n_streams, + bool verbose) +{ + const auto& comm = handle.get_comms(); + const auto allocator = handle.get_device_allocator(); + + std::vector partsToRanks = input_desc.blocksOwnedBy(comm.get_rank()); + + size_t total_M = 0.0; + for (std::size_t i = 0; i < partsToRanks.size(); i++) { + total_M += partsToRanks[i]->size; + } + + device_buffer pred(allocator, streams[0], total_M); + device_buffer residual(allocator, streams[0], total_M); + device_buffer squared(allocator, streams[0], input_desc.N); + device_buffer mu_input(allocator, streams[0]); + device_buffer norm2_input(allocator, streams[0]); + device_buffer mu_labels(allocator, streams[0]); + + std::vector h_coef(input_desc.N, T(0)); + + if (fit_intercept) { + mu_input.resize(input_desc.N, streams[0]); + mu_labels.resize(1, streams[0]); + if (normalize) { norm2_input.resize(input_desc.N, streams[0]); } + + GLM::opg::preProcessData(handle, + input_data, + input_desc, + labels, + mu_input.data(), + mu_labels.data(), + norm2_input.data(), + fit_intercept, + normalize, + streams, + n_streams, + verbose); + } + + std::vector ri(input_desc.N); + std::mt19937 g(rand()); + + size_t memsize = input_desc.N * sizeof(int); + int* ri_h = (int*)malloc(memsize); + CUDA_CHECK(cudaHostRegister(ri_h, memsize, cudaHostRegisterDefault)); + + if (comm.get_rank() == 0) { + ML::Solver::initShuffle(ri, g); + for (std::size_t i = 0; i < input_desc.N; i++) { + ri_h[i] = ri[i]; + } + } + + comm.bcast(ri_h, input_desc.N, 0, streams[0]); + comm.sync_stream(streams[0]); + + T l2_alpha = (1 - l1_ratio) * alpha * input_desc.M; + alpha = l1_ratio * alpha * input_desc.M; + + if (normalize) { + T scalar = T(1.0) + l2_alpha; + raft::matrix::setValue(squared.data(), squared.data(), scalar, input_desc.N, 
streams[0]); + } else { + Matrix::Data squared_data{squared.data(), size_t(input_desc.N)}; + LinAlg::opg::colNorm2NoSeq(handle, squared_data, input_data, input_desc, streams, n_streams); + raft::linalg::addScalar(squared.data(), squared.data(), l2_alpha, input_desc.N, streams[0]); + } + + std::vector*> input_data_temp; + Matrix::PartDescriptor input_desc_temp = input_desc; + input_desc_temp.N = size_t(1); + std::vector*> residual_temp; + Matrix::Data coef_loc_data; + + T* rs = residual.data(); + for (std::size_t i = 0; i < partsToRanks.size(); i++) { + raft::copy(rs, labels[i]->ptr, partsToRanks[i]->size, streams[0]); + + Matrix::Data* rs_data = new Matrix::Data(); + rs_data->ptr = rs; + rs_data->totalSize = partsToRanks[i]->size; + residual_temp.push_back(rs_data); + + Matrix::Data* temp_data = new Matrix::Data(); + temp_data->totalSize = partsToRanks[i]->size; + input_data_temp.push_back(temp_data); + + rs += partsToRanks[i]->size; + } + + for (int i = 0; i < epochs; i++) { + if (i > 0 && shuffle) { + if (comm.get_rank() == 0) { + Solver::shuffle(ri, g); + for (std::size_t k = 0; k < input_desc.N; k++) { + ri_h[k] = ri[k]; + } + } + + comm.bcast(ri_h, input_desc.N, 0, streams[0]); + comm.sync_stream(streams[0]); + } + + T coef_max = 0.0; + T d_coef_max = 0.0; + T coef_prev = 0.0; + + for (std::size_t j = 0; j < input_desc.N; j++) { + int ci = ri_h[j]; + T* coef_loc = coef + ci; + T* squared_loc = squared.data() + ci; + T* input_col_loc; + T* pred_loc = pred.data(); + T* residual_loc = residual.data(); + + for (std::size_t k = 0; k < input_data.size(); k++) { + input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); + + input_data_temp[k]->ptr = input_col_loc; + input_data_temp[k]->totalSize = partsToRanks[k]->size; + + raft::linalg::multiplyScalar( + pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); + + raft::linalg::add( + residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); + + pred_loc = pred_loc + partsToRanks[k]->size; + residual_loc = residual_loc + partsToRanks[k]->size; + } + + for (int k = 0; k < n_streams; k++) { + CUDA_CHECK(cudaStreamSynchronize(streams[k])); + } + + coef_loc_data.ptr = coef_loc; + coef_loc_data.totalSize = size_t(1); + LinAlg::opg::mv_aTb( + handle, coef_loc_data, input_data_temp, input_desc_temp, residual_temp, streams, n_streams); + + if (l1_ratio > T(0.0)) Functions::softThres(coef_loc, coef_loc, alpha, 1, streams[0]); + + raft::linalg::eltwiseDivideCheckZero(coef_loc, coef_loc, squared_loc, 1, streams[0]); + + coef_prev = h_coef[ci]; + raft::update_host(&(h_coef[ci]), coef_loc, 1, streams[0]); + CUDA_CHECK(cudaStreamSynchronize(streams[0])); + + T diff = abs(coef_prev - h_coef[ci]); + + if (diff > d_coef_max) d_coef_max = diff; + + if (abs(h_coef[ci]) > coef_max) coef_max = abs(h_coef[ci]); + + pred_loc = pred.data(); + residual_loc = residual.data(); + + for (std::size_t k = 0; k < input_data.size(); k++) { + input_col_loc = input_data[k]->ptr + (ci * partsToRanks[k]->size); + + raft::linalg::multiplyScalar( + pred_loc, input_col_loc, h_coef[ci], partsToRanks[k]->size, streams[k % n_streams]); + + raft::linalg::subtract( + residual_loc, residual_loc, pred_loc, partsToRanks[k]->size, streams[k % n_streams]); + + pred_loc = pred_loc + partsToRanks[k]->size; + residual_loc = residual_loc + partsToRanks[k]->size; + } + + for (int k = 0; k < n_streams; k++) { + CUDA_CHECK(cudaStreamSynchronize(streams[k])); + } + } + + bool flag_continue = true; + if (coef_max == T(0)) { 
flag_continue = false; } + + if ((d_coef_max / coef_max) < tol) { flag_continue = false; } + + if (!flag_continue) { break; } + } + + CUDA_CHECK(cudaHostUnregister(ri_h)); + free(ri_h); + + for (std::size_t i = 0; i < partsToRanks.size(); i++) { + delete residual_temp[i]; + delete input_data_temp[i]; + } + + if (fit_intercept) { + GLM::opg::postProcessData(handle, + input_data, + input_desc, + labels, + coef, + intercept, + mu_input.data(), + mu_labels.data(), + norm2_input.data(), + fit_intercept, + normalize, + streams, + n_streams, + verbose); + } else { + *intercept = T(0); + } +} + +/** + * @brief performs MNMG fit operation for the ols + * @input param handle: the internal cuml handle object + * @input param rank_sizes: includes all the partition size information for the rank + * @input param n_parts: number of partitions + * @input param input: input data + * @input param labels: labels data + * @output param coef: learned regression coefficients + * @output param intercept: intercept value + * @input param fit_intercept: fit intercept or not + * @input param normalize: normalize the data or not + * @input param verbose + */ +template +void fit_impl(raft::handle_t& handle, + std::vector*>& input_data, + Matrix::PartDescriptor& input_desc, + std::vector*>& labels, + T* coef, + T* intercept, + bool fit_intercept, + bool normalize, + int epochs, + T alpha, + T l1_ratio, + bool shuffle, + T tol, + bool verbose) +{ + int rank = handle.get_comms().get_rank(); + + // TODO: These streams should come from raft::handle_t + // Tracking issue: https://github.com/rapidsai/cuml/issues/2470 + + int n_streams = input_desc.blocksOwnedBy(rank).size(); + ; + cudaStream_t streams[n_streams]; + for (int i = 0; i < n_streams; i++) { + CUDA_CHECK(cudaStreamCreate(&streams[i])); + } + + fit_impl(handle, + input_data, + input_desc, + labels, + coef, + intercept, + fit_intercept, + normalize, + epochs, + alpha, + l1_ratio, + shuffle, + tol, + streams, + n_streams, + verbose); + + for (int i = 0; i < n_streams; i++) { + CUDA_CHECK(cudaStreamSynchronize(streams[i])); + } + + for (int i = 0; i < n_streams; i++) { + CUDA_CHECK(cudaStreamDestroy(streams[i])); + } +} + +template +void predict_impl(raft::handle_t& handle, + std::vector*>& input_data, + Matrix::PartDescriptor& input_desc, + T* coef, + T intercept, + std::vector*>& preds, + cudaStream_t* streams, + int n_streams, + bool verbose) +{ + std::vector local_blocks = input_desc.partsToRanks; + T alpha = T(1); + T beta = T(0); + + for (std::size_t i = 0; i < input_data.size(); i++) { + int si = i % n_streams; + raft::linalg::gemm(handle, + input_data[i]->ptr, + local_blocks[i]->size, + input_desc.N, + coef, + preds[i]->ptr, + local_blocks[i]->size, + size_t(1), + CUBLAS_OP_N, + CUBLAS_OP_N, + alpha, + beta, + streams[si]); + + raft::linalg::addScalar( + preds[i]->ptr, preds[i]->ptr, intercept, local_blocks[i]->size, streams[si]); + } +} + +template +void predict_impl(raft::handle_t& handle, + Matrix::RankSizePair** rank_sizes, + size_t n_parts, + Matrix::Data** input, + size_t n_rows, + size_t n_cols, + T* coef, + T intercept, + Matrix::Data** preds, + bool verbose) +{ + int rank = handle.get_comms().get_rank(); + + std::vector ranksAndSizes(rank_sizes, rank_sizes + n_parts); + std::vector*> input_data(input, input + n_parts); + Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank); + std::vector*> preds_data(preds, preds + n_parts); + + // TODO: These streams should come from raft::handle_t + // Tracking issue: 
https://github.com/rapidsai/cuml/issues/2470 + int n_streams = n_parts; + cudaStream_t streams[n_streams]; + for (int i = 0; i < n_streams; i++) { + CUDA_CHECK(cudaStreamCreate(&streams[i])); + } + + predict_impl( + handle, input_data, input_desc, coef, intercept, preds_data, streams, n_streams, verbose); + + for (int i = 0; i < n_streams; i++) { + CUDA_CHECK(cudaStreamSynchronize(streams[i])); + } + + for (int i = 0; i < n_streams; i++) { + CUDA_CHECK(cudaStreamDestroy(streams[i])); + } +} + +void fit(raft::handle_t& handle, + std::vector*>& input_data, + Matrix::PartDescriptor& input_desc, + std::vector*>& labels, + float* coef, + float* intercept, + bool fit_intercept, + bool normalize, + int epochs, + float alpha, + float l1_ratio, + bool shuffle, + float tol, + bool verbose) +{ + fit_impl(handle, + input_data, + input_desc, + labels, + coef, + intercept, + fit_intercept, + normalize, + epochs, + alpha, + l1_ratio, + shuffle, + tol, + verbose); +} + +void fit(raft::handle_t& handle, + std::vector*>& input_data, + Matrix::PartDescriptor& input_desc, + std::vector*>& labels, + double* coef, + double* intercept, + bool fit_intercept, + bool normalize, + int epochs, + double alpha, + double l1_ratio, + bool shuffle, + double tol, + bool verbose) +{ + fit_impl(handle, + input_data, + input_desc, + labels, + coef, + intercept, + fit_intercept, + normalize, + epochs, + alpha, + l1_ratio, + shuffle, + tol, + verbose); +} + +void predict(raft::handle_t& handle, + Matrix::RankSizePair** rank_sizes, + size_t n_parts, + Matrix::Data** input, + size_t n_rows, + size_t n_cols, + float* coef, + float intercept, + Matrix::Data** preds, + bool verbose) +{ + predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); +} + +void predict(raft::handle_t& handle, + Matrix::RankSizePair** rank_sizes, + size_t n_parts, + Matrix::Data** input, + size_t n_rows, + size_t n_cols, + double* coef, + double intercept, + Matrix::Data** preds, + bool verbose) +{ + predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef, intercept, preds, verbose); +} + +} // namespace opg +} // namespace CD +} // namespace ML diff --git a/cuda_code/cdpSimplePrint_2.cu b/cuda_code/cdpSimplePrint_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..1dabb1489f55fdd5c67bd149c752cddd062c8da9 --- /dev/null +++ b/cuda_code/cdpSimplePrint_2.cu @@ -0,0 +1,172 @@ +/* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of NVIDIA CORPORATION nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include +#include +#include + +//////////////////////////////////////////////////////////////////////////////// +// Variable on the GPU used to generate unique identifiers of blocks. +//////////////////////////////////////////////////////////////////////////////// +__device__ int g_uids = 0; + +//////////////////////////////////////////////////////////////////////////////// +// Print a simple message to signal the block which is currently executing. +//////////////////////////////////////////////////////////////////////////////// +__device__ void print_info(int depth, int thread, int uid, int parent_uid) { + if (threadIdx.x == 0) { + if (depth == 0) + printf("BLOCK %d launched by the host\n", uid); + else { + char buffer[32]; + + for (int i = 0; i < depth; ++i) { + buffer[3 * i + 0] = '|'; + buffer[3 * i + 1] = ' '; + buffer[3 * i + 2] = ' '; + } + + buffer[3 * depth] = '\0'; + printf("%sBLOCK %d launched by thread %d of block %d\n", buffer, uid, + thread, parent_uid); + } + } + + __syncthreads(); +} + +//////////////////////////////////////////////////////////////////////////////// +// The kernel using CUDA dynamic parallelism. +// +// It generates a unique identifier for each block. Prints the information +// about that block. Finally, if the 'max_depth' has not been reached, the +// block launches new blocks directly from the GPU. +//////////////////////////////////////////////////////////////////////////////// +__global__ void cdp_kernel(int max_depth, int depth, int thread, + int parent_uid) { + // We create a unique ID per block. Thread 0 does that and shares the value + // with the other threads. + __shared__ int s_uid; + + if (threadIdx.x == 0) { + s_uid = atomicAdd(&g_uids, 1); + } + + __syncthreads(); + + // We print the ID of the block and information about its parent. + print_info(depth, thread, s_uid, parent_uid); + + // We launch new blocks if we haven't reached the max_depth yet. + if (++depth >= max_depth) { + return; + } + + cdp_kernel<<>>(max_depth, depth, threadIdx.x, s_uid); +} + +//////////////////////////////////////////////////////////////////////////////// +// Main entry point. +//////////////////////////////////////////////////////////////////////////////// +int main(int argc, char **argv) { + printf("starting Simple Print (CUDA Dynamic Parallelism)\n"); + + // Parse a few command-line arguments. + int max_depth = 2; + + if (checkCmdLineFlag(argc, (const char **)argv, "help") || + checkCmdLineFlag(argc, (const char **)argv, "h")) { + printf( + "Usage: %s depth=\t(where max_depth is a value between 1 " + "and 8).\n", + argv[0]); + exit(EXIT_SUCCESS); + } + + if (checkCmdLineFlag(argc, (const char **)argv, "depth")) { + max_depth = getCmdLineArgumentInt(argc, (const char **)argv, "depth"); + + if (max_depth < 1 || max_depth > 8) { + printf("depth parameter has to be between 1 and 8\n"); + exit(EXIT_FAILURE); + } + } + + // Find/set the device. 
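+  // Added commentary (not in the original sample): the device-side
+  // cdp_kernel<<<...>>> launch above relies on CUDA Dynamic Parallelism,
+  // which is only available on GPUs of compute capability 3.5 or higher,
+  // exactly what the property check below enforces. Building CDP code also
+  // requires relocatable device code (typically `nvcc -rdc=true` plus linking
+  // against cudadevrt); the precise build flags are an assumption here, since
+  // they are not shown in this file.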
+ int device = -1; + cudaDeviceProp deviceProp; + device = findCudaDevice(argc, (const char **)argv); + checkCudaErrors(cudaGetDeviceProperties(&deviceProp, device)); + + if (!(deviceProp.major > 3 || + (deviceProp.major == 3 && deviceProp.minor >= 5))) { + printf("GPU %d - %s does not support CUDA Dynamic Parallelism\n Exiting.", + device, deviceProp.name); + exit(EXIT_WAIVED); + } + + // Print a message describing what the sample does. + printf( + "*********************************************************************" + "******\n"); + printf( + "The CPU launches 2 blocks of 2 threads each. On the device each thread " + "will\n"); + printf( + "launch 2 blocks of 2 threads each. The GPU we will do that " + "recursively\n"); + printf("until it reaches max_depth=%d\n\n", max_depth); + printf("In total 2"); + int num_blocks = 2, sum = 2; + + for (int i = 1; i < max_depth; ++i) { + num_blocks *= 4; + printf("+%d", num_blocks); + sum += num_blocks; + } + + printf("=%d blocks are launched!!! (%d from the GPU)\n", sum, sum - 2); + printf( + "************************************************************************" + "***\n\n"); + + // We set the recursion limit for CDP to max_depth. + cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, max_depth); + + // Launch the kernel from the CPU. + printf("Launching cdp_kernel() with CUDA Dynamic Parallelism:\n\n"); + cdp_kernel<<<2, 2>>>(max_depth, 0, 0, -1); + checkCudaErrors(cudaGetLastError()); + + // Finalize. + checkCudaErrors(cudaDeviceSynchronize()); + + exit(EXIT_SUCCESS); +} diff --git a/cuda_code/cdp_lu_main_1.cu b/cuda_code/cdp_lu_main_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..79a22297b6e6aa81ec79a4edaf3c41efe45e0aa8 --- /dev/null +++ b/cuda_code/cdp_lu_main_1.cu @@ -0,0 +1,298 @@ +/** + * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. + * + * Please refer to the NVIDIA end user license agreement (EULA) associated + * with this source code for terms and conditions that govern your use of + * this software. Any use, reproduction, disclosure, or distribution of + * this software and related documentation outside the terms of the EULA + * is strictly prohibited. 
+ * + */ + +/* This sample illustrates the usage of Cuda Dynamic Parallelism (CDP) + */ + +// includes, system +#include +// includes, cublas +#include +// helper functions +#include "helper_string.h" +#include "helper_cuda.h" +// includes, project +#include "cdp_lu.h" +#include "cdp_lu_utils.h" + +void memsetup(Parameters &host_params) +{ + srand(host_params.seed); + + // Initialise with the base params, and do any necessary randomisation + unsigned long long lda_ull = (unsigned long long) host_params.lda; + host_params.flop_count += lda_ull*lda_ull*lda_ull * 2ull / 3ull; + + host_params.data_size = sizeof(double); + host_params.data_len = host_params.n * host_params.lda; + host_params.piv_len = MIN(host_params.m, host_params.n); + + size_t len = host_params.data_len * host_params.data_size; + size_t piv_len = host_params.piv_len * sizeof(int); + + // Allocate memories + host_params.host_A = (double *) malloc(len); + host_params.host_LU = (double *) malloc(len); + host_params.host_piv = (int *) malloc(piv_len); + + checkCudaErrors(cudaMalloc((void **)&host_params.device_A, len)); + checkCudaErrors(cudaMalloc((void **)&host_params.device_LU, len)); + checkCudaErrors(cudaMalloc((void **)&host_params.device_piv, piv_len)); + checkCudaErrors(cudaMalloc((void **)&host_params.device_info, sizeof(int))); + + // Initialise source with random (seeded) data + // srand(params[b].seed); + double *ptr = host_params.host_A; + + for (int i=0; i= mn) + errorExit("Invalid pivot"); + + if (i != j) + { + int tmp = perm[i]; + perm[i] = perm[j]; + perm[j] = tmp; + } + } + + const double tol = 1.0e-6; + + // Verify that L*U = A. + checkCudaErrors(cudaMemcpy(host_result, device_result, len, cudaMemcpyDeviceToHost)); + bool ok = true; + + for (int i = 0; i < host_params.m; i++) + for (int j = 0; j < host_params.n; j++) + if (fabs(host_result[lda*j+i] - host_params.host_A[j*lda + perm[i]]) > tol) + { + printf("(%d,%d): found=%f, expected=%f\n", i, j, host_result[lda*j+i], host_params.host_A[j*lda + perm[i]]); + ok = false; + break; + } + + status = cublasDestroy(cb_handle); + if (status != CUBLAS_STATUS_SUCCESS) + errorExit("checkresult: cublas failed"); + + free(perm); + free(host_I); + free(host_result); + checkCudaErrors(cudaFree(device_I)); + checkCudaErrors(cudaFree(device_result)); + printf("done\n"); + return ok; +} + +bool launch_test(Parameters &host_params) +{ + memsetup(host_params); + launch(host_params); + bool result = checkresult(host_params); + finalize(host_params); + return result; +} + +void print_usage(const char *exec_name) +{ + printf("Usage: %s -matrix_size=N <-device=N>(optional)\n", exec_name); + printf(" matrix_size: the size of a NxN matrix. It must be greater than 0.\n"); +} + +//////////////////////////////////////////////////////////////////////////////// +// Program main +//////////////////////////////////////////////////////////////////////////////// +int main(int argc, char **argv) +{ +#if CUDART_VERSION < 5000 +#error cdpLU requires CUDA 5.0 to run, waiving testing... 
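+// Added note: CUDART_VERSION encodes the toolkit version as
+// major * 1000 + minor * 10, so the #if guard above rejects any toolkit older
+// than CUDA 5.0, the first release that shipped CUDA Dynamic Parallelism.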
+#endif + + printf("Starting LU Decomposition (CUDA Dynamic Parallelism)\n"); + + int matrix_size = 1024; + + if (checkCmdLineFlag(argc, (const char **)argv, "help") || + checkCmdLineFlag(argc, (const char **)argv, "h")) + { + print_usage(argv[0]); + exit(EXIT_SUCCESS); + } + + if (checkCmdLineFlag(argc, (const char **)argv, "matrix_size")) + { + matrix_size = getCmdLineArgumentInt(argc, (const char **)argv, "matrix_size"); + + if (matrix_size <= 0) + { + printf("Invalid matrix size given on the command-line: %d\n", matrix_size); + exit(EXIT_FAILURE); + } + } + else if (argc > 3) + { + print_usage(argv[0]); + exit(EXIT_FAILURE); + } + + // The test requires CUDA 5 or greater. + // The test requires an architecture SM35 or greater (CDP capable). + int cuda_device = findCudaDevice(argc, (const char **)argv); + cudaDeviceProp deviceProps; + checkCudaErrors(cudaGetDeviceProperties(&deviceProps, cuda_device)); + int cdpCapable = (deviceProps.major == 3 && deviceProps.minor >= 5) || deviceProps.major >=4; + + printf("GPU device %s has compute capabilities (SM %d.%d)\n", deviceProps.name, deviceProps.major, deviceProps.minor); + + if (!cdpCapable) + { + printf("cdpLUDecomposition requires SM 3.5 or higher to use CUDA Dynamic Parallelism. Exiting...\n"); + + // cudaDeviceReset causes the driver to clean up all state. While + // not mandatory in normal operation, it is good practice. It is also + // needed to ensure correct operation when the application is being + // profiled. Calling cudaDeviceReset causes all profile data to be + // flushed before the application exits + cudaDeviceReset(); + exit(EXIT_WAIVED); + } + + Parameters host_params; + memset(&host_params, 0, sizeof(Parameters)); + set_defaults(host_params, matrix_size); + + printf("Compute LU decomposition of a random %dx%d matrix using CUDA Dynamic Parallelism\n", matrix_size, matrix_size); + printf("Launching single task from device...\n"); + bool result = launch_test(host_params); + + // cudaDeviceReset causes the driver to clean up all state. While + // not mandatory in normal operation, it is good practice. It is also + // needed to ensure correct operation when the application is being + // profiled. 
Calling cudaDeviceReset causes all profile data to be + // flushed before the application exits + cudaDeviceReset(); + + if (result) + { + printf("Tests suceeded\n"); + exit(EXIT_SUCCESS); + } + else + { + exit(EXIT_FAILURE); + } +} diff --git a/cuda_code/center_loss_layer_6.cu b/cuda_code/center_loss_layer_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..b2cf93d32c3734a6f2a130264dab5bcd2e589566 --- /dev/null +++ b/cuda_code/center_loss_layer_6.cu @@ -0,0 +1,83 @@ +#include + +#include "caffe/filler.hpp" +#include "caffe/layers/center_loss_layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +__global__ void Compute_distance_data_gpu(int nthreads, const int K, const Dtype* bottom, + const Dtype* label, const Dtype* center, Dtype* distance) { + CUDA_KERNEL_LOOP(index, nthreads) { + int m = index / K; + int k = index % K; + const int label_value = static_cast(label[m]); + // distance(i) = x(i) - c_{y(i)} + distance[index] = bottom[index] - center[label_value * K + k]; + } +} + +template +__global__ void Compute_center_diff_gpu(int nthreads, const int M, const int K, + const Dtype* label, const Dtype* distance, Dtype* variation_sum, + Dtype* center_diff) { + CUDA_KERNEL_LOOP(index, nthreads) { + int count = 0; + for (int m = 0; m < M; m++) { + const int label_value = static_cast(label[m]); + if (label_value == index) { + count++; + for (int k = 0; k < K; k++) { + variation_sum[index * K + k] -= distance[m * K + k]; + } + } + } + for (int k = 0; k < K; k++) { + center_diff[index * K + k] = variation_sum[index * K + k] /(count + (Dtype)1.); + } + } +} + + +template +void CenterLossLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + int nthreads = M_ * K_; + Compute_distance_data_gpu<<>>(nthreads, K_, bottom[0]->gpu_data(), bottom[1]->gpu_data(), + this->blobs_[0]->gpu_data(), distance_.mutable_gpu_data()); + Dtype dot; + caffe_gpu_dot(M_ * K_, distance_.gpu_data(), distance_.gpu_data(), &dot); + Dtype loss = dot / M_ / Dtype(2); + top[0]->mutable_cpu_data()[0] = loss; +} + +template +void CenterLossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + int nthreads = N_; + caffe_gpu_set(N_ * K_, (Dtype)0., variation_sum_.mutable_cpu_data()); + Compute_center_diff_gpu<<>>(nthreads, M_, K_, bottom[1]->gpu_data(), distance_.gpu_data(), + variation_sum_.mutable_cpu_data(), this->blobs_[0]->mutable_gpu_diff()); + + if (propagate_down[0]) { + caffe_gpu_scale(M_ * K_, top[0]->cpu_diff()[0] / M_, + distance_.gpu_data(), bottom[0]->mutable_gpu_diff()); + if(0){ + for(int i = 0; i < 4; i++){ + LOG(INFO) << "bottom_diff[" << i << "]=" << bottom[0]->cpu_diff()[i]; + } + } + } + if (propagate_down[1]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to label inputs."; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(CenterLossLayer); + +} // namespace caffe diff --git a/cuda_code/cg_kernels_3.cu b/cuda_code/cg_kernels_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..9adb589a9eacc755f9e1010766a6c8f92fd9f8cd --- /dev/null +++ b/cuda_code/cg_kernels_3.cu @@ -0,0 +1,136 @@ +/************************************************************* +Copyright (c) 2017-2020, the Ginkgo authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. 
Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*************************************************************/ + +#include "core/solver/cg_kernels.hpp" + + +#include +#include + + +#include "cuda/base/math.hpp" +#include "cuda/base/types.hpp" +#include "cuda/components/thread_ids.cuh" + + +namespace gko { +namespace kernels { +namespace cuda { +/** + * @brief The CG solver namespace. + * + * @ingroup cg + */ +namespace cg { + + +constexpr int default_block_size = 512; + + +#include "common/solver/cg_kernels.hpp.inc" + + +template +void initialize(std::shared_ptr exec, + const matrix::Dense *b, matrix::Dense *r, + matrix::Dense *z, matrix::Dense *p, + matrix::Dense *q, matrix::Dense *prev_rho, + matrix::Dense *rho, + Array *stop_status) +{ + const dim3 block_size(default_block_size, 1, 1); + const dim3 grid_size( + ceildiv(b->get_size()[0] * b->get_stride(), block_size.x), 1, 1); + + initialize_kernel<<>>( + b->get_size()[0], b->get_size()[1], b->get_stride(), + as_cuda_type(b->get_const_values()), as_cuda_type(r->get_values()), + as_cuda_type(z->get_values()), as_cuda_type(p->get_values()), + as_cuda_type(q->get_values()), as_cuda_type(prev_rho->get_values()), + as_cuda_type(rho->get_values()), as_cuda_type(stop_status->get_data())); +} + +GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_CG_INITIALIZE_KERNEL); + + +template +void step_1(std::shared_ptr exec, + matrix::Dense *p, const matrix::Dense *z, + const matrix::Dense *rho, + const matrix::Dense *prev_rho, + const Array *stop_status) +{ + const dim3 block_size(default_block_size, 1, 1); + const dim3 grid_size( + ceildiv(p->get_size()[0] * p->get_stride(), block_size.x), 1, 1); + + step_1_kernel<<>>( + p->get_size()[0], p->get_size()[1], p->get_stride(), + as_cuda_type(p->get_values()), as_cuda_type(z->get_const_values()), + as_cuda_type(rho->get_const_values()), + as_cuda_type(prev_rho->get_const_values()), + as_cuda_type(stop_status->get_const_data())); +} + +GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_CG_STEP_1_KERNEL); + + +template +void step_2(std::shared_ptr exec, + matrix::Dense *x, matrix::Dense *r, + const matrix::Dense *p, + const matrix::Dense *q, + const matrix::Dense *beta, + const matrix::Dense *rho, + const Array *stop_status) +{ + const dim3 block_size(default_block_size, 1, 1); + 
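+    // Added note: the grid below launches one thread per stored element of p
+    // (get_size()[0] * get_stride(), i.e. rows times padded stride), rounded
+    // up to whole blocks of default_block_size threads; the body of
+    // step_2_kernel itself comes from common/solver/cg_kernels.hpp.inc
+    // included above.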
const dim3 grid_size( + ceildiv(p->get_size()[0] * p->get_stride(), block_size.x), 1, 1); + + step_2_kernel<<>>( + p->get_size()[0], p->get_size()[1], p->get_stride(), x->get_stride(), + as_cuda_type(x->get_values()), as_cuda_type(r->get_values()), + as_cuda_type(p->get_const_values()), + as_cuda_type(q->get_const_values()), + as_cuda_type(beta->get_const_values()), + as_cuda_type(rho->get_const_values()), + as_cuda_type(stop_status->get_const_data())); +} + +GKO_INSTANTIATE_FOR_EACH_VALUE_TYPE(GKO_DECLARE_CG_STEP_2_KERNEL); + + +} // namespace cg +} // namespace cuda +} // namespace kernels +} // namespace gko diff --git a/cuda_code/channel_shuffle_4.cu b/cuda_code/channel_shuffle_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..32e0e81d4927e47deb493b04a2747100d29027a1 --- /dev/null +++ b/cuda_code/channel_shuffle_4.cu @@ -0,0 +1,257 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "cudakernel/memory/channel_shuffle.h" +#include "cudakernel/common/divmod_fast.h" +#include "cudakernel/common/memory_utils.h" +#include "ppl/nn/common/tensor_shape.h" +#include "ppl/common/retcode.h" +#include "cudakernel/common/common.h" + +template +__global__ void ppl_cukernel_channel_shuffle( + int64_t num_elems, + int32_t group, + int32_t channels_per_group, + GArray input_strides_fast, + const T *input, + T *output) +{ + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= num_elems) + return; + int64_t output_offset = 0; + int n_idx, c_idx, hw_idx, remain = index; + + input_strides_fast[0].divmod(remain, n_idx, remain); + output_offset += (index - remain); + input_strides_fast[1].divmod(remain, c_idx, remain); + hw_idx = remain; + int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group; + output_offset += out_c_idx * input_strides_fast[1].d_ + hw_idx; + + output[output_offset] = input[index]; +} + +template +__global__ void ppl_cukernel_channel_shuffle_nhwc( + int64_t num_elems, + int32_t group, + int channels_per_group, + int pad_channels, + DivModFast channels_fast, + const T *input, + T *output) +{ + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= num_elems) + return; + int64_t input_offset = 0; + int64_t output_offset = 0; + int nhw_idx, c_idx, remain = index; + channels_fast.divmod(remain, nhw_idx, c_idx); + int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group; + input_offset += nhw_idx * pad_channels + c_idx; + output_offset += nhw_idx * pad_channels + out_c_idx; + + output[output_offset] = input[input_offset]; +} + +ppl::common::RetCode PPLCUDAChannelShuffleForwardImp( + cudaStream_t stream, + int group, + const ppl::nn::TensorShape *input_shape, + const void *input, + const ppl::nn::TensorShape *output_shape, + void *output) +{ 
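+    // Added commentary: channel shuffle with `group` groups reorders the C
+    // channels by viewing them as a (group, C / group) matrix and transposing
+    // it, i.e. input channel c is written to output channel
+    //     (c % channels_per_group) * group + c / channels_per_group,
+    // which is what the kernels above compute as out_c_idx. For example,
+    // with C = 6 and group = 2 (channels_per_group = 3) the input channels
+    // [0 1 2 3 4 5] come out interleaved as [0 3 1 4 2 5].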
+ // num_dims must be equal to 4 + int num_dims = output_shape->GetDimCount(); + int64_t num_elems = output_shape->GetElementsExcludingPadding(); + + // for ndarray layout + int num_input_strides_dims = num_dims - 2; + GArray input_strides_fast(num_input_strides_dims); + int elems_hw = input_shape->GetDim(2) * input_shape->GetDim(3); + input_strides_fast[1] = DivModFast(elems_hw); + int elems_chw = input_shape->GetDim(1) * elems_hw; + input_strides_fast[0] = DivModFast(elems_chw); + // for nhwc layout + int pad_channels = input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1); + DivModFast channels_fast(input_shape->GetDim(1)); + + int block_size = 256; + int grid_size = (num_elems + block_size - 1) / block_size; + int channels_per_group = input_shape->GetDim(1) / group; + +#define SWITCH_CASE(TYPE) \ + case sizeof(TYPE): { \ + if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC8) { \ + ppl_cukernel_channel_shuffle_nhwc<<>>( \ + num_elems, group, channels_per_group, pad_channels, channels_fast, (const TYPE *)input, (TYPE *)output); \ + } else { \ + ppl_cukernel_channel_shuffle<<>>( \ + num_elems, group, channels_per_group, input_strides_fast, (const TYPE *)input, (TYPE *)output); \ + } \ + return ppl::common::RC_SUCCESS; \ + } + + switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) { + SWITCH_CASE(int8_t); + SWITCH_CASE(int16_t); + SWITCH_CASE(int32_t); + SWITCH_CASE(int64_t); + default: + return ppl::common::RC_UNSUPPORTED; + } +#undef SWITCH_CASE +} + +template +__global__ void ppl_cukernel_fuse_channel_shuffle( + int64_t num_elems, + int32_t group, + int32_t channels_per_group, + GArray input_strides_fast, + const T *input1, + const T *input2, + T *output1, + T *output2) +{ + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= 2 * num_elems) + return; + int64_t output_offset = 0; + int n_idx, c_idx, hw_idx, remain = index; + int hw = input_strides_fast[1].d_; + + input_strides_fast[0].divmod(remain, n_idx, remain); // index / chw + output_offset += (index - remain); + input_strides_fast[1].divmod(remain, c_idx, remain); // index / hw + hw_idx = remain; + int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group; + output_offset += out_c_idx * input_strides_fast[1].d_ + hw_idx; + int out_div_hw = output_offset / hw; + int in_div_hw = index / hw; + + if (out_div_hw % 2) { + if (in_div_hw % 2) { + output2[(out_div_hw - 1) / 2 * hw + hw_idx] = input2[(in_div_hw - 1) / 2 * hw + hw_idx]; + } else { + output2[(out_div_hw - 1) / 2 * hw + hw_idx] = input1[in_div_hw / 2 * hw + hw_idx]; + } + } else { + if (in_div_hw % 2) { + output1[out_div_hw / 2 * hw + hw_idx] = input2[(in_div_hw - 1) / 2 * hw + hw_idx]; + } else { + output1[out_div_hw / 2 * hw + hw_idx] = input1[in_div_hw / 2 * hw + hw_idx]; + } + } +} + +template +__global__ void ppl_cukernel_fuse_channel_shuffle_nhwc( + int64_t num_elems, + int32_t group, + int channels_per_group, + int pad_channels, + DivModFast channels_fast, + const T *input1, + const T *input2, + T *output1, + T *output2, + int elems_nhw, + int elems_c) +{ + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index >= 2 * num_elems) + return; + int64_t input_offset = 0; + int64_t output_offset = 0; + int nhw_idx, c_idx, remain = index; + channels_fast.divmod(remain, nhw_idx, c_idx); + int out_c_idx = c_idx % channels_per_group * group + c_idx / channels_per_group; + input_offset += nhw_idx * 2 * elems_c + c_idx; + output_offset += nhw_idx * 2 * elems_c + out_c_idx; + if 
(output_offset % (2 * elems_c) >= elems_c) { + if (input_offset % (2 * elems_c) >= elems_c) { + output2[nhw_idx * pad_channels + out_c_idx - elems_c] = input2[nhw_idx * pad_channels + c_idx - elems_c]; + } else { + output2[nhw_idx * pad_channels + out_c_idx - elems_c] = input1[nhw_idx * pad_channels + c_idx]; + } + } else { + if (input_offset % (2 * elems_c) >= elems_c) { + output1[nhw_idx * pad_channels + out_c_idx] = input2[nhw_idx * pad_channels + c_idx - elems_c]; + } else { + output1[nhw_idx * pad_channels + out_c_idx] = input1[nhw_idx * pad_channels + c_idx]; + } + } +} + +ppl::common::RetCode PPLCUDAFuseChannelShuffleForwardImp( + cudaStream_t stream, + int group, + const ppl::nn::TensorShape *input_shape, + const void *input1, + const void *input2, + const ppl::nn::TensorShape *output_shape, + void *output1, + void *output2) +{ + // num_dims must be equal to 4 + int num_dims = output_shape->GetDimCount(); + int64_t num_elems = output_shape->GetElementsExcludingPadding(); + + // for ndarray layout + int num_input_strides_dims = num_dims - 2; + GArray input_strides_fast(num_input_strides_dims); + int elems_hw = input_shape->GetDim(2) * input_shape->GetDim(3); + input_strides_fast[1] = DivModFast(elems_hw); + int elems_chw = 2 * input_shape->GetDim(1) * elems_hw; + input_strides_fast[0] = DivModFast(elems_chw); + // for nhwc layout + int pad_channels = input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1); + DivModFast channels_fast(2 * input_shape->GetDim(1)); + int elems_nhw = elems_hw * input_shape->GetDim(0); + int elems_c = input_shape->GetDim(1); + + int block_size = 256; + int grid_size = (2 * num_elems + block_size - 1) / block_size; + int channels_per_group = (2 * input_shape->GetDim(1)) / group; + +#define SWITCH_CASE(TYPE) \ + case sizeof(TYPE): { \ + if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC8) { \ + ppl_cukernel_fuse_channel_shuffle_nhwc<<>>( \ + num_elems, group, channels_per_group, pad_channels, channels_fast, (const TYPE *)input1, (const TYPE *)input2, (TYPE *)output1, (TYPE *)output2, elems_nhw, elems_c); \ + } else { \ + ppl_cukernel_fuse_channel_shuffle<<>>( \ + num_elems, group, channels_per_group, input_strides_fast, (const TYPE *)input1, (const TYPE *)input2, (TYPE *)output1, (TYPE *)output2); \ + } \ + return ppl::common::RC_SUCCESS; \ + } + + switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) { + SWITCH_CASE(int8_t); + SWITCH_CASE(int16_t); + SWITCH_CASE(int32_t); + SWITCH_CASE(int64_t); + default: + return ppl::common::RC_UNSUPPORTED; + } +#undef SWITCH_CASE +} diff --git a/cuda_code/channelwise_conv3d_op_cudnn_1.cu b/cuda_code/channelwise_conv3d_op_cudnn_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..1402cc0854df54e6e4bcba2648c8ef952e26098a --- /dev/null +++ b/cuda_code/channelwise_conv3d_op_cudnn_1.cu @@ -0,0 +1,611 @@ +#include "caffe2/core/common_cudnn.h" +#include "caffe2/core/context_gpu.h" +#include "caffe2/core/cudnn_wrappers.h" +#include "caffe2/operators/conv_op.h" +#include "caffe2/operators/conv_op_cache_cudnn.h" +#include "caffe2/operators/conv_pool_op_base.h" +#include "caffe2/utils/GpuAtomics.cuh" + +// Adopted from caffe2 depthwise conv at +// pytorch/caffe2/caffe2/operators/depthwise_3x3_conv_op_cudnn.cu + +namespace caffe2 { + +struct DepthwiseArgs { + // Input layer dimensions + int batch{0}; + int in_rows{0}; + int in_cols{0}; + int in_length{0}; + int in_depth{0}; + + // filter size + int filter_rows{0}; + int filter_cols{0}; + int 
filter_length{0}; + + // strides and pads + int stride{0}; + int temporal_stride{0}; + int pad_rows{0}; + int pad_cols{0}; + int pad_length{0}; + + // Output layer dimensions + int out_rows{0}; + int out_cols{0}; + int out_length{0}; + int out_depth{0}; +}; + +template +__global__ void DepthwiseConv3dGPUKernelNCHW( + const DepthwiseArgs args, + const T* input, + const T* filter, + T* output, + int num_outputs) { + const int in_rows = args.in_rows; + const int in_cols = args.in_cols; + const int in_length = args.in_length; + const int in_depth = args.in_depth; + const int filter_rows = args.filter_rows; + const int filter_cols = args.filter_cols; + const int filter_length = args.filter_length; + const int stride = args.stride; + const int temporal_stride = args.temporal_stride; + const int pad_rows = args.pad_rows; + const int pad_cols = args.pad_cols; + const int pad_length = args.pad_length; + const int out_rows = args.out_rows; + const int out_cols = args.out_cols; + const int out_length = args.out_length; + const int out_depth = args.out_depth; + + CUDA_1D_KERNEL_LOOP(thread_id, num_outputs) { + const int OW = thread_id % out_cols; + const int OH = (thread_id / out_cols) % out_rows; + const int OL = (thread_id / out_cols / out_rows) % out_length; + const int OC = (thread_id / out_cols / out_rows / out_length) % out_depth; + const int OB = thread_id / out_cols / out_rows / out_length / out_depth; + const int in_d = OC; + + const int input_offset_temp = + (OB * in_depth + OC) * (in_length * in_rows * in_cols); + const int input_row_start = OH * stride - pad_rows; + const int input_col_start = OW * stride - pad_cols; + const int input_length_start = OL * temporal_stride - pad_length; + const int input_row_end = input_row_start + filter_rows; + const int input_col_end = input_col_start + filter_cols; + const int input_length_end = input_length_start + filter_length; + const float* filter_start = + filter + in_d * filter_rows * filter_cols * filter_length; + + T sum = 0; + if (input_row_start >= 0 && input_col_start >= 0 && + input_length_start >= 0 && input_row_end < in_rows && + input_col_end < in_cols && input_length_end < in_length) { +// Loop that doesn't need to check for boundary conditions. +#pragma unroll + for (int f_l = 0; f_l < filter_length; ++f_l) { + const int in_l = input_length_start + f_l; +#pragma unroll + for (int f_r = 0; f_r < filter_rows; ++f_r) { + const int in_r = input_row_start + f_r; + const float* filter_offset = filter_start + + filter_cols * filter_rows * f_l + filter_cols * f_r; +#pragma unroll + for (int f_c = 0; f_c < filter_cols; ++f_c) { + const int in_c = input_col_start + f_c; + + const int input_offset = (input_offset_temp) + + (in_l * in_cols * in_rows) + (in_r * in_cols) + in_c; +#if __CUDA_ARCH__ >= 350 + sum += __ldg(input + input_offset) * __ldg(filter_offset + f_c); +#else + sum += input[input_offset] * filter_offset[f_c]; +#endif + } + } + } + } else { +// Loop that needs to check for boundary conditions. 
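+// Added note: this slower path runs only when the filter window for this
+// output position sticks out past an input border; every tap is then guarded
+// by the in_r / in_c / in_l range checks below, whereas fully interior
+// positions take the unchecked fast path above.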
+#pragma unroll + for (int f_l = 0; f_l < filter_length; ++f_l) { + const int in_l = input_length_start + f_l; +#pragma unroll + for (int f_r = 0; f_r < filter_rows; ++f_r) { + const int in_r = input_row_start + f_r; + const float* filter_offset = filter_start + + filter_cols * filter_rows * f_l + filter_cols * f_r; +#pragma unroll + for (int f_c = 0; f_c < filter_cols; ++f_c) { + const int in_c = input_col_start + f_c; + if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols && + in_l >= 0 && in_l < in_length) { + const int input_offset = (input_offset_temp) + + (in_l * in_cols * in_rows) + (in_r * in_cols) + in_c; +#if __CUDA_ARCH__ >= 350 + sum += __ldg(input + input_offset) * __ldg(filter_offset + f_c); +#else + sum += input[input_offset] * filter_offset[f_c]; +#endif + } + } + } + } + } + + output[thread_id] = sum; + } +} + +// A Cuda kernel to compute the depthwise convolution backprop w.r.t. filter. +template +__global__ void DepthwiseConv3dBackpropFilterGPUKernelNCHW( + const DepthwiseArgs args, + const T* out_backprop, + const T* input, + T* filter_backprop, + int num_out_backprop) { + const int in_rows = args.in_rows; + const int in_cols = args.in_cols; + const int in_length = args.in_length; + const int in_depth = args.in_depth; + const int filter_rows = args.filter_rows; + const int filter_cols = args.filter_cols; + const int filter_length = args.filter_length; + const int stride = args.stride; + const int temporal_stride = args.temporal_stride; + const int pad_rows = args.pad_rows; + const int pad_cols = args.pad_cols; + const int pad_length = args.pad_length; + const int out_rows = args.out_rows; + const int out_cols = args.out_cols; + const int out_length = args.out_length; + const int out_depth = args.out_depth; + + CUDA_1D_KERNEL_LOOP(thread_id, num_out_backprop) { + // Compute the indexes of this thread in the output. + const int OW = thread_id % out_cols; + const int OH = (thread_id / out_cols) % out_rows; + const int OL = (thread_id / out_cols / out_rows) % out_length; + const int OC = (thread_id / out_cols / out_rows / out_length) % out_depth; + const int OB = thread_id / out_cols / out_rows / out_length / out_depth; + + // Compute the input depth and the index of depth multiplier. + const int in_d = OC; + + // Decide if all input is valid, if yes, we can skip the boundary checks + // for each input. + const int in_r_start = OH * stride - pad_rows; + const int in_c_start = OW * stride - pad_cols; + const int in_l_start = OL * temporal_stride - pad_length; + const int in_r_end = in_r_start + filter_rows; + const int in_c_end = in_c_start + filter_cols; + const int in_l_end = in_l_start + filter_length; + + const int out_backprop_offset = + (OB * out_depth * out_length * out_rows * out_cols) + + (OC * out_length * out_rows * out_cols) + (OL * out_rows * out_cols) + + (OH * out_cols) + (OW); + +#if __CUDA_ARCH__ >= 350 + const T out_bp = __ldg(out_backprop + out_backprop_offset); +#else + const T out_bp = out_backprop[out_backprop_offset]; +#endif + if (in_r_start >= 0 && in_c_start >= 0 && in_r_end < in_rows && + in_c_end < in_cols && in_l_start >= 0 && in_l_end < in_length) { +#pragma unroll + for (int f_l = 0; f_l < filter_length; ++f_l) { + const int in_l = in_l_start + f_l; +#pragma unroll + for (int f_r = 0; f_r < filter_rows; ++f_r) { + const int in_r = in_r_start + f_r; + // Avoid repeated computation. 
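+          // Added note: everything except the column index is hoisted out of
+          // the innermost f_c loop, so each tap only adds in_c to the
+          // precomputed (batch, channel, length, row) offset below.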
+ const int input_offset_temp = + (OB * in_depth * in_length * in_rows * in_cols) + + (OC * in_length * in_rows * in_cols) + + (in_l * in_rows * in_cols) + (in_r * in_cols); + +#pragma unroll + for (int f_c = 0; f_c < filter_cols; ++f_c) { + const int in_c = in_c_start + f_c; + const int input_offset = input_offset_temp + in_c; +#if __CUDA_ARCH__ >= 350 + T partial_sum = __ldg(input + input_offset) * out_bp; +#else + T partial_sum = input[input_offset] * out_bp; +#endif + T* addr = filter_backprop + + (in_d * filter_rows * filter_cols * filter_length) + + (f_l * filter_rows * filter_cols) + (f_c + filter_cols * f_r); + gpu_atomic_add(addr, partial_sum); + } + } + } + } else { +#pragma unroll + for (int f_l = 0; f_l < filter_length; ++f_l) { + const int in_l = in_l_start + f_l; +#pragma unroll + for (int f_r = 0; f_r < filter_rows; ++f_r) { + const int in_r = in_r_start + f_r; + // Avoid repeated computation. + const int input_offset_temp = + (OB * in_depth * in_length * in_rows * in_cols) + + (OC * in_length * in_rows * in_cols) + + (in_l * in_rows * in_cols) + (in_r * in_cols); +#pragma unroll + for (int f_c = 0; f_c < filter_cols; ++f_c) { + const int in_c = in_c_start + f_c; + + if (in_r >= 0 && in_r < in_rows && in_c >= 0 && in_c < in_cols && + in_l >= 0 && in_l < in_length) { + const int input_offset = input_offset_temp + in_c; +#if __CUDA_ARCH__ >= 350 + T partial_sum = __ldg(input + input_offset) * out_bp; +#else + T partial_sum = input[input_offset] * out_bp; +#endif + T* addr = filter_backprop + + (in_d * filter_rows * filter_cols * filter_length) + + (f_l * filter_rows * filter_cols) + (f_c + filter_cols * f_r); + gpu_atomic_add(addr, partial_sum); + } + } + } + } + } + } +} + +template +__global__ void DepthwiseConv3dBackpropInputGPUKernelNCHW( + const DepthwiseArgs args, + const T* out_backprop, + const T* filter, + T* in_backprop, + int num_in_backprop) { + const int in_rows = args.in_rows; + const int in_cols = args.in_cols; + const int in_length = args.in_length; + const int in_depth = args.in_depth; + const int filter_rows = args.filter_rows; + const int filter_cols = args.filter_cols; + const int filter_length = args.filter_length; + const int stride = args.stride; + const int temporal_stride = args.temporal_stride; + const int pad_rows = args.pad_rows; + const int pad_cols = args.pad_cols; + const int pad_length = args.pad_length; + const int out_rows = args.out_rows; + const int out_cols = args.out_cols; + const int out_length = args.out_length; + const int out_depth = args.out_depth; + + CUDA_1D_KERNEL_LOOP(thread_id, num_in_backprop) { + const int IW = thread_id % in_cols; + const int IH = (thread_id / in_cols) % in_rows; + const int IL = (thread_id / in_cols / in_rows) % in_length; + const int IC = (thread_id / in_cols / in_rows / in_length) % in_depth; + const int IB = thread_id / in_cols / in_rows / in_length / in_depth; + + T sum = 0; + + const int out_r_start = + max(0, (IH - filter_rows + pad_rows + stride) / stride); + const int out_r_end = min(out_rows - 1, (IH + pad_rows) / stride); + const int out_c_start = + max(0, (IW - filter_cols + pad_cols + stride) / stride); + const int out_c_end = min(out_cols - 1, (IW + pad_cols) / stride); + const int out_l_start = max( + 0, + (IL - filter_length + pad_length + temporal_stride) / temporal_stride); + const int out_l_end = + min(out_length - 1, (IL + pad_length) / temporal_stride); + +#pragma unroll + for (int out_l = out_l_start; out_l <= out_l_end; ++out_l) { + const int f_l = IL + pad_length - out_l * 
temporal_stride; + for (int out_r = out_r_start; out_r <= out_r_end; ++out_r) { + const int f_r = IH + pad_rows - out_r * stride; + for (int out_c = out_c_start; out_c <= out_c_end; ++out_c) { + const int f_c = IW + pad_cols - out_c * stride; + const int filter_offset = + IC * filter_rows * filter_cols * filter_length + + f_l * filter_cols * filter_rows + f_r * filter_cols + f_c; + const int out_backprop_offset = + (IB * out_depth * out_length * out_rows * out_cols) + + (IC * out_length * out_rows * out_cols) + + (out_l * out_rows * out_cols) + (out_r * out_cols) + (out_c); + +#if __CUDA_ARCH__ >= 350 + sum += __ldg(out_backprop + out_backprop_offset) * + __ldg(filter + filter_offset); +#else + sum += out_backprop[out_backprop_offset] * filter[filter_offset]; +#endif + } + } + } + const int in_backprop_offset = + (IB * in_rows * in_cols * in_length * in_depth) + + (IC * in_rows * in_cols * in_length) + (IL * in_rows * in_cols) + + (IH * in_cols) + (IW); + in_backprop[in_backprop_offset] = sum; + } +} + +class ChannelwiseConv3dOp final : public ConvPoolOpBase { + public: + USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext); + ChannelwiseConv3dOp(const OperatorDef& operator_def, Workspace* ws) + : ConvPoolOpBase(operator_def, ws), + cudnn_wrapper_(&context_) { + OPERATOR_NEEDS_FEATURE( + this->order_ == StorageOrder::NCHW, + "ChannelwiseConv3dOp only supports NCHW order"); + CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bias_desc_)); + CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_for_bias_)); + } + + ~ChannelwiseConv3dOp() { + CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bias_desc_)); + CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_for_bias_)); + } + + bool RunOnDeviceWithOrderNCHW() override { + const Tensor& X = Input(0); + auto& filter = Input(1); + const int C = X.dim32(1); + CAFFE_ENFORCE_EQ(X.ndim(), filter.ndim()); + const int M = filter.dim32(0); // number of output filters + + // enforce input/output filters are the same + CAFFE_ENFORCE_EQ(M, X.dim32(1)); + CAFFE_ENFORCE_EQ(C, X.dim32(1)); + + // check group parameters + CAFFE_ENFORCE_EQ(C, this->group_); + CAFFE_ENFORCE_GT(this->group_, 1); + + auto sizes = ConvPoolOpBase::GetOutputSize(X, filter.dim32(0)); + Tensor* Y = Output(0, sizes, at::dtype()); + + DepthwiseArgs args; + args.batch = X.dim32(0); + args.in_length = X.dim32(2); + args.in_rows = X.dim32(3); + args.in_cols = X.dim32(4); + args.in_depth = X.dim32(1); + + CAFFE_ENFORCE_EQ(kernel_.size(), 3); + args.filter_cols = kernel_[2]; + args.filter_rows = kernel_[1]; + args.filter_length = kernel_[0]; + + CAFFE_ENFORCE_EQ(stride_.size(), 3); + args.stride = stride_[1]; + CAFFE_ENFORCE_EQ(stride_[1], stride_[2]); + args.temporal_stride = stride_[0]; + + CAFFE_ENFORCE_EQ(pads_.size(), 6); + args.pad_length = pads_[0]; + args.pad_rows = pads_[1]; + args.pad_cols = pads_[2]; + + CAFFE_ENFORCE_EQ(Y->dim32(0), X.dim32(0)); + args.out_rows = Y->dim32(3); + args.out_cols = Y->dim32(4); + args.out_length = Y->dim32(2); + args.out_depth = Y->dim32(1); + + DepthwiseConv3dGPUKernelNCHW + <<size()), + CAFFE_CUDA_NUM_THREADS, + 0, + context_.cuda_stream()>>>( + args, + X.data(), + filter.data(), + Y->mutable_data(), + Y->size()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (InputSize() == 3) { + std::vector bias_dims(X.ndim(), 1); + bias_dims[1] = M; + std::vector strides = {M, 1, 1, 1, 1}; + CUDNN_ENFORCE(cudnnSetTensorNdDescriptor( + bias_desc_, + cudnnTypeWrapper::type, + X.ndim(), + bias_dims.data(), + strides.data())); + + vector dims = { + Y->dim32(0), M, Y->dim32(2), Y->dim32(3), 
Y->dim32(4)}; + strides = {M * Y->dim32(2) * Y->dim32(3) * Y->dim32(4), + Y->dim32(2) * Y->dim32(3) * Y->dim32(4), + Y->dim32(3) * Y->dim32(4), + Y->dim32(4), + 1}; + CUDNN_ENFORCE(cudnnSetTensorNdDescriptor( + top_desc_for_bias_, + cudnnTypeWrapper::type, + X.ndim(), + dims.data(), + strides.data())); + + auto& bias = Input(2); + CAFFE_ENFORCE_EQ(bias.ndim(), 1); + CAFFE_ENFORCE_EQ(bias.dim32(0), M); + CUDNN_ENFORCE(cudnnAddTensor( + cudnn_wrapper_.inline_cudnn_handle(), + cudnnTypeWrapper::kOne(), + bias_desc_, + bias.data(), + cudnnTypeWrapper::kOne(), + top_desc_for_bias_, + Y->mutable_data())); + } + + return true; + } + + private: + CuDNNWrapper cudnn_wrapper_; + cudnnTensorDescriptor_t bias_desc_; + cudnnTensorDescriptor_t top_desc_for_bias_; +}; + +class ChannelwiseConv3dGradientOp final : public ConvPoolOpBase { + public: + USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext); + ChannelwiseConv3dGradientOp(const OperatorDef& operator_def, Workspace* ws) + : ConvPoolOpBase(operator_def, ws), + cudnn_wrapper_(&context_), + no_bias_(OperatorBase::GetSingleArgument("no_bias", 0)) { + CAFFE_ENFORCE( + !(no_bias_ && OutputSize() == 3), + "If bias is not present, you should not have 3 grad output."); + OPERATOR_NEEDS_FEATURE( + this->order_ == StorageOrder::NCHW, + "ChannelwiseConv3dGradientOp only supports NCHW order"); + CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bias_desc_)); + CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_for_bias_)); + } + + ~ChannelwiseConv3dGradientOp() { + CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bias_desc_)); + CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_for_bias_)); + } + + bool RunOnDeviceWithOrderNCHW() override { + auto& X = Input(INPUT); + auto& filter = Input(FILTER); + auto& dY = Input(OUTPUT_GRAD); + auto* dfilter = Output(FILTER_GRAD); + const int C = X.dim32(1); + + const vector input_dims = this->GetDims(X); + ConvPoolOpBase::ComputePads(input_dims); + CAFFE_ENFORCE_EQ(X.ndim(), filter.ndim()); + const int M = filter.dim32(0); + CAFFE_ENFORCE(filter.dim32(1) * group_ == C); + CAFFE_ENFORCE(M % group_ == 0); + dfilter->ResizeLike(filter); + + DepthwiseArgs args; + args.batch = X.dim32(0); + args.in_rows = X.dim32(3); + args.in_cols = X.dim32(4); + args.in_length = X.dim32(2); + args.in_depth = X.dim32(1); + + args.filter_cols = kernel_[2]; + args.filter_rows = kernel_[1]; + args.filter_length = kernel_[0]; + + args.stride = stride_[1]; + CAFFE_ENFORCE_EQ(stride_[1], stride_[2]); + args.temporal_stride = stride_[0]; + + args.pad_length = pads_[0]; + args.pad_rows = pads_[1]; + args.pad_cols = pads_[2]; + + args.out_rows = dY.dim32(3); + args.out_cols = dY.dim32(4); + args.out_length = dY.dim32(2); + args.out_depth = dY.dim32(1); + + CAFFE_ENFORCE(OutputSize() == 3 || (no_bias_ && (OutputSize() == 2))); + auto* dX = Output(no_bias_ ? 
BIAS_OR_INPUT_GRAD : INPUT_GRAD); + dX->ResizeLike(X); + math::Set( + dfilter->size(), 0, dfilter->mutable_data(), &context_); + + DepthwiseConv3dBackpropFilterGPUKernelNCHW + <<>>( + args, + dY.data(), + X.data(), + dfilter->mutable_data(), + dY.size()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + DepthwiseConv3dBackpropInputGPUKernelNCHW + <<size()), + CAFFE_CUDA_NUM_THREADS, + 0, + context_.cuda_stream()>>>( + args, + dY.data(), + filter.data(), + dX->mutable_data(), + dX->size()); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (!no_bias_) { + std::vector bias_dims(X.ndim(), 1); + bias_dims[1] = M; + std::vector strides = {M, 1, 1, 1, 1}; + CUDNN_ENFORCE(cudnnSetTensorNdDescriptor( + bias_desc_, + cudnnTypeWrapper::type, + X.ndim(), + bias_dims.data(), + strides.data())); + + std::vector dims = { + dY.dim32(0), M, dY.dim32(2), dY.dim32(3), dY.dim32(4)}; + strides = {M * dY.dim32(2) * dY.dim32(3) * dY.dim32(4), + dY.dim32(2) * dY.dim32(3) * dY.dim32(4), + dY.dim32(3) * dY.dim32(4), + dY.dim32(4), + 1}; + CUDNN_ENFORCE(cudnnSetTensorNdDescriptor( + top_desc_for_bias_, + cudnnTypeWrapper::type, + X.ndim(), + dims.data(), + strides.data())); + + auto* dbias = Output(BIAS_OR_INPUT_GRAD); + dbias->Resize(M); + CUDNN_ENFORCE(cudnnConvolutionBackwardBias( + cudnn_wrapper_.inline_cudnn_handle(), + cudnnTypeWrapper::kOne(), + top_desc_for_bias_, + dY.data(), + cudnnTypeWrapper::kZero(), + bias_desc_, + dbias->mutable_data())); + } + return true; + } + + private: + CuDNNWrapper cudnn_wrapper_; + cudnnTensorDescriptor_t bias_desc_; + cudnnTensorDescriptor_t top_desc_for_bias_; + + bool no_bias_; + + INPUT_TAGS(INPUT, FILTER, OUTPUT_GRAD); + OUTPUT_TAGS(FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD); +}; + +REGISTER_CUDA_OPERATOR_WITH_ENGINE(Conv, CHANNELWISE_3D, ChannelwiseConv3dOp); +REGISTER_CUDA_OPERATOR_WITH_ENGINE( + ConvGradient, + CHANNELWISE_3D, + ChannelwiseConv3dGradientOp); + +} // namespace caffe2 diff --git a/cuda_code/chapter2.cu b/cuda_code/chapter2.cu new file mode 100644 index 0000000000000000000000000000000000000000..a8ec82ccd5a9d208527a575d454000a0c486246c --- /dev/null +++ b/cuda_code/chapter2.cu @@ -0,0 +1,67 @@ +#include +#include +#include "vec3.h" + +// limited version of checkCudaErrors from helper_cuda.h in CUDA examples +#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) + +void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) { + if (result) { + std::cerr << "CUDA error = " << static_cast(result) << " at " << + file << ":" << line << " '" << func << "' \n"; + // Make sure we call CUDA Device Reset before exiting + cudaDeviceReset(); + exit(99); + } +} + +__global__ void render(vec3 *fb, int max_x, int max_y) { + int i = threadIdx.x + blockIdx.x * blockDim.x; + int j = threadIdx.y + blockIdx.y * blockDim.y; + if((i >= max_x) || (j >= max_y)) return; + int pixel_index = j*max_x + i; + fb[pixel_index] = vec3( float(i) / max_x, float(j) / max_y, 0.2f); +} + +int main() { + int nx = 1200; + int ny = 600; + int tx = 8; + int ty = 8; + + std::cerr << "Rendering a " << nx << "x" << ny << " image "; + std::cerr << "in " << tx << "x" << ty << " blocks.\n"; + + int num_pixels = nx*ny; + size_t fb_size = num_pixels*sizeof(vec3); + + // allocate FB + vec3 *fb; + checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size)); + + clock_t start, stop; + start = clock(); + // Render our buffer + dim3 blocks(nx/tx+1,ny/ty+1); + dim3 threads(tx,ty); + render<<>>(fb, nx, ny); + checkCudaErrors(cudaGetLastError()); + 
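+    // Added note: cudaGetLastError() only reports launch-time failures (bad
+    // configuration, out of resources); the cudaDeviceSynchronize() below is
+    // what surfaces errors raised while the kernel executes, and it also
+    // makes the clock()-based timing meaningful, because kernel launches
+    // return to the host immediately.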
checkCudaErrors(cudaDeviceSynchronize()); + stop = clock(); + double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; + std::cerr << "took " << timer_seconds << " seconds.\n"; + + // Output FB as Image + std::cout << "P3\n" << nx << " " << ny << "\n255\n"; + for (int j = ny-1; j >= 0; j--) { + for (int i = 0; i < nx; i++) { + size_t pixel_index = j*nx + i; + int ir = int(255.99*fb[pixel_index].r()); + int ig = int(255.99*fb[pixel_index].g()); + int ib = int(255.99*fb[pixel_index].b()); + std::cout << ir << " " << ig << " " << ib << "\n"; + } + } + + checkCudaErrors(cudaFree(fb)); +} diff --git a/cuda_code/checkError.cu b/cuda_code/checkError.cu new file mode 100644 index 0000000000000000000000000000000000000000..16f6c09653975491523f175c45247a1709b4ba31 --- /dev/null +++ b/cuda_code/checkError.cu @@ -0,0 +1,27 @@ +#include + +__global__ void iota(float *a) +{ + int i = blockDim.x * blockIdx.x + threadIdx.x; + a[i] = i; +} + +int main(int argc, char *argv[]) +{ + int numElements = 1e+8; + + // Allocate vector a in device memory. + float *d_a; + checkCudaErrors(cudaMalloc((void **)&d_a, sizeof(float) * numElements)); + + // Determine the number of threads per block and the number of blocks per grid. + int numThreadsPerBlock = 256; + int numBlocksPerGrid = (numElements + numThreadsPerBlock - 1) / numThreadsPerBlock; + + // Invoke the kernel on device asynchronously. + iota<<>>(d_a); + + // Cleanup. + checkCudaErrors(cudaFree(d_a)); + checkCudaErrors(cudaDeviceReset()); +} diff --git a/cuda_code/check_finite_and_unscale_op_5.cu b/cuda_code/check_finite_and_unscale_op_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..6840e4847c4c6485c2815e0634bcd7aaa16783b4 --- /dev/null +++ b/cuda_code/check_finite_and_unscale_op_5.cu @@ -0,0 +1,90 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/amp/check_finite_and_unscale_op.h" +#include "paddle/fluid/operators/amp/fp16_type_traits.h" +#include "paddle/fluid/platform/float16.h" + +namespace paddle { +namespace operators { + +template +__global__ void InverseAndMemset(const T* s, T* o, bool* found_inf) { + *o = Inverse(*s); + *found_inf = false; +} + +template +__global__ void CheckFiniteAndUnscale(const T* in, const MT* scale, int num, + bool* found_inf, T* out) { + const int idx = threadIdx.x + blockIdx.x * blockDim.x; + + if (idx < num) { + MT val = static_cast(in[idx]) * (*scale); + T narrow_val = static_cast(val); + out[idx] = narrow_val; + if (!isfinite(narrow_val)) { + *found_inf = true; + } + } +} + +template +class CheckFiniteAndUnscaleGpuKernel : public framework::OpKernel { + using MPDType = typename details::MPTypeTrait::Type; + + public: + void Compute(const framework::ExecutionContext& ctx) const { + auto& dev_ctx = ctx.template device_context(); + const auto xs = ctx.MultiInput("X"); + const auto* scale = ctx.Input("Scale"); + auto outs = ctx.MultiOutput("Out"); + auto* found_inf = ctx.Output("FoundInfinite"); + + const MPDType* scale_data = scale->data(); + bool* found_inf_data = found_inf->mutable_data(dev_ctx.GetPlace()); + + framework::Tensor inverse_scale = + ctx.AllocateTmpTensor({1}, + dev_ctx); + MPDType* inverse_scale_v = inverse_scale.template data(); + + InverseAndMemset<<<1, 1, 0, dev_ctx.stream()>>>( + scale_data, inverse_scale_v, found_inf_data); + + for (size_t i = 0; i < xs.size(); ++i) { + const auto* x = xs[i]; + auto* out = outs[i]; + const T* x_data = x->data(); + T* out_data = out->mutable_data(dev_ctx.GetPlace()); + + int num = x->numel(); + int block = 1024; + int grid = (num + block - 1) / block; + VLOG(3) << "launch kernel"; + CheckFiniteAndUnscale<<>>( + x_data, inverse_scale_v, num, found_inf_data, out_data); + VLOG(3) << "finish kernel"; + } + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +namespace plat = paddle::platform; +REGISTER_OP_CUDA_KERNEL(check_finite_and_unscale, + ops::CheckFiniteAndUnscaleGpuKernel, + ops::CheckFiniteAndUnscaleGpuKernel, + ops::CheckFiniteAndUnscaleGpuKernel); diff --git a/cuda_code/clacpy_sym_in.cu b/cuda_code/clacpy_sym_in.cu new file mode 100644 index 0000000000000000000000000000000000000000..97e7a31abbeb723ed3ce6564cddb18b3daca0b0a --- /dev/null +++ b/cuda_code/clacpy_sym_in.cu @@ -0,0 +1,281 @@ +/* + -- MAGMA (version 2.2.0) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date November 2016 + + @author Mark Gates + @author Azzam Haidar + @author Ichitaro Yamazaki + + @generated from magmablas/zlacpy_sym_in.cu, normal z -> c, Sun Nov 20 20:20:29 2016 + +*/ +#include "magma_internal.h" + +#define BLK_X 64 +#define BLK_Y 32 + +/******************************************************************************/ +/* + Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. + Each block has BLK_X threads. + Each thread loops across one row, updating BLK_Y entries. + + Code similar to claset, clacpy, clag2z, clag2z, cgeadd. 
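+
+    Added note: the BLK_X = 64 threads of a block each own one row of a
+    64 x 32 tile and walk its BLK_Y = 32 columns; for a fixed column the 64
+    threads touch consecutive rows, so the accesses to dA and dB stay
+    coalesced along the leading dimension of the column-major storage.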
+*/ +static __device__ +void clacpy_sym_in_full_device( + int m, int n, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + int ind = blockIdx.x*BLK_X + threadIdx.x; + int iby = blockIdx.y*BLK_Y; + /* check if full block-column */ + bool full = (iby + BLK_Y <= n); + /* do only rows inside matrix */ + if ( ind < m ) { + dA += ind + iby*ldda; + dB += ind + iby*lddb; + if ( full ) { + // full block-column + #pragma unroll + for( int j=0; j < BLK_Y; ++j ) { + dB[j*lddb] = dA[j*ldda]; + } + } + else { + // partial block-column + for( int j=0; j < BLK_Y && iby+j < n; ++j ) { + dB[j*lddb] = dA[j*ldda]; + } + } + } +} + + +/******************************************************************************/ +/* + Similar to clacpy_full, but updates only the diagonal and below. + Blocks that are fully above the diagonal exit immediately. + + Code similar to claset, clacpy, zlat2c, clat2z. +*/ +static __device__ +void clacpy_sym_in_lower_device( + int m, int n, magma_int_t *rows, magma_int_t *perm, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + int ind = blockIdx.x*BLK_X + threadIdx.x; // row + int iby = blockIdx.y*BLK_Y; // col + + /* check if full block-column && (below diag) */ + bool full = (iby + BLK_Y <= n); + for (int jj=0; jj < n; jj++) { + perm[rows[2*jj+1]] = rows[2*jj]; + } + /* do only rows inside matrix, and blocks not above diag */ + if ( ind < m ) { + if ( full ) { + // full block-column, off-diagonal block + //#pragma unroll + for( int jj=0; jj < BLK_Y; ++jj ) + { + int j = rows[2*(iby+jj)]; + if (perm[ind] <= j) + dB[ind + (iby+jj)*lddb] = MAGMA_C_CONJ( dA[j + perm[ind]*ldda] ); + else + dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda]; + } + } + else { + // either partial block-column or diagonal block + for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) + { + int j = rows[2*(iby+jj)]; + if (perm[ind] <= j) + dB[ind + (iby+jj)*lddb] = MAGMA_C_CONJ( dA[j + perm[ind]*ldda] ); + else + dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda]; + } + } + } +} + + +/* + Similar to clacpy_full, but updates only the diagonal and above. + Blocks that are fully below the diagonal exit immediately. + + Code similar to claset, clacpy, zlat2c, clat2z. +*/ +static __device__ +void clacpy_sym_in_upper_device( + int m, int n, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + int ind = blockIdx.x*BLK_X + threadIdx.x; + int iby = blockIdx.y*BLK_Y; + /* check if full block-column && (above diag) */ + bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); + /* do only rows inside matrix, and blocks not below diag */ + if ( ind < m && ind < iby + BLK_Y ) { + dA += ind + iby*ldda; + dB += ind + iby*lddb; + if ( full ) { + // full block-column, off-diagonal block + #pragma unroll + for( int j=0; j < BLK_Y; ++j ) { + dB[j*lddb] = dA[j*ldda]; + } + } + else { + // either partial block-column or diagonal block + for( int j=0; j < BLK_Y && iby+j < n; ++j ) { + if ( ind <= iby+j ) { + dB[j*lddb] = dA[j*ldda]; + } + } + } + } +} + + +/******************************************************************************/ +/* + kernel wrappers to call the device functions. 
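+
+    Added note: the __device__ helpers above cannot be launched from the host,
+    so each gets a thin __global__ wrapper below that simply forwards its
+    arguments to the corresponding *_device routine.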
+*/ +__global__ +void clacpy_sym_in_full_kernel( + int m, int n, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + clacpy_sym_in_full_device(m, n, dA, ldda, dB, lddb); +} + +__global__ +void clacpy_sym_in_lower_kernel( + int m, int n, magma_int_t *rows, magma_int_t *perm, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + clacpy_sym_in_lower_device(m, n, rows, perm, dA, ldda, dB, lddb); +} + +__global__ +void clacpy_sym_in_upper_kernel( + int m, int n, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + clacpy_sym_in_upper_device(m, n, dA, ldda, dB, lddb); +} + + +/***************************************************************************//** + Purpose + ------- + CLACPY_SYM_IN copies all or part of a two-dimensional matrix dA to another + matrix dB. + + This is the same as CLACPY, but adds queue argument. + + Arguments + --------- + @param[in] + uplo magma_uplo_t + Specifies the part of the matrix dA to be copied to dB. + - = MagmaUpper: Upper triangular part + - = MagmaLower: Lower triangular part + - = MagmaFull: All of the matrix dA + + @param[in] + m INTEGER + The number of rows of the matrix dA. M >= 0. + + @param[in] + n INTEGER + The number of rows that are swapped. N >= 0. + + @param[in] + rows INTEGER array, on GPU, dimension (2*n) + On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th + rows are swapped. + + @param[in,out] + perm INTEGER array, on GPU, dimension (m) + On entry, it stores the identity permutation array. + On exit, it is updated with the new pivots given by rows such that + i-th row will be the original perm[i]-th row after the pivots are applied. + + @param[in] + dA COMPLEX array, dimension (LDDA,N) + The M-by-N matrix dA. + If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; + if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. + + @param[in] + ldda INTEGER + The leading dimension of the array dA. LDDA >= max(1,M). + + @param[out] + dB COMPLEX array, dimension (LDDB,N) + On exit, dB = stores the columns after the pivots are applied. + + @param[in] + lddb INTEGER + The leading dimension of the array dB. LDDB >= max(1,M). + + @param[in] + queue magma_queue_t + Queue to execute in. 
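+
+    Illustrative call (device buffers d_rows, d_perm, dA, dB and the queue
+    are assumed to be set up elsewhere):
+        magmablas_clacpy_sym_in( MagmaLower, m, n, d_rows, d_perm,
+                                 dA, ldda, dB, lddb, queue );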
+ + @ingroup magma_lacpy +*******************************************************************************/ +extern "C" void +magmablas_clacpy_sym_in( + magma_uplo_t uplo, magma_int_t m, magma_int_t n, + magma_int_t *rows, magma_int_t *perm, + magmaFloatComplex_const_ptr dA, magma_int_t ldda, + magmaFloatComplex_ptr dB, magma_int_t lddb, + magma_queue_t queue ) +{ + magma_int_t info = 0; + if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) + info = -1; + else if ( m < 0 ) + info = -2; + else if ( n < 0 ) + info = -3; + else if ( ldda < max(1,m)) + info = -5; + else if ( lddb < max(1,m)) + info = -7; + + if ( info != 0 ) { + magma_xerbla( __func__, -(info) ); + return; //info; + } + + if ( m == 0 || n == 0 ) { + return; + } + + dim3 threads( BLK_X, 1 ); + dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) ); + + if ( uplo == MagmaLower ) { + clacpy_sym_in_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, rows, perm, dA, ldda, dB, lddb ); + } + else if ( uplo == MagmaUpper ) { + clacpy_sym_in_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb ); + } + else { + clacpy_sym_in_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb ); + } +} diff --git a/cuda_code/clacpy_sym_in_1.cu b/cuda_code/clacpy_sym_in_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..452a7d7dfdc793f72109622837a12db9b0570db2 --- /dev/null +++ b/cuda_code/clacpy_sym_in_1.cu @@ -0,0 +1,281 @@ +/* + -- MAGMA (version 2.5.4) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date October 2020 + + @author Mark Gates + @author Azzam Haidar + @author Ichitaro Yamazaki + + @generated from magmablas/zlacpy_sym_in.cu, normal z -> c, Thu Oct 8 23:05:32 2020 + +*/ +#include "magma_internal.h" + +#define BLK_X 64 +#define BLK_Y 32 + +/******************************************************************************/ +/* + Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. + Each block has BLK_X threads. + Each thread loops across one row, updating BLK_Y entries. + + Code similar to claset, clacpy, clag2z, clag2z, cgeadd. +*/ +static __device__ +void clacpy_sym_in_full_device( + int m, int n, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + int ind = blockIdx.x*BLK_X + threadIdx.x; + int iby = blockIdx.y*BLK_Y; + /* check if full block-column */ + bool full = (iby + BLK_Y <= n); + /* do only rows inside matrix */ + if ( ind < m ) { + dA += ind + iby*ldda; + dB += ind + iby*lddb; + if ( full ) { + // full block-column + #pragma unroll + for( int j=0; j < BLK_Y; ++j ) { + dB[j*lddb] = dA[j*ldda]; + } + } + else { + // partial block-column + for( int j=0; j < BLK_Y && iby+j < n; ++j ) { + dB[j*lddb] = dA[j*ldda]; + } + } + } +} + + +/******************************************************************************/ +/* + Similar to clacpy_full, but updates only the diagonal and below. + Blocks that are fully above the diagonal exit immediately. + + Code similar to claset, clacpy, zlat2c, clat2z. 
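+
+    Note: this variant first rebuilds the permutation array perm from the
+    row-swap list rows, then fills dB column by column, reading either
+    dA(j, perm[ind]) conjugated or dA(perm[ind], j) depending on whether
+    perm[ind] <= j, so each access stays within one triangle of the
+    symmetric input.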
+*/ +static __device__ +void clacpy_sym_in_lower_device( + int m, int n, magma_int_t *rows, magma_int_t *perm, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + int ind = blockIdx.x*BLK_X + threadIdx.x; // row + int iby = blockIdx.y*BLK_Y; // col + + /* check if full block-column && (below diag) */ + bool full = (iby + BLK_Y <= n); + for (int jj=0; jj < n; jj++) { + perm[rows[2*jj+1]] = rows[2*jj]; + } + /* do only rows inside matrix, and blocks not above diag */ + if ( ind < m ) { + if ( full ) { + // full block-column, off-diagonal block + //#pragma unroll + for( int jj=0; jj < BLK_Y; ++jj ) + { + int j = rows[2*(iby+jj)]; + if (perm[ind] <= j) + dB[ind + (iby+jj)*lddb] = MAGMA_C_CONJ( dA[j + perm[ind]*ldda] ); + else + dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda]; + } + } + else { + // either partial block-column or diagonal block + for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) + { + int j = rows[2*(iby+jj)]; + if (perm[ind] <= j) + dB[ind + (iby+jj)*lddb] = MAGMA_C_CONJ( dA[j + perm[ind]*ldda] ); + else + dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda]; + } + } + } +} + + +/* + Similar to clacpy_full, but updates only the diagonal and above. + Blocks that are fully below the diagonal exit immediately. + + Code similar to claset, clacpy, zlat2c, clat2z. +*/ +static __device__ +void clacpy_sym_in_upper_device( + int m, int n, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + int ind = blockIdx.x*BLK_X + threadIdx.x; + int iby = blockIdx.y*BLK_Y; + /* check if full block-column && (above diag) */ + bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); + /* do only rows inside matrix, and blocks not below diag */ + if ( ind < m && ind < iby + BLK_Y ) { + dA += ind + iby*ldda; + dB += ind + iby*lddb; + if ( full ) { + // full block-column, off-diagonal block + #pragma unroll + for( int j=0; j < BLK_Y; ++j ) { + dB[j*lddb] = dA[j*ldda]; + } + } + else { + // either partial block-column or diagonal block + for( int j=0; j < BLK_Y && iby+j < n; ++j ) { + if ( ind <= iby+j ) { + dB[j*lddb] = dA[j*ldda]; + } + } + } + } +} + + +/******************************************************************************/ +/* + kernel wrappers to call the device functions. +*/ +__global__ +void clacpy_sym_in_full_kernel( + int m, int n, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + clacpy_sym_in_full_device(m, n, dA, ldda, dB, lddb); +} + +__global__ +void clacpy_sym_in_lower_kernel( + int m, int n, magma_int_t *rows, magma_int_t *perm, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + clacpy_sym_in_lower_device(m, n, rows, perm, dA, ldda, dB, lddb); +} + +__global__ +void clacpy_sym_in_upper_kernel( + int m, int n, + const magmaFloatComplex *dA, int ldda, + magmaFloatComplex *dB, int lddb ) +{ + clacpy_sym_in_upper_device(m, n, dA, ldda, dB, lddb); +} + + +/***************************************************************************//** + Purpose + ------- + CLACPY_SYM_IN copies all or part of a two-dimensional matrix dA to another + matrix dB. + + This is the same as CLACPY, but adds queue argument. + + Arguments + --------- + @param[in] + uplo magma_uplo_t + Specifies the part of the matrix dA to be copied to dB. + - = MagmaUpper: Upper triangular part + - = MagmaLower: Lower triangular part + - = MagmaFull: All of the matrix dA + + @param[in] + m INTEGER + The number of rows of the matrix dA. M >= 0. + + @param[in] + n INTEGER + The number of rows that are swapped. 
N >= 0. + + @param[in] + rows INTEGER array, on GPU, dimension (2*n) + On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th + rows are swapped. + + @param[in,out] + perm INTEGER array, on GPU, dimension (m) + On entry, it stores the identity permutation array. + On exit, it is updated with the new pivots given by rows such that + i-th row will be the original perm[i]-th row after the pivots are applied. + + @param[in] + dA COMPLEX array, dimension (LDDA,N) + The M-by-N matrix dA. + If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; + if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. + + @param[in] + ldda INTEGER + The leading dimension of the array dA. LDDA >= max(1,M). + + @param[out] + dB COMPLEX array, dimension (LDDB,N) + On exit, dB = stores the columns after the pivots are applied. + + @param[in] + lddb INTEGER + The leading dimension of the array dB. LDDB >= max(1,M). + + @param[in] + queue magma_queue_t + Queue to execute in. + + @ingroup magma_lacpy +*******************************************************************************/ +extern "C" void +magmablas_clacpy_sym_in( + magma_uplo_t uplo, magma_int_t m, magma_int_t n, + magma_int_t *rows, magma_int_t *perm, + magmaFloatComplex_const_ptr dA, magma_int_t ldda, + magmaFloatComplex_ptr dB, magma_int_t lddb, + magma_queue_t queue ) +{ + magma_int_t info = 0; + if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) + info = -1; + else if ( m < 0 ) + info = -2; + else if ( n < 0 ) + info = -3; + else if ( ldda < max(1,m)) + info = -5; + else if ( lddb < max(1,m)) + info = -7; + + if ( info != 0 ) { + magma_xerbla( __func__, -(info) ); + return; //info; + } + + if ( m == 0 || n == 0 ) { + return; + } + + dim3 threads( BLK_X, 1 ); + dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) ); + + if ( uplo == MagmaLower ) { + clacpy_sym_in_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, rows, perm, dA, ldda, dB, lddb ); + } + else if ( uplo == MagmaUpper ) { + clacpy_sym_in_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb ); + } + else { + clacpy_sym_in_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb ); + } +} diff --git a/cuda_code/classical_amg.cu b/cuda_code/classical_amg.cu new file mode 100644 index 0000000000000000000000000000000000000000..9c6c919c60f283d1df9820ea306854d72a0bc107 --- /dev/null +++ b/cuda_code/classical_amg.cu @@ -0,0 +1,489 @@ +#include + +#include + +// REMOVE THIS +#include +#include +#include + + +// TAKE THESE +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#define CHECK_NAN(a) \ +{ \ + cusp::array1d h(a); \ + for(size_t i = 0; i < h.size(); i++) \ + if (isnan(h[i])) \ + printf("[%d] nan at index %d\n", __LINE__, (int) i); \ +} + +template +struct filter_strong_connections +{ + template + __host__ __device__ + IndexType operator()(const Tuple& t) + { + IndexType s_i = thrust::get<2>(t); + IndexType s_j = thrust::get<3>(t); + + if (!s_i && s_j) return 1; // F->C connection + if (!s_i && !s_j) return 0; // F->F connection + + IndexType i = thrust::get<0>(t); + IndexType j = thrust::get<1>(t); + + if (s_i && i == j) return 1; // C->C connection (self connection) + else return 0; + } +}; + +template +struct is_F_node : public thrust::unary_function +{ + __host__ __device__ + ValueType operator()(const IndexType& i) const + { + return (i) ? 
ValueType(0) : ValueType(1); + } +}; + +template +struct compute_weights +{ + template + __host__ __device__ + ValueType operator()(const Tuple& t, const ValueType& v) + { + if (thrust::get<0>(t)) // C node w_ij = 0 + return 1; + else // F node w_ij = |A_ij| / nu + return ((v < 0) ? -v : v) / thrust::get<1>(t); + } +}; + + +template +void classical_stength_of_connection(const cusp::coo_matrix& A, + const ValueType theta, + cusp::coo_matrix& C) +{ + // TODO implement with generalized spmv on device + cusp::coo_matrix A_copy(A); + cusp::array1d min_off_diagonal_copy(A.num_rows, 0); + for(IndexType n = 0; n < (IndexType) A_copy.num_entries; n++) + { + IndexType i = A_copy.row_indices[n]; + IndexType j = A_copy.column_indices[n]; + + if(i != j) + min_off_diagonal_copy[i] = std::min(min_off_diagonal_copy[i], A_copy.values[n]); + } + cusp::array1d stencil_copy(A.num_entries); + for(IndexType n = 0; n < (IndexType) A_copy.num_entries; n++) + { + IndexType i = A_copy.row_indices[n]; + IndexType j = A_copy.column_indices[n]; + + if(i == j) + stencil_copy[n] = 1; + else + stencil_copy[n] = (min_off_diagonal_copy[i] * theta < A_copy.values[n]) ? 0 : 1; + } + + cusp::array1d stencil(stencil_copy); + IndexType NNZ = thrust::count(stencil.begin(), stencil.end(), IndexType(1)); + C.resize(A.num_rows, A.num_cols, NNZ); + + // TODO merge these copy_if() with a zip_iterator + thrust::copy_if(A.row_indices.begin(), A.row_indices.end(), + stencil.begin(), + C.row_indices.begin(), + thrust::identity()); + thrust::copy_if(A.column_indices.begin(), A.column_indices.end(), + stencil.begin(), + C.column_indices.begin(), + thrust::identity()); + thrust::copy_if(A.values.begin(), A.values.end(), + stencil.begin(), + C.values.begin(), + thrust::identity()); +} + +template +void direct_interpolation(const cusp::coo_matrix& A, + const cusp::coo_matrix& C, + const ArrayType& cf_splitting, + cusp::coo_matrix& P) +{ + assert(A.num_rows == A.num_cols); + assert(C.num_rows == A.num_rows); + assert(C.num_rows == A.num_cols); + assert(cf_splitting.size() == A.num_rows); + + // dimensions of P + const IndexType num_rows = A.num_rows; + const IndexType num_cols = thrust::count(cf_splitting.begin(), cf_splitting.end(), 1); + + // mark the strong edges that are retained in P (either F->C or C->C self loops) + cusp::array1d stencil(C.num_entries); + thrust::transform(thrust::make_zip_iterator( + thrust::make_tuple(C.row_indices.begin(), + C.column_indices.begin(), + thrust::make_permutation_iterator(cf_splitting.begin(), C.row_indices.begin()), + thrust::make_permutation_iterator(cf_splitting.begin(), C.column_indices.begin()))), + thrust::make_zip_iterator( + thrust::make_tuple(C.row_indices.begin(), + C.column_indices.begin(), + thrust::make_permutation_iterator(cf_splitting.begin(), C.row_indices.begin()), + thrust::make_permutation_iterator(cf_splitting.begin(), C.column_indices.begin()))) + C.num_entries, + stencil.begin(), + filter_strong_connections()); + + // number of entries in P (number of F->C connections plus the number of C nodes) + const IndexType num_entries = thrust::reduce(stencil.begin(), stencil.end()); + + // sum the weights of the F nodes within each row + cusp::array1d nu(A.num_rows); + { + // nu = A * [F0F0F0] + // scale C(i,j) by nu + cusp::array1d F_nodes(A.num_rows); // 1.0 for F nodes, 0.0 for C nodes + thrust::transform(cf_splitting.begin(), cf_splitting.end(), F_nodes.begin(), is_F_node()); + cusp::multiply(A, F_nodes, nu); + +// std::cout << "cf_splitting" << std::endl; +// 
cusp::print_matrix(cf_splitting); +// std::cout << "F_nodes" << std::endl; +// cusp::print_matrix(F_nodes); + } + + // allocate storage for P + { + cusp::coo_matrix temp(num_rows, num_cols, num_entries); + P.swap(temp); + } + + // compute entries of P + { + // enumerate the C nodes + cusp::array1d coarse_index_map(A.num_rows); + thrust::exclusive_scan(cf_splitting.begin(), cf_splitting.end(), coarse_index_map.begin()); + + // TODO merge these copy_if() with a zip_iterator + thrust::copy_if(C.row_indices.begin(), C.row_indices.end(), + stencil.begin(), + P.row_indices.begin(), + thrust::identity()); + thrust::copy_if(thrust::make_permutation_iterator(coarse_index_map.begin(), C.column_indices.begin()), + thrust::make_permutation_iterator(coarse_index_map.begin(), C.column_indices.end()), + stencil.begin(), + P.column_indices.begin(), + thrust::identity()); + thrust::copy_if(C.values.begin(), C.values.end(), + stencil.begin(), + P.values.begin(), + thrust::identity()); + + // CHECK_NAN(P.values); + + //cusp::print_matrix(P); + + thrust::transform(thrust::make_permutation_iterator(thrust::make_zip_iterator(thrust::make_tuple(cf_splitting.begin(), nu.begin())), P.row_indices.begin()), + thrust::make_permutation_iterator(thrust::make_zip_iterator(thrust::make_tuple(cf_splitting.begin(), nu.begin())), P.row_indices.end()), + P.values.begin(), + P.values.begin(), + compute_weights()); + } + +// CHECK_NAN(P.values); + //cusp::print_matrix(nu); +} + +template +void _TestDirectInterpolation(const cusp::array2d& A, + const cusp::array1d& S, + const cusp::array2d& expected) +{ + cusp::coo_matrix A_(A); + + cusp::coo_matrix P; + + direct_interpolation(A_, A_, S, P); + + cusp::array2d result(P); + + ASSERT_EQUAL_QUIET(result, expected); +} + +template +void TestDirectInterpolation(void) +{ +#ifdef _MSC_VER +// I have no idea why this fails +KNOWN_FAILURE; +#endif + + // One-dimensional Poisson problem + { + cusp::array2d A(5,5); + A(0,0) = 2; A(0,1) = -1; A(0,2) = 0; A(0,3) = 0; A(0,4) = 0; + A(1,0) = -1; A(1,1) = 2; A(1,2) = -1; A(1,3) = 0; A(1,4) = 0; + A(2,0) = 0; A(2,1) = -1; A(2,2) = 2; A(2,3) = -1; A(2,4) = 0; + A(3,0) = 0; A(3,1) = 0; A(3,2) = -1; A(3,3) = 2; A(3,4) = -1; + A(4,0) = 0; A(4,1) = 0; A(4,2) = 0; A(4,3) = -1; A(4,4) = 2; + + cusp::array1d S(5); + S[0] = 1; + S[1] = 0; + S[2] = 1; + S[3] = 0; + S[4] = 1; + + cusp::array2d P(5, 3); + P(0,0) = 1.0; P(0,1) = 0.0; P(0,2) = 0.0; + P(1,0) = 0.5; P(1,1) = 0.5; P(1,2) = 0.0; + P(2,0) = 0.0; P(2,1) = 1.0; P(2,2) = 0.0; + P(3,0) = 0.0; P(3,1) = 0.5; P(3,2) = 0.5; + P(4,0) = 0.0; P(4,1) = 0.0; P(4,2) = 1.0; + + _TestDirectInterpolation(A,S,P); + } + + // Two-dimensional Poisson problem + { + cusp::array2d A(6,6); + A(0,0) = 4; A(0,1) = -1; A(0,2) = 0; A(0,3) = -1; A(0,4) = 0; A(0,5) = 0; + A(1,0) = -1; A(1,1) = 4; A(1,2) = -1; A(1,3) = 0; A(1,4) = -1; A(1,5) = 0; + A(2,0) = 0; A(2,1) = -1; A(2,2) = 4; A(2,3) = 0; A(2,4) = 0; A(2,5) = -1; + A(3,0) = -1; A(3,1) = 0; A(3,2) = 0; A(3,3) = 4; A(3,4) = -1; A(3,5) = 0; + A(4,0) = 0; A(4,1) = -1; A(4,2) = 0; A(4,3) = -1; A(4,4) = 4; A(4,5) = -1; + A(5,0) = 0; A(5,1) = 0; A(5,2) = -1; A(5,3) = 0; A(5,4) = -1; A(5,5) = 4; + + cusp::array1d S(6); + S[0] = 1; + S[1] = 0; + S[2] = 1; + S[3] = 0; + S[4] = 1; + S[5] = 0; + + cusp::array2d P(6, 3); + P(0,0) = 1.00; P(0,1) = 0.00; P(0,2) = 0.00; + P(1,0) = 0.25; P(1,1) = 0.25; P(1,2) = 0.25; + P(2,0) = 0.00; P(2,1) = 1.00; P(2,2) = 0.00; + P(3,0) = 0.25; P(3,1) = 0.00; P(3,2) = 0.25; + P(4,0) = 0.00; P(4,1) = 0.00; P(4,2) = 1.00; + P(5,0) = 0.00; P(5,1) = 0.25; 
P(5,2) = 0.25; + + _TestDirectInterpolation(A,S,P); + } +} +DECLARE_HOST_DEVICE_UNITTEST(TestDirectInterpolation); + +template +class ruge_stuben_solver +{ + + struct level + { + cusp::coo_matrix R; // restriction operator + cusp::coo_matrix A; // matrix + cusp::coo_matrix P; // prolongation operator + cusp::array1d splitting; // C/F splitting + + cusp::relaxation::jacobi smoother; + + ValueType rho; // spectral radius + //cusp::array1d temp1; + //cusp::array1d temp2; + }; + + std::vector levels; + + cusp::detail::lu_solver LU; + + public: + + ruge_stuben_solver(const cusp::coo_matrix& A) + { + levels.reserve(20); // avoid reallocations which force matrix copies + + levels.push_back(level()); + levels.back().A = A; // copy + + extend_hierarchy(); + extend_hierarchy(); + //extend_hierarchy(); + + // TODO make lu_solver accept sparse input + cusp::array2d coarse_dense(levels.back().A); + LU = cusp::detail::lu_solver(coarse_dense); + + //for (int i = 0; i < levels.size(); i++) + // printf("level[%2d] %10d unknowns %10d nonzeros\n", i, levels[i].A.num_rows, levels[i].A.num_entries); + + //cusp::io::write_matrix_market_file(levels[0].A, "/home/nathan/Desktop/AMG/A0.mtx"); + //cusp::io::write_matrix_market_file(levels[1].A, "/home/nathan/Desktop/AMG/A1.mtx"); + //cusp::io::write_matrix_market_file(levels[2].A, "/home/nathan/Desktop/AMG/A2.mtx"); + //cusp::io::write_matrix_market_file(levels[0].P, "/home/nathan/Desktop/AMG/P0.mtx"); + //cusp::io::write_matrix_market_file(levels[1].P, "/home/nathan/Desktop/AMG/P1.mtx"); + //cusp::io::write_matrix_market_file(levels[0].R, "/home/nathan/Desktop/AMG/R0.mtx"); + //cusp::io::write_matrix_market_file(levels[1].R, "/home/nathan/Desktop/AMG/R1.mtx"); + } + + void extend_hierarchy(void) + { + const cusp::coo_matrix& A = levels.back().A; + + // compute C/F splitting + cusp::array1d splitting(A.num_rows); + cusp::graph::maximal_independent_set(A, splitting); + + // // TODO XXX XXX XXX remove + // for(int i = 0; i < splitting.size(); i++) + // splitting[i] = (i + 1) % 2; + + // compute stength of connection matrix + cusp::coo_matrix C; + classical_stength_of_connection(A, 0.25f, C); + + //std::cout << "C has " << 100 * double(C.num_entries) / A.num_entries << "% of A" << std::endl; + + // compute prolongation operator + cusp::coo_matrix P; + direct_interpolation(A, C, splitting, P); + + // compute restriction operator (transpose of prolongator) + cusp::coo_matrix R; + cusp::transpose(P,R); + + // construct Galerkin product R*A*P + cusp::coo_matrix RAP; + { + // TODO test speed of R * (A * P) vs. 
(R * A) * P + cusp::coo_matrix AP; + cusp::multiply(A, P, AP); + cusp::multiply(R, AP, RAP); + } + + // 4/3 * 1/rho is a good default, where rho is the spectral radius of D^-1(A) + levels.back().smoother = cusp::relaxation::jacobi(A, 0.66f); // TODO estimate rho + levels.back().splitting.swap(splitting); + levels.back().R.swap(R); + levels.back().P.swap(P); + + levels.push_back(level()); + levels.back().A.swap(RAP); + //levels.back().temp1.resize(RAP.num_rows); + //levels.back().temp2.resize(RAP.num_rows); + } + + void solve(const cusp::array1d& b, + cusp::array1d& x) + { + // TODO check sizes + cusp::coo_matrix & A = levels[0].A; + + cusp::array1d residual(A.num_rows); // TODO eliminate temporaries + + // compute initial residual norm + cusp::multiply(A,x,residual); + cusp::blas::axpby(b, residual, residual, 1.0f, -1.0f); + float last_norm = cusp::blas::nrm2(residual); + + //printf("%10.8f\n", last_norm); + + // perform 25 V-cycles + for (size_t i = 0; i < 25; i++) + { + _solve(b, x, 0); + + // compute residual norm + cusp::multiply(A,x,residual); + cusp::blas::axpby(b, residual, residual, 1.0f, -1.0f); + float norm = cusp::blas::nrm2(residual); + + //printf("%10.8f %6.4f\n", norm, norm/last_norm); + + last_norm = norm; + } + } + + void _solve(const cusp::array1d& b, + cusp::array1d& x, + const size_t i) + { + if (i + 1 == levels.size()) + { + // coarse grid solve + // TODO streamline + cusp::array1d temp_b(b); + cusp::array1d temp_x(x.size()); + LU(temp_b, temp_x); + x = temp_x; + } + else + { + cusp::coo_matrix & R = levels[i].R; + cusp::coo_matrix & A = levels[i].A; + cusp::coo_matrix & P = levels[i].P; + + cusp::array1d residual(P.num_rows); // TODO eliminate temporaries + cusp::array1d coarse_b(P.num_cols); + cusp::array1d coarse_x(P.num_cols); + + // presmooth + levels[i].smoother(A,b,x); + + // compute residual <- b - A*x + cusp::multiply(A, x, residual); + cusp::blas::axpby(b, residual, residual, 1.0f, -1.0f); + + // restrict to coarse grid + cusp::multiply(R, residual, coarse_b); + + // compute coarse grid solution + _solve(coarse_b, coarse_x, i + 1); + + // apply coarse grid correction + cusp::multiply(P, coarse_x, residual); + cusp::blas::axpy(residual, x, 1.0f); + + // postsmooth + levels[i].smoother(A,b,x); + } + } +}; + + +void TestRugeStubenSolver(void) +{ + // Create 2D Poisson problem + cusp::coo_matrix A; +// cusp::gallery::poisson5pt(A, 21, 21); + cusp::gallery::poisson5pt(A, 50, 50); + + // setup linear system + cusp::array1d b(A.num_rows,0); + cusp::array1d x = unittest::random_samples(A.num_rows); + + ruge_stuben_solver rs(A); + rs.solve(b,x); +} +DECLARE_UNITTEST(TestRugeStubenSolver); + diff --git a/cuda_code/classifyMain_1.cu b/cuda_code/classifyMain_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..1dcad2ae22d03dc18aa93eeed2c29f9e302b7009 --- /dev/null +++ b/cuda_code/classifyMain_1.cu @@ -0,0 +1,113 @@ +/* Includes, system */ +#include +#include +#include +#include +/* Includes, cuda */ +#include "cublas.h" + +//#include "cutil.h" +#include +#define CUDA_SAFE_CALL checkCudaErrors + + +#include "cuda.h" + +/* Includes, project */ +#include "../common/framework.h" +#include "svmClassify.h" +#include "../common/svmIO.h" + + +void printHelp() { + printf("Usage: svmClassify modelFile dataFile [outputFile]\n"); +} + +/** + * This main function performs SVM classification from a file. + * It expects that the first command line argument is a model file (in the same format as LibSVM) and the second argument is a data file. 
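+ * A typical invocation (file names here are only illustrative) is:
+ *     ./svmClassify heart_scale.mdl heart_scale.svm predictions.dat
+ * When the third argument is omitted, the output name is derived from the
+ * data file by replacing its extension with ".dat", as done below.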
+ */ +int main( const int argc, const char** argv) { + + + int nSV; //total number of support vectors + int total_nPoints; //total number of test points + int nDimension; //data dimension + float* alpha; //alpha array + float* supportVectors; //support vector data + int dataDimension; //dimension in data (must be equal to nDimension) + float* labels; //labels for the test data (for measuring accuracy etc.) + float* data; //test data + + struct Kernel_params kp; + + if (argc < 3) { + printHelp(); + return(0); + } + + float class1Label, class2Label; + int success = readModel(argv[1], &alpha, &supportVectors, &nSV, &nDimension, &kp, &class1Label, &class2Label); + if (success == 0) { + printf("Invalid Model\n"); + exit(1); + } + + + success = readSvm(argv[2], &data, &labels, &total_nPoints, &dataDimension); + if (success == 0) { + printf("Invalid Data\n"); + exit(2); + } + if (dataDimension != nDimension) { + printf("This data isn't compatible with this model\n"); + exit(3); + } + char* outputFilename; + if (argc == 4) { + outputFilename = (char*)malloc(sizeof(char)*(strlen(argv[3]))); + strcpy(outputFilename, argv[3]); + + } else { + int inputNameLength = strlen(argv[2]); + outputFilename = (char*)malloc(sizeof(char)*(inputNameLength + 5)); + strncpy(outputFilename, argv[2], inputNameLength + 4); + char* period = strrchr(outputFilename, '.'); + if (period == NULL) { + period = outputFilename + inputNameLength; + } + strncpy(period, ".dat\0", 5); + } + + printf("Model found: %d support vectors\n", nSV); + printf("Data found: %d points\n", total_nPoints); + printf("Problem is %d dimensional\n", nDimension); + printf("Output file: %s\n", outputFilename); + struct timeval start; + gettimeofday(&start,0); + + + float * result; + performClassification(data, total_nPoints, supportVectors, nSV, nDimension, alpha, kp, &result); + struct timeval finish; + gettimeofday(&finish, 0); + float classificationTime = (float)(finish.tv_sec - start.tv_sec) + ((float)(finish.tv_usec - start.tv_usec)) * 1e-6; + + printf("Classification time : %f seconds\n", classificationTime); + int confusionMatrix[] = {0, 0, 0, 0}; + for (int i = 0; i < total_nPoints; i++) { + if ((labels[i] == class2Label) && (result[i] < 0)) { + confusionMatrix[0]++; + } else if ((labels[i] == class2Label) && (result[i] >= 0)) { + confusionMatrix[1]++; + } else if ((labels[i] == class1Label) && (result[i] < 0)) { + confusionMatrix[2]++; + } else if ((labels[i] == class1Label) && (result[i] >= 0)) { + confusionMatrix[3]++; + } + } + printf("Accuracy: %f (%d / %d) \n", (float)(confusionMatrix[0] + confusionMatrix[3])*100.0/((float)total_nPoints),confusionMatrix[0]+confusionMatrix[3], total_nPoints); + printClassification(outputFilename, result, total_nPoints); + +} + diff --git a/cuda_code/clat2z_1.cu b/cuda_code/clat2z_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..99a0cc43109357ec4f1896620acec3b70f811efa --- /dev/null +++ b/cuda_code/clat2z_1.cu @@ -0,0 +1,187 @@ +/* + -- MAGMA (version 2.5.4) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date October 2020 + + @precisions mixed zc -> ds + @author Mark Gates +*/ +#include "magma_internal.h" + +#define BLK_X 64 +#define BLK_Y 32 + + +/* + Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. + Each block has BLK_X threads. + Each thread loops across one row, updating BLK_Y entries. + Updates only the diagonal and below. + Blocks that are fully above the diagonal exit immediately. 
+ + Code similar to zlag2c and zlaset. +*/ +__global__ +void clat2z_lower( + int n, + const magmaFloatComplex *SA, int ldsa, + magmaDoubleComplex *A, int lda ) +{ + int ind = blockIdx.x*BLK_X + threadIdx.x; + int iby = blockIdx.y*BLK_Y; + /* check if full block-column && (below diag) */ + bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); + /* do only rows inside matrix, and blocks not above diag */ + if ( ind < n && ind + BLK_X > iby ) { + A += ind + iby*lda; + SA += ind + iby*ldsa; + if ( full ) { + // full block-column, off-diagonal block + #pragma unroll + for( int j=0; j < BLK_Y; ++j ) { + A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), + MAGMA_C_IMAG( SA[j*ldsa] ) ); + } + } + else { + // either partial block-column or diagonal block + for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { + A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), + MAGMA_C_IMAG( SA[j*ldsa] ) ); + } + } + } +} + + +/* + Similar to clat2z_full, but updates only the diagonal and above. + Blocks that are fully below the diagonal exit immediately. + + Code similar to zlag2c and zlaset. +*/ +__global__ +void clat2z_upper( + int n, + const magmaFloatComplex *SA, int ldsa, + magmaDoubleComplex *A, int lda ) +{ + int ind = blockIdx.x*BLK_X + threadIdx.x; + int iby = blockIdx.y*BLK_Y; + /* check if full block-column && (above diag) */ + bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); + /* do only rows inside matrix, and blocks not below diag */ + if ( ind < n && ind < iby + BLK_Y ) { + A += ind + iby*lda; + SA += ind + iby*ldsa; + if ( full ) { + // full block-column, off-diagonal block + #pragma unroll + for( int j=0; j < BLK_Y; ++j ) { + A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), + MAGMA_C_IMAG( SA[j*ldsa] ) ); + } + } + else { + // either partial block-column or diagonal block + for( int j=0; j < BLK_Y && iby+j < n; ++j ) { + if ( ind <= iby+j ) { + A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), + MAGMA_C_IMAG( SA[j*ldsa] ) ); + } + } + } + } +} + + +/***************************************************************************//** + Purpose + ------- + CLAT2Z converts a single-complex matrix, SA, + to a double-complex matrix, A. + + Note that while it is possible to overflow while converting + from double to single, it is not possible to overflow when + converting from single to double. + + Arguments + --------- + @param[in] + uplo magma_uplo_t + Specifies the part of the matrix A to be converted. + - = MagmaUpper: Upper triangular part + - = MagmaLower: Lower triangular part + + @param[in] + n INTEGER + The number of columns of the matrix A. n >= 0. + + @param[in] + A COMPLEX_16 array, dimension (LDA,n) + On entry, the n-by-n coefficient matrix A. + + @param[in] + lda INTEGER + The leading dimension of the array A. LDA >= max(1,n). + + @param[out] + SA COMPLEX array, dimension (LDSA,n) + On exit, if INFO=0, the n-by-n coefficient matrix SA; + if INFO > 0, the content of SA is unspecified. + + @param[in] + ldsa INTEGER + The leading dimension of the array SA. LDSA >= max(1,n). + + @param[out] + info INTEGER + - = 0: successful exit. + - < 0: if INFO = -i, the i-th argument had an illegal value + + @param[in] + queue magma_queue_t + Queue to execute in. 
+ + @ingroup magma_lat2 +*******************************************************************************/ +extern "C" void +magmablas_clat2z( + magma_uplo_t uplo, magma_int_t n, + magmaFloatComplex_const_ptr SA, magma_int_t ldsa, + magmaDoubleComplex_ptr A, magma_int_t lda, + magma_queue_t queue, + magma_int_t *info ) +{ + *info = 0; + if ( uplo != MagmaLower && uplo != MagmaUpper ) + *info = -1; + else if ( n < 0 ) + *info = -2; + else if ( lda < max(1,n) ) + *info = -4; + else if ( ldsa < max(1,n) ) + *info = -6; + + if (*info != 0) { + magma_xerbla( __func__, -(*info) ); + return; //*info; + } + + /* quick return */ + if ( n == 0 ) { + return; + } + + dim3 threads( BLK_X, 1 ); + dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) ); + + if (uplo == MagmaLower) { + clat2z_lower<<< grid, threads, 0, queue->cuda_stream() >>> (n, SA, ldsa, A, lda); + } + else if (uplo == MagmaUpper) { + clat2z_upper<<< grid, threads, 0, queue->cuda_stream() >>> (n, SA, ldsa, A, lda); + } +} diff --git a/cuda_code/client-old.cu b/cuda_code/client-old.cu new file mode 100644 index 0000000000000000000000000000000000000000..d122c32cefe876b497965e3f5d5f8cf1766f62d8 --- /dev/null +++ b/cuda_code/client-old.cu @@ -0,0 +1,321 @@ +#include "client.hpp" +#include "include/veloc.h" +#include "common/file_util.hpp" +#include +#include +#include +#include +#include +#include +#include +#include + +#define __DEBUG +#include "common/debug.hpp" + +static bool validate_name(const char *name) { + std::regex e("[a-zA-Z0-9_\\.]+"); + return std::regex_match(name, e); +} + +static void launch_backend(const char *cfg_file) { + char *path = getenv("VELOC_BIN"); + std::string command; + if (path != NULL) + command = std::string(path) + "/"; + command += "veloc-backend " + std::string(cfg_file) + " --disable-ec > /dev/null"; + if (system(command.c_str()) != 0) + FATAL("cannot launch active backend for async mode, error: " << strerror(errno)); +} + +veloc_client_t::veloc_client_t(unsigned int id, const char *cfg_file) : + cfg(cfg_file), collective(false), rank(id) { + if (cfg.is_sync()) { + modules = new module_manager_t(); + modules->add_default_modules(cfg); + } else { + launch_backend(cfg_file); + queue = new client_t(rank); + } + ec_active = run_blocking(command_t(rank, command_t::INIT, 0, "")) > 0; + DBG("VELOC initialized"); +} + +veloc_client_t::veloc_client_t(MPI_Comm c, const char *cfg_file) : + cfg(cfg_file), comm(c), collective(true) { + MPI_Comm_rank(comm, &rank); + if (cfg.is_sync()) { + modules = new module_manager_t(); + modules->add_default_modules(cfg, comm, true); + } else { + launch_backend(cfg_file); + queue = new client_t(rank); + } + ec_active = run_blocking(command_t(rank, command_t::INIT, 0, "")) > 0; + DBG("VELOC initialized"); +} + +veloc_client_t::~veloc_client_t() { + delete queue; + delete modules; + DBG("VELOC finalized"); +} + +bool veloc_client_t::mem_protect(int id, void *ptr, size_t count, size_t base_size, unsigned int flags=NULL, release=NULL ) { + // mem_regions[id] = std::make_pair(ptr, base_size * count); + mem_regions[id] = std::make_tuple(ptr, base_size * count, flags, release); + return true; +} + +bool veloc_client_t::mem_unprotect(int id) { + return mem_regions.erase(id) > 0; +} + +bool veloc_client_t::checkpoint_wait() { + if (cfg.is_sync()) + return true; + if (checkpoint_in_progress) { + ERROR("need to finalize local checkpoint first by calling checkpoint_end()"); + return false; + } + return queue->wait_completion() == VELOC_SUCCESS; +} + +bool 
veloc_client_t::checkpoint_begin(const char *name, int version) { + TIMER_START(io_timer_ckpt_begin); + if (checkpoint_in_progress) { + ERROR("nested checkpoints not yet supported"); + return false; + } + if (!validate_name(name) || version < 0) { + ERROR("checkpoint name and/or version incorrect: name can only include [a-zA-Z0-9_] characters, version needs to be non-negative integer"); + return false; + } + + DBG("called checkpoint_begin"); + current_ckpt = command_t(rank, command_t::CHECKPOINT, version, name); + checkpoint_in_progress = true; + TIMER_STOP(io_timer_ckpt_begin, " --- CKPT BEGIN TIME --- "); + return true; +} + +bool veloc_client_t::checkpoint_mem(int mode, std::set &ids) { + TIMER_START(io_timer_ckpt_mem); + DBG("Starting checkpoint_mem"); + if (!checkpoint_in_progress) { + ERROR("must call checkpoint_begin() first"); + return false; + } + regions_t ckpt_regions; + if (mode == VELOC_CKPT_ALL) + ckpt_regions = mem_regions; + else if (mode == VELOC_CKPT_SOME) { + for (auto it = ids.begin(); it != ids.end(); it++) { + auto found = mem_regions.find(*it); + if (found != mem_regions.end()) + ckpt_regions.insert(*found); + } + } else if (mode == VELOC_CKPT_REST) { + ckpt_regions = mem_regions; + for (auto it = ids.begin(); it != ids.end(); it++) + ckpt_regions.erase(*it); + } + if (ckpt_regions.size() == 0) { + ERROR("empty selection of memory regions to checkpoint, please check protection and/or selective checkpointing primitives"); + return false; + } + + std::ofstream f; + f.exceptions(std::ofstream::failbit | std::ofstream::badbit); + try { + f.open(current_ckpt.filename(cfg.get("scratch")), std::ofstream::out | std::ofstream::binary | std::ofstream::trunc); + size_t regions_size = ckpt_regions.size(); + f.write((char *)®ions_size, sizeof(size_t)); + cudaPointerAttributes attributes; + cudaEvent_t start, stop; + cudaEventCreate(&start); + cudaEventCreate(&stop); + float milliseconds; + std::vector temp_ptrs; + for (auto &e : ckpt_regions) { + f.write((char *)&(e.first), sizeof(int)); + f.write((char *)&(e.second.second), sizeof(size_t)); + cudaPointerGetAttributes (&attributes, e.second.first); + // attributes.type can be one of cudaMemoryTypeUnregistered (unpinned), + // cudaMemoryTypeHost (pinned), cudaMemoryTypeDevice (on-GPU), or cudaMemoryTypeManaged (managed) + if(attributes.type==cudaMemoryTypeDevice || attributes.type==cudaMemoryTypeManaged) { + // Copy from device to host first, and then ckpt. 
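+                // Staging path: allocate a pinned host buffer with
+                // cudaMallocHost, copy the region device-to-host, and
+                // repoint the checkpoint entry at the host copy; the copy
+                // time is measured with the CUDA events created above, and
+                // the pinned buffers are freed after the file write below.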
+ char *temp; + char *gpu_var = (char *)e.second.first; + cudaEventRecord(start); + cudaMallocHost((void**)&temp, e.second.second); + cudaMemcpy(temp, gpu_var, e.second.second, cudaMemcpyDeviceToHost); + temp_ptrs.push_back(temp); + cudaEventRecord(stop); + cudaEventSynchronize(stop); + cudaEventElapsedTime(&milliseconds, start, stop); + DBG("[CUDA TIME] Transferring " << e.first << " took " << milliseconds << " ms" ); + ckpt_regions[e.first] = std::make_pair(temp, e.second.second); + } + } + for (auto &e : ckpt_regions) + f.write((char *)e.second.first, e.second.second); + for (char *t : temp_ptrs) + cudaFreeHost(t); + TIMER_STOP(io_timer_ckpt_mem, " --- CKPT MEM TIME --- "); + } catch (std::ofstream::failure &f) { + ERROR("cannot write to checkpoint file: " << current_ckpt << ", reason: " << f.what()); + return false; + } + return true; +} + +bool veloc_client_t::checkpoint_end(bool /*success*/) { + TIMER_START(io_timer_ckpt_end); + checkpoint_in_progress = false; + if (cfg.is_sync()) { + TIMER_STOP(io_timer_ckpt_end, " --- CKPT END TIME --- "); + return modules->notify_command(current_ckpt) == VELOC_SUCCESS; + } + else { + queue->enqueue(current_ckpt); + TIMER_STOP(io_timer_ckpt_end, " --- CKPT END TIME --- "); + return true; + } +} + +int veloc_client_t::run_blocking(const command_t &cmd) { + if (cfg.is_sync()) + return modules->notify_command(cmd); + else { + queue->enqueue(cmd); + return queue->wait_completion(); + } +} + +int veloc_client_t::restart_test(const char *name, int needed_version) { + if (!validate_name(name) || needed_version < 0) { + ERROR("checkpoint name and/or version incorrect: name can only include [a-zA-Z0-9_] characters, version needs to be non-negative integer"); + return VELOC_FAILURE; + } + int version = run_blocking(command_t(rank, command_t::TEST, needed_version, name)); + DBG(name << ": latest version = " << version); + if (collective) { + int min_version; + MPI_Allreduce(&version, &min_version, 1, MPI_INT, MPI_MIN, comm); + return min_version; + } else + return version; +} + +std::string veloc_client_t::route_file(const char *original) { + char abs_path[PATH_MAX + 1]; + if (original[0] != '/' && getcwd(abs_path, PATH_MAX) != NULL) + current_ckpt.assign_path(current_ckpt.original, std::string(abs_path) + "/" + std::string(original)); + else + current_ckpt.assign_path(current_ckpt.original, std::string(original)); + return current_ckpt.filename(cfg.get("scratch")); +} + +bool veloc_client_t::restart_begin(const char *name, int version) { + if (checkpoint_in_progress) { + INFO("cannot restart while checkpoint in progress"); + return false; + } + if (!validate_name(name) || version < 0) { + ERROR("checkpoint name and/or version incorrect: name can only include [a-zA-Z0-9_] characters, version needs to be non-negative integer"); + return VELOC_FAILURE; + } + + int result, end_result; + current_ckpt = command_t(rank, command_t::RESTART, version, name); + result = run_blocking(current_ckpt); + if (collective) + MPI_Allreduce(&result, &end_result, 1, MPI_INT, MPI_LOR, comm); + else + end_result = result; + if (end_result == VELOC_SUCCESS) { + header_size = 0; + return true; + } else + return false; +} + +bool veloc_client_t::read_header() { + region_info.clear(); + try { + std::ifstream f; + size_t expected_size = 0; + + f.exceptions(std::ifstream::failbit | std::ifstream::badbit); + f.open(current_ckpt.filename(cfg.get("scratch")), std::ifstream::in | std::ifstream::binary); + size_t no_regions, region_size; + int id; + f.read((char *)&no_regions, 
sizeof(size_t)); + for (unsigned int i = 0; i < no_regions; i++) { + f.read((char *)&id, sizeof(int)); + f.read((char *)®ion_size, sizeof(size_t)); + region_info.insert(std::make_pair(id, region_size)); + expected_size += region_size; + } + header_size = f.tellg(); + f.seekg(0, f.end); + size_t file_size = (size_t)f.tellg() - header_size; + if (file_size != expected_size) + throw std::ifstream::failure("file size " + std::to_string(file_size) + " does not match expected size " + std::to_string(expected_size)); + } catch (std::ifstream::failure &e) { + ERROR("cannot validate header for checkpoint " << current_ckpt << ", reason: " << e.what()); + header_size = 0; + return false; + } + return true; +} + +size_t veloc_client_t::recover_size(int id) { + if (header_size == 0) + read_header(); + auto it = region_info.find(id); + if (it == region_info.end()) + return 0; + else + return it->second; +} + +bool veloc_client_t::recover_mem(int mode, std::set &ids) { + if (header_size == 0 && !read_header()) { + ERROR("cannot recover in memory mode if header unavailable or corrupted"); + return false; + } + try { + std::ifstream f; + f.exceptions(std::ifstream::failbit | std::ifstream::badbit); + f.open(current_ckpt.filename(cfg.get("scratch")), std::ifstream::in | std::ifstream::binary); + f.seekg(header_size); + for (auto &e : region_info) { + bool found = ids.find(e.first) != ids.end(); + if ((mode == VELOC_RECOVER_SOME && !found) || (mode == VELOC_RECOVER_REST && found)) { + f.seekg(e.second, std::ifstream::cur); + continue; + } + if (mem_regions.find(e.first) == mem_regions.end()) { + ERROR("no protected memory region defined for id " << e.first); + return false; + } + if (mem_regions[e.first].second < e.second) { + ERROR("protected memory region " << e.first << " is too small (" + << mem_regions[e.first].second << ") to hold required size (" + << e.second << ")"); + return false; + } + f.read((char *)mem_regions[e.first].first, e.second); + } + } catch (std::ifstream::failure &e) { + ERROR("cannot read checkpoint file " << current_ckpt << ", reason: " << e.what()); + return false; + } + return true; +} + +bool veloc_client_t::restart_end(bool /*success*/) { + return true; +} diff --git a/cuda_code/cmergebicgstab_2.cu b/cuda_code/cmergebicgstab_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..d151aee9477e04abf6790c6a63eb644fe29ccfe4 --- /dev/null +++ b/cuda_code/cmergebicgstab_2.cu @@ -0,0 +1,357 @@ +/* + -- MAGMA (version 1.6.2) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date May 2015 + + @generated from zmergebicgstab.cu normal z -> c, Sun May 3 11:22:58 2015 + @author Hartwig Anzt + +*/ +#include "common_magma.h" + +#define BLOCK_SIZE 512 + +#define PRECISION_c + + +// These routines merge multiple kernels from cmergebicgstab into one +// The difference to cmergedbicgstab2 is that the SpMV is not merged into the +// kernes. This results in higher flexibility at the price of lower performance. 
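+// In outline, the fused vector updates implemented below are:
+//   magma_cbicgmerge1:  p = r + beta * ( p - omega * v )
+//   magma_cbicgmerge2:  s = r - alpha * v
+//   magma_cbicgmerge3:  x = x + alpha * p + omega * s ;  r = s - omega * t
+//   magma_cbicgmerge4:  scalar updates of alpha, beta and omega held in skp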
+ +/* -------------------------------------------------------------------------- */ + +__global__ void +magma_cbicgmerge1_kernel( + int n, + magmaFloatComplex * skp, + magmaFloatComplex * v, + magmaFloatComplex * r, + magmaFloatComplex * p ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + magmaFloatComplex beta=skp[1]; + magmaFloatComplex omega=skp[2]; + if( i p = r + beta * ( p - omega * v ) + + Arguments + --------- + + @param[in] + n int + dimension n + + @param[in] + skp magmaFloatComplex_ptr + set of scalar parameters + + @param[in] + v magmaFloatComplex_ptr + input v + + @param[in] + r magmaFloatComplex_ptr + input r + + @param[in/out] + p magmaFloatComplex_ptr + input/output p + + @param[in] + queue magma_queue_t + Queue to execute in. + + @ingroup magmasparse_cgegpuk + ********************************************************************/ + +extern "C" int +magma_cbicgmerge1( + int n, + magmaFloatComplex_ptr skp, + magmaFloatComplex_ptr v, + magmaFloatComplex_ptr r, + magmaFloatComplex_ptr p ){ + + + dim3 Bs( BLOCK_SIZE ); + dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) ); + magma_cbicgmerge1_kernel<<>>( n, skp, v, r, p ); + + return MAGMA_SUCCESS; +} + +/* -------------------------------------------------------------------------- */ + +__global__ void +magma_cbicgmerge2_kernel( + int n, + magmaFloatComplex * skp, + magmaFloatComplex * r, + magmaFloatComplex * v, + magmaFloatComplex * s ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + magmaFloatComplex alpha=skp[0]; + if( i s = r - alpha * v + + Arguments + --------- + + @param[in] + n int + dimension n + + @param[in] + skp magmaFloatComplex_ptr + set of scalar parameters + + @param[in] + r magmaFloatComplex_ptr + input r + + @param[in] + v magmaFloatComplex_ptr + input v + + @param[s] + s magmaFloatComplex_ptr + output s + + @param[in] + queue magma_queue_t + Queue to execute in. + + @ingroup magmasparse_cgegpuk + ********************************************************************/ + +extern "C" int +magma_cbicgmerge2( + int n, + magmaFloatComplex_ptr skp, + magmaFloatComplex_ptr r, + magmaFloatComplex_ptr v, + magmaFloatComplex_ptr s ) +{ + + + dim3 Bs( BLOCK_SIZE ); + dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) ); + + magma_cbicgmerge2_kernel<<>>( n, skp, r, v, s ); + + return MAGMA_SUCCESS; +} + +/* -------------------------------------------------------------------------- */ + +__global__ void +magma_cbicgmerge3_kernel( + int n, + magmaFloatComplex * skp, + magmaFloatComplex * p, + magmaFloatComplex * se, + magmaFloatComplex * t, + magmaFloatComplex * x, + magmaFloatComplex * r ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + magmaFloatComplex alpha=skp[0]; + magmaFloatComplex omega=skp[2]; + if( i x = x + alpha * p + omega * s + -> r = s - omega * t + + Arguments + --------- + + @param[in] + n int + dimension n + + @param[in] + skp magmaFloatComplex_ptr + set of scalar parameters + + @param[in] + p magmaFloatComplex_ptr + input p + + @param[in] + s magmaFloatComplex_ptr + input s + + @param[in] + t magmaFloatComplex_ptr + input t + + @param[in/out] + x magmaFloatComplex_ptr + input/output x + + @param[in/out] + r magmaFloatComplex_ptr + input/output r + + @param[in] + queue magma_queue_t + Queue to execute in. 
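+
+    Illustrative call (device arrays are assumed to be allocated elsewhere):
+        magma_cbicgmerge3( n, d_skp, d_p, d_s, d_t, d_x, d_r );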
+ + @ingroup magmasparse_cgegpuk + ********************************************************************/ + +extern "C" int +magma_cbicgmerge3( + int n, + magmaFloatComplex_ptr skp, + magmaFloatComplex_ptr p, + magmaFloatComplex_ptr s, + magmaFloatComplex_ptr t, + magmaFloatComplex_ptr x, + magmaFloatComplex_ptr r ) +{ + + + dim3 Bs( BLOCK_SIZE ); + dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) ); + magma_cbicgmerge3_kernel<<>>( n, skp, p, s, t, x, r ); + + return MAGMA_SUCCESS; +} + +/* -------------------------------------------------------------------------- */ + +__global__ void +magma_cbicgmerge4_kernel_1( + magmaFloatComplex * skp ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + + if( i==0 ){ + magmaFloatComplex tmp = skp[0]; + skp[0] = skp[4]/tmp; + } +} + +__global__ void +magma_cbicgmerge4_kernel_2( + magmaFloatComplex * skp ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + + if( i==0 ){ + skp[2] = skp[6]/skp[7]; + skp[3] = skp[4]; + } +} + +__global__ void +magma_cbicgmerge4_kernel_3( + magmaFloatComplex * skp ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + + if( i==0 ){ + magmaFloatComplex tmp1 = skp[4]/skp[3]; + magmaFloatComplex tmp2 = skp[0] / skp[2]; + skp[1] = tmp1*tmp2; + //skp[1] = skp[4]/skp[3] * skp[0] / skp[2]; + + } +} + +/** + Purpose + ------- + + Performs some parameter operations for the BiCGSTAB with scalars on GPU. + + Arguments + --------- + + @param[in] + type int + kernel type + + @param[in/out] + skp magmaFloatComplex_ptr + vector with parameters + + @param[in] + queue magma_queue_t + Queue to execute in. + + @ingroup magmasparse_cgegpuk + ********************************************************************/ + +extern "C" int +magma_cbicgmerge4( + int type, + magmaFloatComplex_ptr skp ) +{ + + dim3 Bs( 1 ); + dim3 Gs( 1 ); + if( type == 1 ) + magma_cbicgmerge4_kernel_1<<>>( skp ); + else if( type == 2 ) + magma_cbicgmerge4_kernel_2<<>>( skp ); + else if( type == 3 ) + magma_cbicgmerge4_kernel_3<<>>( skp ); + else + printf("error: no kernel called\n"); + + return MAGMA_SUCCESS; +} + diff --git a/cuda_code/coalesced_reduction_9.cu b/cuda_code/coalesced_reduction_9.cu new file mode 100644 index 0000000000000000000000000000000000000000..fd541b80fa99c2f570568aa34839b9248242dec1 --- /dev/null +++ b/cuda_code/coalesced_reduction_9.cu @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "cuda_utils.h" +#include "linalg/coalesced_reduction.h" +#include "random/rng.h" +#include "test_utils.h" + + +namespace MLCommon { +namespace LinAlg { + +template +__global__ void naiveReductionKernel(Type *dots, const Type *data, int D, int N) { + Type acc = (Type)0; + int rowStart = threadIdx.x + blockIdx.x * blockDim.x; + if (rowStart < N) { + for (int i = 0; i < D; ++i) { + acc += data[rowStart * D + i] * data[rowStart * D + i]; + } + dots[rowStart] = 2*acc; + } +} + +template +void naiveReduction(Type *dots, const Type *data, int D, int N) { + static const int TPB = 64; + int nblks = ceildiv(N, TPB); + naiveReductionKernel<<>>(dots, data, D, N); + CUDA_CHECK(cudaPeekAtLastError()); +} + + +template +struct coalescedReductionInputs { + T tolerance; + int rows, cols; + unsigned long long int seed; +}; + +template +::std::ostream &operator<<(::std::ostream &os, const coalescedReductionInputs &dims) { + return os; +} + +// Or else, we get the following compilation error +// for an extended __device__ lambda cannot have private or protected access +// within its class +template +void coalescedReductionLaunch(T *dots, const T *data, int cols, int rows, + bool inplace = false) { + coalescedReduction(dots, data, cols, rows, (T)0, + inplace, 0, + [] __device__(T in) { return in * in; }); +} + +template +class coalescedReductionTest : public ::testing::TestWithParam> { +protected: + void SetUp() override { + params = ::testing::TestWithParam>::GetParam(); + Random::Rng r(params.seed); + int rows = params.rows, cols = params.cols; + int len = rows * cols; + allocate(data, len); + allocate(dots_exp, rows); + allocate(dots_act, rows); + r.uniform(data, len, -1.f, 1.f); + naiveReduction(dots_exp, data, cols, rows); + + // Perform reduction with default inplace = false first + coalescedReductionLaunch(dots_act, data, cols, rows); + // Add to result with inplace = true next + coalescedReductionLaunch(dots_act, data, cols, rows, true); + } + + void TearDown() override { + CUDA_CHECK(cudaFree(data)); + CUDA_CHECK(cudaFree(dots_exp)); + CUDA_CHECK(cudaFree(dots_act)); + } + +protected: + coalescedReductionInputs params; + T *data, *dots_exp, *dots_act; +}; + +const std::vector> inputsf = { + {0.000002f, 1024, 32, 1234ULL}, + {0.000002f, 1024, 64, 1234ULL}, + {0.000002f, 1024, 128, 1234ULL}, + {0.000002f, 1024, 256, 1234ULL}, + {0.000002f, 1024, 32, 1234ULL}, + {0.000002f, 1024, 64, 1234ULL}, + {0.000002f, 1024, 128, 1234ULL}, + {0.000002f, 1024, 256, 1234ULL}}; + +const std::vector> inputsd = { + {0.000000001, 1024, 32, 1234ULL}, + {0.000000001, 1024, 64, 1234ULL}, + {0.000000001, 1024, 128, 1234ULL}, + {0.000000001, 1024, 256, 1234ULL}, + {0.000000001, 1024, 32, 1234ULL}, + {0.000000001, 1024, 64, 1234ULL}, + {0.000000001, 1024, 128, 1234ULL}, + {0.000000001, 1024, 256, 1234ULL}}; + +typedef coalescedReductionTest coalescedReductionTestF; +TEST_P(coalescedReductionTestF, Result) { + ASSERT_TRUE(devArrMatch(dots_exp, dots_act, params.rows, + CompareApprox(params.tolerance))); +} + +typedef coalescedReductionTest coalescedReductionTestD; +TEST_P(coalescedReductionTestD, Result) { + ASSERT_TRUE(devArrMatch(dots_exp, dots_act, params.rows, + CompareApprox(params.tolerance))); +} + +INSTANTIATE_TEST_CASE_P(coalescedReductionTests, coalescedReductionTestF, ::testing::ValuesIn(inputsf)); + +INSTANTIATE_TEST_CASE_P(coalescedReductionTests, coalescedReductionTestD, ::testing::ValuesIn(inputsd)); + +} // end namespace LinAlg +} // end namespace MLCommon diff --git 
a/cuda_code/column_factory_1.cu b/cuda_code/column_factory_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..b35f851aa3f7d2d8d76eb92818ba06288dcfc5e2 --- /dev/null +++ b/cuda_code/column_factory_1.cu @@ -0,0 +1,56 @@ +#include "column_factory.h" +#include +#include +#include +#include +#include +// #include +#include + +namespace blazingdb { +namespace test { + + template + auto make_col(cudf::size_type size) { + thrust::device_vector d_integers(size); + thrust::sequence( thrust::device, d_integers.begin(), d_integers.end()); + cudf::mask_state state = cudf::mask_state::ALL_VALID; + + auto integers = cudf::make_numeric_column(cudf::data_type{cudf::experimental::type_to_id()}, size, state); + auto integers_view = integers->mutable_view(); + cudaMemcpy( integers_view.data(), d_integers.data().get(), size * sizeof(TypeParam), cudaMemcpyDeviceToDevice ); + return integers; + } + + + +ral::frame::BlazingTable build_custom_table() { + cudf::size_type size = 10; + + auto num_column_1 = make_col(size); + auto num_column_2 = make_col(size); + auto num_column_3 = make_col(size); + auto num_column_4 = make_col(size); + + std::vector> columns; + columns.push_back(std::move(num_column_1)); + columns.push_back(std::move(num_column_2)); + columns.push_back(std::move(num_column_3)); + columns.push_back(std::move(num_column_4)); + + + cudf::test::strings_column_wrapper col2({"d", "e", "a", "d", "k", "d", "l", "a", "b", "c"}, {1, 0, 1, 1, 1, 1, 1, 1, 0, 1}); + + std::unique_ptr str_col = std::make_unique(std::move(col2)); + columns.push_back(std::move(str_col)); + + std::vector column_names = {"INT64", "INT32", "FLOAT64", "FLOAT32", "STRING"}; + + auto table = std::make_unique(std::move(columns)); + return ral::frame::BlazingTable(std::move(table), column_names); +} + + + +} // +} // \ No newline at end of file diff --git a/cuda_code/column_test_8.cu b/cuda_code/column_test_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..f3d51af1b2e9d0f834a5c87968907918f65d421d --- /dev/null +++ b/cuda_code/column_test_8.cu @@ -0,0 +1,433 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +template +struct TypedColumnTest : public cudf::test::BaseFixture { + static std::size_t data_size() { return 1000; } + static std::size_t mask_size() { return 100; } + cudf::data_type type() { + return cudf::data_type{cudf::experimental::type_to_id()}; + } + + TypedColumnTest() + : data{_num_elements * cudf::size_of(type())}, + mask{cudf::bitmask_allocation_size_bytes(_num_elements)} { + auto typed_data = static_cast(data.data()); + auto typed_mask = static_cast(mask.data()); + thrust::sequence(thrust::device, typed_data, typed_data + data_size()); + thrust::sequence(thrust::device, typed_mask, typed_mask + mask_size()); + } + + cudf::size_type num_elements() { return _num_elements; } + + std::random_device r; + std::default_random_engine generator{r()}; + std::uniform_int_distribution distribution{200, 1000}; + cudf::size_type _num_elements{distribution(generator)}; + rmm::device_buffer data{}; + rmm::device_buffer mask{}; + rmm::device_buffer all_valid_mask{ + create_null_mask(num_elements(), cudf::mask_state::ALL_VALID)}; + rmm::device_buffer all_null_mask{ + create_null_mask(num_elements(), cudf::mask_state::ALL_NULL)}; +}; + +TYPED_TEST_CASE(TypedColumnTest, cudf::test::Types); + +/**---------------------------------------------------------------------------* + * @brief Verifies equality of the properties and data of a `column`'s views. + * + * @param col The `column` to verify + *---------------------------------------------------------------------------**/ +void verify_column_views(cudf::column col) { + cudf::column_view view = col; + cudf::mutable_column_view mutable_view = col; + EXPECT_EQ(col.type(), view.type()); + EXPECT_EQ(col.type(), mutable_view.type()); + EXPECT_EQ(col.size(), view.size()); + EXPECT_EQ(col.size(), mutable_view.size()); + EXPECT_EQ(col.null_count(), view.null_count()); + EXPECT_EQ(col.null_count(), mutable_view.null_count()); + EXPECT_EQ(col.nullable(), view.nullable()); + EXPECT_EQ(col.nullable(), mutable_view.nullable()); + EXPECT_EQ(col.num_children(), view.num_children()); + EXPECT_EQ(col.num_children(), mutable_view.num_children()); + EXPECT_EQ(view.head(), mutable_view.head()); + EXPECT_EQ(view.data(), mutable_view.data()); + EXPECT_EQ(view.offset(), mutable_view.offset()); +} + +TYPED_TEST(TypedColumnTest, DefaultNullCountNoMask) { + cudf::column col{this->type(), this->num_elements(), this->data}; + EXPECT_FALSE(col.nullable()); + EXPECT_FALSE(col.has_nulls()); + EXPECT_EQ(0, col.null_count()); +} + +TYPED_TEST(TypedColumnTest, DefaultNullCountEmptyMask) { + cudf::column col{this->type(), this->num_elements(), this->data, + rmm::device_buffer{}}; + EXPECT_FALSE(col.nullable()); + EXPECT_FALSE(col.has_nulls()); + EXPECT_EQ(0, col.null_count()); +} + +TYPED_TEST(TypedColumnTest, DefaultNullCountAllValid) { + cudf::column col{this->type(), this->num_elements(), this->data, + this->all_valid_mask}; + EXPECT_TRUE(col.nullable()); + EXPECT_FALSE(col.has_nulls()); + EXPECT_EQ(0, col.null_count()); +} + +TYPED_TEST(TypedColumnTest, ExplicitNullCountAllValid) { + cudf::column col{this->type(), this->num_elements(), this->data, + this->all_valid_mask, 0}; + EXPECT_TRUE(col.nullable()); + EXPECT_FALSE(col.has_nulls()); + EXPECT_EQ(0, col.null_count()); +} + +TYPED_TEST(TypedColumnTest, DefaultNullCountAllNull) { + cudf::column col{this->type(), this->num_elements(), this->data, + 
this->all_null_mask}; + EXPECT_TRUE(col.nullable()); + EXPECT_TRUE(col.has_nulls()); + EXPECT_EQ(this->num_elements(), col.null_count()); +} + +TYPED_TEST(TypedColumnTest, ExplicitNullCountAllNull) { + cudf::column col{this->type(), this->num_elements(), this->data, + this->all_null_mask, this->num_elements()}; + EXPECT_TRUE(col.nullable()); + EXPECT_TRUE(col.has_nulls()); + EXPECT_EQ(this->num_elements(), col.null_count()); +} + +TYPED_TEST(TypedColumnTest, SetNullCountNoMask) { + cudf::column col{this->type(), this->num_elements(), this->data}; + EXPECT_THROW(col.set_null_count(1), cudf::logic_error); +} + +TYPED_TEST(TypedColumnTest, SetEmptyNullMaskNonZeroNullCount) { + cudf::column col{this->type(), this->num_elements(), this->data}; + rmm::device_buffer empty_null_mask{}; + EXPECT_THROW(col.set_null_mask(empty_null_mask, this->num_elements()), + cudf::logic_error); +} + +TYPED_TEST(TypedColumnTest, SetInvalidSizeNullMaskNonZeroNullCount) { + cudf::column col{this->type(), this->num_elements(), this->data}; + auto invalid_size_null_mask = + create_null_mask(std::min(this->num_elements() - 50, 0), + cudf::mask_state::ALL_VALID); + EXPECT_THROW(col.set_null_mask(invalid_size_null_mask, this->num_elements()), + cudf::logic_error); +} + +TYPED_TEST(TypedColumnTest, SetNullCountEmptyMask) { + cudf::column col{this->type(), this->num_elements(), this->data, + rmm::device_buffer{}}; + EXPECT_THROW(col.set_null_count(1), cudf::logic_error); +} + +TYPED_TEST(TypedColumnTest, SetNullCountAllValid) { + cudf::column col{this->type(), this->num_elements(), this->data, + this->all_valid_mask}; + EXPECT_NO_THROW(col.set_null_count(0)); + EXPECT_EQ(0, col.null_count()); +} + +TYPED_TEST(TypedColumnTest, SetNullCountAllNull) { + cudf::column col{this->type(), this->num_elements(), this->data, + this->all_null_mask}; + EXPECT_NO_THROW(col.set_null_count(this->num_elements())); + EXPECT_EQ(this->num_elements(), col.null_count()); +} + +TYPED_TEST(TypedColumnTest, ResetNullCountAllNull) { + cudf::column col{this->type(), this->num_elements(), this->data, + this->all_null_mask}; + + EXPECT_EQ(this->num_elements(), col.null_count()); + EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT)); + EXPECT_EQ(this->num_elements(), col.null_count()); +} + +TYPED_TEST(TypedColumnTest, ResetNullCountAllValid) { + cudf::column col{this->type(), this->num_elements(), this->data, + this->all_valid_mask}; + EXPECT_EQ(0, col.null_count()); + EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT)); + EXPECT_EQ(0, col.null_count()); +} + +TYPED_TEST(TypedColumnTest, CopyDataNoMask) { + cudf::column col{this->type(), this->num_elements(), this->data}; + EXPECT_EQ(this->type(), col.type()); + EXPECT_FALSE(col.nullable()); + EXPECT_EQ(0, col.null_count()); + EXPECT_EQ(this->num_elements(), col.size()); + EXPECT_EQ(0, col.num_children()); + + verify_column_views(col); + + // Verify deep copy + cudf::column_view v = col; + EXPECT_NE(v.head(), this->data.data()); + cudf::test::expect_equal_buffers(v.head(), this->data.data(), + this->data.size()); +} + +TYPED_TEST(TypedColumnTest, MoveDataNoMask) { + void* original_data = this->data.data(); + cudf::column col{this->type(), this->num_elements(), std::move(this->data)}; + EXPECT_EQ(this->type(), col.type()); + EXPECT_FALSE(col.nullable()); + EXPECT_EQ(0, col.null_count()); + EXPECT_EQ(this->num_elements(), col.size()); + EXPECT_EQ(0, col.num_children()); + + verify_column_views(col); + + // Verify shallow copy + cudf::column_view v = col; + EXPECT_EQ(v.head(), 
original_data); +} + +TYPED_TEST(TypedColumnTest, CopyDataAndMask) { + cudf::column col{this->type(), this->num_elements(), this->data, + this->all_valid_mask}; + EXPECT_EQ(this->type(), col.type()); + EXPECT_TRUE(col.nullable()); + EXPECT_EQ(0, col.null_count()); + EXPECT_EQ(this->num_elements(), col.size()); + EXPECT_EQ(0, col.num_children()); + + verify_column_views(col); + + // Verify deep copy + cudf::column_view v = col; + EXPECT_NE(v.head(), this->data.data()); + EXPECT_NE(v.null_mask(), this->all_valid_mask.data()); + cudf::test::expect_equal_buffers(v.head(), this->data.data(), + this->data.size()); + cudf::test::expect_equal_buffers(v.null_mask(), this->all_valid_mask.data(), + this->mask.size()); +} + +TYPED_TEST(TypedColumnTest, MoveDataAndMask) { + void* original_data = this->data.data(); + void* original_mask = this->all_valid_mask.data(); + cudf::column col{this->type(), this->num_elements(), std::move(this->data), + std::move(this->all_valid_mask)}; + EXPECT_EQ(this->type(), col.type()); + EXPECT_TRUE(col.nullable()); + EXPECT_EQ(0, col.null_count()); + EXPECT_EQ(this->num_elements(), col.size()); + EXPECT_EQ(0, col.num_children()); + + verify_column_views(col); + + // Verify shallow copy + cudf::column_view v = col; + EXPECT_EQ(v.head(), original_data); + EXPECT_EQ(v.null_mask(), original_mask); +} + +TYPED_TEST(TypedColumnTest, CopyConstructorNoMask) { + cudf::column original{this->type(), this->num_elements(), this->data}; + cudf::column copy{original}; + verify_column_views(copy); + cudf::test::expect_columns_equal(original, copy); + + // Verify deep copy + cudf::column_view original_view = original; + cudf::column_view copy_view = copy; + EXPECT_NE(original_view.head(), copy_view.head()); +} + +TYPED_TEST(TypedColumnTest, CopyConstructorWithMask) { + cudf::column original{this->type(), this->num_elements(), this->data, + this->all_valid_mask}; + cudf::column copy{original}; + verify_column_views(copy); + cudf::test::expect_columns_equal(original, copy); + + // Verify deep copy + cudf::column_view original_view = original; + cudf::column_view copy_view = copy; + EXPECT_NE(original_view.head(), copy_view.head()); + EXPECT_NE(original_view.null_mask(), copy_view.null_mask()); +} + +TYPED_TEST(TypedColumnTest, MoveConstructorNoMask) { + cudf::column original{this->type(), this->num_elements(), this->data}; + + auto original_data = original.view().head(); + + cudf::column moved_to{std::move(original)}; + + EXPECT_EQ(0, original.size()); + EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type()); + + verify_column_views(moved_to); + + // Verify move + cudf::column_view moved_to_view = moved_to; + EXPECT_EQ(original_data, moved_to_view.head()); +} + +TYPED_TEST(TypedColumnTest, MoveConstructorWithMask) { + cudf::column original{this->type(), this->num_elements(), this->data, + this->all_valid_mask}; + auto original_data = original.view().head(); + auto original_mask = original.view().null_mask(); + cudf::column moved_to{std::move(original)}; + verify_column_views(moved_to); + + EXPECT_EQ(0, original.size()); + EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type()); + + // Verify move + cudf::column_view moved_to_view = moved_to; + EXPECT_EQ(original_data, moved_to_view.head()); + EXPECT_EQ(original_mask, moved_to_view.null_mask()); +} + +TYPED_TEST(TypedColumnTest, ConstructWithChildren) { + std::vector> children; + children.emplace_back( + std::make_unique(cudf::data_type{cudf::type_id::INT8}, 42, + this->data, this->all_valid_mask)); + 
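+ // Second child: a FLOAT64 column of 314 elements. The parent column
+ // constructed below takes ownership of both children, and the test then
+ // checks that child(0) and child(1) report the expected types and sizes.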
children.emplace_back( + std::make_unique(cudf::data_type{cudf::type_id::FLOAT64}, + 314, this->data, this->all_valid_mask)); + cudf::column col{ + this->type(), this->num_elements(), this->data, + this->all_valid_mask, cudf::UNKNOWN_NULL_COUNT, std::move(children)}; + + verify_column_views(col); + EXPECT_EQ(2, col.num_children()); + EXPECT_EQ(cudf::data_type{cudf::type_id::INT8}, col.child(0).type()); + EXPECT_EQ(42, col.child(0).size()); + EXPECT_EQ(cudf::data_type{cudf::type_id::FLOAT64}, col.child(1).type()); + EXPECT_EQ(314, col.child(1).size()); +} + +TYPED_TEST(TypedColumnTest, ReleaseNoChildren) { + cudf::column col{this->type(), this->num_elements(), this->data, + this->all_valid_mask}; + auto original_data = col.view().head(); + auto original_mask = col.view().null_mask(); + + cudf::column::contents contents = col.release(); + EXPECT_EQ(original_data, contents.data->data()); + EXPECT_EQ(original_mask, contents.null_mask->data()); + EXPECT_EQ(0u, contents.children.size()); + EXPECT_EQ(0, col.size()); + EXPECT_EQ(0, col.null_count()); + EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type()); + EXPECT_EQ(0, col.num_children()); +} + +TYPED_TEST(TypedColumnTest, ReleaseWithChildren) { + std::vector> children; + children.emplace_back(std::make_unique( + this->type(), this->num_elements(), this->data, this->all_valid_mask)); + children.emplace_back(std::make_unique( + this->type(), this->num_elements(), this->data, this->all_valid_mask)); + cudf::column col{ + this->type(), this->num_elements(), this->data, + this->all_valid_mask, cudf::UNKNOWN_NULL_COUNT, std::move(children)}; + + auto original_data = col.view().head(); + auto original_mask = col.view().null_mask(); + + cudf::column::contents contents = col.release(); + EXPECT_EQ(original_data, contents.data->data()); + EXPECT_EQ(original_mask, contents.null_mask->data()); + EXPECT_EQ(2u, contents.children.size()); + EXPECT_EQ(0, col.size()); + EXPECT_EQ(0, col.null_count()); + EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type()); + EXPECT_EQ(0, col.num_children()); +} + +TYPED_TEST(TypedColumnTest, ColumnViewConstructorWithMask) { + cudf::column original{this->type(), this->num_elements(), this->data, + this->all_valid_mask}; + cudf::column_view original_view = original; + cudf::column copy{original_view}; + verify_column_views(copy); + cudf::test::expect_columns_equal(original, copy); + + // Verify deep copy + cudf::column_view copy_view = copy; + EXPECT_NE(original_view.head(), copy_view.head()); + EXPECT_NE(original_view.null_mask(), copy_view.null_mask()); +} + +TYPED_TEST(TypedColumnTest, ConcatenateColumnView) { + cudf::column original{this->type(), this->num_elements(), this->data, + this->mask}; + std::vector indices{ + 0, this->num_elements()/3, + this->num_elements()/3, this->num_elements()/2, + this->num_elements()/2, this->num_elements()}; + std::vector views = cudf::experimental::slice(original, indices); + + auto concatenated_col = cudf::concatenate(views); + + cudf::test::expect_columns_equal(original, *concatenated_col); +} + +struct StringColumnTest : public cudf::test::BaseFixture {}; + +TEST_F(StringColumnTest, ConcatenateColumnView) { + std::vector h_strings{ "aaa", "bb", "", "cccc", "d", "ééé", "ff", "gggg", "", "h", "iiii", "jjj", "k", "lllllll", "mmmmm", "n", "oo", "ppp" }; + cudf::test::strings_column_wrapper strings1( h_strings.data(), h_strings.data()+6 ); + cudf::test::strings_column_wrapper strings2( h_strings.data()+6, h_strings.data()+10 ); + cudf::test::strings_column_wrapper strings3( 
h_strings.data()+10, h_strings.data()+h_strings.size() ); + + std::vector strings_columns; + strings_columns.push_back(strings1); + strings_columns.push_back(strings2); + strings_columns.push_back(strings3); + + auto results = cudf::concatenate(strings_columns); + + cudf::test::strings_column_wrapper expected( h_strings.begin(), h_strings.end() ); + cudf::test::expect_columns_equal(*results,expected); +} diff --git a/cuda_code/column_utilities_12.cu b/cuda_code/column_utilities_12.cu new file mode 100644 index 0000000000000000000000000000000000000000..45300ac24640e4a41b53a647094cad4ea01d1616 --- /dev/null +++ b/cuda_code/column_utilities_12.cu @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "column_utilities.hpp" + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include + +namespace cudf { +namespace test { + +// Property comparison +template +void column_property_comparison(cudf::column_view const& lhs, cudf::column_view const& rhs) { + EXPECT_EQ(lhs.type(), rhs.type()); + EXPECT_EQ(lhs.size(), rhs.size()); + EXPECT_EQ(lhs.null_count(), rhs.null_count()); + if (lhs.size() > 0 and check_exact_equality) { + EXPECT_EQ(lhs.nullable(), rhs.nullable()); + } + EXPECT_EQ(lhs.num_children(), rhs.num_children()); +} + +void expect_column_properties_equal(column_view const& lhs, column_view const& rhs) { + column_property_comparison(lhs, rhs); +} + +void expect_column_properties_equivalent(column_view const& lhs, column_view const& rhs) { + column_property_comparison(lhs, rhs); +} + +class corresponding_rows_unequal { + table_device_view d_lhs; + table_device_view d_rhs; +public: + corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs) + : d_lhs(d_lhs), + d_rhs(d_rhs), + comp(d_lhs, d_rhs) + { + CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1, + "Unsupported number of columns"); + } + + struct typed_element_unequal { + template + __device__ std::enable_if_t::value, bool> + operator()(column_device_view const& lhs, + column_device_view const& rhs, + size_type index) + { + if (lhs.is_valid(index) and rhs.is_valid(index)) { + int ulp = 4; // value taken from google test + T x = lhs.element(index); + T y = rhs.element(index); + return std::abs(x-y) > std::numeric_limits::epsilon() * std::abs(x+y) * ulp + && std::abs(x-y) >= std::numeric_limits::min(); + } else { + // if either is null, then the inequality was checked already + return true; + } + } + + template + __device__ std::enable_if_t::value, bool> + operator()(Args... 
args) { + // Non-floating point inequality is checked already + return true; + } + }; + + cudf::experimental::row_equality_comparator comp; + + __device__ bool operator()(size_type index) { + if (not comp(index, index)) { + return thrust::any_of(thrust::seq, + thrust::make_counting_iterator(0), + thrust::make_counting_iterator(d_lhs.num_columns()), + [this, index] (auto i) { + auto lhs_col = this->d_lhs.column(i); + auto rhs_col = this->d_rhs.column(i); + return experimental::type_dispatcher(lhs_col.type(), + typed_element_unequal{}, + lhs_col, rhs_col, index); + }); + } + return false; + } +}; + +class corresponding_rows_not_equivalent { + table_device_view d_lhs; + table_device_view d_rhs; +public: + corresponding_rows_not_equivalent(table_device_view d_lhs, table_device_view d_rhs) + : d_lhs(d_lhs), + d_rhs(d_rhs), + comp(d_lhs, d_rhs) + { + CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1, + "Unsupported number of columns"); + } + + struct typed_element_not_equivalent { + template + __device__ std::enable_if_t::value, bool> + operator()(column_device_view const& lhs, + column_device_view const& rhs, + size_type index) + { + if (lhs.is_valid(index) and rhs.is_valid(index)) { + int ulp = 4; // value taken from google test + T x = lhs.element(index); + T y = rhs.element(index); + return std::abs(x-y) > std::numeric_limits::epsilon() * std::abs(x+y) * ulp + && std::abs(x-y) >= std::numeric_limits::min(); + } else { + // if either is null, then the inequality was checked already + return true; + } + } + + template + __device__ std::enable_if_t::value, bool> + operator()(Args... args) { + // Non-floating point inequality is checked already + return true; + } + }; + + cudf::experimental::row_equality_comparator comp; + + __device__ bool operator()(size_type index) { + if (not comp(index, index)) { + auto lhs_col = this->d_lhs.column(0); + auto rhs_col = this->d_rhs.column(0); + return experimental::type_dispatcher(lhs_col.type(), + typed_element_not_equivalent{}, + lhs_col, rhs_col, index); + } + return false; + } +}; + +namespace { + +template +void column_comparison(cudf::column_view const& lhs, cudf::column_view const& rhs, + bool print_all_differences) { + column_property_comparison(lhs, rhs); + + using ComparatorType = std::conditional_t; + + auto d_lhs = cudf::table_device_view::create(table_view{{lhs}}); + auto d_rhs = cudf::table_device_view::create(table_view{{rhs}}); + + // TODO (dm): handle floating point equality + thrust::device_vector differences(lhs.size()); + + auto diff_iter = thrust::copy_if(thrust::device, + thrust::make_counting_iterator(0), + thrust::make_counting_iterator(lhs.size()), + differences.begin(), + ComparatorType(*d_lhs, *d_rhs)); + + CUDA_TRY(cudaDeviceSynchronize()); + + differences.resize(thrust::distance(differences.begin(), diff_iter)); + + if (diff_iter > differences.begin()) { + if (print_all_differences) { + // + // If there are differences, display them all + // + std::ostringstream buffer; + buffer << "differences:" << std::endl; + + cudf::table_view source_table ({lhs, rhs}); + + fixed_width_column_wrapper diff_column(differences.begin(), differences.end()); + + std::unique_ptr diff_table = cudf::experimental::gather(source_table, + diff_column); + + // + // Need to pull back the differences + // + std::vector h_left_strings = to_strings(diff_table->get_column(0)); + std::vector h_right_strings = to_strings(diff_table->get_column(1)); + + for (size_t i = 0 ; i < differences.size() ; ++i) { + buffer << "lhs[" << differences[i] << "] 
= " << h_left_strings[i] + << ", rhs[" << differences[i] << "] = " << h_right_strings[i] << std::endl; + } + + EXPECT_EQ(differences.size(), size_t{0}) << buffer.str(); + } else { + // + // If there are differences, just display the first one + // + int index = differences[0]; + + auto diff_lhs = cudf::experimental::detail::slice(lhs, index, index+1); + auto diff_rhs = cudf::experimental::detail::slice(rhs, index, index+1); + + std::vector h_left_strings = to_strings(diff_lhs); + std::vector h_right_strings = to_strings(diff_rhs); + + EXPECT_EQ(differences.size(), size_t{0}) << "first difference: " + << "lhs[" << index << "] = " + << to_string(diff_lhs, "") + << ", rhs[" << index << "] = " + << to_string(diff_rhs, ""); + } + } +} + +} // namespace anonymous + +void expect_columns_equal(cudf::column_view const& lhs, cudf::column_view const& rhs, + bool print_all_differences) +{ + column_comparison(lhs, rhs, print_all_differences); +} + +void expect_columns_equivalent(cudf::column_view const& lhs, + cudf::column_view const& rhs, + bool print_all_differences) +{ + column_comparison(lhs, rhs, print_all_differences); +} + +// Bitwise equality +void expect_equal_buffers(void const* lhs, void const* rhs, + std::size_t size_bytes) { + if (size_bytes > 0) { + EXPECT_NE(nullptr, lhs); + EXPECT_NE(nullptr, rhs); + } + auto typed_lhs = static_cast(lhs); + auto typed_rhs = static_cast(rhs); + EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes, + typed_rhs)); +} + +// copy column bitmask to host (used by to_host()) +std::vector bitmask_to_host(cudf::column_view const& c) { + if (c.nullable()) { + auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type); + std::vector host_bitmask(num_bitmasks); + if (c.offset()==0) { + CUDA_TRY(cudaMemcpy(host_bitmask.data(), c.null_mask(), num_bitmasks * sizeof(bitmask_type), + cudaMemcpyDeviceToHost)); + } else { + auto mask = copy_bitmask(c.null_mask(), c.offset(), c.offset()+c.size()); + CUDA_TRY(cudaMemcpy(host_bitmask.data(), mask.data(), num_bitmasks * sizeof(bitmask_type), + cudaMemcpyDeviceToHost)); + } + + return host_bitmask; + } + else { + return std::vector{}; + } +} + + +struct column_view_printer { + template ()>* = nullptr> + void operator()(cudf::column_view const& col, std::vector & out) { + auto h_data = cudf::test::to_host(col); + + out.resize(col.size()); + + if (col.nullable()) { + std::transform(thrust::make_counting_iterator(size_type{0}), + thrust::make_counting_iterator(col.size()), + out.begin(), + [&h_data](auto idx) { + return bit_is_set(h_data.second.data(), idx) ? 
std::to_string(h_data.first[idx]) : std::string("NULL"); + }); + } else { + std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { + return std::to_string(el); + }); + } + } + + template ()>* = nullptr> + void operator()(cudf::column_view const& col, std::vector & out) { + // + // For timestamps, convert timestamp column to column of strings, then + // call string version + // + auto col_as_strings = cudf::strings::from_timestamps(col); + + this->template operator()(*col_as_strings, out); + } + + template ::value>* = nullptr> + void operator()(cudf::column_view const& col, std::vector & out) { + // + // Implementation for strings, call special to_host variant + // + auto h_data = cudf::test::to_host(col); + + out.resize(col.size()); + if (col.nullable()) { + std::transform(thrust::make_counting_iterator(size_type{0}), + thrust::make_counting_iterator(col.size()), + out.begin(), + [&h_data](auto idx) { + return bit_is_set(h_data.second.data(), idx) ? h_data.first[idx] : std::string("NULL"); + }); + } else { + out = std::move(h_data.first); + } + } +}; + +std::vector to_strings(cudf::column_view const& col) { + std::vector reply; + + cudf::experimental::type_dispatcher(col.type(), + column_view_printer{}, + col, + reply); + + return reply; +} + +std::string to_string(cudf::column_view const& col, std::string const& delimiter) { + + std::ostringstream buffer; + std::vector h_data = to_strings(col); + + std::copy(h_data.begin(), h_data.end() - 1, std::ostream_iterator(buffer, delimiter.c_str())); + buffer << h_data.back(); + + return buffer.str(); +} + +void print(cudf::column_view const& col, std::ostream &os, std::string const& delimiter) { + os << to_string(col, delimiter); +} + +} // namespace test +} // namespace cudf diff --git a/cuda_code/combine_2.cu b/cuda_code/combine_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..2de4e0b39774ddd056c031451ee8379b3d9e7003 --- /dev/null +++ b/cuda_code/combine_2.cu @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2019-2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace cudf { +namespace strings { +namespace detail { +// +std::unique_ptr concatenate( + table_view const& strings_columns, + string_scalar const& separator = string_scalar(""), + string_scalar const& narep = string_scalar("", false), + rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), + cudaStream_t stream = 0) +{ + auto num_columns = strings_columns.num_columns(); + CUDF_EXPECTS(num_columns > 0, "At least one column must be specified"); + // check all columns are of type string + CUDF_EXPECTS(std::all_of(strings_columns.begin(), + strings_columns.end(), + [](auto c) { return c.type().id() == STRING; }), + "All columns must be of type string"); + if (num_columns == 1) // single strings column returns a copy + return std::make_unique(*(strings_columns.begin()), stream, mr); + auto strings_count = strings_columns.num_rows(); + if (strings_count == 0) // empty begets empty + return detail::make_empty_strings_column(mr, stream); + + CUDF_EXPECTS(separator.is_valid(), "Parameter separator must be a valid string_scalar"); + string_view d_separator(separator.data(), separator.size()); + auto d_narep = get_scalar_device_view(const_cast(narep)); + + // Create device views from the strings columns. + auto table = table_device_view::create(strings_columns, stream); + auto d_table = *table; + + // create resulting null mask + auto valid_mask = cudf::experimental::detail::valid_if( + thrust::make_counting_iterator(0), + thrust::make_counting_iterator(strings_count), + [d_table, d_narep] __device__(size_type idx) { + bool null_element = thrust::any_of( + thrust::seq, d_table.begin(), d_table.end(), [idx](auto col) { return col.is_null(idx); }); + return (!null_element || d_narep.is_valid()); + }, + stream, + mr); + rmm::device_buffer null_mask = valid_mask.first; + auto null_count = valid_mask.second; + + // build offsets column by computing sizes of each string in the output + auto offsets_transformer = [d_table, num_columns, d_separator, d_narep] __device__( + size_type row_idx) { + // for this row (idx), iterate over each column and add up the bytes + bool null_element = + thrust::any_of(thrust::seq, d_table.begin(), d_table.end(), [row_idx](auto const& d_column) { + return d_column.is_null(row_idx); + }); + if (null_element && !d_narep.is_valid()) return 0; + size_type bytes = thrust::transform_reduce( + thrust::seq, + d_table.begin(), + d_table.end(), + [row_idx, d_separator, d_narep] __device__(column_device_view const& d_column) { + return d_separator.size_bytes() + (d_column.is_null(row_idx) + ? 
d_narep.size() + : d_column.element(row_idx).size_bytes()); + }, + 0, + thrust::plus()); + // separator goes only in between elements + if (bytes > 0) // if not null + bytes -= d_separator.size_bytes(); // remove the last separator + return bytes; + }; + auto offsets_transformer_itr = thrust::make_transform_iterator( + thrust::make_counting_iterator(0), offsets_transformer); + auto offsets_column = detail::make_offsets_child_column( + offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream); + auto d_results_offsets = offsets_column->view().data(); + + // create the chars column + size_type bytes = thrust::device_pointer_cast(d_results_offsets)[strings_count]; + auto chars_column = + strings::detail::create_chars_child_column(strings_count, null_count, bytes, mr, stream); + // fill the chars column + auto d_results_chars = chars_column->mutable_view().data(); + thrust::for_each_n( + rmm::exec_policy(stream)->on(stream), + thrust::make_counting_iterator(0), + strings_count, + [d_table, num_columns, d_separator, d_narep, d_results_offsets, d_results_chars] __device__( + size_type idx) { + bool null_element = thrust::any_of( + thrust::seq, d_table.begin(), d_table.end(), [idx](column_device_view const& col) { + return col.is_null(idx); + }); + if (null_element && !d_narep.is_valid()) + return; // do not write to buffer at all if any column element for this row is null + size_type offset = d_results_offsets[idx]; + char* d_buffer = d_results_chars + offset; + // write out each column's entry for this row + for (size_type col_idx = 0; col_idx < num_columns; ++col_idx) { + auto d_column = d_table.column(col_idx); + string_view d_str = + d_column.is_null(idx) ? d_narep.value() : d_column.element(idx); + d_buffer = detail::copy_string(d_buffer, d_str); + // separator goes only in between elements + if (col_idx + 1 < num_columns) d_buffer = detail::copy_string(d_buffer, d_separator); + } + }); + + return make_strings_column(strings_count, + std::move(offsets_column), + std::move(chars_column), + null_count, + std::move(null_mask), + stream, + mr); +} + +// +std::unique_ptr join_strings( + strings_column_view const& strings, + string_scalar const& separator = string_scalar(""), + string_scalar const& narep = string_scalar("", false), + rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), + cudaStream_t stream = 0) +{ + auto strings_count = strings.size(); + if (strings_count == 0) return detail::make_empty_strings_column(mr, stream); + + CUDF_EXPECTS(separator.is_valid(), "Parameter separator must be a valid string_scalar"); + + auto execpol = rmm::exec_policy(stream); + string_view d_separator(separator.data(), separator.size()); + auto d_narep = get_scalar_device_view(const_cast(narep)); + + auto strings_column = column_device_view::create(strings.parent(), stream); + auto d_strings = *strings_column; + + // create an offsets array for building the output memory layout + rmm::device_vector output_offsets(strings_count + 1); + auto d_output_offsets = output_offsets.data().get(); + // using inclusive-scan to compute last entry which is the total size + thrust::transform_inclusive_scan( + execpol->on(stream), + thrust::make_counting_iterator(0), + thrust::make_counting_iterator(strings_count), + d_output_offsets + 1, + [d_strings, d_separator, d_narep] __device__(size_type idx) { + size_type bytes = 0; + if (d_strings.is_null(idx)) { + if (!d_narep.is_valid()) return 0; // skip nulls + bytes += d_narep.size(); + } else + bytes += 
d_strings.element(idx).size_bytes(); + if ((idx + 1) < d_strings.size()) bytes += d_separator.size_bytes(); + return bytes; + }, + thrust::plus()); + CUDA_TRY(cudaMemsetAsync(d_output_offsets, 0, sizeof(size_type), stream)); + // total size is the last entry + size_type bytes = output_offsets.back(); + + // build offsets column (only 1 string so 2 offset entries) + auto offsets_column = + make_numeric_column(data_type{INT32}, 2, mask_state::UNALLOCATED, stream, mr); + auto offsets_view = offsets_column->mutable_view(); + // set the first entry to 0 and the last entry to bytes + int32_t new_offsets[] = {0, bytes}; + CUDA_TRY(cudaMemcpyAsync(offsets_view.data(), + new_offsets, + sizeof(new_offsets), + cudaMemcpyHostToDevice, + stream)); + + // build null mask + // only one entry so it is either all valid or all null + size_type null_count = 0; + rmm::device_buffer null_mask; // init to null null-mask + if (strings.null_count() == strings_count && !narep.is_valid()) { + null_mask = create_null_mask(1, cudf::mask_state::ALL_NULL, stream, mr); + null_count = 1; + } + auto chars_column = + detail::create_chars_child_column(strings_count, null_count, bytes, mr, stream); + auto chars_view = chars_column->mutable_view(); + auto d_chars = chars_view.data(); + thrust::for_each_n( + execpol->on(stream), + thrust::make_counting_iterator(0), + strings_count, + [d_strings, d_separator, d_narep, d_output_offsets, d_chars] __device__(size_type idx) { + size_type offset = d_output_offsets[idx]; + char* d_buffer = d_chars + offset; + if (d_strings.is_null(idx)) { + if (!d_narep.is_valid()) + return; // do not write to buffer if element is null (including separator) + d_buffer = detail::copy_string(d_buffer, d_narep.value()); + } else { + string_view d_str = d_strings.element(idx); + d_buffer = detail::copy_string(d_buffer, d_str); + } + if ((idx + 1) < d_strings.size()) d_buffer = detail::copy_string(d_buffer, d_separator); + }); + + return make_strings_column(1, + std::move(offsets_column), + std::move(chars_column), + null_count, + std::move(null_mask), + stream, + mr); +} + +} // namespace detail + +// APIs + +std::unique_ptr concatenate(table_view const& strings_columns, + string_scalar const& separator, + string_scalar const& narep, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::concatenate(strings_columns, separator, narep, mr); +} + +std::unique_ptr join_strings(strings_column_view const& strings, + string_scalar const& separator, + string_scalar const& narep, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::join_strings(strings, separator, narep, mr); +} + +} // namespace strings +} // namespace cudf diff --git a/cuda_code/common_105.cu b/cuda_code/common_105.cu new file mode 100644 index 0000000000000000000000000000000000000000..d86fc90c2e3540db3ce5dff89d93fffb52b44a72 --- /dev/null +++ b/cuda_code/common_105.cu @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted + * provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, this list of + * conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *//* + */ + +/** @file common.cu + * @author Thomas Müller and Nikolaus Binder, NVIDIA + * @brief Common utilities that are needed by pretty much every component of this framework. + */ + +#include + +#include + +#include +#include + +TCNN_NAMESPACE_BEGIN + +static_assert( + __CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2), + "tiny-cuda-nn requires at least CUDA 10.2" +); + +int cuda_device() { + int device; + CUDA_CHECK_THROW(cudaGetDevice(&device)); + return device; +} + +bool cuda_supports_virtual_memory(int device) { + int supports_vmm; + CU_CHECK_THROW(cuDeviceGetAttribute(&supports_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED, device)); + return supports_vmm != 0; +} + +uint32_t cuda_compute_capability(int device) { + cudaDeviceProp props; + CUDA_CHECK_THROW(cudaGetDeviceProperties(&props, device)); + return props.major * 10 + props.minor; +} + +size_t cuda_memory_granularity(int device) { + size_t granularity; + CUmemAllocationProp prop = {}; + prop.type = CU_MEM_ALLOCATION_TYPE_PINNED; + prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE; + prop.location.id = 0; + CU_CHECK_THROW(cuMemGetAllocationGranularity(&granularity, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM)); + return granularity; +} + +MemoryInfo cuda_memory_info() { + MemoryInfo info; + CUDA_CHECK_THROW(cudaMemGetInfo(&info.free, &info.total)); + info.used = info.total - info.free; + return info; +} + +std::string to_lower(std::string str) { + std::transform(std::begin(str), std::end(str), std::begin(str), [](unsigned char c) { return (char)std::tolower(c); }); + return str; +} + +std::string to_upper(std::string str) { + std::transform(std::begin(str), std::end(str), std::begin(str), [](unsigned char c) { return (char)std::toupper(c); }); + return str; +} + +TCNN_NAMESPACE_END diff --git a/cuda_code/compare_against_shared_2.cu b/cuda_code/compare_against_shared_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..4fc5700048e85d9e54fae0f1b7e3555dafede0e4 --- /dev/null +++ b/cuda_code/compare_against_shared_2.cu @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../src/cudf_helper.cuh" +#include "../src/distributed.cuh" +#include "../src/error.cuh" +#include "../src/comm.cuh" + +#define BUILD_TABLE_SIZE 1'000'000 +#define PROBE_TABLE_SIZE 5'000'000 +#define SELECTIVITY 0.3 +#define RAND_MAX_VAL 2'000'000 +#define IS_BUILD_TABLE_KEY_UNIQUE true +#define OVER_DECOMPOSITION_FACTOR 10 + +#define KEY_T int +#define PAYLOAD_T int + + +int main(int argc, char *argv[]) +{ + /* Initialize communication */ + + UCXBufferCommunicator communicator; + communicator.initialize(argc, argv); + + int mpi_rank {communicator.mpi_rank}; + int mpi_size {communicator.mpi_size}; + + communicator.setup_cache(2 * 3 * 2 * 2 * mpi_size, 1'000'000LL); + communicator.warmup_cache(); + + /* Generate build table and probe table and compute reference solution */ + + std::vector build_table; + std::vector probe_table; + std::vector reference_result; + + gdf_context ctxt = { + 0, // input data is not sorted + gdf_method::GDF_HASH, // hash based join + 0 + }; + + int columns_to_join[] = {0}; + + if (mpi_rank == 0) { + generate_build_probe_tables( + build_table, BUILD_TABLE_SIZE, probe_table, PROBE_TABLE_SIZE, + SELECTIVITY, RAND_MAX_VAL, IS_BUILD_TABLE_KEY_UNIQUE + ); + + reference_result.resize(build_table.size() + probe_table.size() - 1, nullptr); + + for (auto & col_ptr : reference_result) { + col_ptr = new gdf_column; + } + + CHECK_ERROR( + gdf_inner_join(build_table.data(), build_table.size(), columns_to_join, + probe_table.data(), probe_table.size(), columns_to_join, + 1, build_table.size() + probe_table.size() - 1, reference_result.data(), + nullptr, nullptr, &ctxt), + GDF_SUCCESS, "gdf_inner_join" + ); + + } + + std::vector local_build_table = distribute_table(build_table, &communicator); + std::vector local_probe_table = distribute_table(probe_table, &communicator); + + /* Distributed join */ + + std::vector distributed_result; + + distributed_join( + local_build_table, local_probe_table, distributed_result, + &communicator, OVER_DECOMPOSITION_FACTOR + ); + + if (mpi_rank == 0) { + free_table(build_table); + free_table(probe_table); + } + + free_table(local_build_table); + free_table(local_probe_table); + + std::vector received_table; + + collect_tables(received_table, distributed_result, &communicator); + free_table(distributed_result); + + if (mpi_rank == 0) { + // hold the indices of sort result + gdf_column refernece_idx; + gdf_column received_idx; + + gdf_size_type size = reference_result[0]->size; + int ncols = reference_result.size(); + + assert(size == received_table[0]->size); + + /* Allocate device memory for sort indices */ + + void *data; + + CHECK_ERROR(RMM_ALLOC(&data, size * sizeof(int), 0), RMM_SUCCESS, "RMM_ALLOC"); + + CHECK_ERROR( + gdf_column_view(&refernece_idx, data, nullptr, size, GDF_INT32), + GDF_SUCCESS, "gdf_column_view" + ); + + CHECK_ERROR(RMM_ALLOC(&data, size * sizeof(int), 0), RMM_SUCCESS, "RMM_ALLOC"); + + CHECK_ERROR( + gdf_column_view(&received_idx, data, nullptr, size, GDF_INT32), + GDF_SUCCESS, "gdf_column_view" + ); + + /* Sort the 
reference table and reference table */ + + std::vector asc_desc(ncols, 0); + int8_t *asc_desc_dev; + CHECK_ERROR(RMM_ALLOC(&asc_desc_dev, ncols * sizeof(int8_t), 0), RMM_SUCCESS, "RMM_ALLOC"); + CHECK_ERROR( + cudaMemcpy(asc_desc_dev, asc_desc.data(), ncols * sizeof(int8_t), cudaMemcpyHostToDevice), + cudaSuccess, "cudaMemcpy" + ); + + CHECK_ERROR( + gdf_order_by(reference_result.data(), asc_desc_dev, ncols, &refernece_idx, &ctxt), + GDF_SUCCESS, "gdf_order_by" + ); + + CHECK_ERROR( + gdf_order_by(received_table.data(), asc_desc_dev, ncols, &received_idx, &ctxt), + GDF_SUCCESS, "gdf_order_by" + ); + + /* Verify correctness */ + + const int block_size = 128; + int nblocks {-1}; + + CHECK_ERROR( + cudaOccupancyMaxActiveBlocksPerMultiprocessor(&nblocks, verify_correctness, block_size, 0), + cudaSuccess, "cudaOccupancyMaxActiveBlocksPerMultiprocessor" + ); + + for (int icol = 0; icol < ncols; icol++) { + verify_correctness<<>>( + (int *)reference_result[icol]->data, + (int *)refernece_idx.data, + (int *)received_table[icol]->data, + (int *)received_idx.data, + size + ); + } + + CHECK_ERROR(RMM_FREE(asc_desc_dev, 0), RMM_SUCCESS, "RMM_FREE"); + CHECK_ERROR(gdf_column_free(&refernece_idx), GDF_SUCCESS, "gdf_column_free"); + CHECK_ERROR(gdf_column_free(&received_idx), GDF_SUCCESS, "gdf_column_free"); + + } + + /* Cleanup */ + + if (mpi_rank == 0) { + free_table(reference_result); + free_table(received_table); + } + + communicator.finalize(); + + if (mpi_rank == 0) { + std::cerr << "Test case \"compare_against_shared\" passes successfully.\n"; + } + + return 0; +} diff --git a/cuda_code/compare_segmented_scan.cu b/cuda_code/compare_segmented_scan.cu new file mode 100644 index 0000000000000000000000000000000000000000..98499e5fc19db38a63477f08b2d7ea6e3886e822 --- /dev/null +++ b/cuda_code/compare_segmented_scan.cu @@ -0,0 +1,406 @@ +/****************************************************************************** + * + * Copyright 2010-2011 Duane Merrill + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information, see our Google Code project site: + * http://code.google.com/p/back40computing/ + * + ******************************************************************************/ + + +/****************************************************************************** + * Simple test driver program for segmented scan. 
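+ *
+ * Runs both the b40c segmented-scan implementation and a Thrust-based
+ * emulation on the same flagged input, verifies both results against a
+ * host-computed reference, and reports the relative throughput (B40C speedup).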
+ ******************************************************************************/ + +#include + +// Test utils +#include "b40c_test_util.h" +#include "test_segmented_scan.h" + +#include +#include +#include +#include +#include + +using namespace b40c; + + +/****************************************************************************** + * Defines, constants, globals + ******************************************************************************/ + +bool g_verbose = false; +int g_max_ctas = 0; +int g_iterations = 1; +bool g_inclusive = false; + + +/****************************************************************************** + * Utility Routines + ******************************************************************************/ + +/** + * Displays the commandline usage for this tool + */ +void Usage() +{ + printf("\ncompare_segmented_scan [--device=] [--v] [--i=] " + "[--max-ctas=] [--n=] [--inclusive]\n"); + printf("\n"); + printf("\t--v\tDisplays copied results to the console.\n"); + printf("\n"); + printf("\t--i\tPerforms the segmented scan operation times\n"); + printf("\t\t\ton the device. Re-copies original input each time. Default = 1\n"); + printf("\n"); + printf("\t--n\tThe number of elements to comprise the sample problem\n"); + printf("\t\t\tDefault = 512\n"); + printf("\n"); +} + + +template +struct segmented_scan_functor +{ + AssociativeOperator binary_op; + + typedef typename thrust::tuple result_type; + + __host__ __device__ + segmented_scan_functor(AssociativeOperator _binary_op) : binary_op(_binary_op) {} + + __host__ __device__ + result_type operator()(result_type a, result_type b) + { + return result_type(thrust::get<1>(b) ? thrust::get<0>(b) : binary_op(thrust::get<0>(a), thrust::get<0>(b)), + thrust::get<1>(a) | thrust::get<1>(b)); + } +}; + + +/** + * Timed segmented scan. Uses the GPU to copy the specified vector of elements for the given + * number of iterations, displaying runtime information. + */ +template < + bool EXCLUSIVE, + typename T, + typename Flag, + typename SizeT, + typename ReductionOp, + typename IdentityOp> +double TimedThrustSegmentedScan( + T *h_data, + Flag *h_flag_data, + T *h_reference, + SizeT num_elements, + ReductionOp scan_op, + IdentityOp identity_op) +{ + using namespace b40c; + + // Allocate device storage + T *d_src, *d_dest; + Flag *d_flag_src; + if (util::B40CPerror(cudaMalloc((void**) &d_src, sizeof(T) * num_elements), + "TimedSegmentedScan cudaMalloc d_src failed: ", __FILE__, __LINE__)) exit(1); + if (util::B40CPerror(cudaMalloc((void**) &d_dest, sizeof(T) * num_elements), + "TimedSegmentedScan cudaMalloc d_dest failed: ", __FILE__, __LINE__)) exit(1); + if (util::B40CPerror(cudaMalloc((void**) &d_flag_src, sizeof(Flag) * num_elements), + "TimedSegmentedScan cudaMalloc d_dest failed: ", __FILE__, __LINE__)) exit(1); + + // Move a fresh copy of the problem into device storage + if (util::B40CPerror(cudaMemcpy(d_src, h_data, sizeof(T) * num_elements, cudaMemcpyHostToDevice), + "TimedSegmentedScan cudaMemcpy d_src failed: ", __FILE__, __LINE__)) exit(1); + if (util::B40CPerror(cudaMemcpy(d_flag_src, h_flag_data, sizeof(Flag) * num_elements, cudaMemcpyHostToDevice), + "TimedSegmentedScan cudaMemcpy d_src failed: ", __FILE__, __LINE__)) exit(1); + + // Perform a single iteration to allocate any memory if needed, prime code caches, etc. 
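+ // Note on the Thrust path below: the exclusive segmented scan is emulated
+ // with an inclusive scan. The input is shifted right by one element, segment
+ // heads are re-seeded with the identity value via replace_copy_if, and the
+ // shifted values are then scanned together with the head flags as
+ // (value, flag) tuples using segmented_scan_functor, which restarts the
+ // running sum wherever a flag is set. The inclusive variant scans the
+ // original input directly.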
+ thrust::device_ptr dev_src(d_src); + thrust::device_ptr dev_dest(d_dest); + thrust::device_ptr dev_flag_src(d_flag_src); + if (EXCLUSIVE) { + + // shift input one to the right and initialize segments with init + thrust::detail::raw_buffer temp(num_elements); + thrust::replace_copy_if( + dev_src, + dev_src + num_elements - 1, + dev_flag_src + 1, temp.begin() + 1, thrust::negate(), identity_op()); + temp[0] = identity_op(); + + thrust::detail::device::inclusive_scan(thrust::make_zip_iterator(thrust::make_tuple(temp.begin(), dev_flag_src)), + thrust::make_zip_iterator(thrust::make_tuple(temp.begin(), dev_flag_src)) + num_elements, + thrust::make_zip_iterator(thrust::make_tuple(dev_dest, dev_flag_src)), + segmented_scan_functor >(thrust::plus())); + + } else { + + thrust::detail::device::inclusive_scan + (thrust::make_zip_iterator(thrust::make_tuple(dev_src, dev_flag_src)), + thrust::make_zip_iterator(thrust::make_tuple(dev_src, dev_flag_src)) + num_elements, + thrust::make_zip_iterator(thrust::make_tuple(dev_dest, dev_flag_src)), + segmented_scan_functor >(thrust::plus())); + } + + // Perform the timed number of iterations + GpuTimer timer; + + double elapsed = 0; + for (int i = 0; i < g_iterations; i++) { + + // Move a fresh copy of flags into device storage because we destroyed it last time :( + if (util::B40CPerror(cudaMemcpy(d_flag_src, h_flag_data, sizeof(Flag) * num_elements, cudaMemcpyHostToDevice), + "TimedSegmentedScan cudaMemcpy d_src failed: ", __FILE__, __LINE__)) exit(1); + + // Start timing record + timer.Start(); + + if (EXCLUSIVE) { + + // shift input one to the right and initialize segments with init + thrust::detail::raw_buffer temp(num_elements); + thrust::replace_copy_if( + dev_src, + dev_src + num_elements - 1, + dev_flag_src + 1, temp.begin() + 1, thrust::negate(), identity_op()); + temp[0] = identity_op(); + + thrust::detail::device::inclusive_scan(thrust::make_zip_iterator(thrust::make_tuple(temp.begin(), dev_flag_src)), + thrust::make_zip_iterator(thrust::make_tuple(temp.begin(), dev_flag_src)) + num_elements, + thrust::make_zip_iterator(thrust::make_tuple(dev_dest, dev_flag_src)), + segmented_scan_functor >(thrust::plus())); + + } else { + + thrust::detail::device::inclusive_scan + (thrust::make_zip_iterator(thrust::make_tuple(dev_src, dev_flag_src)), + thrust::make_zip_iterator(thrust::make_tuple(dev_src, dev_flag_src)) + num_elements, + thrust::make_zip_iterator(thrust::make_tuple(dev_dest, dev_flag_src)), + segmented_scan_functor >(thrust::plus())); + } + + // End timing record + timer.Stop(); + elapsed += (double) timer.ElapsedMillis(); + } + + // Display timing information + double avg_runtime = elapsed / g_iterations; + double throughput = ((double) num_elements) / avg_runtime / 1000.0 / 1000.0; + printf("\nThrust SegmentedScan: %d iterations, %lu elements, ", g_iterations, (unsigned long) num_elements); + printf("%f GPU ms, %f x10^9 elts/sec", + avg_runtime, throughput); + + // Copy out data + T *h_dest = (T*) malloc(num_elements * sizeof(T)); + if (util::B40CPerror(cudaMemcpy(h_dest, d_dest, sizeof(T) * num_elements, cudaMemcpyDeviceToHost), + "TimedSegmentedScan cudaMemcpy d_dest failed: ", __FILE__, __LINE__)) exit(1); + + // Free allocated memory + if (d_src) cudaFree(d_src); + if (d_dest) cudaFree(d_dest); + if (d_flag_src) cudaFree(d_flag_src); + + // Flushes any stdio from the GPU + cudaThreadSynchronize(); + + // Display copied data + if (g_verbose) { + printf("\n\nData:\n"); + for (int i = 0; i < num_elements; i++) { + PrintValue(h_dest[i]); + 
printf(", "); + } + printf("\n\n"); + } + + // Verify solution + CompareResults(h_dest, h_reference, num_elements, true); + printf("\n"); + fflush(stdout); + + if (h_dest) free(h_dest); + + return throughput; +} + + + +/** + * Creates an example segmented scan problem and then dispatches the problem + * to the GPU for the given number of iterations, displaying runtime information. + */ +template< + typename T, + typename Flag, + bool EXCLUSIVE, + typename SizeT, + typename ReductionOp, + typename IdentityOp> +void TestSegmentedScan( + SizeT num_elements, + ReductionOp scan_op, + IdentityOp identity_op) +{ + // Allocate the segmented scan problem on the host and fill the keys with random bytes + + T *h_data = (T*) malloc(num_elements * sizeof(T)); + T *h_reference = (T*) malloc(num_elements * sizeof(T)); + Flag *h_flag_data = (Flag*) malloc(num_elements * sizeof(Flag)); + + if ((h_data == NULL) || (h_reference == NULL) || (h_flag_data == NULL)){ + fprintf(stderr, "Host malloc of problem data failed\n"); + exit(1); + } + + for (size_t i = 0; i < num_elements; ++i) { +// util::RandomBits(h_data[i], 0); +// util::RandomBits(h_flag_data[i], 0); + h_data[i] = 1; + h_flag_data[i] = (i % 11) == 0; + } + + for (size_t i = 0; i < num_elements; ++i) { + if (EXCLUSIVE) + { + h_reference[i] = ((i == 0) || (h_flag_data[i])) ? + identity_op() : + scan_op(h_reference[i - 1], h_data[i - 1]); + } else { + h_reference[i] = ((i == 0) || (h_flag_data[i])) ? + h_data[i] : + scan_op(h_reference[i - 1], h_data[i]); + } + } + + // + // Run the timing test(s) + // + + double b40c = TimedSegmentedScan( + h_data, + h_flag_data, + h_reference, + num_elements, + scan_op, + identity_op, + g_max_ctas, + g_verbose, + g_iterations); + + double thrust = TimedThrustSegmentedScan( + h_data, + h_flag_data, + h_reference, + num_elements, + scan_op, + identity_op); + + printf("B40C speedup: %.2f\n", b40c/thrust); + + + // Free our allocated host memory + if (h_data) free(h_data); + if (h_reference) free(h_reference); +} + + +/** + * Creates an example segmented scan problem and then dispatches the problem + * to the GPU for the given number of iterations, displaying runtime information. 
+ */ +template< + typename T, + typename Flag, + typename SizeT, + typename ReductionOp, + typename IdentityOp> +void TestSegmentedScanVariety( + SizeT num_elements, + ReductionOp scan_op, + IdentityOp identity_op) +{ + if (g_inclusive) { + TestSegmentedScan(num_elements, scan_op, identity_op); + } else { + TestSegmentedScan(num_elements, scan_op, identity_op); + } +} + + +/****************************************************************************** + * Main + ******************************************************************************/ + +int main(int argc, char** argv) +{ + // Initialize commandline args and device + CommandLineArgs args(argc, argv); + DeviceInit(args); + + // Seed random number generator + srand(0); // presently deterministic + //srand(time(NULL)); + + // Use 32-bit integer for array indexing + typedef int SizeT; + SizeT num_elements = 1024; + + // Parse command line arguments + if (args.CheckCmdLineFlag("help")) { + Usage(); + return 0; + } + g_inclusive = args.CheckCmdLineFlag("inclusive"); + args.GetCmdLineArgument("i", g_iterations); + args.GetCmdLineArgument("n", num_elements); + args.GetCmdLineArgument("max-ctas", g_max_ctas); + g_verbose = args.CheckCmdLineFlag("v"); + + typedef unsigned char Flag; + + // Execute test(s) + { + printf("\n-- UNSIGNED CHAR ----------------------------------------------\n"); + typedef unsigned char T; + Sum op; + TestSegmentedScanVariety(num_elements * 4, op, op); + } + { + printf("\n-- UNSIGNED SHORT ----------------------------------------------\n"); + typedef unsigned short T; + Sum op; + TestSegmentedScanVariety(num_elements * 2, op, op); + } + { + printf("\n-- UNSIGNED INT -----------------------------------------------\n"); + typedef unsigned int T; + Sum op; + TestSegmentedScanVariety(num_elements, op, op); + } + { + printf("\n-- UNSIGNED LONG LONG -----------------------------------------\n"); + typedef unsigned long long T; + Sum op; + TestSegmentedScanVariety(num_elements / 2, op, op); + } + + return 0; +} + + + diff --git a/cuda_code/concat_49.cu b/cuda_code/concat_49.cu new file mode 100644 index 0000000000000000000000000000000000000000..43c0e4af925c57e034e9fb39fe0f12d5b5734187 --- /dev/null +++ b/cuda_code/concat_49.cu @@ -0,0 +1,125 @@ +/******************************************************************************* + * Copyright (c) 2015-2018 Skymind, Inc. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + ******************************************************************************/ + +// +// @author Yurii Shyrma (iuriish@yahoo.com), created on 20.04.2018 +// + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace nd4j { +namespace ops { +namespace helpers { + + +/////////////////////////////////////////////////////////////////// +template +__global__ static void concatCuda(void* pVx, void* pxShapeInfo, void* vz, Nd4jLong* zShapeInfo, const int axis) { + + T* z = reinterpret_cast(vz); + __shared__ Nd4jLong zLen, totalThreads; + __shared__ int rank; + + if (threadIdx.x == 0) { + zLen = shape::length(zShapeInfo); + rank = shape::rank(zShapeInfo); + totalThreads = gridDim.x * blockDim.x; + } + __syncthreads(); + + const auto tid = blockIdx.x * blockDim.x + threadIdx.x; + + Nd4jLong coords[MAX_RANK]; + + for (uint64_t i = tid; i < zLen; i += totalThreads) { + shape::index2coords(i, zShapeInfo, coords); + + const auto zOffset = shape::getOffset(zShapeInfo, coords); + + int inArrIdx = 0; + Nd4jLong *xShapeInfo = reinterpret_cast(pxShapeInfo)[inArrIdx]; + + while (coords[axis] >= xShapeInfo[axis + 1]) { + coords[axis] -= xShapeInfo[axis + 1]; + xShapeInfo = reinterpret_cast(pxShapeInfo)[++inArrIdx]; + } + + const auto *x = reinterpret_cast(reinterpret_cast(pVx)[inArrIdx]); + const auto xOffset = shape::getOffset(xShapeInfo, coords); + + z[zOffset] = x[xOffset]; + } +} + +/////////////////////////////////////////////////////////////////// +template +__host__ static void concatCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, + void* pVx, void* pxShapeInfo, void* vz, Nd4jLong* zShapeInfo, const int axis) { + + concatCuda<<>>(pVx, pxShapeInfo, vz, zShapeInfo, axis); +} +BUILD_SINGLE_TEMPLATE(template void concatCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, void* pVx, void* pxShapeInfo, void* vz, Nd4jLong* zShapeInfo, const int axis), LIBND4J_TYPES); + +////////////////////////////////////////////////////////////////////////// +void concat(nd4j::LaunchContext * context, const std::vector& inArrs, NDArray& output, const int axis) { + + const int threadsPerBlock = 256; + const int blocksPerGrid = 512; + const int sharedMem = 512; + + const int numOfArrs = inArrs.size(); + + for(int i = 0; i < numOfArrs; ++i) + inArrs[i]->syncToDevice(); + + output.syncToDevice(); + + // prepare arrays of pointers on buffers and shapes + std::vector hInBuffers(numOfArrs); + std::vector hInShapeInfo(numOfArrs); + + for(int i = 0; i < numOfArrs; ++i) { + hInBuffers[i] = inArrs[i]->getSpecialBuffer(); + hInShapeInfo[i] = inArrs[i]->getSpecialShapeInfo(); + } + + PointersManager manager(context, "helpers::concat"); + + void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*)); + void* dInShapeInfo = manager.replicatePointer(hInShapeInfo.data(), hInShapeInfo.size() * sizeof(Nd4jLong*)); + + BUILD_SINGLE_SELECTOR(inArrs[0]->dataType(), concatCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), dInBuffers, dInShapeInfo, output.specialBuffer(), output.specialShapeInfo(), axis), LIBND4J_TYPES); + + manager.synchronize(); + + for(int i = 0; i < numOfArrs; ++i) + inArrs[i]->tickReadDevice(); + + output.tickWriteDevice(); +} + +} +} +} \ No newline at end of file diff --git a/cuda_code/concat_58.cu b/cuda_code/concat_58.cu new file 
mode 100644 index 0000000000000000000000000000000000000000..a9a9445b31a3b527ab6e280f5712bc560da7414a --- /dev/null +++ b/cuda_code/concat_58.cu @@ -0,0 +1,542 @@ +/* Copyright 2017 Stanford, NVIDIA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "model.h" +#include "cuda_helper.h" + +Tensor FFModel::concat(int n, + const Tensor* tensors, + int axis, + const char *name) +{ + Concat *cat = new Concat(*this, n, tensors, axis, name); + layers.push_back(cat); + return cat->outputs[0]; +} + +Concat::Concat(FFModel& model, + int _n, const Tensor* _tensors, + int _axis, + const char* name) +: Op(model, OP_CONCAT, name, _n, _tensors), axis(_axis) +{ + //TODO: swich to use the Legion dim ordering + int num_dim = inputs[0].numDim; + outputs[0].numDim = num_dim; + for (int i = 0; i < num_dim; i++) + outputs[0].adim[i] = inputs[0].adim[i]; + for (int i = 1; i < numInputs; i++) + for (int j = 0; j < num_dim; j++) { + if (j != num_dim - 1 - axis) + assert(inputs[i].adim[j] == outputs[0].adim[j]); + else + outputs[0].adim[j] += inputs[i].adim[j]; + } + numOutputs = 1; + numWeights = 0; +} + +void Concat::create_weights(FFModel& model) +{ + // DO nothing +} + +void Concat::create_output_and_partition(FFModel& model) +{ + // Retrive the task indexspace for the op + std::string pcname = name; + task_is = model.get_or_create_task_is(inputs[0].numDim, pcname); + + Context ctx = model.config.lg_ctx; + Runtime* runtime = model.config.lg_hlr; + Domain domain = runtime->get_index_space_domain(ctx, task_is); + int dims[MAX_TENSOR_DIM], num_dim = inputs[0].numDim; + assert(num_dim == domain.get_dim()); + for (int i = 0; i < num_dim; i++) + dims[i] = inputs[0].adim[num_dim-1-i]; + for (int i = 1; i < numInputs; i++) + for (int j = 0; j < num_dim; j++) { + if (j != axis) + assert(inputs[i].adim[num_dim-1-j] == dims[j]); + else + dims[j] += inputs[i].adim[num_dim-1-j]; + } + //for (int i = 0; i < num_dim; i++) + //printf("concat: dim[%d] = %d\n", i, dims[i]); + switch (domain.get_dim()) { +#define DIMFUNC(DIM) \ + case DIM: \ + { \ + Rect part_rect = domain; \ + outputs[0] = model.create_tensor(dims, DT_FLOAT, this); \ + outputs[0].owner_op = this; \ + outputs[0].owner_idx = 0; \ + for (int i = 0; i < numInputs; i++) { \ + Rect input_rect = runtime->get_index_partition_color_space( \ + ctx, inputs[i].part.get_index_partition()); \ + if (input_rect == part_rect) { \ + input_lps[i] = inputs[i].part; \ + input_grad_lps[i] = inputs[i].part_grad; \ + } else { \ + model.create_disjoint_partition(inputs[i], \ + IndexSpaceT(task_is), input_lps[i], input_grad_lps[i]); \ + } \ + } \ + break; \ + } + LEGION_FOREACH_N(DIMFUNC) +#undef DIMFUNC + default: + { + fprintf(stderr, "Unsupported concat dimension number"); + assert(false); + } + } +} + +void Concat::init_meta(ConcatMeta *m) const +{ + m->axis = this->outputs[0].numDim - 1 - this->axis; +} + +__host__ +OpMeta* Concat::init_task(const Task *task, + const std::vector ®ions, + Context ctx, Runtime *runtime) +{ + Concat* cc = (Concat*) task->args; + 
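+  // Illustrative note (added commentary, not part of the original source): init_meta()
+  // flips the user-facing axis into the internal ordering via numDim - 1 - axis, so for
+  // a 4-D NCHW tensor a channel concat requested with axis = 1 is stored internally as
+  // m->axis = 4 - 1 - 1 = 2.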
FFHandler handler = *((const FFHandler*) task->local_args); + ConcatMeta* m = new ConcatMeta(handler); + // Note that our internal axis index ordering is opposite to other frameworks + cc->init_meta(m); + m->profiling = cc->profiling; + return m; +} + +void Concat::init(const FFModel& ff) +{ + ArgumentMap argmap; + Context ctx = ff.config.lg_ctx; + Runtime* runtime = ff.config.lg_hlr; + Domain domain = runtime->get_index_space_domain(ctx, task_is); + switch (domain.get_dim()) { +#define DIMFUNC(DIM) \ + case DIM: \ + { \ + Rect rect = domain; \ + ParallelConfig pc; \ + std::string pcname = name; \ + ff.config.find_parallel_config(DIM, pcname, pc); \ + int idx = 0; \ + for (PointInRectIterator it(rect); it(); it++) { \ + FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \ + argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \ + } \ + break; \ + } + LEGION_FOREACH_N(DIMFUNC) +#undef DIMFUNC + default: + assert(false); + } + IndexLauncher launcher(CONCAT_INIT_TASK_ID, task_is, + TaskArgument(this, sizeof(Concat)), argmap, + Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, + FFConfig::get_hash_id(std::string(name))); + launcher.add_region_requirement( + RegionRequirement(outputs[0].part, 0/*projection id*/, + WRITE_ONLY, EXCLUSIVE, outputs[0].region)); + launcher.add_field(0, FID_DATA); + for (int i = 0; i < numInputs; i++) { + launcher.add_region_requirement( + RegionRequirement(input_lps[i], 0/*projection id*/, + READ_ONLY, EXCLUSIVE, inputs[i].region)); + launcher.add_field(i + 1, FID_DATA); + } + for (int i = 0; i < numInputs; i++) { + launcher.add_region_requirement( + RegionRequirement(input_grad_lps[i], 0/*projection id*/, + WRITE_ONLY, EXCLUSIVE, inputs[i].region_grad)); + launcher.add_field(i + numInputs + 1, FID_DATA); + } + FutureMap fm = runtime->execute_index_space(ctx, launcher); + fm.wait_all_results(); + switch (domain.get_dim()) { +#define DIMFUNC(DIM) \ + case DIM: \ + { \ + Rect rect = domain; \ + int idx = 0; \ + for (PointInRectIterator it(rect); it(); it++) { \ + meta[idx++] = fm.get_result(*it); \ + } \ + break; \ + } + LEGION_FOREACH_N(DIMFUNC) +#undef DIMFUNC + default: + assert(false); + } +} + +template +void calc_blk_size(coord_t& num_blocks, + coord_t& blk_size, + Rect rect, + int axis) +{ + num_blocks = 1; + blk_size = 1; + for (int d = 0; d < N; d++) { + if (d <= axis) + blk_size *= (rect.hi[d] - rect.lo[d] + 1); + else + num_blocks *= (rect.hi[d] - rect.lo[d] + 1); + } +} + +/*static*/ +void Concat::forward_kernel(float* output, + float const * const *inputs, + int num_inputs, + int axis, + const Domain& out_domain, + const Domain* in_domain, + cudaStream_t stream) +{ + coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS]; + assert(num_inputs <= MAX_NUM_INPUTS); + switch (out_domain.get_dim()) { +#define DIMFUNC(DIM) \ + case DIM: \ + { \ + Rect rect = out_domain; \ + calc_blk_size(num_blocks, output_blk_size, rect, axis); \ + for (int i = 0; i < num_inputs; i++) { \ + rect = in_domain[i]; \ + coord_t input_num_blocks = 1; \ + calc_blk_size(input_num_blocks, input_blk_sizes[i], rect, axis); \ + assert(input_num_blocks == num_blocks); \ + } \ + break; \ + } + LEGION_FOREACH_N(DIMFUNC) +#undef DIMFUNC + default: + fprintf(stderr, "Unsupported concat dimension number"); + assert(false); + } + + for (int i = 0; i < num_inputs; i++) { + copy_with_stride<<>>( + output, inputs[i], num_blocks, output_blk_size, input_blk_sizes[i]); + //printf("output = %x num_blocks=%d output_blk_size=%d input_blk_size[%d]=%d\n", + // output, 
num_blocks, output_blk_size, i, input_blk_sizes[i]); + output += input_blk_sizes[i]; + } +} + +/* + regions[0](O): output + regions[1..numInputs](I): inputs +*/ +void Concat::forward_task(const Task *task, + const std::vector ®ions, + Context ctx, Runtime *runtime) +{ + const Concat* cc = (Concat*) task->args; + // Note that our internal axis index ordering is opposite to other frameworks + int axis = cc->outputs[0].numDim - 1 - cc->axis; + assert(regions.size() == cc->numInputs + 1); + assert(task->regions.size() == cc->numInputs + 1); + Domain out_domain = runtime->get_index_space_domain( + ctx, task->regions[0].region.get_index_space()); + assert(out_domain.get_dim() == cc->outputs[0].numDim); + Domain in_domain[MAX_NUM_INPUTS]; + for (int i = 0; i < cc->numInputs; i++) + in_domain[i] = runtime->get_index_space_domain( + ctx, task->regions[i+1].region.get_index_space()); + float *output = helperGetTensorPointerWO( + regions[0], task->regions[0], FID_DATA, ctx, runtime); + const float *inputs[MAX_NUM_INPUTS]; + for (int i = 0; i < cc->numInputs; i++) + inputs[i] = helperGetTensorPointerRO( + regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime); + + cudaStream_t stream; + checkCUDA(get_legion_stream(&stream)); + + cudaEvent_t t_start, t_end; + if (cc->profiling) { + cudaEventCreate(&t_start); + cudaEventCreate(&t_end); + cudaEventRecord(t_start, stream); + } + forward_kernel(output, inputs, cc->numInputs, axis, out_domain, in_domain, stream); + if (cc->profiling) { + cudaEventRecord(t_end, stream); + checkCUDA(cudaEventSynchronize(t_end)); + //print_tensor<4, float>(output - output_blk_size, output_rect, "[Concat:forward:output]"); + //printf("output_blk_size=%zu\n", output_blk_size); + //print_tensor<4, float>(inputs[0], input_rect[0], "[Concat:forward:input0]"); + //print_tensor<4, float>(inputs[1], input_rect[1], "[Concat:forward:input1]"); + float elapsed = 0; + checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); + printf("[%s] forward time = %.4f ms\n", cc->name, elapsed); + cudaEventDestroy(t_start); + cudaEventDestroy(t_end); + } +} + +void Concat::forward(const FFModel& ff) +{ + ArgumentMap argmap; + Context ctx = ff.config.lg_ctx; + Runtime* runtime = ff.config.lg_hlr; + IndexLauncher launcher(CONCAT_FWD_TASK_ID, task_is, + TaskArgument(this, sizeof(Concat)), argmap, + Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, + FFConfig::get_hash_id(std::string(name))); + launcher.add_region_requirement( + RegionRequirement(outputs[0].part, 0/*projection id*/, + WRITE_ONLY, EXCLUSIVE, outputs[0].region)); + launcher.add_field(0, FID_DATA); + for (int i = 0; i < numInputs; i++) { + launcher.add_region_requirement( + RegionRequirement(input_lps[i], 0/*projection id*/, + READ_ONLY, EXCLUSIVE, inputs[i].region)); + launcher.add_field(i + 1, FID_DATA); + } + runtime->execute_index_space(ctx, launcher); +} + +void Concat::backward_kernel(const float* output_grad, + float** input_grads, + int num_inputs, + int axis, + const Domain& out_grad_domain, + const Domain* in_grad_domain, + cudaStream_t stream) +{ + coord_t num_blocks = 1, output_blk_size = 1, input_blk_sizes[MAX_NUM_INPUTS]; + assert(num_inputs <= MAX_NUM_INPUTS); + switch (out_grad_domain.get_dim()) { +#define DIMFUNC(DIM) \ + case DIM: \ + { \ + Rect rect = out_grad_domain; \ + calc_blk_size(num_blocks, output_blk_size, rect, axis); \ + for (int i = 0; i < num_inputs; i++) { \ + rect = in_grad_domain[i]; \ + coord_t input_num_blocks = 1; \ + calc_blk_size(input_num_blocks, input_blk_sizes[i], rect, axis); \ + 
assert(input_num_blocks == num_blocks); \ + } \ + break; \ + } + LEGION_FOREACH_N(DIMFUNC) +#undef DIMFUNC + default: + fprintf(stderr, "Unsupported concat dimension number"); + assert(false); + } + + for (int i = 0; i < num_inputs; i++) { + add_with_stride<<>>( + input_grads[i], output_grad, num_blocks, input_blk_sizes[i], output_blk_size); + output_grad += input_blk_sizes[i]; + } + + //Rect<2> output_rect(Point<2>(0, 0), Point<2>(output_blk_size-1, batch_size - 1)); + //Rect<2> input_rect(Point<2>(0, 0), Point<2>(input_blk_sizes[0]-1, batch_size - 1)); + //print_tensor<2, float>(output_grad - output_blk_size, output_rect, "[Concat:backward:output]"); + //print_tensor<2, float>(input_grads[0], input_rect, "[Concat:backward:input0]"); +} + +/* + regions[0](I): output_grad + regions[1..numInputs](I/O): input_grad +*/ +void Concat::backward_task(const Task *task, + const std::vector ®ions, + Context ctx, Runtime *runtime) +{ + const Concat* cc = (Concat*) task->args; + // Note that our internal axis index ordering is opposite to other frameworks + int axis = cc->outputs[0].numDim - 1 - cc->axis; + assert(regions.size() == cc->numInputs + 1); + assert(task->regions.size() == cc->numInputs + 1); + assert(cc->numInputs <= MAX_NUM_INPUTS); + Domain out_grad_domain = runtime->get_index_space_domain( + ctx, task->regions[0].region.get_index_space()); + assert(out_grad_domain.get_dim() == cc->outputs[0].numDim); + Domain in_grad_domains[MAX_NUM_INPUTS]; + for (int i = 0; i < cc->numInputs; i++) + in_grad_domains[i] = runtime->get_index_space_domain( + ctx, task->regions[i+1].region.get_index_space()); + const float *output_grad = helperGetTensorPointerRO( + regions[0], task->regions[0], FID_DATA, ctx, runtime); + float *input_grads[MAX_NUM_INPUTS]; + for (int i = 0; i < cc->numInputs; i++) + input_grads[i] = helperGetTensorPointerRW( + regions[i+1], task->regions[i+1], FID_DATA, ctx, runtime); + + cudaStream_t stream; + checkCUDA(get_legion_stream(&stream)); + + cudaEvent_t t_start, t_end; + if (cc->profiling) { + cudaEventCreate(&t_start); + cudaEventCreate(&t_end); + cudaEventRecord(t_start, stream); + } + backward_kernel(output_grad, input_grads, cc->numInputs, axis, + out_grad_domain, in_grad_domains, stream); + if (cc->profiling) { + cudaEventRecord(t_end, stream); + checkCUDA(cudaEventSynchronize(t_end)); + float elapsed = 0; + checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); + printf("[%s] forward time = %.4f ms\n", cc->name, elapsed); + cudaEventDestroy(t_start); + cudaEventDestroy(t_end); + } +} + +void Concat::backward(const FFModel& ff) +{ + ArgumentMap argmap; + Context ctx = ff.config.lg_ctx; + Runtime* runtime = ff.config.lg_hlr; + IndexLauncher launcher(CONCAT_BWD_TASK_ID, task_is, + TaskArgument(this, sizeof(Concat)), argmap, + Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, + FFConfig::get_hash_id(std::string(name))); + launcher.add_region_requirement( + RegionRequirement(outputs[0].part_grad, 0/*projection id*/, + READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); + launcher.add_field(0, FID_DATA); + for (int i = 0; i < numInputs; i++) { + launcher.add_region_requirement( + RegionRequirement(input_grad_lps[i], 0/*projection id*/, + READ_WRITE, EXCLUSIVE, inputs[i].region_grad)); + //LogicalRegion lr = inputs[i].region_grad; + //printf("concat[%d]: region(%d,%d,%d)\n", i+1, lr.get_index_space().get_id(), lr.get_field_space().get_id(), lr.get_tree_id()); + launcher.add_field(i + 1, FID_DATA); + } + runtime->execute_index_space(ctx, launcher); +} + + +bool 
Concat::measure_operator_cost(Simulator* sim, + const ParallelConfig& pc, + CostMetrics& cost_metrics) +{ + assert (numInputs <= MAX_NUM_INPUTS); + Tensor sub_inputs[MAX_NUM_INPUTS], sub_output; + if (!outputs[0].get_output_sub_tensor(pc, sub_output, op_type)) { + return false; + } + for (int i = 0; i < numInputs; i++) { + if (!inputs[i].get_input_sub_tensor(pc, sub_inputs[i], op_type)) { + return false; + } + } + + ConcatMeta *m = sim->concat_meta; + this->init_meta(m); + + sim->free_all(); + float *input_ptrs[MAX_NUM_INPUTS]; + float *input_grad_ptrs[MAX_NUM_INPUTS]; + for (int i = 0; i < numInputs; i++) { + input_ptrs[i] = (float *)sim->allocate(sub_inputs[i].get_volume(), DT_FLOAT); + if (input_ptrs[i] == NULL) { + cost_metrics.forward_time = -1; + cost_metrics.backward_time = -1; + return true; + } + assert (input_ptrs[i] != NULL); + } + float *output_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); + if (output_ptr == NULL) { + cost_metrics.forward_time = -1; + cost_metrics.backward_time = -1; + return true; + } + assert (output_ptr != NULL); + + int axis = outputs[0].numDim - 1 - this->axis; + + Domain out_domain = sub_output.get_domain(); + Domain in_domains[MAX_NUM_INPUTS]; + for (int i = 0; i < numInputs; i++) { + in_domains[i] = sub_inputs[i].get_domain(); + } + + cudaStream_t stream; + checkCUDA(get_legion_stream(&stream)); + + std::function forward , backward = []{}; + forward = [&] { + forward_kernel(output_ptr, input_ptrs, numInputs, axis, out_domain, in_domains, stream); + }; + inner_measure_operator_cost(sim, forward, backward, cost_metrics); + float fwtime = cost_metrics.forward_time; + size_t memreq = cost_metrics.memory_requirement; + if (sim->computationMode == COMP_MODE_TRAINING) { + sim->free_all(); + for (int i = 0; i < numInputs; i++) { + input_grad_ptrs[i] = (float *)sim->allocate(sub_inputs[i].get_volume(), DT_FLOAT); + if (input_grad_ptrs[i] == NULL) { + cost_metrics.forward_time = -1; + cost_metrics.backward_time = -1; + return false; + } + assert (input_grad_ptrs[i] != NULL); + } + float *output_grad_ptr = (float *)sim->allocate(sub_output.get_volume(), DT_FLOAT); + if (output_grad_ptr == NULL) { + cost_metrics.forward_time = -1; + cost_metrics.backward_time = -1; + return false; + } + assert (output_grad_ptr != NULL); + forward = [] {}; + backward = [&] { + backward_kernel(output_grad_ptr, input_grad_ptrs, + numInputs, axis, out_domain, in_domains, stream); + }; + inner_measure_operator_cost(sim, forward, backward, cost_metrics); + cost_metrics.forward_time = fwtime; + cost_metrics.memory_requirement += memreq; + } + + if (sim->computationMode == COMP_MODE_TRAINING) { + printf("[Measure Concat] name(%s) forward_time(%.4lf) backward_time(%.4lf)\n", + name, + cost_metrics.forward_time, + cost_metrics.backward_time); + } else { + printf("[Measure Concat] name(%s) forward_time(%.4lf)\n", + name, cost_metrics.forward_time); + } + + return true; +} + +std::string Concat::get_name_structure() const { + return "Concat_"+std::to_string(axis); +} \ No newline at end of file diff --git a/cuda_code/concat_impl_3.cu b/cuda_code/concat_impl_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..2a24efe9ca29a2fb3bc0fae1f6dbe8081253d332 --- /dev/null +++ b/cuda_code/concat_impl_3.cu @@ -0,0 +1,96 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
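+//
+// Reader's note (added commentary, not part of the original file): the _ConcatKernel
+// below maps each flat output element id back to its source tensor with two fast_divmod
+// decompositions. As a hedged worked example, concatenating shapes [2,3,4] and [2,5,4]
+// along axis 1 implies block_size_inside_axis_dim = 4 and block_size_including_axis_dim = 32;
+// an output id first splits into (outer block, offset-within-block), that offset then splits
+// into an axis position and an element offset, and the axis position selects the source input
+// via axis_dimension_input_output_mapping before the per-input read offset is recomputed.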
+ +#include "core/providers/cuda/cu_inc/common.cuh" +#include "core/providers/cuda/cuda_common.h" +#include "concat_impl.h" + +namespace onnxruntime { +namespace cuda { + +template +__global__ void _ConcatKernel(const fast_divmod block_size_including_axis_dim_div, + const fast_divmod block_size_inside_axis_dim_div, + const int64_t* concat_sizes, + const int64_t* concat_sizes_range, + const int64_t* axis_dimension_input_output_mapping, + T* output_data, + const void** input_ptr, + const CUDA_LONG N) { + CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); + CUDA_LONG input_pos = 0; + + int outter_block_index = 0; + int block_index = 0; + int offset = 0; + + block_size_including_axis_dim_div.divmod(id, outter_block_index, offset); + block_size_inside_axis_dim_div.divmod(offset, block_index, offset); + + int input_index = axis_dimension_input_output_mapping[block_index]; + int64_t range_left = (input_index == 0) ? 0 : concat_sizes_range[input_index - 1]; + int block_offset = block_index - range_left; + + input_pos = (outter_block_index * concat_sizes[input_index] + block_offset) * + block_size_inside_axis_dim_div.d_ + + offset; + + output_data[id] = reinterpret_cast(input_ptr[input_index])[input_pos]; +} + +Status ConcatImpl(const size_t element_bytes, + const int block_size_including_axis_dim, + const int block_size_inside_axis_dim, + const int64_t* concat_sizes, + const int64_t* concat_sizes_range, + const int64_t* axis_dimension_input_output_mapping, + void* output_data, + const void** input_ptr, + const size_t N) { + int blocksPerGrid = (int)(ceil(static_cast(N) / GridDim::maxThreadsPerBlock)); + + fast_divmod block_size_including_axis_dim_div = fast_divmod(block_size_including_axis_dim); + fast_divmod block_size_inside_axis_dim_div = fast_divmod(block_size_inside_axis_dim); + + switch (element_bytes) { + case sizeof(int8_t): + _ConcatKernel<<>>( + block_size_including_axis_dim_div, block_size_inside_axis_dim_div, + concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping, + reinterpret_cast(output_data), + input_ptr, + (CUDA_LONG)N); + break; + case sizeof(int16_t): + _ConcatKernel<<>>( + block_size_including_axis_dim_div, block_size_inside_axis_dim_div, + concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping, + reinterpret_cast(output_data), + input_ptr, + (CUDA_LONG)N); + break; + case sizeof(int32_t): + _ConcatKernel<<>>( + block_size_including_axis_dim_div, block_size_inside_axis_dim_div, + concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping, + reinterpret_cast(output_data), + input_ptr, + (CUDA_LONG)N); + break; + case sizeof(int64_t): + _ConcatKernel<<>>( + block_size_including_axis_dim_div, block_size_inside_axis_dim_div, + concat_sizes, concat_sizes_range, axis_dimension_input_output_mapping, + reinterpret_cast(output_data), + input_ptr, + (CUDA_LONG)N); + break; + default: + return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported for Concat operator"); + } + + return Status::OK(); +} + +} // namespace cuda +} // namespace onnxruntime diff --git a/cuda_code/concat_layer_11.cu b/cuda_code/concat_layer_11.cu new file mode 100644 index 0000000000000000000000000000000000000000..c22d749296f940b1d5e2087839c5c2ba5bdc3c56 --- /dev/null +++ b/cuda_code/concat_layer_11.cu @@ -0,0 +1,74 @@ +#include + +#include "caffe/layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { + +template +__global__ void Concat(const int nthreads, const Dtype* in_data, + const bool forward, const int num_concats, 
const int concat_size, + const int top_concat_axis, const int bottom_concat_axis, + const int offset_concat_axis, Dtype* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int total_concat_size = concat_size * bottom_concat_axis; + const int concat_num = index / total_concat_size; + const int concat_index = index % total_concat_size; + const int top_index = concat_index + + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; + if (forward) { + out_data[top_index] = in_data[index]; + } else { + out_data[index] = in_data[top_index]; + } + } +} + +template +void ConcatLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + if (bottom.size() == 1) { return; } + Dtype* top_data = top[0]->mutable_gpu_data(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = true; + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, bottom_data, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); + offset_concat_axis += bottom_concat_axis; + } +} + +template +void ConcatLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (bottom.size() == 1) { return; } + const Dtype* top_diff = top[0]->gpu_diff(); + int offset_concat_axis = 0; + const int top_concat_axis = top[0]->shape(concat_axis_); + const bool kForward = false; + for (int i = 0; i < bottom.size(); ++i) { + const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + if (propagate_down[i]) { + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + const int bottom_concat_size = bottom_concat_axis * concat_input_size_; + const int nthreads = bottom_concat_size * num_concats_; + Concat // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, top_diff, kForward, num_concats_, concat_input_size_, + top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); + } + offset_concat_axis += bottom_concat_axis; + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); + +} // namespace caffe diff --git a/cuda_code/concurrencyMemcpyKernelMapped.cu b/cuda_code/concurrencyMemcpyKernelMapped.cu new file mode 100644 index 0000000000000000000000000000000000000000..6394f1fe360ebb99be0a7407175dc7ef8fba85e5 --- /dev/null +++ b/cuda_code/concurrencyMemcpyKernelMapped.cu @@ -0,0 +1,119 @@ +/* + * + * concurrencyMemcpyKernelMapped.cu + * + * Microbenchmark to shmoo the speedup from concurrent kernels when + * operating on mapped pinned memory. + * + * NOTE: To date, I have not found a piece of hardware where this is faster. + * + * Build with: nvcc -I ../chLib concurrencyMemcpyKernelMapped.cu + * Requires: SM 1.1 for mapped pinned memory and global atomics. + * + * Copyright (c) 2011-2012, Archaea Software, LLC. + * All rights reserved. + + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include + +#include "chAssert.h" +#include "chError.h" +#include "chShmoo.h" +#include "chCommandLine.h" +#include "chTimer.h" + +#include "AddKernel.cuh" + +#include "TimeConcurrentMemcpyKernel.cuh" +#include "TimeSequentialMemcpyKernelMapped.cuh" +#include "TimeConcurrentKernelMapped.cuh" + +int +main( int argc, char *argv[] ) +{ + const int numTimes = 256; + float timesSequential[numTimes]; + float timesConcurrent[numTimes]; + int numBlocks; + int unrollFactor = 1; + const size_t numInts = 32*1048576; + + cudaSetDeviceFlags( cudaDeviceMapHost ); + + chCommandLineGet( &unrollFactor, "unrollFactor", argc, argv ); + chShmooRange cyclesRange; + { + const int minCycles = 8; + const int maxCycles = 512; + const int stepCycles = 8; + cyclesRange.Initialize( minCycles, maxCycles, stepCycles ); + chCommandLineGet( &cyclesRange, "Cycles", argc, argv ); + } + + chShmooRange streamsRange; + { + int numStreams = 8; + if ( ! chCommandLineGet( &streamsRange, "streams", argc, argv ) ) { + streamsRange.Initialize( numStreams ); + } + } + + { + cudaDeviceProp props; + cudaGetDeviceProperties( &props, 0 ); + int multiplier = 16; + chCommandLineGet( &multiplier, "blocksPerSM", argc, argv ); + numBlocks = props.multiProcessorCount * multiplier; + printf( "Using %d blocks per SM on GPU with %d SMs = %d blocks\n", multiplier, + props.multiProcessorCount, numBlocks ); + } + + printf( "Timing mapped operations" ); + if ( ! TimeSequentialMemcpyKernelMapped( timesSequential, numInts, cyclesRange, numBlocks, unrollFactor ) ) { + printf( "TimeSequentialMemcpyKernelMapped failed\n" ); + return 1; + } + printf( "\nTiming streamed operations" ); + if ( ! TimeConcurrentKernelMapped( timesConcurrent, numInts, cyclesRange, streamsRange, numBlocks, unrollFactor ) ) { + printf( "TimeConcurrentMemcpyKernel failed\n" ); + return 1; + } + + printf( "\n%d integers\n", (int) numInts ); + printf( "Cycles\tMapped\tStreamed\tSpeedup\n" ); + + int index = 0; + for ( chShmooIterator cycles(cyclesRange); cycles; cycles++, index++ ) { + printf( "%d\t%.2f\t%.2f\t%.2f\n", + *cycles, timesSequential[index], timesConcurrent[index], + timesConcurrent[index] / timesSequential[index] ); + } + + return 0; +} diff --git a/cuda_code/conditional_join_3.cu b/cuda_code/conditional_join_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..dc62eeec5393e8d09df8076ccfc6ecb880ffad2a --- /dev/null +++ b/cuda_code/conditional_join_3.cu @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2021, NVIDIA CORPORATION. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace cudf { +namespace detail { + +std::pair>, + std::unique_ptr>> +conditional_join(table_view const& left, + table_view const& right, + ast::expression const& binary_predicate, + join_kind join_type, + std::optional output_size, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + // We can immediately filter out cases where the right table is empty. In + // some cases, we return all the rows of the left table with a corresponding + // null index for the right table; in others, we return an empty output. + auto right_num_rows{right.num_rows()}; + auto left_num_rows{left.num_rows()}; + if (right_num_rows == 0) { + switch (join_type) { + // Left, left anti, and full all return all the row indices from left + // with a corresponding NULL from the right. + case join_kind::LEFT_JOIN: + case join_kind::LEFT_ANTI_JOIN: + case join_kind::FULL_JOIN: return get_trivial_left_join_indices(left, stream); + // Inner and left semi joins return empty output because no matches can exist. + case join_kind::INNER_JOIN: + case join_kind::LEFT_SEMI_JOIN: + return std::make_pair(std::make_unique>(0, stream, mr), + std::make_unique>(0, stream, mr)); + default: CUDF_FAIL("Invalid join kind."); break; + } + } else if (left_num_rows == 0) { + switch (join_type) { + // Left, left anti, left semi, and inner joins all return empty sets. + case join_kind::LEFT_JOIN: + case join_kind::LEFT_ANTI_JOIN: + case join_kind::INNER_JOIN: + case join_kind::LEFT_SEMI_JOIN: + return std::make_pair(std::make_unique>(0, stream, mr), + std::make_unique>(0, stream, mr)); + // Full joins need to return the trivial complement. + case join_kind::FULL_JOIN: { + auto ret_flipped = get_trivial_left_join_indices(right, stream); + return std::make_pair(std::move(ret_flipped.second), std::move(ret_flipped.first)); + } + default: CUDF_FAIL("Invalid join kind."); break; + } + } + + // If evaluating the expression may produce null outputs we create a nullable + // output column and follow the null-supporting expression evaluation code + // path. + auto const has_nulls = binary_predicate.may_evaluate_null(left, right, stream); + + auto const parser = + ast::detail::expression_parser{binary_predicate, left, right, has_nulls, stream, mr}; + CUDF_EXPECTS(parser.output_type().id() == type_id::BOOL8, + "The expression must produce a boolean output."); + + auto left_table = table_device_view::create(left, stream); + auto right_table = table_device_view::create(right, stream); + + // For inner joins we support optimizing the join by launching one thread for + // whichever table is larger rather than always using the left table. + auto swap_tables = (join_type == join_kind::INNER_JOIN) && (right_num_rows > left_num_rows); + detail::grid_1d const config(swap_tables ? 
right_num_rows : left_num_rows, + DEFAULT_JOIN_BLOCK_SIZE); + auto const shmem_size_per_block = parser.shmem_per_thread * config.num_threads_per_block; + join_kind const kernel_join_type = + join_type == join_kind::FULL_JOIN ? join_kind::LEFT_JOIN : join_type; + + // If the join size was not provided as an input, compute it here. + std::size_t join_size; + if (output_size.has_value()) { + join_size = *output_size; + } else { + // Allocate storage for the counter used to get the size of the join output + rmm::device_scalar size(0, stream, mr); + if (has_nulls) { + compute_conditional_join_output_size + <<>>( + *left_table, + *right_table, + kernel_join_type, + parser.device_expression_data, + swap_tables, + size.data()); + } else { + compute_conditional_join_output_size + <<>>( + *left_table, + *right_table, + kernel_join_type, + parser.device_expression_data, + swap_tables, + size.data()); + } + join_size = size.value(stream); + } + + // The initial early exit clauses guarantee that we will not reach this point + // unless both the left and right tables are non-empty. Under that + // constraint, neither left nor full joins can return an empty result since + // at minimum we are guaranteed null matches for all non-matching rows. In + // all other cases (inner, left semi, and left anti joins) if we reach this + // point we can safely return an empty result. + if (join_size == 0) { + return std::make_pair(std::make_unique>(0, stream, mr), + std::make_unique>(0, stream, mr)); + } + + rmm::device_scalar write_index(0, stream); + + auto left_indices = std::make_unique>(join_size, stream, mr); + auto right_indices = std::make_unique>(join_size, stream, mr); + + auto const& join_output_l = left_indices->data(); + auto const& join_output_r = right_indices->data(); + if (has_nulls) { + conditional_join + <<>>( + *left_table, + *right_table, + kernel_join_type, + join_output_l, + join_output_r, + write_index.data(), + parser.device_expression_data, + join_size, + swap_tables); + } else { + conditional_join + <<>>( + *left_table, + *right_table, + kernel_join_type, + join_output_l, + join_output_r, + write_index.data(), + parser.device_expression_data, + join_size, + swap_tables); + } + + auto join_indices = std::make_pair(std::move(left_indices), std::move(right_indices)); + + // For full joins, get the indices in the right table that were not joined to + // by any row in the left table. + if (join_type == join_kind::FULL_JOIN) { + auto complement_indices = detail::get_left_join_indices_complement( + join_indices.second, left_num_rows, right_num_rows, stream, mr); + join_indices = detail::concatenate_vector_pairs(join_indices, complement_indices, stream); + } + return join_indices; +} + +std::size_t compute_conditional_join_output_size(table_view const& left, + table_view const& right, + ast::expression const& binary_predicate, + join_kind join_type, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + // Until we add logic to handle the number of non-matches in the right table, + // full joins are not supported in this function. Note that this does not + // prevent actually performing full joins since we do that by calculating the + // left join and then concatenating the complementary right indices. + CUDF_EXPECTS(join_type != join_kind::FULL_JOIN, + "Size estimation is not available for full joins."); + + // We can immediately filter out cases where one table is empty. 
In + // some cases, we return all the rows of the other table with a corresponding + // null index for the empty table; in others, we return an empty output. + auto right_num_rows{right.num_rows()}; + auto left_num_rows{left.num_rows()}; + if (right_num_rows == 0) { + switch (join_type) { + // Left, left anti, and full all return all the row indices from left + // with a corresponding NULL from the right. + case join_kind::LEFT_JOIN: + case join_kind::LEFT_ANTI_JOIN: + case join_kind::FULL_JOIN: return left_num_rows; + // Inner and left semi joins return empty output because no matches can exist. + case join_kind::INNER_JOIN: + case join_kind::LEFT_SEMI_JOIN: return 0; + default: CUDF_FAIL("Invalid join kind."); break; + } + } else if (left_num_rows == 0) { + switch (join_type) { + // Left, left anti, left semi, and inner joins all return empty sets. + case join_kind::LEFT_JOIN: + case join_kind::LEFT_ANTI_JOIN: + case join_kind::INNER_JOIN: + case join_kind::LEFT_SEMI_JOIN: return 0; + // Full joins need to return the trivial complement. + case join_kind::FULL_JOIN: return right_num_rows; + default: CUDF_FAIL("Invalid join kind."); break; + } + } + + // Prepare output column. Whether or not the output column is nullable is + // determined by whether any of the columns in the input table are nullable. + // If none of the input columns actually contain nulls, we can still use the + // non-nullable version of the expression evaluation code path for + // performance, so we capture that information as well. + auto const has_nulls = binary_predicate.may_evaluate_null(left, right, stream); + + auto const parser = + ast::detail::expression_parser{binary_predicate, left, right, has_nulls, stream, mr}; + CUDF_EXPECTS(parser.output_type().id() == type_id::BOOL8, + "The expression must produce a boolean output."); + + auto left_table = table_device_view::create(left, stream); + auto right_table = table_device_view::create(right, stream); + + // For inner joins we support optimizing the join by launching one thread for + // whichever table is larger rather than always using the left table. + auto swap_tables = (join_type == join_kind::INNER_JOIN) && (right_num_rows > left_num_rows); + detail::grid_1d const config(swap_tables ? right_num_rows : left_num_rows, + DEFAULT_JOIN_BLOCK_SIZE); + auto const shmem_size_per_block = parser.shmem_per_thread * config.num_threads_per_block; + + // Allocate storage for the counter used to get the size of the join output + rmm::device_scalar size(0, stream, mr); + + // Determine number of output rows without actually building the output to simply + // find what the size of the output will be. 
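+  // Added usage sketch (commentary, not part of the original source): callers of the
+  // public wrappers further below can run this size estimation once and pass the result
+  // as the optional output_size of the corresponding join, letting conditional_join skip
+  // its own size-counting kernel launch, e.g. (assuming the public header supplies
+  // default stream/memory-resource arguments):
+  //   auto sz = cudf::conditional_inner_join_size(left, right, pred);
+  //   auto [l, r] = cudf::conditional_inner_join(left, right, pred, sz);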
+ if (has_nulls) { + compute_conditional_join_output_size + <<>>( + *left_table, + *right_table, + join_type, + parser.device_expression_data, + swap_tables, + size.data()); + } else { + compute_conditional_join_output_size + <<>>( + *left_table, + *right_table, + join_type, + parser.device_expression_data, + swap_tables, + size.data()); + } + return size.value(stream); +} + +} // namespace detail + +std::pair>, + std::unique_ptr>> +conditional_inner_join(table_view const& left, + table_view const& right, + ast::expression const& binary_predicate, + std::optional output_size, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::conditional_join(left, + right, + binary_predicate, + detail::join_kind::INNER_JOIN, + output_size, + rmm::cuda_stream_default, + mr); +} + +std::pair>, + std::unique_ptr>> +conditional_left_join(table_view const& left, + table_view const& right, + ast::expression const& binary_predicate, + std::optional output_size, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::conditional_join(left, + right, + binary_predicate, + detail::join_kind::LEFT_JOIN, + output_size, + rmm::cuda_stream_default, + mr); +} + +std::pair>, + std::unique_ptr>> +conditional_full_join(table_view const& left, + table_view const& right, + ast::expression const& binary_predicate, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::conditional_join( + left, right, binary_predicate, detail::join_kind::FULL_JOIN, {}, rmm::cuda_stream_default, mr); +} + +std::unique_ptr> conditional_left_semi_join( + table_view const& left, + table_view const& right, + ast::expression const& binary_predicate, + std::optional output_size, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return std::move(detail::conditional_join(left, + right, + binary_predicate, + detail::join_kind::LEFT_SEMI_JOIN, + output_size, + rmm::cuda_stream_default, + mr) + .first); +} + +std::unique_ptr> conditional_left_anti_join( + table_view const& left, + table_view const& right, + ast::expression const& binary_predicate, + std::optional output_size, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return std::move(detail::conditional_join(left, + right, + binary_predicate, + detail::join_kind::LEFT_ANTI_JOIN, + output_size, + rmm::cuda_stream_default, + mr) + .first); +} + +std::size_t conditional_inner_join_size(table_view const& left, + table_view const& right, + ast::expression const& binary_predicate, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::compute_conditional_join_output_size( + left, right, binary_predicate, detail::join_kind::INNER_JOIN, rmm::cuda_stream_default, mr); +} + +std::size_t conditional_left_join_size(table_view const& left, + table_view const& right, + ast::expression const& binary_predicate, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::compute_conditional_join_output_size( + left, right, binary_predicate, detail::join_kind::LEFT_JOIN, rmm::cuda_stream_default, mr); +} + +std::size_t conditional_left_semi_join_size(table_view const& left, + table_view const& right, + ast::expression const& binary_predicate, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return std::move(detail::compute_conditional_join_output_size(left, + right, + binary_predicate, + detail::join_kind::LEFT_SEMI_JOIN, + rmm::cuda_stream_default, + mr)); +} + +std::size_t conditional_left_anti_join_size(table_view const& left, + table_view const& 
right, + ast::expression const& binary_predicate, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return std::move(detail::compute_conditional_join_output_size(left, + right, + binary_predicate, + detail::join_kind::LEFT_ANTI_JOIN, + rmm::cuda_stream_default, + mr)); +} + +} // namespace cudf diff --git a/cuda_code/contraction_csr_mul.cu b/cuda_code/contraction_csr_mul.cu new file mode 100644 index 0000000000000000000000000000000000000000..77b8534a757f1162a41eeb4cad1f19254bcadf65 --- /dev/null +++ b/cuda_code/contraction_csr_mul.cu @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +namespace nvgraph +{ + //------------------------- Graph Contraction: ---------------------- + // + CsrGraph* contract_graph_csr_mul(CsrGraph& graph, + int* pV, size_t n, + cudaStream_t stream, + const int& VCombine, + const int& VReduce, + const int& ECombine, + const int& EReduce) + { + return contract_from_aggregates_t::FctrType >(graph, pV, n, stream, + static_cast(VCombine), + static_cast(VReduce), + static_cast(ECombine), + static_cast(EReduce)); + } + +} diff --git a/cuda_code/conv2d_12.cu b/cuda_code/conv2d_12.cu new file mode 100644 index 0000000000000000000000000000000000000000..6892469b841be9604607b361283d31844abb142f --- /dev/null +++ b/cuda_code/conv2d_12.cu @@ -0,0 +1,513 @@ +/* ****************************************************************************** + * + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0. + * + * See the NOTICE file distributed with this work for additional + * information regarding copyright ownership. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + ******************************************************************************/ + +// +// @author raver119@gmail.com +// @author Yurii Shyrma (iuriish@yahoo.com) +// + +#include + +#include "cudnnUtils.h" + +namespace sd { +namespace ops { +namespace platforms { + +////////////////////////////////////////////////////////////////////////// +static void conv2dCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* bias, + NDArray* output, const int kH, const int kW, const int sH, const int sW, const int pH, + const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, + const int wFormat) { + // cudnn support only two formats for weights {oC,iC,kH,kW} and {oC,kH,kW,iC} + + int bS, iC, iH, iW, oC, oH, + oW; // batch size, input channels, input height/width, output channels, output height/width; + int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, + indIiH, indWiC, indWoC, indWkH, indOoH); + + auto handle = reinterpret_cast(context->getCuDnnHandle()); + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); + + cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; + cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); + + // input descriptor + CudnnTensor x; + + if (input->ews() == 1 && input->ordering() == 'c') + x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); + else + x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), + input->strideAt(indIiH), input->strideAt(indIiH + 1)); + + // weights descriptor + FilterDesc w; + w.set4D(cudnnDataType(weights->dataType()), formatW, oC, iC, kH, kW); + + // output descriptor + CudnnTensor z; + + if (output->ews() == 1 && output->ordering() == 'c') + z.set4D(format, cudnnDataType(output->dataType()), bS, oC, oH, oW); + else + z.set4DEx(cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), + output->strideAt(indOoH), output->strideAt(indOoH + 1)); + + // description of convolution + ConvolutionDesc conv; + conv.set2D(pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(output->dataType())); + + // algorithm description + cudnnConvolutionFwdAlgo_t algo; + cudnnConvolutionFwdAlgoPerf_t algoPerf; + int count = 0; + // err = cudnnGetConvolutionForwardAlgorithm(*handle, x, w, conv, z, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo); + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionForwardAlgorithm), + cudnnFindConvolutionForwardAlgorithm(*handle, x, w, conv, z, 1, &count, &algoPerf)); + if (count == 0) + throw sd::cuda_exception::build("conv2dCUDNN: cudnnGetConvolutionForwardAlgorithm failed as the count is 0", 0); + algo = algoPerf.algo; + + PointersManager manager(context, __func__); + // allocate auxiliary device memory, abbreviation ws means workspace + size_t wsSize; + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionForwardWorkspaceSize), + cudnnGetConvolutionForwardWorkspaceSize(*handle, x, w, conv, z, algo, &wsSize)); + void* wsData = manager.allocateDevMem(wsSize); + + // provide scaling parameters + const float alpha32(1), beta32(0); + const double alpha64(1), beta64(0); + const void* alpha = + output->sizeOfT() <= 4 ? 
reinterpret_cast(&alpha32) : reinterpret_cast(&alpha64); + const void* beta = + output->sizeOfT() <= 4 ? reinterpret_cast(&beta32) : reinterpret_cast(&beta64); + + NDArray::prepareSpecialUse({output}, {input, weights, bias}); + + // run calculation + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnConvolutionForward), + cudnnConvolutionForward(*handle, alpha, x, input->specialBuffer(), w, weights->specialBuffer(), conv, algo, + wsData, wsSize, beta, z, output->specialBuffer())); + + // add bias if it is present + if (bias != nullptr) { + CudnnTensor b; + + // b.set4D(format, cudnnDataType(bias->dataType()), 1, isNCHW ? bias->lengthOf() : 1, 1, isNCHW ? 1: + // bias->lengthOf()); + b.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(bias->dataType()), 1, oC, 1, 1); + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnAddTensor), cudnnAddTensor(*handle, alpha, b, bias->specialBuffer(), alpha, + z, output->specialBuffer())); + } + + // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); + // if (cudaErr != 0) + // throw cuda_exception::build("conv2dCUDNN: cudaStreamSynchronize failed !", cudaErr); + + NDArray::registerSpecialUse({output}, {input, weights, bias}); +} + +////////////////////////////////////////////////////////////////////////// +static void conv2dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, + const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, + const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, + const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { + int bS, iC, iH, iW, oC, oH, + oW; // batch size, input channels, input height/width, output channels, output height/width; + int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, + indIiH, indWiC, indWoC, indWkH, indOoH); + + auto handle = reinterpret_cast(context->getCuDnnHandle()); + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); + + cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; + cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); + PointersManager manager(context, __func__); + // input descriptor, gradO descriptor, gradI descriptor + CudnnTensor x, dz, dx; + + if (input->ews() == 1 && input->ordering() == 'c') + x.set4D(format, cudnnDataType(input->dataType()), bS, iC, iH, iW); + else + x.set4DEx(cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), + input->strideAt(indIiH), input->strideAt(indIiH + 1)); + + if (gradO->ews() == 1 && gradO->ordering() == 'c') + dz.set4D(format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW); + else + dz.set4DEx(cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), + gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1)); + + if (gradI->ews() == 1 && gradI->ordering() == 'c') + dx.set4D(format, cudnnDataType(gradI->dataType()), bS, iC, iH, iW); + else + dx.set4DEx(cudnnDataType(gradI->dataType()), bS, iC, iH, iW, gradI->strideAt(0), gradI->strideAt(indIOioC), + gradI->strideAt(indIiH), gradI->strideAt(indIiH + 1)); + + // gradW descriptor + FilterDesc dw; + dw.set4D(cudnnDataType(gradW->dataType()), formatW, oC, iC, kH, kW); + + // description of convolution + ConvolutionDesc conv; + conv.set2D(pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(gradO->dataType())); + + // gradW algorithm description + cudnnConvolutionBwdFilterAlgo_t algoGradW; + cudnnConvolutionBwdFilterAlgoPerf_t algoGradWPerf; + int count = 0; + // err = cudnnGetConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, + // CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoGradW); + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnFindConvolutionBackwardFilterAlgorithm), + cudnnFindConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, 1, &count, &algoGradWPerf)); + if (count == 0) + throw sd::cuda_exception::build( + "conv2dBpCUDNN: cudnnGetConvolutionBackwardFilterAlgorithm failed as the count is 0", 0); + algoGradW = algoGradWPerf.algo; + + // gradI algorithm description + cudnnConvolutionBwdDataAlgo_t algoGradI; + cudnnConvolutionBwdDataAlgoPerf_t algoGradIPerf; + // err = cudnnGetConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, + // 0, &algoGradI); + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnFindConvolutionBackwardDataAlgorithm), + cudnnFindConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, 1, &count, &algoGradIPerf)); + if (count == 0) + throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardDataAlgorithm failed as the count is 0", + 0); + algoGradI = algoGradIPerf.algo; + + // allocate auxiliary device memory for gradW calculation, abbreviation ws means workspace + size_t wsGradWSize; + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnGetConvolutionBackwardFilterWorkspaceSize), + cudnnGetConvolutionBackwardFilterWorkspaceSize(*handle, x, dz, conv, dw, algoGradW, &wsGradWSize)); + void* wsGradWData = manager.allocateDevMem(wsGradWSize); + + // allocate auxiliary device memory for gradI calculation, abbreviation ws means workspace + size_t wsGradISize; + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnGetConvolutionBackwardDataWorkspaceSize), + cudnnGetConvolutionBackwardDataWorkspaceSize(*handle, dw, dz, conv, dx, algoGradI, &wsGradISize)); + void* wsGradIData = manager.allocateDevMem(wsGradISize); + + // provide scaling parameters + const float alpha32(1), beta32(0); + const double alpha64(1), beta64(0); + const void* alpha = + gradO->sizeOfT() <= 4 ? 
reinterpret_cast(&alpha32) : reinterpret_cast(&alpha64); + const void* beta = + gradO->sizeOfT() <= 4 ? reinterpret_cast(&beta32) : reinterpret_cast(&beta64); + + NDArray::prepareSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); + + // run calculation for gradB (if not nullptr) + if (gradB != nullptr) { + CudnnTensor db; + // db.set4D(format, cudnnDataType(gradB->dataType()), 1, isNCHW ? gradB->lengthOf() : 1, 1, isNCHW ? 1: + // gradB->lengthOf()); + db.set4D(CUDNN_TENSOR_NCHW, cudnnDataType(gradB->dataType()), 1, oC, 1, 1); + + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnConvolutionBackwardBias), + cudnnConvolutionBackwardBias(*handle, alpha, dz, gradO->specialBuffer(), beta, db, gradB->specialBuffer())); + } + + // run calculation for gradW + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnConvolutionBackwardFilter), + cudnnConvolutionBackwardFilter(*handle, alpha, x, input->specialBuffer(), dz, gradO->specialBuffer(), conv, + algoGradW, wsGradWData, wsGradWSize, beta, dw, gradW->specialBuffer())); + + // run calculation for gradI + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnConvolutionBackwardData), + cudnnConvolutionBackwardData(*handle, alpha, dw, weights->specialBuffer(), dz, gradO->specialBuffer(), conv, + algoGradI, wsGradIData, wsGradISize, beta, dx, gradI->specialBuffer())); + + // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); + // if (cudaErr != 0) + // throw cuda_exception::build("conv2dBpCUDNN: cudaStreamSynchronize failed !", cudaErr); + + NDArray::registerSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); +} + +////////////////////////////////////////////////////////////////////////// +PLATFORM_IMPL(conv2d, ENGINE_CUDA) { + auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) + auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] + auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] + + auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) + + int sH = INT_ARG(2); // strides height + int sW = INT_ARG(3); // strides width + int pH = INT_ARG(4); // paddings height + int pW = INT_ARG(5); // paddings width + int dH = INT_ARG(6); // dilations height + int dW = INT_ARG(7); // dilations width + int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME + bool isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC + int wFormat = block.getIArguments()->size() > 10 + ? INT_ARG(10) + : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] + + int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast(weights->sizeAt(0)); // filter(kernel) height + int kW = INT_ARG(1) > 0 ? 
INT_ARG(1) : static_cast(weights->sizeAt(1)); // filter(kernel) width + + REQUIRE_TRUE(input->rankOf() == 4, 0, + "CUSTOM CONV2D CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); + REQUIRE_TRUE(weights->rankOf() == 4, 0, + "CUSTOM CONV2D CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", + weights->rankOf()); + + int bS, iC, iH, iW, oC, oH, + oW; // batch size, input channels, input height/width, output channels, output height/width; + int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, + indIiH, indWiC, indWoC, indWkH, indOoH); + + ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); + + std::vector expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); + REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, + "CUSTOM CONV2D CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", + ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); + if (bias) { + REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, + "CUSTOM CONV2D CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got " + "%i, %i instead !", + oC, bias->rankOf(), bias->lengthOf()); + REQUIRE_TRUE((bias->rankOf() == 1 && bias->strideAt(0) == 1) || + (bias->rankOf() == 2 && bias->sizeAt(0) == 1 && bias->strideAt(1) == 1) || + (bias->rankOf() == 2 && bias->sizeAt(1) == 1 && bias->strideAt(0) == 1), + 0, "CUSTOM CONV2D CUDNN OP: bias array should be contiguous in memory !"); + } + std::unique_ptr tmpWeight = {}, tmpInput = {}; + NDArray* newWeights = weights; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} + if (0 == wFormat) { + tmpWeight.reset( + new NDArray(weights->ordering(), + isNCHW ? std::vector({oC, iC, kH, kW}) : std::vector({oC, kH, kW, iC}), + weights->dataType(), weights->getContext())); + newWeights = tmpWeight.get(); + newWeights->assign(weights->permute( + isNCHW ? std::vector({3, 2, 0, 1}) + : std::vector( + {3, 0, 1, 2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) + } + + if (paddingMode == 1) { // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings + auto ret = checkConv2dCUDNNPadAsymmetric(input, nullptr, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); + tmpInput = std::move(std::get<0>(ret)); // prolong life + if (tmpInput) input = tmpInput.get(); + } + conv2dCUDNN(block.launchContext(), input, newWeights, bias, output, kH, kW, sH, sW, pH, pW, dH, dW, paddingMode, + isNCHW, wFormat); + + return sd::Status::OK; +} + +////////////////////////////////////////////////////////////////////////// +PLATFORM_CHECK(conv2d, ENGINE_CUDA) { + auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) + auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always + auto bias = block.width() > 2 ? 
INPUT_VARIABLE(2) : nullptr;  // [oC]
+
+  const int paddingMode = INT_ARG(8);  // 0-VALID, 1-SAME, 2-CAUSAL
+
+  Requirements req("CUDNN CONV2d OP");
+  req.expectNotEq(makeInfoVariable(paddingMode, "paddingMode"), 2) &&
+      req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0),
+                   {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) &&
+      req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1),
+                   {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE});
+  if (bias) {
+    req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"),
+                 {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE});
+  }
+  req.logTheSuccess();
+  return req;
+}
+
+//////////////////////////////////////////////////////////////////////////
+PLATFORM_IMPL(conv2d_bp, ENGINE_CUDA) {
+  auto input = INPUT_VARIABLE(0);    // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW)
+  auto weights = INPUT_VARIABLE(1);  // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC]
+  auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr;  // [oC]
+  auto gradO = block.width() > 3
+                   ? INPUT_VARIABLE(3)
+                   : INPUT_VARIABLE(2);  // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next
+
+  auto gradI = OUTPUT_VARIABLE(0);  // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon
+  auto gradW = OUTPUT_VARIABLE(1);  // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC]
+  auto gradB = block.width() > 3 ? OUTPUT_VARIABLE(2) : nullptr;  // [oC]
+
+  int kH = INT_ARG(0);           // filter(kernel) height
+  int kW = INT_ARG(1);           // filter(kernel) width
+  int sH = INT_ARG(2);           // strides height
+  int sW = INT_ARG(3);           // strides width
+  int pH = INT_ARG(4);           // paddings height
+  int pW = INT_ARG(5);           // paddings width
+  int dH = INT_ARG(6);           // dilations height
+  int dW = INT_ARG(7);           // dilations width
+  int paddingMode = INT_ARG(8);  // 0-VALID, 1-SAME
+  int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1;  // INT_ARG(9): 0-NCHW, 1-NHWC
+  int wFormat = block.getIArguments()->size() > 10
+                    ?
INT_ARG(10) + : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] + + REQUIRE_TRUE(input->rankOf() == 4, 0, + "CUSTOM CONV2D_BP CUDNN OP: rank of input array must be equal to 4, but got %i instead !", + input->rankOf()); + REQUIRE_TRUE(weights->rankOf() == 4, 0, + "CUSTOM CONV2D_BP CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", + weights->rankOf()); + REQUIRE_TRUE(gradO->rankOf() == 4, 0, + "CUSTOM CONV2D_BP CUDNN OP: rank of output's gradients (next epsilon) array must be equal to 4, but got " + "%i instead !", + gradO->rankOf()); + + int bS, iC, iH, iW, oC, oH, + oW; // batch size, input channels, input height/width, output channels, output height/width; + int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, + indIiH, indWiC, indWoC, indWkH, indOoH); + + int trueoH, trueoW; // true output height, width + ConvolutionUtils::calcOutSizePool2D(trueoH, trueoW, kH, kW, sH, sW, pH, pW, dH, dW, iH, iW, paddingMode); + + ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); + + std::vector expectedGradOShape = + ShapeUtils::composeShapeUsingDimsAndIdx({bS, oC, trueoH, trueoW, 0, indIOioC, indOoH, indOoH + 1}); + std::vector expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); + REQUIRE_TRUE(gradO->isSameShape(expectedGradOShape), 0, + "CUSTOM CONV2D_BP CUDNN OP: wrong shape of output gradients (next epsilon) array, expected is %s, but " + "got %s instead !", + ShapeUtils::shapeAsString(expectedGradOShape).c_str(), ShapeUtils::shapeAsString(gradO).c_str()); + REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, + "CUSTOM CONV2D_BP CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", + ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); + if (bias) + REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, + "CUSTOM CONV2D_BP CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got " + "%i, %i instead !", + oC, bias->rankOf(), bias->lengthOf()); + + std::unique_ptr tmpGradI = {}, tmpInput = {}, tmpWeights = {}, tmpGradW = {}; + NDArray *newWeights = weights, *newGradW = gradW; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} + if (0 == wFormat) { + tmpGradW.reset( + new NDArray(gradW->ordering(), + isNCHW ? std::vector({oC, iC, kH, kW}) : std::vector({oC, kH, kW, iC}), + gradW->dataType(), gradW->getContext())); + tmpWeights.reset( + new NDArray(weights->ordering(), + isNCHW ? std::vector({oC, iC, kH, kW}) : std::vector({oC, kH, kW, iC}), + weights->dataType(), weights->getContext())); + newGradW = tmpGradW.get(); + newWeights = tmpWeights.get(); + newWeights->assign(weights->permute( + isNCHW ? 
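// ---------------------------------------------------------------------------
// Context for the paddingMode == 1 branch just below: with the usual TF-style
// SAME arithmetic the total padding along an axis can be odd, so it splits
// unevenly between the two sides, while cudnnSetConvolution2dDescriptor only
// accepts a single symmetric pad value per axis. That is why the SAME branch
// builds an explicitly padded temporary input/gradI via
// checkConv2dCUDNNPadAsymmetric. The sketch below shows the common SAME
// arithmetic for one axis; it is illustrative and not necessarily
// bit-identical to ConvolutionUtils::calcPadding2D.
#include <algorithm>

struct SamePad { int before, after; };

static SamePad samePad1D(int inSize, int kernel, int stride, int dilation) {
  const int effK = (kernel - 1) * dilation + 1;        // effective kernel extent
  const int outSize = (inSize + stride - 1) / stride;  // ceil(inSize / stride)
  const int total = std::max((outSize - 1) * stride + effK - inSize, 0);
  return {total / 2, total - total / 2};               // unequal when total is odd
}
// e.g. inSize = 7, kernel = 2, stride = 1, dilation = 1 -> total = 1 -> {0, 1},
// i.e. all of the padding ends up on one side.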
std::vector({3, 2, 0, 1}) + : std::vector( + {3, 0, 1, 2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) + } + + NDArray* newInput = input; + NDArray* newGradI = gradI; + + if (paddingMode == 1) { // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings + auto ret = checkConv2dCUDNNPadAsymmetric(input, gradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); + tmpInput = std::move(std::get<0>(ret)); + tmpGradI = std::move(std::get<1>(ret)); + if (tmpInput) newInput = tmpInput.get(); + if (tmpGradI) newGradI = tmpGradI.get(); + } + conv2dBpCUDNN(block.launchContext(), newInput, newWeights, gradO, newGradI, newGradW, gradB, kH, kW, sH, sW, pH, pW, + dH, dW, paddingMode, isNCHW, wFormat); + + if (0 == wFormat) { + newGradW->permutei( + isNCHW ? std::vector({2, 3, 1, 0}) + : std::vector( + {1, 2, 3, 0})); // (oC, iC, kH, kW --> kH, kW, iC, oC) or (oC, kH, kW, iC --> kH, kW, iC, oC) + gradW->assign(newGradW); + } + + if (newInput != input) { + if (isNCHW) + gradI->assign((*newGradI)({0, 0, 0, 0, 0, gradI->sizeAt(2), 0, gradI->sizeAt(3)})); + else + gradI->assign((*newGradI)({0, 0, 0, gradI->sizeAt(1), 0, gradI->sizeAt(2), 0, 0})); + } + + return sd::Status::OK; +} + +PLATFORM_CHECK(conv2d_bp, ENGINE_CUDA) { + auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) + auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always + auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] + auto gradO = block.width() > 3 + ? INPUT_VARIABLE(3) + : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next + + const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL + const int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC + + Requirements req("CUDNN CONV2d_BP OP"); + req.expectNotEq(makeInfoVariable(paddingMode, "paddingMode"), 2) && + req.expectTrue(makeInfoVariable(isNCHW, "isNCHW")) && + req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && + req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); + if (bias) { + req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && + req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT3), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); + } else { + req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT2), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); + } + req.logTheSuccess(); + return req; +} + +} // namespace platforms +} // namespace ops +} // namespace sd diff --git a/cuda_code/conv2d_16.cu b/cuda_code/conv2d_16.cu new file mode 100644 index 0000000000000000000000000000000000000000..234dbffb79ad2d282352eccdacc4906fcff6067f --- /dev/null +++ b/cuda_code/conv2d_16.cu @@ -0,0 +1,521 @@ +/******************************************************************************* + * Copyright (c) 2019 Konduit K.K. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0. 
+ * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + * + * SPDX-License-Identifier: Apache-2.0 + ******************************************************************************/ + +// +// @author raver119@gmail.com +// @author Yurii Shyrma (iuriish@yahoo.com) +// + + +#include "cudnnUtils.h" +#include + +namespace nd4j { +namespace ops { +namespace platforms { + +////////////////////////////////////////////////////////////////////////// +static void conv2dCUDNN(const LaunchContext* context, + const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, + const int kH, const int kW, + const int sH, const int sW, + const int pH, const int pW, + const int dH, const int dW, + const int paddingMode, const bool isNCHW) { + + int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; + int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); + + auto handle = reinterpret_cast(context->getCuDnnHandle()); + cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream()); + if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: can't set stream for cuDNN", err); + + cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; + + // input descriptor + cudnnTensorDescriptor_t x; + cudnnCreateTensorDescriptor(&x); + if(input->ews() == 1) + err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW); + else + err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); + if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err); + + // weights descriptor + cudnnFilterDescriptor_t w; + cudnnCreateFilterDescriptor(&w); + err = cudnnSetFilter4dDescriptor(w, cudnnDataType(weights->dataType()), CUDNN_TENSOR_NCHW, oC, iC, kH, kW); + if(err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudnnSetFilter4dDescriptor failed", err); + + // output descriptor + cudnnTensorDescriptor_t z; + cudnnCreateTensorDescriptor(&z); + if(output->ews() == 1) + err = cudnnSetTensor4dDescriptor(z, format, cudnnDataType(output->dataType()), bS, oC, oH, oW); + else + err = cudnnSetTensor4dDescriptorEx(z, cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1)); + if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for output failed", err); + + // description of convolution + cudnnConvolutionDescriptor_t conv; + cudnnCreateConvolutionDescriptor(&conv); + err = cudnnSetConvolution2dDescriptor(conv, pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(output->dataType())); + if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudnnSetConvolution2dDescriptor failed", err); + + // algorithm description + cudnnConvolutionFwdAlgo_t algo; + err = 
cudnnGetConvolutionForwardAlgorithm(*handle, x, w, conv, z, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo); + if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudnnGetConvolutionForwardAlgorithm failed", err); + + + // allocate auxiliary device memory, abbreviation ws means workspace + size_t wsSize; + err = cudnnGetConvolutionForwardWorkspaceSize(*handle, x, w, conv, z, algo, &wsSize); + if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudnnGetConvolutionForwardWorkspaceSize failed", err); + void* wsData; + auto cudaErr = cudaMalloc(&wsData, wsSize); + if (cudaErr != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudaMalloc for auxiliary workspace memory failed", cudaErr); + + // provide scaling parameters + const float alpha32(1), beta32(0); + const double alpha64(1), beta64(0); + const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast(&alpha32) : reinterpret_cast(&alpha64); + const void* beta = output->sizeOfT() <= 4 ? reinterpret_cast(&beta32) : reinterpret_cast(&beta64); + + NDArray::prepareSpecialUse({output}, {input, weights, bias}); + + // run calculation + err = cudnnConvolutionForward(*handle, alpha, x, input->getSpecialBuffer(), w, weights->getSpecialBuffer(), conv, algo, wsData, wsSize, beta, z, output->specialBuffer()); + if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudnnConvolutionForward failed", err); + + // add bias if it is present + if (bias != nullptr) { + + cudnnTensorDescriptor_t b; + cudnnCreateTensorDescriptor(&b); + err = cudnnSetTensor4dDescriptor(b, format, cudnnDataType(bias->dataType()), 1, isNCHW ? bias->lengthOf() : 1, 1, isNCHW ? 1: bias->lengthOf()); + if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudnnSetTensor4dDescriptor for bias failed", err); + err = cudnnAddTensor(*handle, alpha, b, bias->getSpecialBuffer(), alpha, z, output->specialBuffer()); + if (err != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudnnAddTensor bias failed", err); + } + + // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); + // if (cudaErr != 0) + // throw cuda_exception::build("conv2dCUDNN: cudaStreamSynchronize failed !", cudaErr); + + cudaErr = cudaFree(wsData); + if (cudaErr != 0) throw nd4j::cuda_exception::build("conv2dCUDNN: cudaFree for auxiliary workspace memory failed", cudaErr); + + NDArray::registerSpecialUse({output}, {input, weights, bias}); +} + +////////////////////////////////////////////////////////////////////////// +static void conv2dBpCUDNN(const LaunchContext* context, + const NDArray* input, const NDArray* weights, const NDArray* gradO, + NDArray* gradI, NDArray* gradW, NDArray* gradB, + const int kH, const int kW, + const int sH, const int sW, + const int pH, const int pW, + const int dH, const int dW, + const int paddingMode, const bool isNCHW) { + + int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; + int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); + + auto handle = reinterpret_cast(context->getCuDnnHandle()); + cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream()); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: can't set stream for cuDNN", err); + + cudnnTensorFormat_t format = isNCHW ? 
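// ---------------------------------------------------------------------------
// For orientation, the forward routine above reduces to the classic cuDNN call
// sequence: build tensor/filter/convolution descriptors, pick an algorithm,
// size and allocate a workspace, run cudnnConvolutionForward, then fold the
// bias in with cudnnAddTensor. The self-contained toy below shows that
// sequence for a fixed NCHW float case. The sizes, padding and the hard-coded
// CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM choice are illustrative assumptions
// (cudnnGetConvolutionForwardAlgorithm, used above, was removed in cuDNN 8,
// which is why the newer file in this diff switched to
// cudnnFindConvolutionForwardAlgorithm).
#include <cudnn.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CK(call)                                                                    \
  do {                                                                              \
    auto s = (call);                                                                \
    if (s != 0) { std::fprintf(stderr, "%s failed\n", #call); std::exit(1); }       \
  } while (0)

int main() {
  const int n = 1, c = 3, h = 8, w = 8, k = 4, kh = 3, kw = 3;

  cudnnHandle_t handle;
  CK(cudnnCreate(&handle));

  cudnnTensorDescriptor_t x, y, b;
  cudnnFilterDescriptor_t f;
  cudnnConvolutionDescriptor_t conv;
  CK(cudnnCreateTensorDescriptor(&x));
  CK(cudnnCreateTensorDescriptor(&y));
  CK(cudnnCreateTensorDescriptor(&b));
  CK(cudnnCreateFilterDescriptor(&f));
  CK(cudnnCreateConvolutionDescriptor(&conv));

  CK(cudnnSetTensor4dDescriptor(x, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w));
  CK(cudnnSetFilter4dDescriptor(f, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, k, c, kh, kw));
  CK(cudnnSetConvolution2dDescriptor(conv, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT));

  int on, oc, oh, ow;  // let cuDNN derive the output shape
  CK(cudnnGetConvolution2dForwardOutputDim(conv, x, f, &on, &oc, &oh, &ow));
  CK(cudnnSetTensor4dDescriptor(y, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, on, oc, oh, ow));
  CK(cudnnSetTensor4dDescriptor(b, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, k, 1, 1));

  float *dX, *dW, *dY, *dB;
  CK(cudaMalloc(&dX, sizeof(float) * n * c * h * w));
  CK(cudaMalloc(&dW, sizeof(float) * k * c * kh * kw));
  CK(cudaMalloc(&dY, sizeof(float) * on * oc * oh * ow));
  CK(cudaMalloc(&dB, sizeof(float) * k));

  const cudnnConvolutionFwdAlgo_t algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
  size_t wsBytes = 0;
  CK(cudnnGetConvolutionForwardWorkspaceSize(handle, x, f, conv, y, algo, &wsBytes));
  void* ws = nullptr;
  if (wsBytes > 0) CK(cudaMalloc(&ws, wsBytes));

  const float alpha = 1.0f, beta = 0.0f, one = 1.0f;
  CK(cudnnConvolutionForward(handle, &alpha, x, dX, f, dW, conv, algo, ws, wsBytes, &beta, y, dY));
  CK(cudnnAddTensor(handle, &one, b, dB, &one, y, dY));  // y += broadcast(bias)

  std::printf("conv output: %d x %d x %d x %d\n", on, oc, oh, ow);
  // descriptor/memory cleanup omitted to keep the sketch short
  return 0;
}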
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; + + // input descriptor + cudnnTensorDescriptor_t x; + cudnnCreateTensorDescriptor(&x); + if(input->ews() == 1) + err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW); + else + err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err); + + // gradO descriptor + cudnnTensorDescriptor_t dz; + cudnnCreateTensorDescriptor(&dz); + if(gradO->ews() == 1) + err = cudnnSetTensor4dDescriptor(dz, format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW); + else + err = cudnnSetTensor4dDescriptorEx(dz, cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1)); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradO failed", err); + + // gradI descriptor + cudnnTensorDescriptor_t dx; + cudnnCreateTensorDescriptor(&dx); + if(gradI->ews() == 1) + err = cudnnSetTensor4dDescriptor(dx, format, cudnnDataType(gradI->dataType()), bS, iC, iH, iW); + else + err = cudnnSetTensor4dDescriptorEx(dx, cudnnDataType(gradI->dataType()), bS, iC, iH, iW, gradI->strideAt(0), gradI->strideAt(indIOioC), gradI->strideAt(indIiH), gradI->strideAt(indIiH + 1)); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradI failed", err); + + // gradW descriptor + cudnnFilterDescriptor_t dw; + cudnnCreateFilterDescriptor(&dw); + err = cudnnSetFilter4dDescriptor(dw, cudnnDataType(gradW->dataType()), CUDNN_TENSOR_NCHW, oC, iC, kH, kW); + if(err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnSetFilter4dDescriptor gradW failed", err); + + // description of convolution + cudnnConvolutionDescriptor_t conv; + cudnnCreateConvolutionDescriptor(&conv); + err = cudnnSetConvolution2dDescriptor(conv, pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(gradO->dataType())); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnSetConvolution2dDescriptor failed", err); + + // gradW algorithm description + cudnnConvolutionBwdFilterAlgo_t algoGradW; + err = cudnnGetConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoGradW); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardFilterAlgorithm failed", err); + + // gradI algorithm description + cudnnConvolutionBwdDataAlgo_t algoGradI; + err = cudnnGetConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algoGradI); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardDataAlgorithm failed", err); + + // allocate auxiliary device memory for gradW calculation, abbreviation ws means workspace + size_t wsGradWSize; + err = cudnnGetConvolutionBackwardFilterWorkspaceSize(*handle, x, dz, conv, dw, algoGradW, &wsGradWSize); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardFilterWorkspaceSize failed", err); + void* wsGradWData; + auto cudaErr = cudaMalloc(&wsGradWData, wsGradWSize); + if (cudaErr != 0) throw 
nd4j::cuda_exception::build("conv2dBpCUDNN: cudaMalloc for auxiliary workspace memory wsGradWData failed", cudaErr); + + // allocate auxiliary device memory for gradI calculation, abbreviation ws means workspace + size_t wsGradISize; + err = cudnnGetConvolutionBackwardDataWorkspaceSize(*handle, dw, dz, conv, dx, algoGradI, &wsGradISize); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardDataWorkspaceSize failed", err); + void* wsGradIData; + cudaErr = cudaMalloc(&wsGradIData, wsGradISize); + if (cudaErr != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudaMalloc for auxiliary workspace memory wsGradIData failed", cudaErr); + + // provide scaling parameters + const float alpha32(1), beta32(0); + const double alpha64(1), beta64(0); + const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast(&alpha32) : reinterpret_cast(&alpha64); + const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast(&beta32) : reinterpret_cast(&beta64); + + NDArray::prepareSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); + + // run calculation for gradB (if not nullptr) + if(gradB != nullptr) { + cudnnTensorDescriptor_t db; + cudnnCreateTensorDescriptor(&db); + err = cudnnSetTensor4dDescriptor(db, format, cudnnDataType(gradB->dataType()), 1, isNCHW ? gradB->lengthOf() : 1, 1, isNCHW ? 1: gradB->lengthOf()); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor for gradB failed", err); + + err = cudnnConvolutionBackwardBias(*handle, alpha, dz, gradO->getSpecialBuffer(), beta, db, gradB->getSpecialBuffer()); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnConvolutionBackwardBias failed", err); + } + + // run calculation for gradW + err = cudnnConvolutionBackwardFilter(*handle, alpha, x, input->getSpecialBuffer(), dz, gradO->getSpecialBuffer(), conv, algoGradW, wsGradWData, wsGradWSize, beta, dw, gradW->getSpecialBuffer()); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnConvolutionBackwardFilter failed", err); + + // run calculation for gradI + err = cudnnConvolutionBackwardData(*handle, alpha, dw, weights->getSpecialBuffer(), dz, gradO->getSpecialBuffer(), conv, algoGradI, wsGradIData, wsGradISize, beta, dx, gradI->getSpecialBuffer()); + if (err != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudnnConvolutionBackwardData failed", err); + + // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); + // if (cudaErr != 0) + // throw cuda_exception::build("conv2dBpCUDNN: cudaStreamSynchronize failed !", cudaErr); + + cudaErr = cudaFree(wsGradWData); + if (cudaErr != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudaFree for auxiliary workspace memory wsGradWData failed", cudaErr); + cudaErr = cudaFree(wsGradIData); + if (cudaErr != 0) throw nd4j::cuda_exception::build("conv2dBpCUDNN: cudaFree for auxiliary workspace memory wsGradIData failed", cudaErr); + + NDArray::registerSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); +} + +////////////////////////////////////////////////////////////////////////// +PLATFORM_IMPL(conv2d, ENGINE_CUDA) { + + auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) + auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always + auto bias = block.width() > 2 ? 
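// ---------------------------------------------------------------------------
// The backward routine above allocates its two workspaces with raw cudaMalloc
// and only releases them with cudaFree at the very end, so a cuDNN error
// thrown in between would leak them; the newer variant of this file earlier in
// the diff routes the allocation through a PointersManager instead. One way to
// get the same exception safety in plain CUDA is a small RAII guard like the
// sketch below; the class name is illustrative, not part of the library.
#include <cuda_runtime.h>
#include <stdexcept>

// Owns a scratch workspace for the lifetime of one cuDNN call sequence and
// frees it even if an intermediate call throws.
class DeviceWorkspace {
 public:
  explicit DeviceWorkspace(size_t bytes) : bytes_(bytes) {
    if (bytes_ > 0 && cudaMalloc(&ptr_, bytes_) != cudaSuccess)
      throw std::runtime_error("workspace allocation failed");
  }
  ~DeviceWorkspace() { if (ptr_) cudaFree(ptr_); }
  DeviceWorkspace(const DeviceWorkspace&) = delete;
  DeviceWorkspace& operator=(const DeviceWorkspace&) = delete;
  void*  data() const { return ptr_; }
  size_t size() const { return bytes_; }
 private:
  void*  ptr_ = nullptr;
  size_t bytes_ = 0;
};
// usage sketch: DeviceWorkspace ws(wsGradWSize);
//               cudnnConvolutionBackwardFilter(..., ws.data(), ws.size(), ...);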
INPUT_VARIABLE(2) : nullptr; // [oC] + + auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) + + int sH = INT_ARG(2); // strides height + int sW = INT_ARG(3); // strides width + int pH = INT_ARG(4); // paddings height + int pW = INT_ARG(5); // paddings width + int dH = INT_ARG(6); // dilations height + int dW = INT_ARG(7); // dilations width + int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME + bool isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC + + int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast(weights->sizeAt(0)); // filter(kernel) height + int kW = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast(weights->sizeAt(1)); // filter(kernel) width + + REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); + REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); + + int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; + int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); + + ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); + + std::vector expectedWeightsShape = {kH, kW, iC, oC}; + REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); + if (bias) { + REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); + REQUIRE_TRUE((bias->rankOf() == 1 && bias->strideAt(0) == 1) || (bias->rankOf() == 2 && bias->sizeAt(0) == 1 && bias->strideAt(1) == 1) || (bias->rankOf() == 2 && bias->sizeAt(1) == 1 && bias->strideAt(0) == 1), 0, "CUSTOM CONV2D CUDNN OP: bias array should be contiguous in memory !"); + } + + NDArray* newWeights = new NDArray(weights->ordering(), {oC, iC, kH, kW}, weights->dataType(), weights->getContext()); // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} + newWeights->assign(weights->permute({3,2,0,1})); // permute weights (kH, kW, iC, oC --> oC, iC, kH, kW) + + NDArray* newInput = input; + NDArray* newGradI = nullptr; + if(paddingMode == 1) // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings + checkConv2dCUDNNPadAsymmetric(newInput, newGradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); + + conv2dCUDNN(block.launchContext(), newInput, newWeights, bias, output, kH,kW,sH,sW,pH,pW,dH,dW, paddingMode, isNCHW); + + if(newInput != input) + delete newInput; + + delete newWeights; + + return Status::OK(); +} + +////////////////////////////////////////////////////////////////////////// +PLATFORM_CHECK(conv2d, ENGINE_CUDA) { + + auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) + auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always + auto bias = block.width() > 2 ? 
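// ---------------------------------------------------------------------------
// Both conv2d paths above re-pack the default [kH, kW, iC, oC] weights into
// [oC, iC, kH, kW] before handing them to cuDNN (the permute({3,2,0,1})
// followed by assign). The host-side loop below spells out the same index
// mapping for row-major ('c'-ordered) buffers; it only illustrates the layout
// change, the library itself performs it on-device via NDArray.
#include <vector>
#include <cstddef>

// src is laid out as [kH, kW, iC, oC] (HWIO), dst as [oC, iC, kH, kW] (OIHW).
static void permuteHWIOtoOIHW(const std::vector<float>& src, std::vector<float>& dst,
                              int kH, int kW, int iC, int oC) {
  dst.resize(static_cast<size_t>(oC) * iC * kH * kW);
  for (int h = 0; h < kH; ++h)
    for (int w = 0; w < kW; ++w)
      for (int i = 0; i < iC; ++i)
        for (int o = 0; o < oC; ++o) {
          const size_t srcIdx = ((static_cast<size_t>(h) * kW + w) * iC + i) * oC + o;
          const size_t dstIdx = ((static_cast<size_t>(o) * iC + i) * kH + h) * kW + w;
          dst[dstIdx] = src[srcIdx];  // dst[o][i][h][w] = src[h][w][i][o]
        }
}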
INPUT_VARIABLE(2) : nullptr; // [oC] + + const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL + + const bool badInputType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF; + const bool badWeightsType = weights->dataType() != DataType::DOUBLE && weights->dataType() != DataType::FLOAT32 && weights->dataType() != DataType::HALF; + const bool badBiasType = bias == nullptr ? false : (bias->dataType() != DataType::DOUBLE && bias->dataType() != DataType::FLOAT32 && bias->dataType() != DataType::HALF); + + return paddingMode != 2 && !badInputType && !badWeightsType && !badBiasType; +} + +////////////////////////////////////////////////////////////////////////// +PLATFORM_IMPL(conv2d_bp, ENGINE_CUDA) { + + auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) + auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always + auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] + auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next + + auto gradI = OUTPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon + auto gradW = OUTPUT_VARIABLE(1); // [kH, kW, iC, oC] always + auto gradB = block.width() > 3 ? OUTPUT_VARIABLE(2) : nullptr; // [oC] + + int kH = INT_ARG(0); // filter(kernel) height + int kW = INT_ARG(1); // filter(kernel) width + int sH = INT_ARG(2); // strides height + int sW = INT_ARG(3); // strides width + int pH = INT_ARG(4); // paddings height + int pW = INT_ARG(5); // paddings width + int dH = INT_ARG(6); // dilations height + int dW = INT_ARG(7); // dilations width + int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME + int isNCHW = block.getIArguments()->size() > 9 ? 
!INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC + + REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); + REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); + REQUIRE_TRUE(gradO->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of output's gradients (next epsilon) array must be equal to 4, but got %i instead !", gradO->rankOf()); + + int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; + int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); + + int trueoH, trueoW; // true output height, width + ConvolutionUtils::calcOutSizePool2D(trueoH, trueoW, kH, kW, sH, sW, pH, pW, dH, dW, iH, iW, paddingMode); + + ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); + + std::vector expectedGradOShape = ShapeUtils::composeShapeUsingDimsAndIdx({bS,oC,trueoH,trueoW, 0,indIOioC,indOoH,indOoH+1}); + std::vector expectedWeightsShape = {kH, kW, iC, oC}; + REQUIRE_TRUE(gradO->isSameShape(expectedGradOShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of output gradients (next epsilon) array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedGradOShape).c_str(), ShapeUtils::shapeAsString(gradO).c_str()); + REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); + if(bias) + REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); + + NDArray* newGradW = new NDArray(gradW->ordering(), {oC, iC, kH, kW}, gradW->dataType(), gradW->getContext()); // cudnn support only two formats for weights {oC,iC,kH,kW} and {oC,kH,kW,iC} + NDArray* newWeights = new NDArray(weights->ordering(), {oC, iC, kH, kW}, weights->dataType(), weights->getContext()); + + newWeights->assign(weights->permute({3,2,0,1})); // permute weights (kH, kW, iC, oC --> oC, iC, kH, kW) + + NDArray* newInput = input; + NDArray* newGradI = gradI; + if(paddingMode == 1) // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings + checkConv2dCUDNNPadAsymmetric(newInput, newGradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); + + conv2dBpCUDNN(block.launchContext(), newInput, newWeights, gradO, newGradI, newGradW, gradB, kH,kW,sH,sW,pH,pW,dH,dW,paddingMode,isNCHW); + + newGradW->permutei({2,3,1,0}); // [oC, iC, kH, kW] -> [kH, kW, iC, oC] + gradW->assign(newGradW); + + if(newInput != input) { + + if(isNCHW) + gradI->assign((*newGradI)({0,0, 0,0, 0,gradI->sizeAt(2), 0,gradI->sizeAt(3)})); + else + gradI->assign((*newGradI)({0,0, 0,gradI->sizeAt(1), 0,gradI->sizeAt(2), 0,0})); + + delete newInput; + delete newGradI; + } + + delete newWeights; + delete newGradW; + + return Status::OK(); +} + +PLATFORM_CHECK(conv2d_bp, ENGINE_CUDA) { + + auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) + auto weights = INPUT_VARIABLE(1); // [kH, kW, 
iC, oC] always + auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] + auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next + + const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL + const int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC + + const bool badInputType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF; + const bool badWeightsType = weights->dataType() != DataType::DOUBLE && weights->dataType() != DataType::FLOAT32 && weights->dataType() != DataType::HALF; + const bool badGradOType = gradO->dataType() != DataType::DOUBLE && gradO->dataType() != DataType::FLOAT32 && gradO->dataType() != DataType::HALF; + const bool badBiasType = bias == nullptr ? false : (bias->dataType() != DataType::DOUBLE && bias->dataType() != DataType::FLOAT32 && bias->dataType() != DataType::HALF); + + return isNCHW && paddingMode != 2 && !badInputType && !badWeightsType && !badGradOType && !badBiasType; +} + + + + + + + +// PLATFORM_IMPL(conv2d, ENGINE_CUDA) { + +// auto handle = reinterpret_cast(block.launchContext()->getCuDnnHandle()); +// auto res = cudnnSetStream(*handle, *block.launchContext()->getCudaStream()); +// if (res != 0) +// throw nd4j::cuda_exception::build("Can't set stream for cuDNN", res); + +// auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) +// auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always +// auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] + +// auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) + +// NDArray::prepareSpecialUse({output}, {input, weights, bias}); + +// int sH = INT_ARG(2); // strides height +// int sW = INT_ARG(3); // strides width +// int pH = INT_ARG(4); // paddings height +// int pW = INT_ARG(5); // paddings width +// int dH = INT_ARG(6); // dilations height +// int dW = INT_ARG(7); // dilations width +// int isSameMode = INT_ARG(8); // 0-VALID, 1-SAME +// bool isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC + +// int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast(weights->sizeAt(0)); // filter(kernel) height +// int kW = INT_ARG(1) > 0 ? 
INT_ARG(1) : static_cast(weights->sizeAt(1)); // filter(kernel) width + +// int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; +// int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes +// ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); +// ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, isSameMode); + +// auto dtype = cudnnDataType(input->dataType()); + + +// cudnnTensorDescriptor_t src; +// cudnnCreateTensorDescriptor(&src); +// res = cudnnSetTensor4dDescriptorEx(src, dtype, input->sizeAt(0), input->sizeAt(1), input->sizeAt(2), input->sizeAt(3), input->strideAt(0), input->strideAt(1), input->strideAt(2), input->strideAt(3)); +// if (res != 0) +// throw nd4j::cuda_exception::build("cudnnSetTensor4dDescriptorEx src failed", res); + +// // TODO: we definitely want NHWC here as well +// cudnnFilterDescriptor_t wght; +// cudnnCreateFilterDescriptor(&wght); +// res = cudnnSetFilter4dDescriptor(wght, dtype, CUDNN_TENSOR_NCHW, oC, iC, kH, kW); +// if (res != 0) +// throw nd4j::cuda_exception::build("cudnnSetFilter4dDescriptor failed", res); + +// cudnnConvolutionDescriptor_t cdc; +// cudnnCreateConvolutionDescriptor(&cdc); +// res = cudnnSetConvolution2dDescriptor(cdc, pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, dtype); +// if (res != 0) +// throw nd4j::cuda_exception::build("cudnnSetConvolution2dDescriptor failed", res); + +// cudnnTensorDescriptor_t dst; +// cudnnCreateTensorDescriptor(&dst); +// res = cudnnSetTensor4dDescriptorEx(dst, dtype, output->sizeAt(0), output->sizeAt(1), output->sizeAt(2), output->sizeAt(3), output->strideAt(0), output->strideAt(1), output->strideAt(2), output->strideAt(3)); +// if (res != 0) +// throw nd4j::cuda_exception::build("cudnnSetTensor4dDescriptorEx dst failed", res); + +// // TODO: workspace algorithms are supposed to be faster, so we should use it here if we have enough memory +// cudnnConvolutionFwdAlgo_t algo; +// res = cudnnGetConvolutionForwardAlgorithm(*handle, src, wght, cdc, dst, CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, &algo); +// if (res != 0) +// throw nd4j::cuda_exception::build("cudnnGetConvolutionForwardAlgorithm failed", res); + +// // TODO: should be float if dtype is half/float, and double otherwise +// float alpha = 1.0f; +// float beta = 0.0f; +// res = cudnnConvolutionForward(*handle, &alpha, src, input->specialBuffer(), wght, weights->specialBuffer(), cdc, algo, nullptr, 0, &beta, dst, output->specialBuffer()); +// if (res != 0) +// throw nd4j::cuda_exception::build("cudnnConvolutionForward failed", res); + + +// if (bias != nullptr) { +// cudnnTensorDescriptor_t bs; +// cudnnCreateTensorDescriptor(&bs); +// if (isNCHW) { +// res = cudnnSetTensor4dDescriptor(bs, CUDNN_TENSOR_NCHW, dtype, 1, bias->lengthOf(), 1, 1); +// if (res != 0) +// throw nd4j::cuda_exception::build("cudnnSetTensor4dDescriptorEx bias NHWC failed", res); +// } else { +// res = cudnnSetTensor4dDescriptor(bs, CUDNN_TENSOR_NHWC, dtype, 1, 1, 1, bias->lengthOf()); +// if (res != 0) +// throw nd4j::cuda_exception::build("cudnnSetTensor4dDescriptorEx bias NHWC failed", res); +// } + +// res = cudnnAddTensor(*handle, &alpha, bs, bias->specialBuffer(), &alpha, dst, output->specialBuffer()); +// if (res != 0) +// throw nd4j::cuda_exception::build("cudnnAddTensor failed", res); +// } + + +// NDArray::registerSpecialUse({output}, {input, weights, 
bias}); + +// return Status::OK(); +// } + + +} +} +} diff --git a/cuda_code/conv3d_10.cu b/cuda_code/conv3d_10.cu new file mode 100644 index 0000000000000000000000000000000000000000..23b01e09b5512b9aeaba6a1dccf931758a8675cb --- /dev/null +++ b/cuda_code/conv3d_10.cu @@ -0,0 +1,537 @@ +/* ****************************************************************************** + * + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0. + * + * See the NOTICE file distributed with this work for additional + * information regarding copyright ownership. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + * + * SPDX-License-Identifier: Apache-2.0 + ******************************************************************************/ + +// +// @author raver119@gmail.com +// @author Yurii Shyrma (iuriish@yahoo.com) +// + +#include + +#include "cudnnUtils.h" + +namespace sd { +namespace ops { +namespace platforms { + +////////////////////////////////////////////////////////////////////////// +static void conv3dCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* bias, + NDArray* output, const int kD, const int kH, const int kW, const int sD, const int sH, + const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, + const int dW, const int paddingMode, const bool isNCDHW, const int wFormat) { + // cudnn support only one format for weights {oC,iC,kD,kH,kW} + + const int numDims = 5; + + int bS, iC, iD, iH, iW, oC, oD, oH, + oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width; + int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, wFormat, *input, *output, bS, iC, iD, iH, iW, oC, oD, oH, oW, + indIOioC, indIOioD, indWiC, indWoC, indWkD); + + auto handle = reinterpret_cast(context->getCuDnnHandle()); + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); + + const std::vector pads = {pD, pH, pW}; + const std::vector filtStrides = {sD, sH, sW}; + const std::vector dilations = {dD, dH, dW}; + + const std::vector xShape = {bS, iC, iD, iH, iW}; + const std::vector zShape = {bS, oC, oD, oH, oW}; + const std::vector wShape = {oC, iC, kD, kH, kW}; + const std::vector bShape = {1, oC, 1, 1, 1}; // {1, (isNCDHW ? oC : 1), 1, 1, (isNCDHW ? 1 : oC)}; + + const std::vector xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), + (int)input->strideAt(3), (int)input->strideAt(4)}; + const std::vector zStrides = {(int)output->strideAt(0), (int)output->strideAt(1), (int)output->strideAt(2), + (int)output->strideAt(3), (int)output->strideAt(4)}; + + cudnnTensorFormat_t format = isNCDHW ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; + PointersManager manager(context, __func__); + // input descriptor + CudnnTensor x; + if (input->ews() == 1) + x.setEx(format, cudnnDataType(input->dataType()), numDims, xShape.data()); + else + x.set(cudnnDataType(input->dataType()), numDims, xShape.data(), xStrides.data()); + + // weights descriptor + FilterDesc w; + w.set(cudnnDataType(weights->dataType()), CUDNN_TENSOR_NCHW, numDims, wShape.data()); + + // output descriptor + CudnnTensor z; + if (output->ews() == 1) + z.setEx(format, cudnnDataType(output->dataType()), numDims, zShape.data()); + else + z.set(cudnnDataType(output->dataType()), numDims, zShape.data(), zStrides.data()); + + // description of convolution + ConvolutionDesc conv; + conv.set(numDims - 2, pads.data(), filtStrides.data(), dilations.data(), CUDNN_CROSS_CORRELATION, + cudnnDataType(output->dataType())); + + // algorithm description + cudnnConvolutionFwdAlgo_t algo; + cudnnConvolutionFwdAlgoPerf_t algoPerf; + int count = 0; + // CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionForwardAlgorithm), cudnnGetConvolutionForwardAlgorithm( + // *handle, x, w, conv, z, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnFindConvolutionForwardAlgorithm), + cudnnFindConvolutionForwardAlgorithm(*handle, x, w, conv, z, 1, &count, &algoPerf)); + if (count == 0) + throw sd::cuda_exception::build("conv3dCUDNN: cudnnGetConvolutionForwardAlgorithm failed as the count is 0", 0); + algo = algoPerf.algo; + + // allocate auxiliary device memory, abbreviation ws means workspace + size_t wsSize; + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionForwardWorkspaceSize), + cudnnGetConvolutionForwardWorkspaceSize(*handle, x, w, conv, z, algo, &wsSize)); + void* wsData = manager.allocateDevMem(wsSize); + + // provide scaling parameters + const float alpha32(1), beta32(0); + const double alpha64(1), beta64(0); + const void* alpha = + output->sizeOfT() <= 4 ? reinterpret_cast(&alpha32) : reinterpret_cast(&alpha64); + const void* beta = + output->sizeOfT() <= 4 ? 
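// ---------------------------------------------------------------------------
// Unlike the 2-D code, the 3-D routine above uses the Nd descriptor setters:
// the five NCDHW dims go into cudnnSetTensorNdDescriptorEx /
// cudnnSetFilterNdDescriptor, and the three spatial pad/stride/dilation
// triples into cudnnSetConvolutionNdDescriptor. A minimal sketch with raw
// cuDNN calls is below; the helper name, the float data type and the
// zero-pad/unit-stride values are illustrative assumptions.
#include <cudnn.h>

// Builds 5-D NCDHW descriptors for a float conv3d: x is [bS, iC, iD, iH, iW],
// w is [oC, iC, kD, kH, kW]. Error checking is left out to keep the call
// order visible.
static void makeConv3dDescriptors(int bS, int iC, int iD, int iH, int iW,
                                  int oC, int kD, int kH, int kW,
                                  cudnnTensorDescriptor_t* x,
                                  cudnnFilterDescriptor_t* w,
                                  cudnnConvolutionDescriptor_t* conv) {
  const int xDims[5] = {bS, iC, iD, iH, iW};
  const int wDims[5] = {oC, iC, kD, kH, kW};
  const int pads[3] = {0, 0, 0}, strides[3] = {1, 1, 1}, dilations[3] = {1, 1, 1};

  cudnnCreateTensorDescriptor(x);
  cudnnSetTensorNdDescriptorEx(*x, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 5, xDims);

  cudnnCreateFilterDescriptor(w);
  cudnnSetFilterNdDescriptor(*w, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 5, wDims);

  cudnnCreateConvolutionDescriptor(conv);
  cudnnSetConvolutionNdDescriptor(*conv, 3, pads, strides, dilations,
                                  CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);
}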
reinterpret_cast(&beta32) : reinterpret_cast(&beta64); + + NDArray::prepareSpecialUse({output}, {input, weights, bias}); + + // run calculation + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnConvolutionForward), + cudnnConvolutionForward(*handle, alpha, x, input->specialBuffer(), w, weights->specialBuffer(), conv, algo, + wsData, wsSize, beta, z, output->specialBuffer())); + + // add bias if it is present + if (bias != nullptr) { + CudnnTensor b; + b.setEx(/*format*/ CUDNN_TENSOR_NCHW, cudnnDataType(bias->dataType()), numDims, bShape.data()); + + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnAddTensor), cudnnAddTensor(*handle, alpha, b, bias->specialBuffer(), alpha, + z, output->specialBuffer())); + } + + // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); + // if (cudaErr != 0) + // throw cuda_exception::build("conv3dCUDNN: cudaStreamSynchronize failed !", cudaErr); + + NDArray::registerSpecialUse({output}, {input, weights, bias}); +} + +////////////////////////////////////////////////////////////////////////// +static void conv3dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, + const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kD, + const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, + const int pH, const int pW, const int dD, const int dH, const int dW, const int paddingMode, + const bool isNCDHW, const int wFormat) { + // cudnn supports only two formats {oC,iC,kD,kH,kW} and {oC,kD,kH,kW,iC} for weights/gradW + + const int numDims = 5; + + int bS, iC, iD, iH, iW, oC, oD, oH, + oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width; + int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, wFormat, *input, *gradO, bS, iC, iD, iH, iW, oC, oD, oH, oW, + indIOioC, indIOioD, indWiC, indWoC, indWkD); + + auto handle = reinterpret_cast(context->getCuDnnHandle()); + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream(*handle, *context->getCudaStream())); + + const std::vector pads = {pD, pH, pW}; + const std::vector filtStrides = {sD, sH, sW}; + const std::vector dilations = {dD, dH, dW}; + + const std::vector xShape = {bS, iC, iD, iH, iW}; + const std::vector dzShape = {bS, oC, oD, oH, oW}; + const std::vector wShape = {oC, iC, kD, kH, kW}; + const std::vector dbShape = {1, (int)(isNCDHW ? oC : 1), 1, 1, (int)(isNCDHW ? 1 : oC)}; + + const std::vector xStrides = {(int)input->strideAt(0), (int)input->strideAt(1), (int)input->strideAt(2), + (int)input->strideAt(3), (int)input->strideAt(4)}; + const std::vector dxStrides = {(int)gradI->strideAt(0), (int)gradI->strideAt(1), (int)gradI->strideAt(2), + (int)gradI->strideAt(3), (int)gradI->strideAt(4)}; + const std::vector dzStrides = {(int)gradO->strideAt(0), (int)gradO->strideAt(1), (int)gradO->strideAt(2), + (int)gradO->strideAt(3), (int)gradO->strideAt(4)}; + + cudnnTensorFormat_t format = isNCDHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; + cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); + PointersManager manager(context, __func__); + // input descriptor, gradO descriptor, gradI descriptor + CudnnTensor x, dz, dx; + if (input->ews() == 1) + x.setEx(format, cudnnDataType(input->dataType()), numDims, xShape.data()); + else + x.set(cudnnDataType(input->dataType()), numDims, xShape.data(), xStrides.data()); + + if (gradO->ews() == 1) + dz.setEx(format, cudnnDataType(gradO->dataType()), numDims, dzShape.data()); + else + dz.set(cudnnDataType(gradO->dataType()), numDims, dzShape.data(), dzStrides.data()); + + if (gradI->ews() == 1) + dx.setEx(format, cudnnDataType(gradI->dataType()), numDims, xShape.data()); + else + dx.set(cudnnDataType(gradI->dataType()), numDims, xShape.data(), dxStrides.data()); + + // gradW descriptor + FilterDesc dw; + dw.set(cudnnDataType(gradW->dataType()), formatW, numDims, wShape.data()); + + // description of convolution + ConvolutionDesc conv; + conv.set(numDims - 2, pads.data(), filtStrides.data(), dilations.data(), CUDNN_CROSS_CORRELATION, + cudnnDataType(gradO->dataType())); + + // gradW algorithm description + cudnnConvolutionBwdFilterAlgo_t algoGradW; + cudnnConvolutionBwdFilterAlgoPerf_t algoGradWPerf; + int count = 0; + // CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionBackwardFilterAlgorithm), + // cudnnGetConvolutionBackwardFilterAlgorithm( *handle, x, dz, conv, dw, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, + // 0, &algoGradW)); + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnFindConvolutionBackwardFilterAlgorithm), + cudnnFindConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, 1, &count, &algoGradWPerf)); + if (count == 0) + throw sd::cuda_exception::build( + "conv3dBpCUDNN: cudnnGetConvolutionBackwardFilterAlgorithm failed as the count is 0", 0); + algoGradW = algoGradWPerf.algo; + + // gradI algorithm description + cudnnConvolutionBwdDataAlgo_t algoGradI; + cudnnConvolutionBwdDataAlgoPerf_t algoGradIPerf; + // CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetConvolutionBackwardDataAlgorithm), + // cudnnGetConvolutionBackwardDataAlgorithm( *handle, dw, dz, conv, x, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, + // &algoGradI)); + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnFindConvolutionBackwardDataAlgorithm), + cudnnFindConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, 1, &count, &algoGradIPerf)); + if (count == 0) + throw sd::cuda_exception::build("conv3dBpCUDNN: cudnnGetConvolutionBackwardDataAlgorithm failed as the count is 0", + 0); + algoGradI = algoGradIPerf.algo; + + // allocate auxiliary device memory for gradW calculation, abbreviation ws means workspace + size_t wsGradWSize; + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnGetConvolutionBackwardFilterWorkspaceSize), + cudnnGetConvolutionBackwardFilterWorkspaceSize(*handle, x, dz, conv, dw, algoGradW, &wsGradWSize)); + void* wsGradWData = manager.allocateDevMem(wsGradWSize); + + // allocate auxiliary device memory for gradI calculation, abbreviation ws means workspace + size_t wsGradISize; + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnGetConvolutionBackwardDataWorkspaceSize), + cudnnGetConvolutionBackwardDataWorkspaceSize(*handle, dw, dz, conv, dx, algoGradI, &wsGradISize)); + void* wsGradIData = manager.allocateDevMem(wsGradISize); + + // provide scaling parameters + const float alpha32(1), beta32(0); + const double alpha64(1), beta64(0); + const void* alpha = + gradO->sizeOfT() <= 4 ? reinterpret_cast(&alpha32) : reinterpret_cast(&alpha64); + const void* beta = + gradO->sizeOfT() <= 4 ? 
reinterpret_cast(&beta32) : reinterpret_cast(&beta64); + + NDArray::prepareSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); + + // run calculation for gradB (if not nullptr) + if (gradB != nullptr) { + CudnnTensor db; + db.setEx(format, cudnnDataType(gradB->dataType()), numDims, dbShape.data()); + + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnConvolutionBackwardBias), + cudnnConvolutionBackwardBias(*handle, alpha, dz, gradO->specialBuffer(), beta, db, gradB->specialBuffer())); + } + + // run calculation for gradW + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnConvolutionBackwardFilter), + cudnnConvolutionBackwardFilter(*handle, alpha, x, input->specialBuffer(), dz, gradO->specialBuffer(), conv, + algoGradW, wsGradWData, wsGradWSize, beta, dw, gradW->specialBuffer())); + + // run calculation for gradI + CHECK_CUDNN_FAILURE_MSG( + STRINGIZE(cudnnConvolutionBackwardData), + cudnnConvolutionBackwardData(*handle, alpha, dw, weights->specialBuffer(), dz, gradO->specialBuffer(), conv, + algoGradI, wsGradIData, wsGradISize, beta, dx, gradI->specialBuffer())); + + // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); + // if (cudaErr != 0) + // throw cuda_exception::build("conv3dBpCUDNN: cudaStreamSynchronize failed !", cudaErr); + + NDArray::registerSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); +} + +////////////////////////////////////////////////////////////////////////// +PLATFORM_IMPL(conv3dnew, ENGINE_CUDA) { + auto input = INPUT_VARIABLE(0); // [bS, iD, iH, iW, iC] (NDHWC) or [bS, iC, iD, iH, iW] (NCDHW) + auto weights = INPUT_VARIABLE(1); // [kD, kH, kW, iC, oC], [oC, iC, kD, kH, kW], [oC, kD, kH, kW, iC] + auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] + auto output = OUTPUT_VARIABLE(0); // [bS, oD, oH, oW, oC] (NDHWC) or [bS, oC, oD, oH, oW] (NCDHW) + + REQUIRE_TRUE(input->rankOf() == 5, 0, "CONV3D CUDNN OP: rank of input array must be equal to 5, but got %i instead !", + input->rankOf()); + REQUIRE_TRUE(weights->rankOf() == 5, 0, + "CONV3D CUDNN OP: rank of weights array must be equal to 5, but got %i instead !", weights->rankOf()); + + int kD = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast(weights->sizeAt(0)); // filter(kernel) depth + int kH = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast(weights->sizeAt(1)); // filter(kernel) height + int kW = INT_ARG(2) > 0 ? INT_ARG(2) : static_cast(weights->sizeAt(2)); // filter(kernel) width + int sD = INT_ARG(3); // strides depth + int sH = INT_ARG(4); // strides height + int sW = INT_ARG(5); // strides width + int pD = INT_ARG(6); // paddings depth + int pH = INT_ARG(7); // paddings height + int pW = INT_ARG(8); // paddings width + int dD = INT_ARG(9); // dilations depth + int dH = INT_ARG(10); // dilations height + int dW = INT_ARG(11); // dilations width + int paddingMode = INT_ARG(12); // 0-SAME, 1-VALID + int isNCDHW = block.getIArguments()->size() > 13 ? !INT_ARG(13) : 1; // INT_ARG(13): 1-NDHWC, 0-NCDHW + int wFormat = block.getIArguments()->size() > 14 + ? 
INT_ARG(14) + : 0; // 0-[kD, kH, kW, iC, oC], 1-[oC, iC, kD, kH, kW], 2-[oC, kD, kH, kW, iC] + + REQUIRE_TRUE(paddingMode < 2, 0, + "CONV3D CUDNN OP: causal padding mode (paddingMode = 2) is not allowed for this operation !"); + + int bS, iC, iD, iH, iW, oC, oD, oH, + oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width; + int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, wFormat, *input, *output, bS, iC, iD, iH, iW, oC, oD, oH, oW, + indIOioC, indIOioD, indWiC, indWoC, indWkD); + + ConvolutionUtils::calcPadding3D(pD, pH, pW, oD, oH, oW, iD, iH, iW, kD, kH, kW, sD, sH, sW, dD, dH, dW, paddingMode); + + std::vector expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kD, kH, kW, iC, oC); + REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, + "CONV3D CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", + ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); + if (bias) + REQUIRE_TRUE( + bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, + "CONV3D CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", + oC, bias->rankOf(), bias->lengthOf()); + + std::unique_ptr tmpWeight = {}, tmpInput = {}; + NDArray* newWeights = weights; // cudnn support only one format {oC,iC,kD,kH,kW} + if (1 != wFormat) { + tmpWeight.reset(new NDArray(weights->ordering(), {oC, iC, kD, kH, kW}, weights->dataType(), weights->getContext())); + newWeights = tmpWeight.get(); + newWeights->assign(weights->permute( + 0 == wFormat + ? std::vector({4, 3, 0, 1, 2}) + : std::vector( + {0, 4, 1, 2, + 3}))); // kD, kH, kW, iC, oC --> oC, iC, kD, kH, kW or oC, kD, kH, kW, iC --> oC, iC, kD, kH, kW + } + + if (paddingMode == 1) { // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings + auto ret = checkConv3dCUDNNPadAsymmetric(input, nullptr, iD, iH, iW, oD, oH, oW, kD, kH, kW, sD, sH, sW, pD, pH, pW, + dD, dH, dW, isNCDHW); + tmpInput = std::move(std::get<0>(ret)); // prolong life + if (tmpInput) input = tmpInput.get(); + } + conv3dCUDNN(block.launchContext(), input, newWeights, bias, output, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, + paddingMode, isNCDHW, wFormat); + + return sd::Status::OK; +} + +////////////////////////////////////////////////////////////////////////// +PLATFORM_CHECK(conv3dnew, ENGINE_CUDA) { + auto input = INPUT_VARIABLE(0); // [bS, iD, iH, iW, iC] (NDHWC) or [bS, iC, iD, iH, iW] (NCDHW) + auto weights = INPUT_VARIABLE(1); // [kD, kH, kW, iC, oC], [oC, iC, kD, kH, kW], [oC, kD, kH, kW, iC] + auto bias = block.width() > 2 ? 
INPUT_VARIABLE(2) : nullptr; // [oC] + + int paddingMode = INT_ARG(12); // 0-SAME, 1-VALID + + Requirements req("CUDNN CONV3d OP"); + req.expectNotEq(makeInfoVariable(paddingMode, "paddingMode"), 2) && + req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && + req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); + if (bias) { + req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); + } + req.logTheSuccess(); + return req; +} + +////////////////////////////////////////////////////////////////////////// +PLATFORM_IMPL(conv3dnew_bp, ENGINE_CUDA) { + auto input = INPUT_VARIABLE(0); // [bS, iD, iH, iW, iC] (NDHWC) or [bS, iC, iD, iH, iW] (NCDHW) + auto weights = INPUT_VARIABLE(1); // [kD, kH, kW, iC, oC], [oC, iC, kD, kH, kW], [oC, kD, kH, kW, iC] + auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] + auto gradO = block.width() > 3 + ? INPUT_VARIABLE(3) + : INPUT_VARIABLE(2); // [bS, oD, oH, oW, oC] (NDHWC) or [bS, oC, oD, oH, oW] (NCDHW), epsilon_next + + auto gradI = OUTPUT_VARIABLE(0); // [bS, iD, iH, iW, iC] (NDHWC) or [bS, iC, iD, iH, iW] (NCDHW), epsilon + auto gradW = OUTPUT_VARIABLE(1); // [kD, kH, kW, iC, oC], [oC, iC, kD, kH, kW], [oC, kD, kH, kW, iC] + auto gradB = block.width() > 3 ? OUTPUT_VARIABLE(2) : nullptr; // [oC] + + REQUIRE_TRUE(input->rankOf() == 5, 0, + "CONV3D_BP CUDNN OP: rank of input array must be equal to 5, but got %i instead !", input->rankOf()); + REQUIRE_TRUE(weights->rankOf() == 5, 0, + "CONV3D_BP CUDNN OP: rank of weights array must be equal to 5, but got %i instead !", weights->rankOf()); + REQUIRE_TRUE( + gradO->rankOf() == 5, 0, + "CONV3D_BP CUDNN OP: rank of output gradients (next epsilon) array must be equal to 5, but got %i instead !", + gradO->rankOf()); + + int kD = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast(weights->sizeAt(0)); // filter(kernel) depth + int kH = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast(weights->sizeAt(1)); // filter(kernel) height + int kW = INT_ARG(2) > 0 ? INT_ARG(2) : static_cast(weights->sizeAt(2)); // filter(kernel) width + int sD = INT_ARG(3); // strides depth + int sH = INT_ARG(4); // strides height + int sW = INT_ARG(5); // strides width + int pD = INT_ARG(6); // paddings depth + int pH = INT_ARG(7); // paddings height + int pW = INT_ARG(8); // paddings width + int dD = INT_ARG(9); // dilations depth + int dH = INT_ARG(10); // dilations height + int dW = INT_ARG(11); // dilations width + int paddingMode = INT_ARG(12); // 1-SAME, 0-VALID + int isNCDHW = block.getIArguments()->size() > 13 ? !INT_ARG(13) : 1; // INT_ARG(13): 1-NDHWC, 0-NCDHW + int wFormat = block.getIArguments()->size() > 14 + ? 
INT_ARG(14) + : 0; // 0-[kD, kH, kW, iC, oC], 1-[oC, iC, kD, kH, kW], 2-[oC, kD, kH, kW, iC] + + int bS, iC, iD, iH, iW, oC, oD, oH, + oW; // batch size, input channels, input depth/height/width, output channels, output depth/height/width; + int indIOioC, indIOioD, indWoC, indWiC, indWkD; // corresponding indexes + ConvolutionUtils::getSizesAndIndexesConv3d(isNCDHW, wFormat, *input, *gradO, bS, iC, iD, iH, iW, oC, oD, oH, oW, + indIOioC, indIOioD, indWiC, indWoC, indWkD); + + int trueoD, trueoH, trueoW; // true output depth/height/width + ConvolutionUtils::calcOutSizePool3D(trueoD, trueoH, trueoW, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, iD, iH, + iW, paddingMode); + + REQUIRE_TRUE(paddingMode < 2, 0, + "CONV3D_BP CUDNN OP: causal padding mode (paddingMode = 2) is not allowed for this operation !"); + + std::vector expectedGradOShape = ShapeUtils::composeShapeUsingDimsAndIdx( + {bS, oC, trueoD, trueoH, trueoW, 0, indIOioC, indIOioD, indIOioD + 1, indIOioD + 2}); + std::vector expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kD, kH, kW, iC, oC); + REQUIRE_TRUE( + gradO->isSameShape(expectedGradOShape), 0, + "CONV3D_BP CUDNN OP: wrong shape of output gradients (next epsilon) array, expected is %s, but got %s instead !", + ShapeUtils::shapeAsString(expectedGradOShape).c_str(), ShapeUtils::shapeAsString(gradO).c_str()); + REQUIRE_TRUE(gradW->isSameShape(expectedWeightsShape), 0, + "CONV3D_BP CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", + ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); + if (bias) + REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, + "CONV3D_BP CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i " + "instead !", + oC, bias->rankOf(), bias->lengthOf()); + + ConvolutionUtils::calcPadding3D(pD, pH, pW, oD, oH, oW, iD, iH, iW, kD, kH, kW, sD, sH, sW, dD, dH, dW, paddingMode); + + std::unique_ptr tmpGradI = {}, tmpInput = {}, tmpWeights = {}, tmpGradW = {}; + NDArray *newWeights = weights, + *newGradW = gradW; // cudnn support only two formats {oC,iC,kD,kH,kW} and {oC,kD,kH,kW,iC} + if (0 == wFormat) { + tmpGradW.reset(new NDArray( + gradW->ordering(), + isNCDHW ? std::vector({oC, iC, kD, kH, kW}) : std::vector({oC, kD, kH, kW, iC}), + gradW->dataType(), gradW->getContext())); + tmpWeights.reset(new NDArray( + weights->ordering(), + isNCDHW ? std::vector({oC, iC, kD, kH, kW}) : std::vector({oC, kD, kH, kW, iC}), + weights->dataType(), weights->getContext())); + newGradW = tmpGradW.get(); + newWeights = tmpWeights.get(); + newWeights->assign(weights->permute( + isNCDHW ? 
std::vector({4, 3, 0, 1, 2}) + : std::vector({4, 0, 1, 2, 3}))); // (kD, kH, kW, iC, oC --> oC, iC, kD, kH, kW) or (kD, kH, kW, + // iC, oC --> oC, kD, kH, kW, iC) + } + + NDArray* newInput = input; + NDArray* newGradI = gradI; + if (paddingMode == 1) { // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings + auto ret = checkConv3dCUDNNPadAsymmetric(input, gradI, iD, iH, iW, oD, oH, oW, kD, kH, kW, sD, sH, sW, pD, pH, pW, + dD, dH, dW, isNCDHW); + tmpInput = std::move(std::get<0>(ret)); + tmpGradI = std::move(std::get<1>(ret)); + if (tmpInput) newInput = tmpInput.get(); + if (tmpGradI) newGradI = tmpGradI.get(); + } + conv3dBpCUDNN(block.launchContext(), newInput, newWeights, gradO, newGradI, newGradW, gradB, kD, kH, kW, sD, sH, sW, + pD, pH, pW, dD, dH, dW, paddingMode, isNCDHW, wFormat); + + if (0 == wFormat) { + newGradW->permutei(isNCDHW ? std::vector({2, 3, 4, 1, 0}) + : std::vector({1, 2, 3, 4, 0})); // (oC, iC, kD, kH, kW --> kD, kH, kW, iC, oC) or + // (oC, kD, kH, kW, iC --> kD, kH, kW, iC, oC) + gradW->assign(newGradW); + } + + if (newInput != input) { + if (isNCDHW) + gradI->assign((*newGradI)({0, 0, 0, 0, 0, gradI->sizeAt(2), 0, gradI->sizeAt(3), 0, gradI->sizeAt(4)})); + else + gradI->assign((*newGradI)({0, 0, 0, gradI->sizeAt(1), 0, gradI->sizeAt(2), 0, gradI->sizeAt(3), 0, 0})); + } + + return sd::Status::OK; +} + +PLATFORM_CHECK(conv3dnew_bp, ENGINE_CUDA) { + auto input = INPUT_VARIABLE(0); // [bS, iD, iH, iW, iC] (NDHWC) or [bS, iC, iD, iH, iW] (NCDHW) + auto weights = INPUT_VARIABLE(1); // [kD, kH, kW, iC, oC], [oC, iC, kD, kH, kW], [oC, kD, kH, kW, iC] + auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] + auto gradO = block.width() > 3 + ? INPUT_VARIABLE(3) + : INPUT_VARIABLE(2); // [bS, oD, oH, oW, oC] (NDHWC) or [bS, oC, oD, oH, oW] (NCDHW), epsilon_next + + int paddingMode = INT_ARG(12); // 1-SAME, 0-VALID + int isNCDHW = block.getIArguments()->size() > 13 ? 
!INT_ARG(13) : 1; // INT_ARG(13): 1-NDHWC, 0-NCDHW + + Requirements req("CUDNN CONV3d_BP OP"); + req.expectNotEq(makeInfoVariable(paddingMode, "paddingMode"), 2) && + req.expectTrue(makeInfoVariable(isNCDHW, "isNCDHW")) && + req.expectIn(makeInfoVariable(input->dataType(), TYPE_MSG_INPUT0), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && + req.expectIn(makeInfoVariable(weights->dataType(), TYPE_MSG_INPUT1), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); + if (bias) { + req.expectIn(makeInfoVariable(bias->dataType(), TYPE_MSG_INPUT_ "#bias"), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}) && + req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT3), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); + } else { + req.expectIn(makeInfoVariable(gradO->dataType(), TYPE_MSG_INPUT2), + {DataType::HALF, DataType::FLOAT32, DataType::DOUBLE}); + } + req.logTheSuccess(); + return req; +} + +} // namespace platforms +} // namespace ops +} // namespace sd diff --git a/cuda_code/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_16x64x8_16x64x8_id.cu b/cuda_code/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_16x64x8_16x64x8_id.cu new file mode 100644 index 0000000000000000000000000000000000000000..70949c6586f2b6197e02ffe55433a5c1c71dd010 --- /dev/null +++ b/cuda_code/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_16x64x8_16x64x8_id.cu @@ -0,0 +1,37 @@ +#if !MEGDNN_TEGRA_X1 +// generated by gen_cuda_conv_bias_kern_impls.py +// ignore warning of cutlass +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" + +using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; +using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; +using LayoutDst = cutlass::layout::TensorNCxHWx<4>; +using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>; +using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>; +using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; +using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< + int8_t, 4, int32_t, int32_t, float>; +using Convolution = cutlass::convolution::device::Convolution< + int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, + LayoutDst, int32_t, LayoutDst, int32_t, + cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, + ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, + cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< + cutlass::convolution::ConvType::kConvolution>, + 2, 4, 4, true, + cutlass::arch::OpMultiplyAddSaturate>; +template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper( + const typename Convolution::ElementSrc* d_src, + const typename Convolution::ElementFilter* d_filter, + const typename Convolution::ElementBias* d_bias, + const typename Convolution::ElementDst* d_z, + typename Convolution::ElementDst* d_dst, + int* workspace, + typename Convolution::ConvolutionParameter const& conv_param, + typename Convolution::EpilogueOutputOp::Params const& epilogue, + cudaStream_t stream); +#pragma GCC diagnostic pop +#endif diff --git a/cuda_code/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_1x1_128x64x32_64x32x32_relu_1.cu b/cuda_code/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_1x1_128x64x32_64x32x32_relu_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..f871dae4ed6b97fd373ceec9706bdb3b2f6f7e4f --- /dev/null +++ 
b/cuda_code/conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_1x1_128x64x32_64x32x32_relu_1.cu @@ -0,0 +1,36 @@ +#if !MEGDNN_TEGRA_X1 +// generated by gen_cuda_conv_bias_kern_impls.py +// ignore warning of cutlass +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" + +using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; +using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; +using LayoutDst = cutlass::layout::TensorNCxHWx<4>; +using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>; +using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; +using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; +using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< + int8_t, 4, int32_t, int32_t, float>; +using Convolution = cutlass::conv::device::Convolution< + int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, + LayoutDst, int32_t, LayoutDst, int32_t, + cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, + ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, + cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, + 2, 4, 16, false, + cutlass::arch::OpMultiplyAddSaturate>; +template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper( + const typename Convolution::ElementSrc* d_src, + const typename Convolution::ElementFilter* d_filter, + const typename Convolution::ElementBias* d_bias, + const typename Convolution::ElementDst* d_z, + typename Convolution::ElementDst* d_dst, + int* workspace, + typename Convolution::ConvolutionParameter const& conv_param, + typename Convolution::EpilogueOutputOp::Params const& epilogue, + cudaStream_t stream); +#pragma GCC diagnostic pop +#endif diff --git a/cuda_code/conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_unroll_width_per_chan_hswish_1.cu b/cuda_code/conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_unroll_width_per_chan_hswish_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..5050e4f49aa1084ef09e10a688f36a797d9179d9 --- /dev/null +++ b/cuda_code/conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_unroll_width_per_chan_hswish_1.cu @@ -0,0 +1,13 @@ +// generated by gen_cuda_conv_bias_kern_impls.py +#include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_unroll_width.cuinl" + +template void megdnn::cuda::conv_bias_int8:: + do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4_unroll_width< + PerChannelBiasVisitor, + IConvEpilogue>>( + const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, + IConvEpilogue< + Activation> + epilogue, + const ConvParam& param, float alpha, float beta, cudaStream_t stream); diff --git a/cuda_code/conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4_unroll_width_per_chan_hswish_1.cu b/cuda_code/conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4_unroll_width_per_chan_hswish_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..7c9500fe980b369e07acff05207dd0c4c0e40085 --- /dev/null +++ b/cuda_code/conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4_unroll_width_per_chan_hswish_1.cu @@ -0,0 +1,13 @@ +// generated by gen_cuda_conv_bias_kern_impls.py +#include "../conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4_unroll_width.cuinl" + +template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma8x32x16_cdiv4hwn4_unroll_width>>( + const int8_t* d_src, + const int8_t* d_filter, + 
PerChannelBiasVisitor bias, + IConvEpilogue> epilogue, + const ConvParam& param, + float alpha, + float beta, + cudaStream_t stream); diff --git a/cuda_code/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_relu_1.cu b/cuda_code/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_relu_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..8926a9d6a5e51c34a9a0c731246e020d644ee085 --- /dev/null +++ b/cuda_code/conv_bias_int8_implicit_gemm_imma_ncdiv32hw32_1x1_32x64x64_32x16x64_relu_1.cu @@ -0,0 +1,36 @@ +#if !MEGDNN_TEGRA_X1 +// generated by gen_cuda_conv_bias_kern_impls.py +// ignore warning of cutlass +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" + +using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; +using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; +using LayoutDst = cutlass::layout::TensorNCxHWx<32>; +using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>; +using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>; +using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; +using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< + int8_t, 8, int32_t, int32_t, float>; +using Convolution = cutlass::conv::device::Convolution< + int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, + LayoutDst, int32_t, LayoutDst, int32_t, + cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, + ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, + cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, + 2, 16, 16, false, + cutlass::arch::OpMultiplyAddSaturate>; +template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper( + const typename Convolution::ElementSrc* d_src, + const typename Convolution::ElementFilter* d_filter, + const typename Convolution::ElementBias* d_bias, + const typename Convolution::ElementDst* d_z, + typename Convolution::ElementDst* d_dst, + int* workspace, + typename Convolution::ConvolutionParameter const& conv_param, + typename Convolution::EpilogueOutputOp::Params const& epilogue, + cudaStream_t stream); +#pragma GCC diagnostic pop +#endif diff --git a/cuda_code/conv_transpose_grad_kernel.cu b/cuda_code/conv_transpose_grad_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..0ce16f66becfaf92552e3c42be1b940f5506639b --- /dev/null +++ b/cuda_code/conv_transpose_grad_kernel.cu @@ -0,0 +1,1118 @@ +/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/phi/kernels/conv_transpose_grad_kernel.h" + +#include + +#include "paddle/phi/backends/dynload/cudnn.h" +#include "paddle/phi/common/float16.h" +#include "paddle/phi/core/ddim.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/cpu/conv_util.h" +#include "paddle/phi/kernels/funcs/batch_norm_utils.h" +#include "paddle/phi/kernels/funcs/math_function.h" +#include "paddle/phi/kernels/funcs/padding.h" +#include "paddle/phi/kernels/funcs/slice.h" +#include "paddle/phi/kernels/transpose_kernel.h" + +#ifdef PADDLE_WITH_HIP +#include "paddle/fluid/operators/conv_miopen_helper.h" +#include "paddle/fluid/platform/device/gpu/rocm/miopen_helper.h" +#else +#include "paddle/fluid/operators/conv_cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h" +#endif + +namespace phi { + +using GPUDNNDataLayout = paddle::platform::DataLayout; + +template +void ConvTransposeGradRawGPUDNNKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + const T* filter_data = filter.data(); + std::vector paddings_ = paddings; + std::vector dilations_ = + dilations; // cudnn v5 does not support dilations + const GPUDNNDataLayout data_layout = + (data_format != "NHWC" ? GPUDNNDataLayout::kNCHW + : GPUDNNDataLayout::kNHWC); + + // if channel_last, transpose to channel_first + DenseTensor x_transpose; + DenseTensor dout_transpose; + std::vector x_vec = vectorize(x.dims()); + std::vector out_vec = vectorize(dout.dims()); + if (data_layout == GPUDNNDataLayout::kNHWC) { + if (strides.size() == 2U) { + std::vector axis = {0, 3, 1, 2}; + for (size_t i = 0; i < axis.size(); ++i) { + x_vec[i] = x.dims()[axis[i]]; + out_vec[i] = dout.dims()[axis[i]]; + } + x_transpose = Transpose(ctx, x, axis); + dout_transpose = Transpose(ctx, dout, axis); + } else if (strides.size() == 3U) { + std::vector axis = {0, 4, 1, 2, 3}; + for (size_t i = 0; i < axis.size(); ++i) { + x_vec[i] = x.dims()[axis[i]]; + out_vec[i] = dout.dims()[axis[i]]; + } + x_transpose = Transpose(ctx, x, axis); + dout_transpose = Transpose(ctx, dout, axis); + } + } else { + x_transpose = x; + dout_transpose = dout; + } + + // update padding and dilation + auto x_dims = x_transpose.dims(); + auto filter_dims = filter.dims(); + DDim x_data_dims; + x_data_dims = slice_ddim(x_dims, 2, x_dims.size()); + DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); + std::vector ksize = vectorize(filter_data_dims); + UpdatePaddingAndDilation( + &paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize); + + int data_dim = strides.size(); // 2d or 3d + bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim); + + std::vector x_pad(x_dims.size() * 2, 0); + DenseTensor transformed_dout; + std::vector padding_common(data_dim, 0); + if (!is_sys_pad) { + std::vector padding_diff(data_dim); + std::vector new_dout_shape_vec(data_dim + 2); + new_dout_shape_vec[0] = dout_transpose.dims()[0]; + new_dout_shape_vec[1] = dout_transpose.dims()[1]; + + for (size_t i = 0; i < data_dim; ++i) { + padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]); + padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]); + new_dout_shape_vec[i + 2] = + dout_transpose.dims()[i + 2] + padding_diff[i]; + x_pad[2 * 
i + 4] = paddings_[2 * i] - padding_common[i]; + x_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i]; + } + + transformed_dout.Resize(make_ddim(new_dout_shape_vec)); + ctx.template Alloc(&transformed_dout); + + const int rank = x_transpose.dims().size(); + T pad_value(0.0); + switch (rank) { + case 4: { + funcs::PadFunction( + ctx, x_pad, dout_transpose, pad_value, &transformed_dout); + } break; + case 5: { + funcs::PadFunction( + ctx, x_pad, dout_transpose, pad_value, &transformed_dout); + } break; + default: + PADDLE_THROW(errors::InvalidArgument( + "Op(ConvTranspose) only supports 4-D or 5-D x DenseTensor.")); + } + } else { + transformed_dout = dout_transpose; + if (paddings_.size() == data_dim) { + for (size_t i = 0; i < data_dim; ++i) { + padding_common[i] = paddings_[i]; + } + } else { + for (size_t i = 0; i < data_dim; ++i) { + padding_common[i] = paddings_[2 * i]; + } + } + } + + const T* x_data = x_transpose.data(); + const T* dout_data = transformed_dout.data(); + out_vec = vectorize(transformed_dout.dims()); + + // ------------------- cudnn descriptors --------------------- + GPUDNNDataLayout layout; + + if (strides.size() == 2U) { + layout = GPUDNNDataLayout::kNCHW; + } else { + layout = GPUDNNDataLayout::kNCDHW; + } + + int iwo_groups = groups; + int c_groups = 1; +#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) + iwo_groups = 1; + c_groups = groups; + groups = 1; +#endif + + auto dtype = paddle::platform::CudnnDataType::type; + + paddle::operators::ConvArgs args1{&transformed_dout, + &filter, + &x_transpose, + strides, + padding_common, + dilations_, + dtype}; + paddle::operators::ConvArgs args2{&transformed_dout, + &filter, + &x_transpose, + strides, + padding_common, + dilations_, + dtype}; + +#ifdef PADDLE_WITH_HIP + paddle::operators::SearchResult fwd_result; + paddle::operators::SearchResult + filter_result; +#else + paddle::operators::SearchResult fwd_result; + paddle::operators::SearchResult + filter_result; +#endif + + auto layout_tensor = paddle::platform::GetCudnnTensorFormat(layout); + size_t workspace_size = 0; + auto handle = ctx.cudnn_handle(); + bool deterministic = FLAGS_cudnn_deterministic; + T* dx_data = nullptr; + T* dfilter_data = nullptr; + + if (dx) { + dx_data = ctx.template Alloc(dx); + args1.handle = handle; + args1.idesc.set(transformed_dout, iwo_groups); + args1.wdesc.set(filter, layout_tensor, iwo_groups); + args1.odesc.set(x_transpose, iwo_groups); + args1.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_groups); +#ifdef PADDLE_WITH_HIP + using search1 = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, search1::GetWorkspaceSize(args1)); + fwd_result.algo = + search1::Find(args1, false, deterministic, workspace_size, ctx); +#else + using search1 = + paddle::operators::SearchAlgorithm; + fwd_result = search1::Find(args1, false, deterministic, ctx); + workspace_size = std::max( + workspace_size, search1::GetWorkspaceSize(args1, fwd_result.algo)); +#endif + } + + if (dfilter) { + dfilter_data = ctx.template Alloc(dfilter); + args2.handle = handle; + args2.idesc.set(transformed_dout, iwo_groups); + args2.wdesc.set(*dfilter, layout_tensor, iwo_groups); + args2.odesc.set(x_transpose, iwo_groups); + args2.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_groups); +#ifdef PADDLE_WITH_HIP + using search2 = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, 
search2::GetWorkspaceSize(args2)); + filter_result.algo = + search2::Find(args2, false, deterministic, workspace_size, ctx); +#else + using search2 = + paddle::operators::SearchAlgorithm; + filter_result = search2::Find(args2, false, deterministic, ctx); + workspace_size = std::max( + workspace_size, search2::GetWorkspaceSize(args2, filter_result.algo)); +#endif + } + + // ------------------- cudnn conv backward data --------------------- + // FIxME(typhoonzero): template type T may not be the same as cudnn call. + int x_offset = x.numel() / x.dims()[0] / groups; + int dout_offset = + transformed_dout.numel() / transformed_dout.dims()[0] / groups; + int filter_offset = filter.numel() / groups; + paddle::operators::ScalingParamType alpha = 1.0f; + paddle::operators::ScalingParamType beta = 0.0f; + auto workspace_handle = ctx.cudnn_workspace_handle(); + if (dx) { + // Because beta is zero, it is unnecessary to reset dx. + for (int g = 0; g < groups; g++) { +#ifdef PADDLE_WITH_HIP + auto cudnn_func = [&](void* cudnn_workspace) { + PADDLE_ENFORCE_GPU_SUCCESS( + dynload::miopenConvolutionForward(handle, + &alpha, + args1.idesc.desc(), + dout_data + dout_offset * g, + args1.wdesc.desc(), + filter_data + filter_offset * g, + args1.cdesc.desc(), + fwd_result.algo, + &beta, + args1.odesc.desc(), + dx_data + x_offset * g, + cudnn_workspace, + workspace_size)); + }; +#else // PADDLE_WITH_HIP + auto cudnn_func = [&](void* cudnn_workspace) { + PADDLE_ENFORCE_GPU_SUCCESS( + dynload::cudnnConvolutionForward(handle, + &alpha, + args1.idesc.desc(), + dout_data + dout_offset * g, + args1.wdesc.desc(), + filter_data + filter_offset * g, + args1.cdesc.desc(), + fwd_result.algo, + cudnn_workspace, + workspace_size, + &beta, + args1.odesc.desc(), + dx_data + x_offset * g)); + }; +#endif // PADDLE_WITH_HIP + workspace_handle.RunFunc(cudnn_func, workspace_size); + } + + if (data_layout == GPUDNNDataLayout::kNHWC) { + DenseTensor dx_transpose; + DenseTensor dx_nchw; + dx_nchw.ShareDataWith(*dx); + dx_nchw.Resize(make_ddim(x_vec)); + if (strides.size() == 2U) { + std::vector axis = {0, 2, 3, 1}; + dx_transpose = Transpose(ctx, dx_nchw, axis); + *dx = dx_transpose; + } else if (strides.size() == 3U) { + std::vector axis = {0, 2, 3, 4, 1}; + dx_transpose = Transpose(ctx, dx_nchw, axis); + *dx = dx_transpose; + } + } + } + + // ------------------- cudnn conv backward filter --------------------- + if (dfilter) { + // Because beta is zero, it is unnecessary to reset dfilter. 
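+      // Note on the scaling parameters used below: both cuDNN and MIOpen compute
+      //   dfilter = alpha * conv_bwd_filter(x, dout) + beta * dfilter,
+      // so with beta == 0 any previous contents of dfilter are simply overwritten
+      // and the buffer does not need to be zeroed before the per-group calls.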
+ // Gradient with respect to the filter + for (int g = 0; g < groups; g++) { +#ifdef PADDLE_WITH_HIP + auto cudnn_func = [&](void* cudnn_workspace) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardWeights( + handle, + &alpha, + args2.odesc.desc(), + x_data + x_offset * g, + args2.idesc.desc(), + dout_data + dout_offset * g, + args2.cdesc.desc(), + filter_result.algo, + &beta, + args2.wdesc.desc(), + dfilter_data + filter_offset * g, + cudnn_workspace, + workspace_size)); + }; +#else // PADDLE_WITH_HIP + auto cudnn_func = [&](void* cudnn_workspace) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardFilter( + handle, + &alpha, + args2.idesc.desc(), + dout_data + dout_offset * g, + args2.odesc.desc(), + x_data + x_offset * g, + args2.cdesc.desc(), + filter_result.algo, + cudnn_workspace, + workspace_size, + &beta, + args2.wdesc.desc(), + dfilter_data + filter_offset * g)); + }; +#endif // PADDLE_WITH_HIP + workspace_handle.RunFunc(cudnn_func, workspace_size); + } + } +} + +template +void Conv2dTransposeGradGPUDNNKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings_, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations_, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + ConvTransposeGradRawGPUDNNKernel(ctx, + x, + filter, + dout, + strides, + paddings_, + padding_algorithm, + groups, + dilations_, + data_format, + dx, + dfilter); +} + +/* + * Inputs: I, filter, dout, ddI, ddfilter + * Outputs: ddout, dfilter, dI + * ddo = conv_bp_data(filter, ddI) + conv_bp_data(ddfilter, I) + * dfilter = conv_bp_filter(dout, ddI) + * dI = conv(dout, ddfilter) + */ +template +void Conv2dTransposeDoubleGradGPUDNNKernel( + const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const DenseTensor& ddx, + const DenseTensor& ddfilter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter, + DenseTensor* ddout) { + if (dx) { + ctx.template Alloc(dx); + } + if (dfilter) { + ctx.template Alloc(dfilter); + } + if (ddout) { + ctx.template Alloc(ddout); + funcs::SetConstant set_zero; + set_zero(ctx, ddout, static_cast(0)); + } + + const T* filter_ = filter.data(); + const T* dout_ = dout.data(); + const T* ddx_ = nullptr; + const T* ddfilter_ = nullptr; + T* dx_ = nullptr; + T* dfilter_ = nullptr; + T* ddout_ = nullptr; + T* transformed_dx_ = nullptr; + + std::vector paddings_ = paddings; + std::vector dilations_ = dilations; + + bool deterministic = FLAGS_cudnn_deterministic; + const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); + + // transform DenseTensors to channel first----------- + DenseTensor transformed_x_channel(x.type()); + DenseTensor transformed_dout_channel(dout.type()); + DenseTensor transformed_ddx_channel(x.type()); + + DenseTensor transformed_dx_channel(x.type()); + DenseTensor transformed_ddout_channel(dout.type()); + + if (channel_last) { + ResizeToChannelFirst(ctx, &x, &transformed_x_channel); + TransToChannelFirst(ctx, &x, &transformed_x_channel); + + ResizeToChannelFirst(ctx, &dout, &transformed_dout_channel); + 
TransToChannelFirst(ctx, &dout, &transformed_dout_channel); + + ResizeToChannelFirst(ctx, &ddx, &transformed_ddx_channel); + TransToChannelFirst(ctx, &ddx, &transformed_ddx_channel); + + if (dx) { + ResizeToChannelFirst(ctx, dx, &transformed_dx_channel); + ctx.template Alloc(&transformed_dx_channel); + } + if (ddout) { + ResizeToChannelFirst(ctx, ddout, &transformed_ddout_channel); + } + } else { + transformed_x_channel = x; + transformed_dout_channel = dout; + transformed_ddx_channel = ddx; + + if (dx) { + transformed_dx_channel = *dx; + } + } + std::vector out_vec = vectorize(transformed_dout_channel.dims()); + + auto x_dims = transformed_x_channel.dims(); + auto filter_dims = filter.dims(); + DDim x_data_dims = slice_ddim(x_dims, 2, x_dims.size()); + DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); + std::vector ksize = vectorize(filter_data_dims); + UpdatePaddingAndDilation( + &paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize); + + int data_dim = strides.size(); // 2d or 3d + bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim); + DenseTensor transformed_x(x.type()); + DenseTensor transformed_ddx(x.type()); + + DenseTensor transformed_dout(dout.type()); + + std::vector padding_common(data_dim, 0); + std::vector input_pad(x.dims().size() * 2, 0); + + if (!is_sys_pad) { + // get pad + std::vector padding_diff(data_dim); + std::vector new_input_shape_vec(data_dim + 2); + std::vector new_output_grad_shape_vec(data_dim + 2); + + new_input_shape_vec[0] = transformed_x_channel.dims()[0]; + new_input_shape_vec[1] = transformed_x_channel.dims()[1]; + + new_output_grad_shape_vec[0] = transformed_dout_channel.dims()[0]; + new_output_grad_shape_vec[1] = transformed_dout_channel.dims()[1]; + + for (size_t i = 0; i < data_dim; ++i) { + padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]); + padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]); + new_input_shape_vec[i + 2] = + transformed_x_channel.dims()[i + 2] + padding_diff[i]; + + new_output_grad_shape_vec[i + 2] = + transformed_dout_channel.dims()[i + 2] + padding_diff[i]; + + input_pad[2 * i + 4] = paddings_[2 * i] - padding_common[i]; + input_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i]; + } + DDim new_input_shape(make_ddim(new_input_shape_vec)); + transformed_x.Resize(new_input_shape); + transformed_ddx.Resize(new_input_shape); + transformed_dout.Resize(make_ddim(new_output_grad_shape_vec)); + + ctx.template Alloc(&transformed_x); + ctx.template Alloc(&transformed_ddx); + ctx.template Alloc(&transformed_dout); + + // pad for input + const int rank = x.dims().size(); + T pad_value(0.0); + switch (rank) { + case 4: { + funcs::PadFunction( + ctx, input_pad, transformed_x_channel, pad_value, &transformed_x); + funcs::PadFunction(ctx, + input_pad, + transformed_dout_channel, + pad_value, + &transformed_dout); + funcs::PadFunction(ctx, + input_pad, + transformed_ddx_channel, + pad_value, + &transformed_ddx); + } break; + case 5: { + funcs::PadFunction( + ctx, input_pad, transformed_x_channel, pad_value, &transformed_x); + funcs::PadFunction(ctx, + input_pad, + transformed_ddx_channel, + pad_value, + &transformed_ddx); + } break; + default: + PADDLE_THROW(errors::InvalidArgument( + "ConvOp only support tensors with 4 or 5 dimensions.")); + } + } else { + transformed_x = transformed_x_channel; + transformed_dout = transformed_dout_channel; + transformed_ddx = transformed_ddx_channel; + + if (paddings_.size() == data_dim) { + for (size_t i = 0; i < 
data_dim; ++i) { + padding_common[i] = paddings_[i]; + } + } else { + for (size_t i = 0; i < data_dim; ++i) { + padding_common[i] = paddings_[2 * i]; + } + } + } + + std::vector starts(data_dim, 0); + std::vector ends(data_dim, 0); + std::vector axes(data_dim, 0); + for (size_t i = 0; i < data_dim; ++i) { + starts[i] = input_pad[2 * i + 4] * (strides[i] + 1); + ends[i] = starts[i] + out_vec[i + 2]; + axes[i] = i + 2; + } + + std::vector transformed_out_vec = out_vec; + for (size_t i = 0; i < data_dim; ++i) { + transformed_out_vec[i + 2] = + out_vec[i + 2] + + (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] - + 2 * padding_common[i] + paddings_[2 * i] + paddings_[2 * i + 1]; + } + + if (!is_sys_pad) { + transformed_ddout_channel.Resize(make_ddim(transformed_out_vec)); + ctx.template Alloc(&transformed_ddout_channel); + } else { + ctx.template Alloc(ddout); + transformed_ddout_channel = *ddout; + transformed_ddout_channel.Resize(make_ddim(transformed_out_vec)); + } + + const T* x_ = transformed_x.data(); + + int iwo_group = groups; + int c_group = 1; +#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) + iwo_group = 1; + c_group = groups; + groups = 1; +#endif + auto dtype = paddle::platform::CudnnDataType::type; + + auto handle = ctx.cudnn_handle(); + + paddle::operators::ConvArgs args1{&transformed_ddout_channel, + &filter, + &transformed_ddx, + strides, + padding_common, + dilations_, + dtype}; + paddle::operators::ConvArgs args2{&transformed_ddout_channel, + &ddfilter, + &transformed_x, + strides, + padding_common, + dilations_, + dtype}; + + paddle::operators::ConvArgs args3{&transformed_dout, + dfilter, + &transformed_ddx_channel, + strides, + padding_common, + dilations_, + dtype}; + paddle::operators::ConvArgs args4{&transformed_dout, + &ddfilter, + &transformed_dx_channel, + strides, + padding_common, + dilations_, + dtype}; +#ifdef PADDLE_WITH_HIP + paddle::operators::SearchResult bwd_result1; + paddle::operators::SearchResult bwd_result2; + paddle::operators::SearchResult + filter_result; + paddle::operators::SearchResult fwd_result; +#else + paddle::operators::SearchResult bwd_result1; + paddle::operators::SearchResult bwd_result2; + paddle::operators::SearchResult + filter_result; + paddle::operators::SearchResult fwd_result; +#endif + + auto layout = paddle::platform::GetCudnnTensorFormat(GPUDNNDataLayout::kNCHW); + + // ddo = conv(ddI, filter) + conv(I, ddfilter) + size_t workspace_size = 0; + + T* transformed_ddout_channel_ = nullptr; + + if (ddout) { + ddout_ = ddout->data(); + transformed_ddout_channel_ = transformed_ddout_channel.data(); + + args1.handle = handle; + args1.idesc.set(transformed_ddout_channel, iwo_group); + args1.wdesc.set(filter, layout, iwo_group); + args1.odesc.set(transformed_ddx, iwo_group); + args1.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_group); +#ifdef PADDLE_WITH_HIP + using search1 = + paddle::operators::SearchAlgorithm; + workspace_size = search1::GetWorkspaceSize(args1); + bwd_result1.algo = + search1::Find(args1, false, deterministic, workspace_size, ctx); +#else + using search1 = + paddle::operators::SearchAlgorithm; + bwd_result1 = search1::Find(args1, false, deterministic, ctx); + workspace_size = search1::GetWorkspaceSize(args1, bwd_result1.algo); +#endif + + ddfilter_ = ddfilter.data(); + args2.handle = handle; + args2.idesc.set(transformed_ddout_channel, iwo_group); + args2.wdesc.set(ddfilter, layout, iwo_group); + args2.odesc.set(transformed_x, iwo_group); + 
args2.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_group); +#ifdef PADDLE_WITH_HIP + using search2 = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2)); + bwd_result2.algo = + search2::Find(args2, false, deterministic, workspace_size, ctx); +#else + using search2 = + paddle::operators::SearchAlgorithm; + bwd_result2 = search2::Find(args2, false, deterministic, ctx); + workspace_size = std::max( + workspace_size, search2::GetWorkspaceSize(args2, bwd_result2.algo)); +#endif + } + + if (dfilter) { + dfilter_ = dfilter->data(); + args3.handle = handle; + args3.idesc.set(transformed_dout, iwo_group); + args3.wdesc.set(*dfilter, layout, iwo_group); + args3.odesc.set(transformed_ddx_channel, iwo_group); + args3.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_group); +#ifdef PADDLE_WITH_HIP + using search3 = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3)); + filter_result.algo = + search3::Find(args3, false, deterministic, workspace_size, ctx); +#else + using search3 = + paddle::operators::SearchAlgorithm; + filter_result = search3::Find(args3, false, deterministic, ctx); + workspace_size = std::max( + workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo)); +#endif + } + + if (dx) { + transformed_dx_ = transformed_dx_channel.data(); + + args4.handle = handle; + args4.idesc.set(transformed_dout, iwo_group); + args4.wdesc.set(ddfilter, layout, iwo_group); + args4.odesc.set(transformed_dx_channel, iwo_group); + args4.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_group); +#ifdef PADDLE_WITH_HIP + using search4 = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4)); + fwd_result.algo = + search4::Find(args4, false, deterministic, workspace_size, ctx); +#else + using search4 = + paddle::operators::SearchAlgorithm; + fwd_result = search4::Find(args4, false, deterministic, ctx); + workspace_size = std::max( + workspace_size, search4::GetWorkspaceSize(args4, fwd_result.algo)); +#endif + } + + int i_n, i_c, i_d, i_h, i_w; + paddle::operators::GetNCDHW(transformed_x.dims(), + GPUDNNDataLayout::kNCHW, + &i_n, + &i_c, + &i_d, + &i_h, + &i_w); + + int o_n, o_c, o_d, o_h, o_w; + paddle::operators::GetNCDHW(transformed_dout.dims(), + GPUDNNDataLayout::kNCHW, + &o_n, + &o_c, + &o_d, + &o_h, + &o_w); + + int group_offset_in = + transformed_x.numel() / transformed_x.dims()[0] / groups; + int group_offset_out = + transformed_dout.numel() / transformed_dout.dims()[0] / groups; + int group_offset_filter = filter.numel() / groups; + + paddle::operators::ScalingParamType alpha = 1.0f; + paddle::operators::ScalingParamType beta = 0.0f; + + auto wkspace_handle = ctx.cudnn_workspace_handle(); + + if (ddout) { + ddx_ = transformed_ddx.data(); + for (int i = 0; i < groups; i++) { +#ifdef PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData( + handle, + &alpha, + args1.odesc.desc(), + ddx_ + i * group_offset_in, + args1.wdesc.desc(), + filter_ + i * group_offset_filter, + args1.cdesc.desc(), + bwd_result1.algo, + &beta, + args1.idesc.desc(), + transformed_ddout_channel_ + i * group_offset_out, + workspace_ptr, + workspace_size)); + }, + workspace_size); +#else // 
PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData( + handle, + &alpha, + args1.wdesc.desc(), + filter_ + i * group_offset_filter, + args1.odesc.desc(), + ddx_ + i * group_offset_in, + args1.cdesc.desc(), + bwd_result1.algo, + workspace_ptr, + workspace_size, + &beta, + args1.idesc.desc(), + transformed_ddout_channel_ + i * group_offset_out)); + }, + workspace_size); +#endif // PADDLE_WITH_HIP + } + + for (int i = 0; i < groups; i++) { +#ifdef PADDLE_WITH_HIP + // MIOPEN ONLY support beta to be 0.0f + DenseTensor conv_x_ddfilter(dout.type()); + conv_x_ddfilter.Resize(transformed_ddout_channel.dims()); + T* conv_x_ddfilter_data = ctx.template Alloc(&conv_x_ddfilter); + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData( + handle, + &alpha, + args2.odesc.desc(), + x_ + i * group_offset_in, + args2.wdesc.desc(), + ddfilter_ + i * group_offset_filter, + args2.cdesc.desc(), + bwd_result2.algo, + &beta, + args2.idesc.desc(), + conv_x_ddfilter_data + i * group_offset_out, + workspace_ptr, + workspace_size)); + }, + workspace_size); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenOpTensor( + handle, + miopenTensorOpAdd, + &alpha, + args2.idesc.desc(), + transformed_ddout_channel_ + i * group_offset_out, + &alpha, + args2.idesc.desc(), + conv_x_ddfilter_data + i * group_offset_out, + &beta, + args2.idesc.desc(), + transformed_ddout_channel_ + i * group_offset_out)); +#else // PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData( + handle, + &alpha, + args2.wdesc.desc(), + ddfilter_ + i * group_offset_filter, + args2.odesc.desc(), + x_ + i * group_offset_in, + args2.cdesc.desc(), + bwd_result2.algo, + workspace_ptr, + workspace_size, + &alpha, + args2.idesc.desc(), + transformed_ddout_channel_ + i * group_offset_out)); + }, + workspace_size); +#endif // PADDLE_WITH_HIP + } + + if ((!is_sys_pad) && (!channel_last)) { + if (strides.size() == 2U) { + funcs::Slice( + ctx, &transformed_ddout_channel, ddout, starts, ends, axes); + } else if (!is_sys_pad && strides.size() == 3U) { + funcs::Slice( + ctx, &transformed_ddout_channel, ddout, starts, ends, axes); + } + } else if ((!is_sys_pad) && (channel_last)) { + if (strides.size() == 2U) { + funcs::Slice(ctx, + &transformed_ddout_channel, + &transformed_ddout_channel, + starts, + ends, + axes); + } else if (!is_sys_pad && strides.size() == 3U) { + funcs::Slice(ctx, + &transformed_ddout_channel, + &transformed_ddout_channel, + starts, + ends, + axes); + } + + TransToChannelLast(ctx, &transformed_ddout_channel, ddout); + } + } + + T* transformed_dout_channel_ = transformed_dout.data(); + if (dfilter) { + ddx_ = transformed_ddx_channel.data(); + for (int i = 0; i < groups; i++) { +#ifdef PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS( + dynload::miopenConvolutionBackwardWeights( + handle, + &alpha, + args3.odesc.desc(), + ddx_ + i * group_offset_in, + args3.idesc.desc(), + transformed_dout_channel_ + i * group_offset_out, + args3.cdesc.desc(), + filter_result.algo, + &beta, + args3.wdesc.desc(), + dfilter_ + i * group_offset_filter, + workspace_ptr, + workspace_size)); + }, + workspace_size); +#else // PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardFilter( + handle, + &alpha, + 
args3.idesc.desc(), + transformed_dout_channel_ + i * group_offset_out, + args3.odesc.desc(), + ddx_ + i * group_offset_in, + args3.cdesc.desc(), + filter_result.algo, + workspace_ptr, + workspace_size, + &beta, + args3.wdesc.desc(), + dfilter_ + i * group_offset_filter)); + }, + workspace_size); +#endif // PADDLE_WITH_HIP + } + } + + if (dx) { + ddfilter_ = ddfilter.data(); + for (int i = 0; i < groups; i++) { +#ifdef PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionForward( + handle, + &alpha, + args4.idesc.desc(), + transformed_dout_channel_ + i * group_offset_out, + args4.wdesc.desc(), + ddfilter_ + i * group_offset_filter, + args4.cdesc.desc(), + fwd_result.algo, + &beta, + args4.odesc.desc(), + transformed_dx_ + i * group_offset_in, + workspace_ptr, + workspace_size)); + }, + workspace_size); +#else // PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionForward( + handle, + &alpha, + args4.idesc.desc(), + transformed_dout_channel_ + i * group_offset_out, + args4.wdesc.desc(), + ddfilter_ + i * group_offset_filter, + args4.cdesc.desc(), + fwd_result.algo, + workspace_ptr, + workspace_size, + &beta, + args4.odesc.desc(), + transformed_dx_ + i * group_offset_in)); + }, + workspace_size); +#endif // PADDLE_WITH_HIP + } + if (channel_last) { + TransToChannelLast(ctx, &transformed_dx_channel, dx); + } + } +} + +template +void Conv3dTransposeGradGPUDNNKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings_, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations_, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + ConvTransposeGradRawGPUDNNKernel(ctx, + x, + filter, + dout, + strides, + paddings_, + padding_algorithm, + groups, + dilations_, + data_format, + dx, + dfilter); +} + +} // namespace phi + +using float16 = phi::dtype::float16; + +#ifdef PADDLE_WITH_HIP +// MIOPEN do not support double +PD_REGISTER_KERNEL(conv2d_transpose_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv2dTransposeGradGPUDNNKernel, + float, + float16) {} +PD_REGISTER_KERNEL(conv2d_transpose_grad_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv2dTransposeDoubleGradGPUDNNKernel, + float, + float16) {} +PD_REGISTER_KERNEL(conv3d_transpose_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv3dTransposeGradGPUDNNKernel, + float, + float16) {} +#else +PD_REGISTER_KERNEL(conv2d_transpose_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv2dTransposeGradGPUDNNKernel, + float, + double, + float16) {} +PD_REGISTER_KERNEL(conv2d_transpose_grad_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv2dTransposeDoubleGradGPUDNNKernel, + float, + double, + float16) {} +PD_REGISTER_KERNEL(conv3d_transpose_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv3dTransposeGradGPUDNNKernel, + float, + double, + float16) {} +#endif diff --git a/cuda_code/convert_durations_5.cu b/cuda_code/convert_durations_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..66e6f31cca278242edd6e2434c01440e82b0f3ad --- /dev/null +++ b/cuda_code/convert_durations_5.cu @@ -0,0 +1,757 @@ +/* + * Copyright (c) 2020-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +namespace cudf { +namespace strings { +namespace detail { + +namespace { + +// duration components timeparts structure +struct alignas(4) duration_component { + int32_t day; //-2,147,483,648 to 2,147,483,647 + int32_t subsecond; // 000000000 to 999999999 + int8_t hour; // 00 to 23 + int8_t minute; // 00 to 59 + int8_t second; // 00 to 59 + bool is_negative; // true/false +}; + +enum class format_char_type : int8_t { + literal, // literal char type passed through + specifier // duration format specifier +}; + +/** + * @brief Represents a format specifier or literal from a duration format string. + * + * Created by the format_compiler when parsing a format string. + */ +struct alignas(4) format_item { + format_char_type item_type; // specifier or literal indicator + char value; // specifier or literal value + int8_t length; // item length in bytes + + static format_item new_specifier(char format_char, int8_t length) + { + return format_item{format_char_type::specifier, format_char, length}; + } + static format_item new_delimiter(char literal) + { + return format_item{format_char_type::literal, literal, 1}; + } +}; + +/** + * @brief The format_compiler parses a duration format string into a vector of + * format_items. + * + * The vector of format_items are used when parsing a string into duration + * components and when formatting a string from duration components. 
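+ *
+ * As an illustrative example (not part of the original comment): compiling the
+ * format string "%D days %H:%M:%S" should yield the items
+ *   specifier('-'), specifier('D'),
+ *   literal(' '), literal('d'), literal('a'), literal('y'), literal('s'), literal(' '),
+ *   specifier('H'), literal(':'), specifier('M'), literal(':'), specifier('S')
+ * where the leading '-' item is injected automatically before the first numeric
+ * specifier so that a negative duration emits its sign exactly once.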
+ */ +struct format_compiler { + std::string format; + rmm::device_uvector d_items; + format_compiler(const char* format_, rmm::cuda_stream_view stream) + : format(format_), d_items(0, stream) + { + static std::map const specifier_lengths = { + {'-', -1}, // '-' if negative + {'D', -1}, // 1 to 11 (not in std::format) + {'H', 2}, // HH + {'I', 2}, // HH + {'M', 2}, // MM + {'S', -1}, // 2 to 13 SS[.mmm][uuu][nnn] (uuu,nnn are not in std::format) + {'p', 2}, // AM/PM + {'R', 5}, // 5 HH:MM + {'T', 8}, // 8 HH:MM:SS" + {'r', 11} // HH:MM:SS AM/PM + }; + std::vector items; + const char* str = format.c_str(); + auto length = format.length(); + bool negative_sign{true}; + while (length > 0) { + char ch = *str++; + length--; + if (ch != '%') { + items.push_back(format_item::new_delimiter(ch)); + continue; + } + CUDF_EXPECTS(length > 0, "Unfinished specifier in duration format"); + + ch = *str++; + length--; + if (ch == '%') // escaped % char + { + items.push_back(format_item::new_delimiter(ch)); + continue; + } else if (ch == 'n') { + items.push_back(format_item::new_delimiter('\n')); + continue; + } else if (ch == 't') { + items.push_back(format_item::new_delimiter('\t')); + continue; + } + if (ch == 'O') { + CUDF_EXPECTS(*str == 'H' || *str == 'I' || *str == 'M' || *str == 'S', + "locale's alternative representation not supported for specifier: " + + std::string(1, *str)); + ch = *str++; + length--; + items.push_back(format_item::new_specifier(ch, 2)); // without sign + continue; + } + CUDF_EXPECTS(specifier_lengths.find(ch) != specifier_lengths.end(), + "invalid format specifier: " + std::string(1, ch)); + + // negative sign should be present only once. + if (negative_sign) { + if (std::string("DHIMSRT").find_first_of(ch) != std::string::npos) { + items.push_back(format_item::new_specifier('-', specifier_lengths.at('-'))); + negative_sign = false; + } + } + + int8_t spec_length = specifier_lengths.at(ch); + items.push_back(format_item::new_specifier(ch, spec_length)); + } + + // create program in device memory + d_items.resize(items.size(), stream); + CUDA_TRY(cudaMemcpyAsync(d_items.data(), + items.data(), + items.size() * sizeof(items[0]), + cudaMemcpyHostToDevice, + stream.value())); + } + + format_item const* compiled_format_items() { return d_items.data(); } + + [[nodiscard]] size_type items_count() const { return static_cast(d_items.size()); } +}; + +template +__device__ void dissect_duration(T duration, duration_component* timeparts) +{ + timeparts->is_negative = (duration < T{0}); + timeparts->day = cuda::std::chrono::duration_cast(duration).count(); + + if (cuda::std::is_same_v) return; + + duration_s seconds = cuda::std::chrono::duration_cast(duration); + timeparts->hour = + (cuda::std::chrono::duration_cast(seconds) % duration_D(1)).count(); + timeparts->minute = (cuda::std::chrono::duration_cast(seconds) % + cuda::std::chrono::hours(1)) + .count(); + timeparts->second = (seconds % cuda::std::chrono::minutes(1)).count(); + if (not cuda::std::is_same_v) { + timeparts->subsecond = (duration % duration_s(1)).count(); + } +} + +template +struct duration_to_string_size_fn { + const column_device_view d_durations; + const format_item* d_format_items; + size_type items_count; + + __device__ int8_t format_length(char format_char, duration_component const* const timeparts) const + { + switch (format_char) { + case '-': return timeparts->is_negative; break; + case 'D': return count_digits(timeparts->day) - (timeparts->day < 0); break; + case 'S': + return 2 + (timeparts->subsecond == 0 ? 
0 : [] { + if (cuda::std::is_same_v) return 3 + 1; // +1 is for dot + if (cuda::std::is_same_v) return 6 + 1; // +1 is for dot + if (cuda::std::is_same_v) return 9 + 1; // +1 is for dot + return 0; + }()); + break; + default: return 2; + } + } + + __device__ size_type operator()(size_type idx) + { + if (d_durations.is_null(idx)) return 0; + auto duration = d_durations.element(idx); + duration_component timeparts = {0}; // days, hours, minutes, seconds, subseconds(9) + dissect_duration(duration, &timeparts); + return thrust::transform_reduce( + thrust::seq, + d_format_items, + d_format_items + items_count, + [this, &timeparts] __device__(format_item item) -> size_type { + if (item.item_type == format_char_type::literal) + return 1; + else if (item.length != -1) + return item.length; + else + return format_length(item.value, &timeparts); + }, + size_type{0}, + thrust::plus()); + } +}; + +template +struct duration_to_string_fn : public duration_to_string_size_fn { + const int32_t* d_offsets; + char* d_chars; + using duration_to_string_size_fn::d_durations; + using duration_to_string_size_fn::d_format_items; + using duration_to_string_size_fn::items_count; + + duration_to_string_fn(const column_device_view d_durations, + const format_item* d_format_items, + size_type items_count, + const int32_t* d_offsets, + char* d_chars) + : duration_to_string_size_fn{d_durations, d_format_items, items_count}, + d_offsets(d_offsets), + d_chars(d_chars) + { + } + + // utility to create (optionally) 0-padded integers (up to 10 chars) without negative sign. + // min_digits==-1 indicates no 0-padding. + __device__ char* int2str(char* str, int min_digits, int32_t value) + { + constexpr int MAX_DIGITS = 10; // largest 32-bit integer is 10 digits + assert(min_digits <= MAX_DIGITS); + if (value == 0) { + do { + *str++ = '0'; + } while (--min_digits > 0); + return str; + } + + char digits[MAX_DIGITS] = {'0', '0', '0', '0', '0', '0', '0', '0', '0', '0'}; + int digits_idx = 0; + while (value != 0) { + assert(digits_idx < MAX_DIGITS); + digits[digits_idx++] = '0' + std::abs(value % 10); + // next digit + value = value / 10; + } + digits_idx = std::max(digits_idx, min_digits); + // digits are backwards, reverse the string into the output + while (digits_idx-- > 0) + *str++ = digits[digits_idx]; + return str; + } + + __device__ char* int_to_2digitstr(char* str, int8_t value) + { + assert(value >= -99 && value <= 99); + value = std::abs(value); + str[0] = '0' + value / 10; + str[1] = '0' + value % 10; + return str + 2; + } + + inline __device__ char* day(char* ptr, duration_component const* timeparts) + { + return int2str(ptr, -1, timeparts->day); + } + + inline __device__ char* hour_12(char* ptr, duration_component const* timeparts) + { + return int_to_2digitstr(ptr, timeparts->hour % 12); + } + inline __device__ char* hour_24(char* ptr, duration_component const* timeparts) + { + return int_to_2digitstr(ptr, timeparts->hour); + } + inline __device__ char* am_or_pm(char* ptr, duration_component const* timeparts) + { + *ptr++ = (timeparts->hour / 12 == 0 ? 
'A' : 'P'); + *ptr++ = 'M'; + return ptr; + } + inline __device__ char* minute(char* ptr, duration_component const* timeparts) + { + return int_to_2digitstr(ptr, timeparts->minute); + } + inline __device__ char* second(char* ptr, duration_component const* timeparts) + { + return int_to_2digitstr(ptr, timeparts->second); + } + + inline __device__ char* subsecond(char* ptr, duration_component const* timeparts) + { + if (timeparts->subsecond == 0) return ptr; + const int digits = duration_to_string_size_fn::format_length('S', timeparts) - 3; + *ptr = '.'; + auto value = timeparts->subsecond; + for (int idx = digits; idx > 0; idx--) { + *(ptr + idx) = '0' + std::abs(value % 10); + value /= 10; + } + return ptr + digits + 1; + } + + __device__ char* format_from_parts(duration_component const* timeparts, char* ptr) + { + for (size_t idx = 0; idx < items_count; ++idx) { + auto item = d_format_items[idx]; + if (item.item_type == format_char_type::literal) { + *ptr++ = item.value; + continue; + } + // special logic for each specifier + switch (item.value) { + case 'D': // days + ptr = day(ptr, timeparts); + break; + case '-': // - if value is negative + if (timeparts->is_negative) *ptr++ = '-'; + break; + case 'H': // 24-hour + ptr = hour_24(ptr, timeparts); + break; + case 'I': // 12-hour + ptr = hour_12(ptr, timeparts); + break; + case 'M': // minute + ptr = minute(ptr, timeparts); + break; + case 'S': // second + ptr = second(ptr, timeparts); + if (item.length == 2) break; + case 'f': // sub-second + ptr = subsecond(ptr, timeparts); + break; + case 'p': ptr = am_or_pm(ptr, timeparts); break; + case 'R': // HH:MM 24-hour + ptr = hour_24(ptr, timeparts); + *ptr++ = ':'; + ptr = minute(ptr, timeparts); + break; + case 'T': // HH:MM:SS 24-hour + ptr = hour_24(ptr, timeparts); + *ptr++ = ':'; + ptr = minute(ptr, timeparts); + *ptr++ = ':'; + ptr = second(ptr, timeparts); + break; + case 'r': // HH:MM:SS AM/PM 12-hour + ptr = hour_12(ptr, timeparts); + *ptr++ = ':'; + ptr = minute(ptr, timeparts); + *ptr++ = ':'; + ptr = second(ptr, timeparts); + *ptr++ = ' '; + ptr = am_or_pm(ptr, timeparts); + break; + default: // ignore everything else + break; + } + } + return ptr; + } + + __device__ void operator()(size_type idx) + { + if (d_durations.is_null(idx)) return; + auto duration = d_durations.template element(idx); + duration_component timeparts = {0}; // days, hours, minutes, seconds, subseconds(9) + dissect_duration(duration, &timeparts); + // convert to characters + format_from_parts(&timeparts, d_chars + d_offsets[idx]); + } +}; + +/** + * @brief This dispatch method is for converting durations into strings. + * + * The template function declaration ensures only duration types are used. 
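+ *
+ * For example, a duration_s element holding -7198 seconds rendered with the
+ * format "%H:%M:%S" produces the string "-01:59:58"; the sign is emitted only
+ * once, via the '-' specifier that the format compiler places before the
+ * first numeric field.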
+ */ +struct dispatch_from_durations_fn { + template ()>* = nullptr> + std::unique_ptr operator()(column_view const& durations, + std::string const& format, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) const + { + CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty."); + + format_compiler compiler(format.c_str(), stream); + auto d_format_items = compiler.compiled_format_items(); + + size_type strings_count = durations.size(); + auto column = column_device_view::create(durations, stream); + auto d_column = *column; + + // copy null mask + rmm::device_buffer null_mask = cudf::detail::copy_bitmask(durations, stream, mr); + // build offsets column + auto offsets_transformer_itr = thrust::make_transform_iterator( + thrust::make_counting_iterator(0), + duration_to_string_size_fn{d_column, d_format_items, compiler.items_count()}); + auto offsets_column = detail::make_offsets_child_column( + offsets_transformer_itr, offsets_transformer_itr + strings_count, stream, mr); + auto offsets_view = offsets_column->view(); + auto d_new_offsets = offsets_view.template data(); + + // build chars column + auto const chars_bytes = + cudf::detail::get_value(offsets_column->view(), strings_count, stream); + auto chars_column = detail::create_chars_child_column(chars_bytes, stream, mr); + auto d_chars = chars_column->mutable_view().template data(); + + thrust::for_each_n(rmm::exec_policy(stream), + thrust::make_counting_iterator(0), + strings_count, + duration_to_string_fn{ + d_column, d_format_items, compiler.items_count(), d_new_offsets, d_chars}); + + return make_strings_column(strings_count, + std::move(offsets_column), + std::move(chars_column), + durations.null_count(), + std::move(null_mask)); + } + + // non-duration types throw an exception + template + std::enable_if_t(), std::unique_ptr> operator()(Args&&...) const + { + CUDF_FAIL("Values for from_durations function must be a duration type."); + } +}; + +static const __device__ __constant__ int32_t powers_of_ten[10] = { + 1L, 10L, 100L, 1000L, 10000L, 100000L, 1000000L, 10000000L, 100000000L, 1000000000L}; + +// this parses duration string into a duration integer +template // duration type +struct parse_duration { + column_device_view const d_strings; + format_item const* d_format_items; + size_type items_count; + + // function to parse string (maximum 10 digits) to integer. + __device__ int32_t str2int(const char* str, int8_t max_bytes, int8_t& actual_length) + { + const char* ptr = (*str == '-' || *str == '+') ? str + 1 : str; + int32_t value = 0; + for (int8_t idx = 0; idx < max_bytes; ++idx) { + char chr = *ptr++; + if (chr < '0' || chr > '9') { + ptr--; // roll back + break; + } + value = (value * 10) + static_cast(chr - '0'); + } + actual_length += (ptr - str); + return (*str == '-') ? -value : value; + } + + // function to parse fraction of decimal value with trailing zeros removed. + __device__ int32_t str2int_fixed(const char* str, + int8_t fixed_width, + size_type string_length, + int8_t& actual_length) + { + const char* ptr = (*str == '.') ? str + 1 : str; + int32_t value = 0; + // parse till fixed_width or end of string. 
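+  // e.g. for the nanosecond case fixed_width is 9: ".5" parses a single
+  // digit and is scaled by powers_of_ten[8] below to 500000000, while
+  // ".123456789" is consumed digit for digit and needs no scaling.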
+ for (int8_t idx = 0; idx < fixed_width && idx < string_length; ++idx) { + char chr = *ptr++; + if (chr < '0' || chr > '9') { + ptr--; // roll back + break; + } + value = (value * 10) + static_cast(chr - '0'); + } + auto parsed_length = ptr - str; + // compensate for missing trailing zeros + if (parsed_length < fixed_width) value *= powers_of_ten[fixed_width - parsed_length]; + actual_length += parsed_length; + return value; + } + + // parse 2 digit string to integer + __device__ int8_t parse_2digit_int(const char* str, int8_t& actual_length) + { + const char* ptr = (*str == '-' || *str == '+') ? str + 1 : str; + int8_t value = 0; + if (*ptr >= '0' && *ptr <= '9') value = (value * 10) + static_cast(*ptr++ - '0'); + if (*ptr >= '0' && *ptr <= '9') value = (value * 10) + static_cast(*ptr++ - '0'); + actual_length += (ptr - str); + return (*str == '-') ? -value : value; + } + inline __device__ int8_t parse_hour(const char* str, int8_t& actual_length) + { + return parse_2digit_int(str, actual_length); + } + inline __device__ int8_t parse_minute(const char* str, int8_t& actual_length) + { + return parse_2digit_int(str, actual_length); + } + inline __device__ int8_t parse_second(const char* str, int8_t& actual_length) + { + return parse_2digit_int(str, actual_length); + } + + // Walk the format_items to read the datetime string. + // Returns 0 if all ok. + __device__ int parse_into_parts(string_view const& d_string, duration_component* timeparts) + { + auto ptr = d_string.data(); + auto length = d_string.size_bytes(); + int8_t hour_shift{0}; + for (size_type idx = 0; idx < items_count; ++idx) { + auto item = d_format_items[idx]; + if (length < item.length) return 1; + if (item.item_type == format_char_type::literal) { // static character we'll just skip; + // consume item.length bytes from string + ptr += item.length; + length -= item.length; + continue; + } + timeparts->is_negative |= (*ptr == '-'); + + // special logic for each specifier + int8_t item_length{0}; + switch (item.value) { + case 'D': // day + timeparts->day = str2int(ptr, 11, item_length); + break; + case '-': // skip + item_length = (*ptr == '-'); + break; + case 'H': // 24-hour + timeparts->hour = parse_hour(ptr, item_length); + hour_shift = 0; + break; + case 'I': // 12-hour + timeparts->hour = parse_hour(ptr, item_length); + break; + case 'M': // minute + timeparts->minute = parse_minute(ptr, item_length); + break; + case 'S': // [-]SS[.mmm][uuu][nnn] + timeparts->second = parse_second(ptr, item_length); + if ((item_length < length) && *(ptr + item_length) == '.') { + item_length++; + int64_t nanoseconds = str2int_fixed( + ptr + item_length, 9, length - item_length, item_length); // normalize to nanoseconds + timeparts->subsecond = nanoseconds; + } + break; + case 'p': // AM/PM + if (*ptr == 'P' && *(ptr + 1) == 'M') + hour_shift = 12; + else + hour_shift = 0; + item_length = 2; + break; + case 'R': // [-]HH:SS + timeparts->hour = parse_hour(ptr, item_length); + hour_shift = 0; + item_length++; // : + timeparts->minute = parse_minute(ptr + item_length, item_length); + break; + case 'T': // [-]HH:MM:SS + timeparts->hour = parse_hour(ptr, item_length); + hour_shift = 0; + item_length++; // : + timeparts->minute = parse_minute(ptr + item_length, item_length); + item_length++; // : + timeparts->second = parse_second(ptr + item_length, item_length); + break; + case 'r': // hh:MM:SS AM/PM + timeparts->hour = parse_hour(ptr, item_length); + item_length++; // : + timeparts->minute = parse_minute(ptr + item_length, item_length); + 
item_length++; // : + timeparts->second = parse_second(ptr + item_length, item_length); + item_length++; // space + if (*(ptr + item_length) == 'P' && *(ptr + item_length + 1) == 'M') + hour_shift = 12; + else + hour_shift = 0; + item_length += 2; + break; + default: return 3; + } + ptr += item_length; + length -= item_length; + } + // negate all if duration has negative sign + if (timeparts->is_negative) { + auto negate = [](auto i) { return (i < 0 ? i : -i); }; + timeparts->day = negate(timeparts->day); + timeparts->hour = negate(timeparts->hour); + timeparts->minute = negate(timeparts->minute); + timeparts->second = negate(timeparts->second); + timeparts->subsecond = negate(timeparts->subsecond); + hour_shift = -hour_shift; + } + timeparts->hour += hour_shift; + return 0; + } + + inline __device__ int64_t duration_from_parts(duration_component const* timeparts) + { + int32_t days = timeparts->day; + auto hour = timeparts->hour; + auto minute = timeparts->minute; + auto second = timeparts->second; + auto duration = duration_D(days) + cuda::std::chrono::hours(hour) + + cuda::std::chrono::minutes(minute) + duration_s(second); + if (cuda::std::is_same_v) + return cuda::std::chrono::duration_cast(duration).count(); + else if (cuda::std::is_same_v) + return cuda::std::chrono::duration_cast(duration).count(); + + duration_ns subsecond(timeparts->subsecond); // ns + if (cuda::std::is_same_v) { + return cuda::std::chrono::duration_cast(duration + subsecond).count(); + } else if (cuda::std::is_same_v) { + return cuda::std::chrono::duration_cast(duration + subsecond).count(); + } else if (cuda::std::is_same_v) + return cuda::std::chrono::duration_cast(duration + subsecond).count(); + return cuda::std::chrono::duration_cast(duration + subsecond).count(); + } + + __device__ T operator()(size_type idx) + { + if (d_strings.is_null(idx)) return T{0}; + string_view d_str = d_strings.element(idx); + if (d_str.empty()) return T{0}; + // + duration_component timeparts = {0}; + if (parse_into_parts(d_str, &timeparts)) return T{0}; // unexpected parse case + // + return static_cast(duration_from_parts(&timeparts)); + } +}; + +/** + * @brief This dispatch method is for converting strings to durations. + * + * The template function declaration ensures only duration types are used. 
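+ *
+ * For example, parsing the string "-01:59:58" with the format "%H:%M:%S" into
+ * a duration_s column yields -7198 seconds: each component is parsed as a
+ * positive value and then negated because a leading '-' was seen.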
+ */ +struct dispatch_to_durations_fn { + template ()>* = nullptr> + void operator()(column_device_view const& d_strings, + std::string const& format, + mutable_column_view& results_view, + rmm::cuda_stream_view stream) const + { + format_compiler compiler(format.c_str(), stream); + auto d_items = compiler.compiled_format_items(); + auto d_results = results_view.data(); + parse_duration pfn{d_strings, d_items, compiler.items_count()}; + thrust::transform(rmm::exec_policy(stream), + thrust::make_counting_iterator(0), + thrust::make_counting_iterator(results_view.size()), + d_results, + pfn); + } + template ()>* = nullptr> + void operator()(column_device_view const&, + std::string const&, + mutable_column_view&, + rmm::cuda_stream_view) const + { + CUDF_FAIL("Only durations type are expected for to_durations function"); + } +}; + +} // namespace + +std::unique_ptr from_durations(column_view const& durations, + std::string const& format, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + size_type strings_count = durations.size(); + if (strings_count == 0) return make_empty_column(type_id::STRING); + + return type_dispatcher( + durations.type(), dispatch_from_durations_fn{}, durations, format, stream, mr); +} + +std::unique_ptr to_durations(strings_column_view const& strings, + data_type duration_type, + std::string const& format, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + size_type strings_count = strings.size(); + if (strings_count == 0) return make_duration_column(duration_type, 0); + + CUDF_EXPECTS(!format.empty(), "Format parameter must not be empty."); + + auto strings_column = column_device_view::create(strings.parent(), stream); + auto d_column = *strings_column; + + auto results = make_duration_column(duration_type, + strings_count, + cudf::detail::copy_bitmask(strings.parent(), stream, mr), + strings.null_count(), + stream, + mr); + auto results_view = results->mutable_view(); + cudf::type_dispatcher( + duration_type, dispatch_to_durations_fn(), d_column, format, results_view, stream); + results->set_null_count(strings.null_count()); + return results; +} + +} // namespace detail + +std::unique_ptr from_durations(column_view const& durations, + std::string const& format, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::from_durations(durations, format, rmm::cuda_stream_default, mr); +} + +std::unique_ptr to_durations(strings_column_view const& strings, + data_type duration_type, + std::string const& format, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::to_durations(strings, duration_type, format, rmm::cuda_stream_default, mr); +} + +} // namespace strings +} // namespace cudf diff --git a/cuda_code/convert_urls_8.cu b/cuda_code/convert_urls_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..b0d67a737dc46fb84e3bcb0b58dde50249eb5e80 --- /dev/null +++ b/cuda_code/convert_urls_8.cu @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2019-2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace cudf +{ +namespace strings +{ +namespace detail +{ +namespace +{ + +// +// This is the functor for the url_encode() method below. +// Specific requirements are documented in custrings issue #321. +// In summary it converts mostly non-ascii characters and control characters into UTF-8 hex characters +// prefixed with '%'. For example, the space character must be converted to characters '%20' where the +// '20' indicates the hex value for space in UTF-8. Likewise, multi-byte characters are converted to +// multiple hex charactes. For example, the é character is converted to characters '%C3%A9' where 'C3A9' +// is the UTF-8 bytes xc3a9 for this character. +// +struct url_encoder_fn +{ + column_device_view const d_strings; + int32_t const* d_offsets{}; + char* d_chars{}; + + // utility to create 2-byte hex characters from single binary byte + __device__ void byte_to_hex( uint8_t byte, char* hex ) + { + hex[0] = '0'; + if( byte >= 16 ) + { + uint8_t hibyte = byte/16; + hex[0] = hibyte < 10 ? '0'+hibyte : 'A'+(hibyte-10); + byte = byte - (hibyte * 16); + } + hex[1] = byte < 10 ? '0'+byte : 'A'+(byte-10); + } + + __device__ bool should_not_url_encode( char ch ) + { + return ( (ch>='0' && ch<='9') || // these are the characters + (ch>='A' && ch<='Z') || // that are not to be url encoded + (ch>='a' && ch<='z') || // reference: docs.python.org/3/library/urllib.parse.html#urllib.parse.quote + (ch=='.') || (ch=='_') || (ch=='~') || (ch=='-') ); + } + + // main part of the functor the performs the url-encoding + __device__ size_type operator()( size_type idx ) + { + if( d_strings.is_null(idx) ) + return 0; + string_view d_str = d_strings.element(idx); + // + char* out_ptr = d_chars ? 
d_chars + d_offsets[idx] : nullptr; + size_type nbytes = 0; + char hex[2]; // two-byte hex max + for( auto itr = d_str.begin(); itr!=d_str.end(); ++itr ) + { + auto ch = *itr; + if( ch < 128 ) + { + if( should_not_url_encode( static_cast(ch) ) ) + { + nbytes++; + if( out_ptr ) + out_ptr = copy_and_increment( out_ptr, d_str.data() + itr.byte_offset(), 1); + } + else // url-encode everything else + { + nbytes += 3; + if( out_ptr ) + { + out_ptr = copy_and_increment(out_ptr,"%",1); // add the '%' prefix + byte_to_hex( static_cast(ch), hex); // convert to 2 hex chars + out_ptr = copy_and_increment(out_ptr,hex,2); // add them to the output + } + } + } + else // these are to be utf-8 url-encoded + { + uint8_t char_bytes[4]; // holds utf-8 bytes for one character + size_type char_width = from_char_utf8(ch, reinterpret_cast(char_bytes)); + nbytes += char_width * 3; // '%' plus 2 hex chars per byte (example: é is %C3%A9) + // process each byte in this current character + for( size_type chidx=0; out_ptr && (chidx < char_width); ++chidx ) + { + out_ptr = copy_and_increment(out_ptr,"%",1); // add '%' prefix + byte_to_hex( char_bytes[chidx], hex); // convert to 2 hex chars + out_ptr = copy_and_increment(out_ptr,hex,2); // add them to the output + } + } + } + return nbytes; + } +}; + +} // namespace + +// +std::unique_ptr url_encode( strings_column_view const& strings, + rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), + cudaStream_t stream = 0) +{ + size_type strings_count = strings.size(); + if( strings_count == 0 ) + return make_empty_strings_column(mr,stream); + + auto strings_column = column_device_view::create(strings.parent(), stream); + auto d_strings = *strings_column; + + // copy null mask + rmm::device_buffer null_mask = copy_bitmask(strings.parent(),stream,mr); + // build offsets column + auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator(0), + url_encoder_fn{d_strings} ); + auto offsets_column = make_offsets_child_column(offsets_transformer_itr, + offsets_transformer_itr+strings_count, + mr, stream); + auto d_offsets = offsets_column->view().data(); + // build chars column + auto chars_column = create_chars_child_column( strings_count, strings.null_count(), + thrust::device_pointer_cast(d_offsets)[strings_count], + mr, stream ); + auto d_chars = chars_column->mutable_view().data(); + thrust::for_each_n(rmm::exec_policy(stream)->on(stream), + thrust::make_counting_iterator(0), strings_count, + url_encoder_fn{d_strings,d_offsets,d_chars}); + return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), + strings.null_count(), std::move(null_mask), stream, mr); +} + +} // namespace detail + +// external API +std::unique_ptr url_encode( strings_column_view const& strings, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::url_encode(strings,mr); +} + +namespace detail +{ +namespace +{ + +// +// This is the functor for the url_decode() method below. +// Specific requirements are documented in custrings issue #321. +// In summary it converts all character sequences starting with '%' into bytes +// interpretting the following 2 characters as hex values to create the output byte. +// For example, the sequence '%20' is converted into byte (0x20) which is a single +// space character. Another example converts '%C3%A9' into 2 sequential bytes +// (0xc3 and 0xa9 respectively). 
Overall, 3 characters are converted into one byte +// whenever a '%' character is encountered in the string. +// +struct url_decoder_fn +{ + column_device_view const d_strings; + int32_t const* d_offsets{}; + char* d_chars{}; + + // utility to convert a hex char into a single byte + __device__ uint8_t hex_char_to_byte( char ch ) + { + if( ch >= '0' && ch <= '9' ) + return (ch-'0'); + if( ch >= 'A' && ch <= 'F' ) + return (ch-'A'+10); // in hex A=10,B=11,...,F=15 + if( ch >='a' && ch <= 'f' ) + return (ch-'a'+10); // same for lower case + return 0; + } + + // main functor method executed on each string + __device__ size_type operator()(size_type idx) + { + if( d_strings.is_null(idx) ) + return 0; + string_view d_str = d_strings.element(idx); + char* out_ptr = d_chars ? out_ptr = d_chars + d_offsets[idx] : nullptr; + size_type nbytes = 0; + const char* in_ptr = d_str.data(); + const char* end = in_ptr + d_str.size_bytes(); + while( in_ptr < end ) // walk through each byte + { + char ch = *in_ptr++; + if( (ch == '%') && ((in_ptr+1) < end) ) + { // found '%', convert hex to byte + ch = static_cast(16 * hex_char_to_byte(*in_ptr++)); + ch += static_cast(hex_char_to_byte(*in_ptr++)); + } + ++nbytes; // keeping track of bytes and chars + if( out_ptr ) + out_ptr = copy_and_increment(out_ptr, &ch, 1); + } + return nbytes; + } +}; + +} + +// +std::unique_ptr url_decode( strings_column_view const& strings, + rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), + cudaStream_t stream = 0) +{ + size_type strings_count = strings.size(); + if( strings_count == 0 ) + return make_empty_strings_column(mr,stream); + + auto strings_column = column_device_view::create(strings.parent(), stream); + auto d_strings = *strings_column; + + // copy null mask + rmm::device_buffer null_mask = copy_bitmask(strings.parent(),stream,mr); + // build offsets column + auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator(0), + url_decoder_fn{d_strings} ); + auto offsets_column = make_offsets_child_column(offsets_transformer_itr, + offsets_transformer_itr+strings_count, + mr, stream); + auto d_offsets = offsets_column->view().data(); + + // build chars column + auto chars_column = create_chars_child_column( strings_count, strings.null_count(), + thrust::device_pointer_cast(d_offsets)[strings_count], + mr, stream ); + auto d_chars = chars_column->mutable_view().data(); + thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), strings_count, + url_decoder_fn{d_strings,d_offsets,d_chars}); + // + return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), + strings.null_count(), std::move(null_mask), stream, mr); +} + +} // namespace detail + +// external API + +std::unique_ptr url_decode( strings_column_view const& strings, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::url_decode(strings,mr); +} + +} // namespace strings +} // namespace cudf diff --git a/cuda_code/convertto.cu b/cuda_code/convertto.cu new file mode 100644 index 0000000000000000000000000000000000000000..2c2c32301f8a1506c5efb1777ab17e5218562ce1 --- /dev/null +++ b/cuda_code/convertto.cu @@ -0,0 +1,520 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. 
The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +#include "ppl/cv/cuda/convertto.h" + +#include +#include + +#include "utility/utility.hpp" + +using namespace ppl::common; + +namespace ppl { +namespace cv { +namespace cuda { + +template +__global__ +void convertToKernel0(const Tsrc* src, int rows, int cols, int src_stride, + Tdst* dst, int dst_stride, float alpha, float beta) { + int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 2; + int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y; + if (element_y >= rows || element_x >= cols) { + return; + } + + const Tsrc* input = (Tsrc*)((uchar*)src + element_y * src_stride); + float value0, value1, value2, value3; + if (element_x < cols - 3) { + value0 = input[element_x]; + value1 = input[element_x + 1]; + value2 = input[element_x + 2]; + value3 = input[element_x + 3]; + } + else { + value0 = input[element_x]; + if (element_x < cols - 1) { + value1 = input[element_x + 1]; + } + if (element_x < cols - 2) { + value2 = input[element_x + 2]; + } + } + + value0 *= alpha; + value1 *= alpha; + value2 *= alpha; + value3 *= alpha; + + value0 += beta; + value1 += beta; + value2 += beta; + value3 += beta; + + Tdst* output = (Tdst*)((uchar*)dst + element_y * dst_stride); + if (sizeof(Tdst) == 1) { + if (element_x < cols - 3) { + output[element_x] = saturateCast(value0); + output[element_x + 1] = saturateCast(value1); + output[element_x + 2] = saturateCast(value2); + output[element_x + 3] = saturateCast(value3); + } + else { + output[element_x] = saturateCast(value0); + if (element_x < cols - 1) { + output[element_x + 1] = saturateCast(value1); + } + if (element_x < cols - 2) { + output[element_x + 2] = saturateCast(value2); + } + } + } + else { + if (element_x < cols - 3) { + output[element_x] = value0; + output[element_x + 1] = value1; + output[element_x + 2] = value2; + output[element_x + 3] = value3; + } + else { + output[element_x] = value0; + if (element_x < cols - 1) { + output[element_x + 1] = value1; + } + if (element_x < cols - 2) { + output[element_x + 2] = value2; + } + } + } +} + +template +__global__ +void convertToKernel1(const Tsrc* src, int rows, int cols, int src_stride, + Tdst* dst, int dst_stride, float alpha, float beta) { + int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1; + int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y; + if (element_y >= rows || element_x >= cols) { + return; + } + + const Tsrc* input = (Tsrc*)((uchar*)src + element_y * src_stride); + float value0, value1; + if (element_x < cols - 1) { + value0 = input[element_x]; + value1 = input[element_x + 1]; + } + else { + value0 = input[element_x]; + } + + value0 *= alpha; + value1 *= alpha; + + value0 += beta; + value1 += beta; + + Tdst* output = (Tdst*)((uchar*)dst + element_y * dst_stride); + if (sizeof(Tdst) == 1) { + if (element_x < cols - 1) { + output[element_x] = saturateCast(value0); + output[element_x + 1] = saturateCast(value1); + } + else { + output[element_x] = saturateCast(value0); 
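+      // right-edge tail (element_x == cols - 1): only one element remains in
+      // this row, so value1, which was never loaded above, is not stored.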
+ } + } + else { + if (element_x < cols - 1) { + output[element_x] = value0; + output[element_x + 1] = value1; + } + else { + output[element_x] = value0; + } + } +} + +RetCode convertTo(const uchar* src, int rows, int cols, int channels, + int src_stride, uchar* dst, int dst_stride, float alpha, + float beta, cudaStream_t stream) { + PPL_ASSERT(src != nullptr); + PPL_ASSERT(dst != nullptr); + PPL_ASSERT(rows >= 1 && cols >= 1); + PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); + PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar)); + PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar)); + + cudaError_t code; + if (std::fabs(alpha - 1.f) < FLT_EPSILON && std::fabs(beta) < FLT_EPSILON) { + if (src == dst) { + return RC_SUCCESS; + } + + if (src_stride == dst_stride) { + code = cudaMemcpy(dst, src, src_stride * rows, cudaMemcpyDeviceToDevice); + } + else { + code = cudaMemcpy2D(dst, dst_stride, src, src_stride, + cols * sizeof(uchar), rows, cudaMemcpyDeviceToDevice); + } + if (code != cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_MEMORY_ERROR; + } + + return RC_SUCCESS; + } + + int columns = cols * channels; + cols = divideUp(columns, 4, 2); + dim3 block, grid; + block.x = kBlockDimX0; + block.y = kBlockDimY0; + grid.x = divideUp(cols, kBlockDimX0, kBlockShiftX0); + grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0); + + convertToKernel0<<>>(src, rows, columns, + src_stride, dst, dst_stride, alpha, beta); + + code = cudaGetLastError(); + if (code != cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_RUNTIME_ERROR; + } + + return RC_SUCCESS; +} + +RetCode convertTo(const uchar* src, int rows, int cols, int channels, + int src_stride, float* dst, int dst_stride, float alpha, + float beta, cudaStream_t stream) { + PPL_ASSERT(src != nullptr); + PPL_ASSERT(dst != nullptr); + PPL_ASSERT(rows >= 1 && cols >= 1); + PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); + PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(uchar)); + PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float)); + + int columns = cols * channels; + cols = divideUp(columns, 2, 1); + dim3 block, grid; + block.x = kBlockDimX0; + block.y = kBlockDimY0; + grid.x = divideUp(cols, kBlockDimX0, kBlockShiftX0); + grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0); + + convertToKernel1<<>>(src, rows, columns, + src_stride, dst, dst_stride, alpha, beta); + + cudaError_t code = cudaGetLastError(); + if (code != cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_RUNTIME_ERROR; + } + + return RC_SUCCESS; +} + +RetCode convertTo(const float* src, int rows, int cols, int channels, + int src_stride, uchar* dst, int dst_stride, float alpha, + float beta, cudaStream_t stream) { + PPL_ASSERT(src != nullptr); + PPL_ASSERT(dst != nullptr); + PPL_ASSERT(rows >= 1 && cols >= 1); + PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); + PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float)); + PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(uchar)); + + int columns = cols * channels; + cols = divideUp(columns, 4, 2); + dim3 block, grid; + block.x = kBlockDimX0; + block.y = kBlockDimY0; + grid.x = divideUp(cols, kBlockDimX0, kBlockShiftX0); + grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0); + + convertToKernel0<<>>(src, rows, columns, + src_stride, dst, dst_stride, alpha, beta); + + cudaError_t code = cudaGetLastError(); + if (code != 
cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_RUNTIME_ERROR; + } + + return RC_SUCCESS; +} + +RetCode convertTo(const float* src, int rows, int cols, int channels, + int src_stride, float* dst, int dst_stride, float alpha, + float beta, cudaStream_t stream) { + PPL_ASSERT(src != nullptr); + PPL_ASSERT(dst != nullptr); + PPL_ASSERT(rows >= 1 && cols >= 1); + PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); + PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float)); + PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float)); + + cudaError_t code; + if (std::fabs(alpha - 1.f) < FLT_EPSILON && std::fabs(beta) < FLT_EPSILON) { + if (src == dst) { + return RC_SUCCESS; + } + + if (src_stride == dst_stride) { + code = cudaMemcpy(dst, src, src_stride * rows, cudaMemcpyDeviceToDevice); + } + else { + code = cudaMemcpy2D(dst, dst_stride, src, src_stride, + cols * sizeof(float), rows, cudaMemcpyDeviceToDevice); + } + if (code != cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_MEMORY_ERROR; + } + + return RC_SUCCESS; + } + + int columns = cols * channels; + cols = divideUp(columns, 2, 1); + dim3 block, grid; + block.x = kBlockDimX0; + block.y = kBlockDimY0; + grid.x = divideUp(cols, kBlockDimX0, kBlockShiftX0); + grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0); + + convertToKernel1<<>>(src, rows, columns, + src_stride, dst, dst_stride, alpha, beta); + + code = cudaGetLastError(); + if (code != cudaSuccess) { + LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); + return RC_DEVICE_RUNTIME_ERROR; + } + + return RC_SUCCESS; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const uchar* inData, + int outWidthStride, + uchar* outData, + float alpha, + float beta) { + RetCode code = convertTo(inData, height, width, 1, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const uchar* inData, + int outWidthStride, + uchar* outData, + float alpha, + float beta) { + RetCode code = convertTo(inData, height, width, 3, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const uchar* inData, + int outWidthStride, + uchar* outData, + float alpha, + float beta) { + RetCode code = convertTo(inData, height, width, 4, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const uchar* inData, + int outWidthStride, + float* outData, + float alpha, + float beta) { + outWidthStride *= sizeof(float); + RetCode code = convertTo(inData, height, width, 1, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const uchar* inData, + int outWidthStride, + float* outData, + float alpha, + float beta) { + outWidthStride *= sizeof(float); + RetCode code = convertTo(inData, height, width, 3, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const uchar* inData, + int outWidthStride, + 
float* outData, + float alpha, + float beta) { + outWidthStride *= sizeof(float); + RetCode code = convertTo(inData, height, width, 4, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const float* inData, + int outWidthStride, + uchar* outData, + float alpha, + float beta) { + inWidthStride *= sizeof(float); + RetCode code = convertTo(inData, height, width, 1, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const float* inData, + int outWidthStride, + uchar* outData, + float alpha, + float beta) { + inWidthStride *= sizeof(float); + RetCode code = convertTo(inData, height, width, 3, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const float* inData, + int outWidthStride, + uchar* outData, + float alpha, + float beta) { + inWidthStride *= sizeof(float); + RetCode code = convertTo(inData, height, width, 4, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const float* inData, + int outWidthStride, + float* outData, + float alpha, + float beta) { + inWidthStride *= sizeof(float); + outWidthStride *= sizeof(float); + RetCode code = convertTo(inData, height, width, 1, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const float* inData, + int outWidthStride, + float* outData, + float alpha, + float beta) { + inWidthStride *= sizeof(float); + outWidthStride *= sizeof(float); + RetCode code = convertTo(inData, height, width, 3, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +template <> +RetCode ConvertTo(cudaStream_t stream, + int height, + int width, + int inWidthStride, + const float* inData, + int outWidthStride, + float* outData, + float alpha, + float beta) { + inWidthStride *= sizeof(float); + outWidthStride *= sizeof(float); + RetCode code = convertTo(inData, height, width, 4, inWidthStride, outData, + outWidthStride, alpha, beta, stream); + + return code; +} + +} // namespace cuda +} // namespace cv +} // namespace ppl diff --git a/cuda_code/convolution_kernel_15.cu b/cuda_code/convolution_kernel_15.cu new file mode 100644 index 0000000000000000000000000000000000000000..93da65dc0f7d8c630d1348b6ed1c31c7372973f6 --- /dev/null +++ b/cuda_code/convolution_kernel_15.cu @@ -0,0 +1,226 @@ +/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/phi/backends/gpu/gpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/core/tensor_meta.h" +#include "paddle/phi/core/visit_type.h" +#include "paddle/phi/kernels/funcs/blas/blas.h" +#include "paddle/phi/kernels/funcs/scatter.cu.h" +#include "paddle/phi/kernels/sparse/convolution_kernel.h" +#include "paddle/phi/kernels/sparse/gpu/convolution.cu.h" + +namespace phi { +namespace sparse { + +template +void Conv3dGPUKernel(const GPUContext& dev_ctx, + const SparseCooTensor& x, + const DenseTensor& kernel, + const std::vector& paddings, + const std::vector& dilations, + const std::vector& strides, + const int groups, + const bool subm, + SparseCooTensor* out, + DenseTensor* rulebook) { + // update padding and dilation + // Currently, only support x.layout is NDHWC, groups = 1 + // if x.layout != NDHWC then transpose(x), transpose(weight) + const auto& x_dims = x.dims(); + const auto& kernel_dims = kernel.dims(); + int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2]; + DDim out_dims = {1, 1, 1, 1, 1}; + std::vector kernel_sizes(kernel_dims.size()); + for (int i = 0; i < kernel_dims.size(); i++) { + kernel_sizes[i] = kernel_dims[i]; + } + + std::vector subm_paddings(paddings), subm_strides(strides); + if (subm) { + // the out shape of subm_conv is same as input shape + // reset the padding=kernel_size/2 and strides=1 + phi::funcs::sparse::ResetSubmKernelSizeAndStrides( + kernel.dims(), &subm_paddings, &subm_strides); + } + + phi::funcs::sparse::GetOutShape( + x_dims, kernel_sizes, subm_paddings, dilations, subm_strides, &out_dims); + const int in_channels = kernel_dims[3]; + const int out_channels = kernel_dims[4]; + std::vector offsets(kernel_size + 1), h_counter(kernel_size); + + // Second algorithm: + // https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf + // 1. product rulebook + DenseTensorMeta counter_meta( + DataType::INT32, {kernel_size}, DataLayout::NCHW); + DenseTensorMeta offsets_meta( + DataType::INT32, {kernel_size}, DataLayout::NCHW); + DenseTensor counter_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta)); + DenseTensor offsets_per_kernel = phi::Empty(dev_ctx, std::move(offsets_meta)); + DenseTensorMeta index_meta(DataType::INT32, {1}, DataLayout::NCHW); + DenseTensor out_index = phi::Empty(dev_ctx, std::move(index_meta)); + DenseTensor unique_value = phi::Empty(dev_ctx, std::move(index_meta)); + + int n = ProductRuleBook(dev_ctx, + x, + kernel_sizes, + subm_paddings, + dilations, + subm_strides, + out_dims, + subm, + rulebook, + &counter_per_kernel, + &offsets_per_kernel, + &out_index, + &unique_value, + out, + &h_counter, + &offsets); + + const int* counter_ptr = counter_per_kernel.data(); + const int* offsets_ptr = counter_per_kernel.data(); + const IntT* rulebook_ptr = rulebook->data(); + + // 2. 
gather + DenseTensorMeta in_features_meta( + x.dtype(), {n, in_channels}, DataLayout::NCHW); + DenseTensorMeta out_features_meta( + x.dtype(), {n, out_channels}, DataLayout::NCHW); + phi::DenseTensor in_features = + phi::Empty(dev_ctx, std::move(in_features_meta)); + phi::DenseTensor out_features = + phi::Empty(dev_ctx, std::move(out_features_meta)); + T* in_features_ptr = in_features.data(); + T* out_features_ptr = out_features.data(); + phi::funcs::SetConstant set_zero; + set_zero(dev_ctx, &out_features, static_cast(0.0f)); + + auto config = + phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * in_channels, 1); + GatherKernel<<>>(x.non_zero_elements().data(), + rulebook_ptr + n, + in_features_ptr, + n, + in_channels); + + // 3. call gemm for every werght + auto blas = phi::funcs::GetBlas(dev_ctx); + auto* out_values = out->mutable_non_zero_elements(); + T* out_values_ptr = out_values->data(); + + const T* kernel_ptr = kernel.data(); + for (int i = 0; i < kernel_size; i++) { + if (h_counter[i] <= 0) { + continue; + } + + // call gemm: (n, in_channels) * (in_channels, out_channels) + const int M = h_counter[i]; + const int K = in_channels; + const int N = out_channels; + T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels; + const T* tmp_kernel_ptr = kernel_ptr + i * K * N; + T* tmp_out_ptr = out_features_ptr + offsets[i] * out_channels; + + blas.GEMM(CblasNoTrans, + CblasNoTrans, + M, + N, + K, + static_cast(1), + tmp_in_ptr, + tmp_kernel_ptr, + static_cast(0), + tmp_out_ptr); + } + + // 4. scatter + if (subm) { + set_zero(dev_ctx, out_values, static_cast(0.0f)); + config = + phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * out_channels, 1); + phi::funcs::ScatterCUDAKernel<<>>( + out_features_ptr, + rulebook_ptr + 2 * n, + out_values_ptr, + n, + out_channels, + false); + } else { + config = phi::backends::gpu::GetGpuLaunchConfig1D( + dev_ctx, out->nnz() * out_channels, 1); + ScatterKernel<<>>(out_features_ptr, + unique_value.data(), + out_index.data(), + out->nnz(), + n, + out_channels, + out_values_ptr); + } +} +/** + * x: (N, D, H, W, C) + * kernel: (D, H, W, C, OC) + * out: (N, D, H, W, OC) +**/ +template +void Conv3dKernel(const Context& dev_ctx, + const SparseCooTensor& x, + const DenseTensor& kernel, + const std::vector& paddings, + const std::vector& dilations, + const std::vector& strides, + const int groups, + const bool subm, + SparseCooTensor* out, + DenseTensor* rulebook) { + PD_VISIT_INTEGRAL_TYPES( + x.non_zero_indices().dtype(), "Conv3dGPUKernel", ([&] { + Conv3dGPUKernel(dev_ctx, + x, + kernel, + paddings, + dilations, + strides, + groups, + subm, + out, + rulebook); + })); +} + +} // namespace sparse +} // namespace phi + +PD_REGISTER_KERNEL(sparse_conv3d, + GPU, + ALL_LAYOUT, + phi::sparse::Conv3dKernel, + float, + double, + phi::dtype::float16) { + kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO); +} diff --git a/cuda_code/convolution_kernel_9.cu b/cuda_code/convolution_kernel_9.cu new file mode 100644 index 0000000000000000000000000000000000000000..94186600f1e2994f9b464bb8d81e9dbf891a4ae9 --- /dev/null +++ b/cuda_code/convolution_kernel_9.cu @@ -0,0 +1,678 @@ +/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include +#include + +#include "paddle/phi/api/lib/utils/allocator.h" +#include "paddle/phi/backends/gpu/gpu_context.h" +#include "paddle/phi/backends/gpu/gpu_info.h" +#include "paddle/phi/backends/gpu/gpu_launch_config.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/core/tensor_meta.h" +#include "paddle/phi/kernels/funcs/blas/blas.h" +#include "paddle/phi/kernels/funcs/index_impl.cu.h" +#include "paddle/phi/kernels/funcs/math_function.h" +#include "paddle/phi/kernels/primitive/compute_primitives.h" +#include "paddle/phi/kernels/sparse/convolution_kernel.h" +#include "paddle/phi/kernels/sparse/gpu/convolution.cu.h" + +namespace phi { +namespace sparse { + +__global__ void SetFlagAndUpdateCounterKernel(const int* indexs, + const int n, + const int rulebook_len, + const int kernel_size, + int* rulebook_ptr, + int* counter_ptr) { + int tid = threadIdx.x + blockIdx.x * blockDim.x; + extern __shared__ int cache_count[]; // kernel_size + for (int i = threadIdx.x; i < kernel_size; i += blockDim.x) { + cache_count[i] = 0; + } + __syncthreads(); + + for (int i = tid; i < n; i += gridDim.x * blockDim.x) { + int index = indexs[i]; + int kernel_index = rulebook_ptr[index]; + rulebook_ptr[index + rulebook_len] = -1; + rulebook_ptr[index + 2 * rulebook_len] = -1; + rulebook_ptr[index] = -1; + atomicAdd(&cache_count[kernel_index], 1); + } + __syncthreads(); + + for (int i = threadIdx.x; i < kernel_size; i += blockDim.x) { + atomicSub(&counter_ptr[i], cache_count[i]); + } +} + +/** + * @brief: update the out index and indices + * unique_keys: save the index of the output feature list + * unique_values: indiates the index of key before deduplication + * out_indexs: indicates the position of the output index in the rulebook + * rulebook_len: indicates the length of rulebook + * out_dims: indicates the output dims + * out_indices: the indices of output, out_indices = IndexToPoint(unique_keys) + * rulebook_out_indexs: the output index in rulebook +**/ +__global__ void UpdateIndexKernel(const int* unique_keys, + const int* unique_values, + const int* out_indexs, + const int non_zero_num, + const int rulebook_len, + const Dims4D out_dims, + int* out_indices, + int* rulebook_out_indexs) { + int tid = threadIdx.x + blockIdx.x * blockDim.x; + for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) { + const int index = unique_keys[i]; + int batch, x, y, z; + IndexToPoint(index, out_dims, &batch, &x, &y, &z); + // get out indices + out_indices[i] = batch; + out_indices[i + non_zero_num] = z; + out_indices[i + non_zero_num * 2] = y; + out_indices[i + non_zero_num * 3] = x; + + // update rulebook + int start = unique_values[i]; + int end = i == non_zero_num - 1 ? 
rulebook_len : unique_values[i + 1]; + // max(end-start) = kernel_size + for (int j = start; j < end; j++) { + rulebook_out_indexs[out_indexs[j]] = i; + } + } +} + +/** + * @brief product rulebook + * for input_i in x_indices: + * if input_i participate in the convolution calculation: + * infer the output_i by input_i and kernel_i + * save output_i + * + * x_indices: the indices of input features + * x_dims: the input dims + * kernel_dims: the kernel dims + * out_dims: the output dims + * non_zero_num: the number of input features + * rulebook: the rulebook to save the kernel index, input index and output index + * counter: save the number of times each location in the kernel participates in + *the caculation +**/ +__global__ void ProductRuleBookKernel(const int* x_indices, + const Dims4D x_dims, + const Dims4D kernel_dims, + const Dims4D out_dims, + const int64_t non_zero_num, + const Dims4D paddings, + const Dims4D dilations, + const Dims4D strides, + const bool subm, + int* rulebook, + int* counter, + int* in_indexs) { + int tid = threadIdx.x + blockIdx.x * blockDim.x; + extern __shared__ int counter_buf[]; // kernel_size + const int kernel_size = kernel_dims[3] * kernel_dims[2] * kernel_dims[1]; + const int offset = kernel_size * non_zero_num; + for (int i = threadIdx.x; i < kernel_size; i += blockDim.x) { + counter_buf[i] = 0; + } + __syncthreads(); + + for (int i = tid; i < non_zero_num; i += gridDim.x * blockDim.x) { + int kernel_index = 0; + int batch = x_indices[i]; + int in_z = x_indices[i + non_zero_num]; + int in_y = x_indices[i + 2 * non_zero_num]; + int in_x = x_indices[i + 3 * non_zero_num]; + if (subm) { + in_indexs[i] = PointToIndex(batch, in_x, in_y, in_z, x_dims); + } + for (int kz = 0; kz < kernel_dims[1]; kz++) { + for (int ky = 0; ky < kernel_dims[2]; ky++) { + for (int kx = 0; kx < kernel_dims[3]; kx++) { + int in_i = -1, out_index = -1, kernel_i = -1; + if (Check(x_dims, + kernel_dims, + paddings, + dilations, + strides, + in_x, + in_y, + in_z, + kx, + ky, + kz)) { + int out_z = (in_z + paddings[1] - kz * dilations[1]) / strides[1]; + int out_y = (in_y + paddings[2] - ky * dilations[2]) / strides[2]; + int out_x = (in_x + paddings[3] - kx * dilations[3]) / strides[3]; + in_i = i; + out_index = + PointToIndex(batch, out_x, out_y, out_z, out_dims); + atomicAdd(&counter_buf[kernel_index], 1); + kernel_i = kernel_index; + } + rulebook[kernel_index * non_zero_num + i] = kernel_i; + rulebook[kernel_index * non_zero_num + offset + i] = in_i; + rulebook[kernel_index * non_zero_num + offset * 2 + i] = out_index; + ++kernel_index; + } + } + } + } + __syncthreads(); + for (int i = threadIdx.x; i < kernel_size; i += blockDim.x) { + atomicAdd(&counter[i], counter_buf[i]); + } +} + +// brief: calculation the distance between start and end +__global__ void DistanceKernel(const int* start, + const int* end, + int* distance) { + if (threadIdx.x == 0) { + *distance = end - start; + } +} + +// the basic algorithm can refer to convolution_kernel.cc or +// the second paper +// example: +// 1. the rulebook: +// the kernel_index: 0, 0, 0, 1, 1, 1, 2, 2, .... +// the out_index(key): 20, 30, 33, 30, 33, 20, 25 +// 2. mark the index of out_index(value): 0, 1, 2, 3, 4, 5, 6, .... +// 3. sorted the (key, value) +// 4. unique the (key, value): +// unique_key: 20, 25, 30, 33 +// unique_values: 0, 2, 3, 5 +// the index of unique_values is: 0, 1, 2, 3 +// 5. 
update the out_index by unique_key, uniqe_value and the index of +// unique_value: +// the new out_index: 0, 2, 3, 2, 3, 0, 1 +template +int ProductRuleBook(const Context& dev_ctx, + const SparseCooTensor& x, + const DenseTensor& kernel, + const std::vector& paddings, + const std::vector& dilations, + const std::vector& strides, + const DDim& out_dims, + const bool subm, + DenseTensor* rulebook, + DenseTensor* counter_per_kernel, + DenseTensor* offsets_per_kernel, + DenseTensor* out_index, + DenseTensor* unique_key, + DenseTensor* unique_value, + SparseCooTensor* out, + std::vector* h_counter, + std::vector* h_offsets) { + const auto& kernel_dims = kernel.dims(); + const int64_t non_zero_num = x.nnz(); + const auto& non_zero_indices = x.non_zero_indices(); + const int* indices_ptr = non_zero_indices.data(); + DenseTensor in_indexs = phi::Empty( + dev_ctx, DenseTensorMeta(DataType::INT32, {x.nnz()}, DataLayout::NCHW)); + int* counter_ptr = counter_per_kernel->data(); + int* offsets_ptr = offsets_per_kernel->data(); + int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2]; + const int rulebook_rows = 3; + const int rulebook_cols = kernel_size * non_zero_num; + rulebook->ResizeAndAllocate({rulebook_rows, rulebook_cols}); + int* rulebook_ptr = rulebook->data(); + + const auto x_dims = x.dims(); + Dims4D d_x_dims(x_dims[0], x_dims[3], x_dims[2], x_dims[1]); + Dims4D d_kernel_dims(1, kernel_dims[2], kernel_dims[1], kernel_dims[0]); + Dims4D d_out_dims(out_dims[0], out_dims[3], out_dims[2], out_dims[1]); + Dims4D d_paddings(1, paddings[2], paddings[1], paddings[0]); + Dims4D d_strides(1, strides[2], strides[1], strides[0]); + Dims4D d_dilations(1, dilations[2], dilations[1], dilations[0]); + + // 1. product rule book + phi::funcs::SetConstant set_zero; + set_zero(dev_ctx, counter_per_kernel, 0); + auto config = + phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, non_zero_num, 1); + + ProductRuleBookKernel<<>>(indices_ptr, + d_x_dims, + d_kernel_dims, + d_out_dims, + non_zero_num, + d_paddings, + d_dilations, + d_strides, + subm, + rulebook_ptr, + counter_ptr, + in_indexs.data()); + +// 2. remove -1 +#ifdef PADDLE_WITH_HIP + int* last = thrust::remove(thrust::hip::par.on(dev_ctx.stream()), +#else + int* last = thrust::remove(thrust::cuda::par.on(dev_ctx.stream()), +#endif + rulebook_ptr, + rulebook_ptr + rulebook_rows * rulebook_cols, + -1); + + DistanceKernel<<<1, 1, 0, dev_ctx.stream()>>>( + rulebook_ptr, last, rulebook_ptr + 3 * kernel_size * non_zero_num - 1); + int rulebook_len = 0; + phi::backends::gpu::GpuMemcpyAsync( + &rulebook_len, + rulebook_ptr + 3 * kernel_size * non_zero_num - 1, + sizeof(int), +#ifdef PADDLE_WITH_HIP + hipMemcpyDeviceToHost, +#else + cudaMemcpyDeviceToHost, +#endif + dev_ctx.stream()); + rulebook_len /= 3; + dev_ctx.Wait(); + + if (subm) { + // At present, hashtable is not used to map the input and output indexes. + // At present, the intermediate output index is generated by normal + // convolution, + // and then the intermediate output index is subtracted from the input index + // to obain the rulebook. 
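+  // e.g. if one kernel-offset segment produced the out indexes {20, 25, 30}
+  // and the input occupies indexes {20, 30}, the per-segment set difference
+  // below yields {25}; SetFlagAndUpdateCounterKernel then marks that rulebook
+  // entry -1 and decrements its counter, and the later remove(-1) compacts it
+  // away so the submanifold output keeps only sites present in the input.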
+ // get difference + int32_t* A_key_ptr = rulebook_ptr + 2 * rulebook_len; + int32_t* B_key_ptr = in_indexs.data(); + DenseTensor A_val = phi::Empty( + dev_ctx, + DenseTensorMeta(DataType::INT32, {rulebook_len}, DataLayout::NCHW)); + DenseTensor B_val = phi::Empty( + dev_ctx, DenseTensorMeta(DataType::INT32, {x.nnz()}, DataLayout::NCHW)); + phi::IndexKernel>( + dev_ctx, &A_val, kps::IdentityFunctor()); + phi::IndexKernel>( + dev_ctx, &B_val, kps::IdentityFunctor()); + DenseTensor key_result = phi::Empty( + dev_ctx, + DenseTensorMeta(DataType::INT32, {rulebook_len + 1}, DataLayout::NCHW)); + DenseTensor val_result = phi::Empty( + dev_ctx, + DenseTensorMeta(DataType::INT32, {rulebook_len}, DataLayout::NCHW)); + +#ifdef PADDLE_WITH_HIP + thrust::exclusive_scan(thrust::hip::par.on(dev_ctx.stream()), +#else + thrust::exclusive_scan(thrust::cuda::par.on(dev_ctx.stream()), +#endif + counter_ptr, + counter_ptr + kernel_size, + offsets_ptr); + std::vector offsets(kernel_size, 0); + // TODO(zhangkaihuo): used unified memcpy interface + phi::backends::gpu::GpuMemcpyAsync(offsets.data(), + offsets_ptr, + kernel_size * sizeof(int), +#ifdef PADDLE_WITH_HIP + hipMemcpyDeviceToHost, +#else + cudaMemcpyDeviceToHost, +#endif + dev_ctx.stream()); + dev_ctx.Wait(); + + thrust::pair end; + // Because set_diff does not support duplicate data, set_diff is performed + // separately for each segment of data. + // TODO(zhangkaihuo): Using hashtable here may get better performance, + // further tests ared needed. + for (int i = 0; i < kernel_size; i++) { + int start = offsets[i]; + int stop = i == kernel_size - 1 ? rulebook_len : offsets[i + 1]; + int* key_result_start = (i == 0 ? key_result.data() : end.first); + int* val_result_start = i == 0 ? val_result.data() : end.second; + end = +#ifdef PADDLE_WITH_HIP + thrust::set_difference_by_key(thrust::hip::par.on(dev_ctx.stream()), +#else + thrust::set_difference_by_key(thrust::cuda::par.on(dev_ctx.stream()), +#endif + A_key_ptr + start, + A_key_ptr + stop, + B_key_ptr, + B_key_ptr + x.nnz(), + A_val.data() + start, + B_val.data(), + key_result_start, + val_result_start); + } + + DistanceKernel<<<1, 1, 0, dev_ctx.stream()>>>( + key_result.data(), + end.first, + key_result.data() + rulebook_len); + int len = 0; + phi::backends::gpu::GpuMemcpyAsync(&len, + key_result.data() + rulebook_len, + sizeof(int), +#ifdef PADDLE_WITH_HIP + hipMemcpyDeviceToHost, +#else + cudaMemcpyDeviceToHost, +#endif + dev_ctx.stream()); + dev_ctx.Wait(); + // set the diff value = -1, and update counter + auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, len, 1); + SetFlagAndUpdateCounterKernel<<>>(val_result.data(), + len, + rulebook_len, + kernel_size, + rulebook_ptr, + counter_ptr); +// remove -1 +#ifdef PADDLE_WITH_HIP + int* last = thrust::remove(thrust::hip::par.on(dev_ctx.stream()), +#else + int* last = thrust::remove(thrust::cuda::par.on(dev_ctx.stream()), +#endif + rulebook_ptr, + rulebook_ptr + 3 * rulebook_len, + -1); + DistanceKernel<<<1, 1, 0, dev_ctx.stream()>>>( + rulebook_ptr, last, key_result.data() + rulebook_len); + phi::backends::gpu::GpuMemcpyAsync(&rulebook_len, + key_result.data() + rulebook_len, + sizeof(int), +#ifdef PADDLE_WITH_HIP + hipMemcpyDeviceToHost, +#else + cudaMemcpyDeviceToHost, +#endif + dev_ctx.stream()); + dev_ctx.Wait(); + rulebook_len /= 3; + } + +#ifdef PADDLE_WITH_HIP + thrust::exclusive_scan(thrust::hip::par.on(dev_ctx.stream()), +#else + thrust::exclusive_scan(thrust::cuda::par.on(dev_ctx.stream()), +#endif + counter_ptr, + counter_ptr 
+ kernel_size, + offsets_ptr); + +#ifdef PADDLE_WITH_HIP + phi::backends::gpu::GpuMemcpyAsync(&(*h_counter)[0], + counter_ptr, + kernel_size * sizeof(int), + hipMemcpyDeviceToHost, + dev_ctx.stream()); + phi::backends::gpu::GpuMemcpyAsync(&(*h_offsets)[0], + offsets_ptr, + kernel_size * sizeof(int), + hipMemcpyDeviceToHost, + dev_ctx.stream()); +#else + phi::backends::gpu::GpuMemcpyAsync(&(*h_counter)[0], + counter_ptr, + kernel_size * sizeof(int), + cudaMemcpyDeviceToHost, + dev_ctx.stream()); + phi::backends::gpu::GpuMemcpyAsync(&(*h_offsets)[0], + offsets_ptr, + kernel_size * sizeof(int), + cudaMemcpyDeviceToHost, + dev_ctx.stream()); +#endif + rulebook->Resize({rulebook_rows, rulebook_len}); + + // 3. sorted or merge the out index + out_index->ResizeAndAllocate({rulebook_len}); + unique_value->ResizeAndAllocate({rulebook_len}); + unique_key->ResizeAndAllocate({rulebook_len}); + int* out_index_ptr = out_index->data(); + int* unique_value_ptr = unique_value->data(); + int* unique_key_ptr = unique_key->data(); + + int* new_end = SortedAndUniqueIndex(dev_ctx, + rulebook_ptr + 2 * rulebook_len, + rulebook_len, + out_index, + unique_key, + unique_value); + // thrust::distance doesn't support stream parameters + // const int out_non_zero_num = thrust::distance(unique_key_ptr, + // new_end.first); + DistanceKernel<<<1, 1>>>(unique_key_ptr, + new_end, + rulebook_ptr + rulebook_rows * rulebook_cols - 1); + int out_non_zero_num = 0; +#ifdef PADDLE_WITH_HIP + phi::backends::gpu::GpuMemcpyAsync( + &out_non_zero_num, + rulebook_ptr + rulebook_rows * rulebook_cols - 1, + sizeof(int), + hipMemcpyDeviceToHost, + dev_ctx.stream()); +#else + phi::backends::gpu::GpuMemcpyAsync( + &out_non_zero_num, + rulebook_ptr + rulebook_rows * rulebook_cols - 1, + sizeof(int), + cudaMemcpyDeviceToHost, + dev_ctx.stream()); +#endif + dev_ctx.Wait(); + + // 5. 
update out_indices and rulebook by unique_value_ptr + const int64_t sparse_dim = 4; + DenseTensorMeta indices_meta( + DataType::INT32, {sparse_dim, out_non_zero_num}, DataLayout::NCHW); + DenseTensorMeta values_meta( + x.dtype(), {out_non_zero_num, kernel_dims[4]}, x.layout()); + phi::DenseTensor out_indices = phi::Empty(dev_ctx, std::move(indices_meta)); + phi::DenseTensor out_values = phi::Empty(dev_ctx, std::move(values_meta)); + + int* out_indices_ptr = out_indices.data(); + + config = + phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, out_non_zero_num, 1); + UpdateIndexKernel<<>>(unique_key_ptr, + unique_value_ptr, + out_index_ptr, + out_non_zero_num, + rulebook_len, + d_out_dims, + out_indices_ptr, + rulebook_ptr + 2 * rulebook_len); + out->SetMember(out_indices, out_values, out_dims, true); + return rulebook_len; +} + +/** + * x: (N, D, H, W, C) + * kernel: (D, H, W, C, OC) + * out: (N, D, H, W, OC) +**/ +template +void Conv3dKernel(const Context& dev_ctx, + const SparseCooTensor& x, + const DenseTensor& kernel, + const std::vector& paddings, + const std::vector& dilations, + const std::vector& strides, + const int groups, + const bool subm, + SparseCooTensor* out, + DenseTensor* rulebook) { + // update padding and dilation + // Currently, only support x.layout is NDHWC, groups = 1 + // if x.layout != NDHWC then transpose(x), transpose(weight) + + const auto& x_dims = x.dims(); + const auto& kernel_dims = kernel.dims(); + int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2]; + DDim out_dims = {1, 1, 1, 1, 1}; + GetOutShape(x_dims, kernel_dims, paddings, dilations, strides, &out_dims); + out->set_dims(out_dims); + const int in_channels = kernel_dims[3]; + const int out_channels = kernel_dims[4]; + std::vector offsets(kernel_size + 1), h_counter(kernel_size); + + // Second algorithm: + // https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf + // 1. product rulebook + DenseTensorMeta counter_meta( + DataType::INT32, {kernel_size}, DataLayout::NCHW); + DenseTensorMeta offsets_meta( + DataType::INT32, {kernel_size}, DataLayout::NCHW); + DenseTensor counter_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta)); + DenseTensor offsets_per_kernel = phi::Empty(dev_ctx, std::move(offsets_meta)); + DenseTensorMeta index_meta(DataType::INT32, {1}, DataLayout::NCHW); + DenseTensor out_index = phi::Empty(dev_ctx, std::move(index_meta)); + DenseTensor unique_key = phi::Empty(dev_ctx, std::move(index_meta)); + DenseTensor unique_value = phi::Empty(dev_ctx, std::move(index_meta)); + + std::vector subm_paddings(paddings), subm_strides(strides); + if (subm) { + auto kernel_dims = kernel.dims(); + for (int i = 0; i < paddings.size(); i++) { + subm_paddings[i] = kernel_dims[i] / 2; + subm_strides[i] = 1; + } + } + + int n = ProductRuleBook(dev_ctx, + x, + kernel, + subm_paddings, + dilations, + subm_strides, + out_dims, + subm, + rulebook, + &counter_per_kernel, + &offsets_per_kernel, + &out_index, + &unique_key, + &unique_value, + out, + &h_counter, + &offsets); + + const int* counter_ptr = counter_per_kernel.data(); + const int* offsets_ptr = counter_per_kernel.data(); + const int* rulebook_ptr = rulebook->data(); + + // 2. 
gather + DenseTensorMeta in_features_meta( + x.dtype(), {n, in_channels}, DataLayout::NCHW); + DenseTensorMeta out_features_meta( + x.dtype(), {n, out_channels}, DataLayout::NCHW); + phi::DenseTensor in_features = + phi::Empty(dev_ctx, std::move(in_features_meta)); + phi::DenseTensor out_features = + phi::Empty(dev_ctx, std::move(out_features_meta)); + T* in_features_ptr = in_features.data(); + T* out_features_ptr = out_features.data(); + phi::funcs::SetConstant set_zero; + set_zero(dev_ctx, &out_features, static_cast(0.0f)); + + auto config = + phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * in_channels, 1); + GatherKernel<<>>(x.non_zero_elements().data(), + rulebook_ptr + n, + in_features_ptr, + n, + in_channels); + + // 3. call gemm for every werght + auto blas = phi::funcs::GetBlas(dev_ctx); + auto* out_values = out->mutable_non_zero_elements(); + T* out_values_ptr = out_values->data(); + + const T* kernel_ptr = kernel.data(); + for (int i = 0; i < kernel_size; i++) { + if (h_counter[i] <= 0) { + continue; + } + + // call gemm: (n, in_channels) * (in_channels, out_channels) + const int M = h_counter[i]; + const int K = in_channels; + const int N = out_channels; + T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels; + const T* tmp_kernel_ptr = kernel_ptr + i * K * N; + T* tmp_out_ptr = out_features_ptr + offsets[i] * out_channels; + + blas.GEMM(CblasNoTrans, + CblasNoTrans, + M, + N, + K, + static_cast(1), + tmp_in_ptr, + tmp_kernel_ptr, + static_cast(0), + tmp_out_ptr); + } + + // 4. scatter + config = phi::backends::gpu::GetGpuLaunchConfig1D( + dev_ctx, out->nnz() * out_channels, 1); + ScatterKernel<<>>(out_features_ptr, + unique_value.data(), + out_index.data(), + out->nnz(), + n, + out_channels, + out_values_ptr); +} + +} // namespace sparse +} // namespace phi + +PD_REGISTER_KERNEL(sparse_conv3d, + GPU, + ALL_LAYOUT, + phi::sparse::Conv3dKernel, + float, + double, + phi::dtype::float16) { + kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO); +} diff --git a/cuda_code/cooperative_groups_kernels.cu b/cuda_code/cooperative_groups_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..3585c958ccdd83eb1ed78bc9a3f2c02a87a2531b --- /dev/null +++ b/cuda_code/cooperative_groups_kernels.cu @@ -0,0 +1,262 @@ +/************************************************************* +Copyright (c) 2017-2021, the Ginkgo authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*************************************************************/ + +#include "cuda/components/cooperative_groups.cuh" + + +#include + + +#include + + +#include +#include + + +#include "cuda/base/config.hpp" +#include "cuda/test/utils.hpp" + + +namespace { + + +using namespace gko::kernels::cuda; + + +class CooperativeGroups : public ::testing::Test { +protected: + CooperativeGroups() + : ref(gko::ReferenceExecutor::create()), + cuda(gko::CudaExecutor::create(0, ref)), + result(ref, 1), + dresult(cuda) + { + *result.get_data() = true; + dresult = result; + } + + template + void test(Kernel kernel) + { + kernel<<<1, config::warp_size>>>(dresult.get_data()); + result = dresult; + auto success = *result.get_const_data(); + + ASSERT_TRUE(success); + } + + template + void test_subwarp(Kernel kernel) + { + kernel<<<1, config::warp_size / 2>>>(dresult.get_data()); + result = dresult; + auto success = *result.get_const_data(); + + ASSERT_TRUE(success); + } + + std::shared_ptr ref; + std::shared_ptr cuda; + gko::Array result; + gko::Array dresult; +}; + + +constexpr static int subwarp_size = config::warp_size / 4; + + +__device__ void test_assert(bool* success, bool partial) +{ + if (!partial) { + *success = false; + } +} + + +__global__ void cg_shuffle(bool* s) +{ + auto group = + group::tiled_partition(group::this_thread_block()); + auto i = int(group.thread_rank()); + test_assert(s, group.shfl_up(i, 1) == max(0, i - 1)); + test_assert(s, group.shfl_down(i, 1) == min(i + 1, config::warp_size - 1)); + test_assert(s, group.shfl(i, 0) == 0); +} + +TEST_F(CooperativeGroups, Shuffle) { test(cg_shuffle); } + + +__global__ void cg_all(bool* s) +{ + auto group = + group::tiled_partition(group::this_thread_block()); + test_assert(s, group.all(true)); + test_assert(s, !group.all(false)); + test_assert(s, !group.all(threadIdx.x < 13)); +} + +TEST_F(CooperativeGroups, All) { test(cg_all); } + + +__global__ void cg_any(bool* s) +{ + auto group = + group::tiled_partition(group::this_thread_block()); + test_assert(s, group.any(true)); + test_assert(s, group.any(threadIdx.x == 0)); + test_assert(s, !group.any(false)); +} + +TEST_F(CooperativeGroups, Any) { test(cg_any); } + + +__global__ void cg_ballot(bool* s) +{ + auto group = + group::tiled_partition(group::this_thread_block()); + test_assert(s, group.ballot(false) == 0); + test_assert(s, group.ballot(true) == ~config::lane_mask_type{}); + test_assert(s, group.ballot(threadIdx.x < 4) == 0xf); +} + +TEST_F(CooperativeGroups, Ballot) { test(cg_ballot); } + + +__global__ void cg_subwarp_shuffle(bool* s) +{ + auto group = + group::tiled_partition(group::this_thread_block()); + auto i = int(group.thread_rank()); + test_assert(s, group.shfl_up(i, 1) == max(i - 1, 0)); + test_assert(s, group.shfl_down(i, 1) == min(i + 1, subwarp_size - 1)); + auto group_base = threadIdx.x / subwarp_size * subwarp_size; + test_assert(s, group.shfl(int(threadIdx.x), 0) == group_base); + if (threadIdx.x / subwarp_size == 1) { + test_assert(s, group.shfl_up(i, 1) == max(i - 1, 
0)); + test_assert(s, group.shfl_down(i, 1) == min(i + 1, subwarp_size - 1)); + test_assert(s, group.shfl(int(threadIdx.x), 0) == group_base); + } else { + test_assert(s, group.shfl_down(i, 1) == min(i + 1, subwarp_size - 1)); + test_assert(s, group.shfl(int(threadIdx.x), 0) == group_base); + test_assert(s, group.shfl_up(i, 1) == max(i - 1, 0)); + } +} + +TEST_F(CooperativeGroups, SubwarpShuffle) { test(cg_subwarp_shuffle); } + +TEST_F(CooperativeGroups, SubwarpShuffle2) { test_subwarp(cg_subwarp_shuffle); } + + +__global__ void cg_subwarp_all(bool* s) +{ + auto grp = threadIdx.x / subwarp_size; + bool test_grp = grp == 1; + auto i = threadIdx.x % subwarp_size; + // only test with test_grp, the other threads run 'interference' + auto group = + group::tiled_partition(group::this_thread_block()); + test_assert(s, !test_grp || group.all(test_grp)); + test_assert(s, !test_grp || !group.all(!test_grp)); + test_assert(s, !test_grp || !group.all(i < subwarp_size - 3 || !test_grp)); + if (test_grp) { + test_assert(s, group.all(true)); + test_assert(s, !group.all(false)); + test_assert(s, !group.all(i < subwarp_size - 3)); + } else { + test_assert(s, !group.all(false)); + test_assert(s, !group.all(i < subwarp_size - 3)); + test_assert(s, group.all(true)); + } +} + +TEST_F(CooperativeGroups, SubwarpAll) { test(cg_subwarp_all); } + +TEST_F(CooperativeGroups, SubwarpAll2) { test_subwarp(cg_subwarp_all); } + + +__global__ void cg_subwarp_any(bool* s) +{ + auto grp = threadIdx.x / subwarp_size; + bool test_grp = grp == 1; + // only test with test_grp, the other threads run 'interference' + auto group = + group::tiled_partition(group::this_thread_block()); + auto i = group.thread_rank(); + test_assert(s, !test_grp || group.any(test_grp)); + test_assert(s, !test_grp || group.any(test_grp && i == 1)); + test_assert(s, !test_grp || !group.any(!test_grp)); + if (test_grp) { + test_assert(s, group.any(true)); + test_assert(s, group.any(i == 1)); + test_assert(s, !group.any(false)); + } else { + test_assert(s, !group.any(false)); + test_assert(s, group.any(true)); + test_assert(s, group.any(i == 1)); + } +} + +TEST_F(CooperativeGroups, SubwarpAny) { test(cg_subwarp_any); } + +TEST_F(CooperativeGroups, SubwarpAny2) { test_subwarp(cg_subwarp_any); } + + +__global__ void cg_subwarp_ballot(bool* s) +{ + auto grp = threadIdx.x / subwarp_size; + bool test_grp = grp == 1; + auto full_mask = (config::lane_mask_type{1} << subwarp_size) - 1; + // only test with test_grp, the other threads run 'interference' + auto group = + group::tiled_partition(group::this_thread_block()); + auto i = group.thread_rank(); + test_assert(s, !test_grp || group.ballot(!test_grp) == 0); + test_assert(s, !test_grp || group.ballot(test_grp) == full_mask); + test_assert(s, !test_grp || group.ballot(i < 4 || !test_grp) == 0xf); + if (test_grp) { + test_assert(s, group.ballot(false) == 0); + test_assert(s, group.ballot(true) == full_mask); + test_assert(s, group.ballot(i < 4) == 0xf); + } else { + test_assert(s, group.ballot(true) == full_mask); + test_assert(s, group.ballot(i < 4) == 0xf); + test_assert(s, group.ballot(false) == 0); + } +} + +TEST_F(CooperativeGroups, SubwarpBallot) { test(cg_subwarp_ballot); } + +TEST_F(CooperativeGroups, SubwarpBallot2) { test_subwarp(cg_subwarp_ballot); } + + +} // namespace diff --git a/cuda_code/copy_58.cu b/cuda_code/copy_58.cu new file mode 100644 index 0000000000000000000000000000000000000000..708f4f657533f9e3361799edec06f45200cb5e50 --- /dev/null +++ b/cuda_code/copy_58.cu @@ -0,0 +1,38 @@ +// 
Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include "dali/pipeline/operators/util/copy.h" + +namespace dali { + +template<> +void Copy::RunImpl(DeviceWorkspace &ws) { + auto &input = ws.Input(0); + auto &output = ws.Output(0); + output.set_type(input.type()); + output.SetLayout(input.GetLayout()); + output.ResizeLike(input); + CUDA_CALL(cudaMemcpyAsync( + output.raw_mutable_data(), + input.raw_data(), + input.nbytes(), + cudaMemcpyDeviceToDevice, + ws.stream())); +} + +DALI_REGISTER_OPERATOR(Copy, Copy, GPU); + +} // namespace dali + diff --git a/cuda_code/copy_61.cu b/cuda_code/copy_61.cu new file mode 100644 index 0000000000000000000000000000000000000000..7974705b95236ac7deb1e3c49e469c4f05ed6e43 --- /dev/null +++ b/cuda_code/copy_61.cu @@ -0,0 +1,74 @@ +// copy_float1 copy_float2 copy_float3 copy_float4 copy_float5 +// copy_double1 copy_double2 copy_double3 copy_double4 copy_double5 + +#include "array.h" + +template +__device__ void copy(Array &dest, Array &src) { + int idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx >= dest.length()) return; + dest(idx) = src(idx); +} + +#define COPY_CAPI(T,N) \ +__global__ void copy ## _ ## T ## N(Array dest, Array src) { \ + int idx = threadIdx.x + blockIdx.x * blockDim.x; \ + if (idx >= dest.length()) return; \ + dest(idx) = src(idx); \ +} + +extern "C" { + COPY_CAPI(float,1) + COPY_CAPI(float,2) + COPY_CAPI(float,3) + COPY_CAPI(float,4) + COPY_CAPI(float,5) + COPY_CAPI(double,1) + COPY_CAPI(double,2) + COPY_CAPI(double,3) + COPY_CAPI(double,4) + COPY_CAPI(double,5) +} + +/* +@nvrtc """ +extern "C" __global__ void $(op)_$T(Array<$T,1> x, Array<$T,1> y) { + int idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx < y.length()) y[idx] = $(op)(x[idx]); +} + +extern "C" __global__ void $(op)_T(Array<$T,1> x1, Array<$T,1> x2, Array<$T,1> y) { + int idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx >= y.length()) return; + int idx_x1 = idx < x1.length() ? idx : idx % x1.length(); + int idx_x2 = idx < x2.length() ? 
idx : idx % x2.length(); + y(idx) = x1[idx_x1] $op x2[idx_x2]; +} +""" + +@nvrtc """ +extern "C" __global__ void copy1d(Array<$T,1> dest, Array<$T,1> src) { + int idx0 = threadIdx.x + blockIdx.x * blockDim.x; + if (idx0 >= src.dims[0]) return; + dest(idx0) = src(idx0); +} + +extern "C" __global__ void copy2d(Array<$T,2> dest, Array<$T,2> src) { + int idx0 = threadIdx.x + blockIdx.x * blockDim.x; + int idx1 = threadIdx.y + blockIdx.y * blockDim.y; + if (idx0 >= src.dims[0] || idx1 >= src.dims[1]) return; + dest(idx0,idx1) = src(idx0,idx1); +} + +extern "C" __global__ void copy3d(Array<$T,3> dest, Array<$T,3> src) { + int idx0 = threadIdx.x + blockIdx.x * blockDim.x; + int idx1 = threadIdx.y + blockIdx.y * blockDim.y; + int idx2 = threadIdx.z + blockIdx.z * blockDim.z; + if (idx0 >= src.dims[0] || idx1 >= src.dims[1] || idx2 >= src.dims[2]) return; + dest(idx0,idx1,idx2) = src(idx0,idx1,idx2); +} + +extern "C" __global__ void copynd(Array<$T,$N> dest, Array<$T,$N> src) { + +} +*/ diff --git a/cuda_code/copy_range_18.cu b/cuda_code/copy_range_18.cu new file mode 100644 index 0000000000000000000000000000000000000000..0a37367de78a49341036a608107bfa4e2b5efa9a --- /dev/null +++ b/cuda_code/copy_range_18.cu @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include + +namespace { + +template +void in_place_copy_range( + cudf::column_view const& source, cudf::mutable_column_view& target, + cudf::size_type source_begin, cudf::size_type source_end, + cudf::size_type target_begin, + cudaStream_t stream = 0) { + auto p_source_device_view = + cudf::column_device_view::create(source, stream); + if (p_source_device_view->has_nulls()) { + cudf::experimental::detail::copy_range( + cudf::experimental::detail::make_null_replacement_iterator( + *p_source_device_view, T()) + source_begin, + cudf::experimental::detail::make_validity_iterator( + *p_source_device_view) + source_begin, + target, target_begin, target_begin + (source_end - source_begin), + stream); + } + else { + cudf::experimental::detail::copy_range( + p_source_device_view->begin() + source_begin, + thrust::make_constant_iterator(true), // dummy + target, target_begin, target_begin + (source_end - source_begin), + stream); + } +} + +struct in_place_copy_range_dispatch { + cudf::column_view const& source; + cudf::mutable_column_view& target; + + template + std::enable_if_t(), void> + operator()(cudf::size_type source_begin, cudf::size_type source_end, + cudf::size_type target_begin, cudaStream_t stream = 0) { + in_place_copy_range( + source, target, source_begin, source_end, target_begin, stream); + } + + template + std::enable_if_t(), void> + operator()(cudf::size_type source_begin, cudf::size_type source_end, + cudf::size_type target_begin, cudaStream_t stream = 0) { + CUDF_FAIL("in-place copy does not work for variable width types."); + } +}; + +struct out_of_place_copy_range_dispatch { + cudf::column_view const& source; + cudf::column_view const& target; + + template + std::enable_if_t(), std::unique_ptr> + operator()( + cudf::size_type source_begin, cudf::size_type source_end, + cudf::size_type target_begin, + rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), + cudaStream_t stream = 0) { + auto p_ret = std::make_unique(target, stream, mr); + if ((!p_ret->nullable()) && source.has_nulls(source_begin, source_end)) { + p_ret->set_null_mask( + cudf::create_null_mask(p_ret->size(), cudf::mask_state::ALL_VALID, stream, mr), 0); + } + + if (source_end != source_begin) { // otherwise no-op + auto ret_view = p_ret->mutable_view(); + in_place_copy_range( + source, ret_view, source_begin, source_end, target_begin, stream); + } + + return p_ret; + } + + template + std::enable_if_t::value, + std::unique_ptr> + operator()( + cudf::size_type source_begin, cudf::size_type source_end, + cudf::size_type target_begin, + rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), + cudaStream_t stream = 0) { + auto target_end = target_begin + (source_end - source_begin); + auto p_source_device_view = + cudf::column_device_view::create(source, stream); + if (source.has_nulls()) { + return cudf::strings::detail::copy_range( + cudf::experimental::detail:: + make_null_replacement_iterator( + *p_source_device_view, cudf::string_view()) + source_begin, + cudf::experimental::detail::make_validity_iterator( + *p_source_device_view) + source_begin, + cudf::strings_column_view(target), target_begin, target_end, + mr, stream); + } + else { + return cudf::strings::detail::copy_range( + p_source_device_view->begin() + source_begin, + thrust::make_constant_iterator(true), + cudf::strings_column_view(target), target_begin, target_end, + mr, 
stream); + } + } + + template + std::enable_if_t::value, + std::unique_ptr> + operator()( + cudf::size_type source_begin, cudf::size_type source_end, + cudf::size_type target_begin, + rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), + cudaStream_t stream = 0) { + CUDF_FAIL("dictionary type not supported"); + } +}; + +} + +namespace cudf { +namespace experimental { + +namespace detail { + +void copy_range_in_place(column_view const& source, mutable_column_view& target, + size_type source_begin, size_type source_end, + size_type target_begin, + cudaStream_t stream) { + CUDF_EXPECTS(cudf::is_fixed_width(target.type()) == true, + "In-place copy_range does not support variable-sized types."); + CUDF_EXPECTS((source_begin <= source_end) && + (source_begin >= 0) && + (source_begin < source.size()) && + (source_end <= source.size()) && + (target_begin >= 0) && + (target_begin < target.size()) && + (target_begin + (source_end - source_begin) <= + target.size()) && + // overflow + (target_begin + (source_end - source_begin) >= target_begin), + "Range is out of bounds."); + CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch."); + CUDF_EXPECTS((target.nullable() == true) || (source.has_nulls() == false), + "target should be nullable if source has null values."); + + if (source_end != source_begin) { // otherwise no-op + cudf::experimental::type_dispatcher( + target.type(), + in_place_copy_range_dispatch{source, target}, + source_begin, source_end, target_begin, stream); + } +} + +std::unique_ptr copy_range(column_view const& source, + column_view const& target, + size_type source_begin, size_type source_end, + size_type target_begin, + rmm::mr::device_memory_resource* mr, + cudaStream_t stream) { + CUDF_EXPECTS((source_begin >= 0) && + (source_begin <= source_end) && + (source_begin < source.size()) && + (source_end <= source.size()) && + (target_begin >= 0) && + (target_begin < target.size()) && + (target_begin + (source_end - source_begin) <= + target.size()) && + // overflow + (target_begin + (source_end - source_begin) >= target_begin), + "Range is out of bounds."); + CUDF_EXPECTS(target.type() == source.type(), "Data type mismatch."); + + return cudf::experimental::type_dispatcher( + target.type(), + out_of_place_copy_range_dispatch{source, target}, + source_begin, source_end, target_begin, mr, stream); +} + +} // namespace detail + +void copy_range_in_place(column_view const& source, mutable_column_view& target, + size_type source_begin, size_type source_end, + size_type target_begin) { + return detail::copy_range_in_place(source, target, source_begin, source_end, + target_begin, 0); +} + +std::unique_ptr copy_range(column_view const& source, + column_view const& target, + size_type source_begin, size_type source_end, + size_type target_begin, + rmm::mr::device_memory_resource* mr) { + return detail::copy_range(source, target, source_begin, source_end, + target_begin, mr, 0); +} + +} // namespace experimental +} // namespace cudf diff --git a/cuda_code/covariance_4.cu b/cuda_code/covariance_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..46320c7f94fe1e598b0dc66b679b422cb9082051 --- /dev/null +++ b/cuda_code/covariance_4.cu @@ -0,0 +1,262 @@ +/** + * covariance.cu: This file is part of the PolyBench/GPU 1.0 test suite. 
+ * + * + * Contact: Scott Grauer-Gray + * Louis-Noel Pouchet + * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "../../common/polybenchUtilFuncts.h" + +//define the error threshold for the results "not matching" +#define PERCENT_DIFF_ERROR_THRESHOLD 1.05 + +#define GPU_DEVICE 0 + +/* Problem size */ +#define M 2048 +#define N 2048 + +/* Thread block dimensions for kernel 1*/ +#define DIM_THREAD_BLOCK_KERNEL_1_X 256 +#define DIM_THREAD_BLOCK_KERNEL_1_Y 1 + +/* Thread block dimensions for kernel 2*/ +#define DIM_THREAD_BLOCK_KERNEL_2_X 32 +#define DIM_THREAD_BLOCK_KERNEL_2_Y 8 + +/* Thread block dimensions for kernel 3*/ +#define DIM_THREAD_BLOCK_KERNEL_3_X 256 +#define DIM_THREAD_BLOCK_KERNEL_3_Y 1 + +#define sqrt_of_array_cell(x,j) sqrt(x[j]) + +#define FLOAT_N 3214212.01 +#define EPS 0.005 + +/* Can switch DATA_TYPE between float and double */ +typedef float DATA_TYPE; + + + +void init_arrays(DATA_TYPE* data) +{ + int i, j; + + for (i = 1; i < (M+1); i++) + { + for (j = 1; j < (N+1); j++) + { + data[i*(N+1) + j] = ((DATA_TYPE) i*j) / M; + } + } +} + + +void covariance(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean) +{ + int i, j, j1,j2; + + /* Determine mean of column vectors of input data matrix */ + for (j = 1; j < (M+1); j++) + { + mean[j] = 0.0; + for (i = 1; i < (N+1); i++) + { + mean[j] += data[i*(M+1) + j]; + } + mean[j] /= FLOAT_N; + } + + /* Center the column vectors. */ + for (i = 1; i < (N+1); i++) + { + for (j = 1; j < (M+1); j++) + { + data[i*(M+1) + j] -= mean[j]; + } + } + + /* Calculate the m * m covariance matrix. */ + for (j1 = 1; j1 < (M+1); j1++) + { + for (j2 = j1; j2 < (M+1); j2++) + { + symmat[j1*(M+1) + j2] = 0.0; + for (i = 1; i < N+1; i++) + { + symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2]; + } + symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2]; + } + } +} + + +void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu) +{ + int i,j,fail; + fail = 0; + + for (i=1; i < (M+1); i++) + { + for (j=1; j < (N+1); j++) + { + if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD) + { + fail++; + } + } + } + printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); +} + + +void GPU_argv_init() +{ + cudaDeviceProp deviceProp; + cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); + printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); + cudaSetDevice( GPU_DEVICE ); + + return; +} + + +__global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data) +{ + int j = blockIdx.x * blockDim.x + threadIdx.x + 1; + + if ((j >= 1) && (j < (M+1))) + { + mean[j] = 0.0; + + int i; + for(i = 1; i < (N+1); i++) + { + mean[j] += data[i * (M+1) + j]; + } + mean[j] /= (DATA_TYPE)FLOAT_N; + } +} + + +__global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *data) +{ + int j = blockIdx.x * blockDim.x + threadIdx.x + 1; + int i = blockIdx.y * blockDim.y + threadIdx.y + 1; + + if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1))) + { + data[i * (M+1) + j] -= mean[j]; + } +} + + +__global__ void covar_kernel(DATA_TYPE *symmat, DATA_TYPE *data) +{ + int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1; + int i, j2; + + if ((j1 >= 1) && (j1 < (M+1))) + { + for (j2 = j1; j2 < (M+1); j2++) + { + symmat[j1*(M+1) + j2] = 0.0; + for(i = 1; i < (N+1); i++) + { + symmat[j1 * (M+1) + j2] += data[i *(M+1) + j1] * data[i *(M+1) + j2]; + } 
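+			// Each thread owns one row j1 and walks j2 upward from j1, so only the
+			// upper triangle is accumulated in the loop above; the store below mirrors
+			// the value into symmat[j2][j1] to keep the covariance matrix symmetric.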
+ symmat[j2 * (M+1) + j1] = symmat[j1 * (M+1) + j2]; + } + } +} + + +void covarianceCuda(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean, DATA_TYPE* symmat_outputFromGpu) +{ + double t_start, t_end; + + DATA_TYPE *data_gpu; + DATA_TYPE *mean_gpu; + DATA_TYPE *symmat_gpu; + + cudaMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1)); + cudaMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (M+1)); + cudaMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (M+1)); + cudaMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyHostToDevice); + cudaMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (M+1) * (M+1), cudaMemcpyHostToDevice); + cudaMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (M+1), cudaMemcpyHostToDevice); + + dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y); + dim3 grid1((size_t)(ceil((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1); + + dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y); + dim3 grid2((size_t)(ceil((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), (size_t)(ceil((float)N) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X))); + + dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y); + dim3 grid3((size_t)(ceil((float)M) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), 1); + + t_start = rtclock(); + + mean_kernel<<>>(mean_gpu,data_gpu); + cudaThreadSynchronize(); + reduce_kernel<<>>(mean_gpu,data_gpu); + cudaThreadSynchronize(); + covar_kernel<<>>(symmat_gpu,data_gpu); + cudaThreadSynchronize(); + t_end = rtclock(); + fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); + + cudaMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyDeviceToHost); + + cudaFree(data_gpu); + cudaFree(symmat_gpu); + cudaFree(mean_gpu); +} + + +int main() +{ + double t_start, t_end; + + DATA_TYPE* data; + DATA_TYPE* symmat; + DATA_TYPE* mean; + DATA_TYPE* symmat_outputFromGpu; + + data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE)); + symmat = (DATA_TYPE*)malloc((M+1)*(M+1)*sizeof(DATA_TYPE)); + mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE)); + symmat_outputFromGpu = (DATA_TYPE*)malloc((M+1)*(M+1)*sizeof(DATA_TYPE)); + + init_arrays(data); + + GPU_argv_init(); + + covarianceCuda(data, symmat, mean, symmat_outputFromGpu); + + t_start = rtclock(); + covariance(data, symmat, mean); + t_end = rtclock(); + fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); + + compareResults(symmat, symmat_outputFromGpu); + + free(data); + free(symmat); + free(mean); + free(symmat_outputFromGpu); + + return 0; +} + diff --git a/cuda_code/cpp_api_2.cu b/cuda_code/cpp_api_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..b417cde0e00bd26bb9c515283e0119748d8cf0be --- /dev/null +++ b/cuda_code/cpp_api_2.cu @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted + * provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, this list of + * conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *//* + */ + +/** @file cpp_api.cu + * @author Thomas Müller, NVIDIA + * @brief API to be consumed by cpp (non-CUDA) programs. + */ + +#include +#include + +#include +#include + +namespace tcnn { namespace cpp { + +static constexpr EPrecision TCNN_PRECISION = std::is_same::value ? EPrecision::Fp32 : EPrecision::Fp16; + +class NetworkWithInputEncoding : public Module { +public: + NetworkWithInputEncoding(uint32_t n_input_dims, uint32_t n_output_dims, const json& encoding, const json& network) + : Module{TCNN_PRECISION}, m_network{std::make_shared>(n_input_dims, n_output_dims, encoding, network)} + {} + + virtual ~NetworkWithInputEncoding() {} + + void inference(cudaStream_t stream, uint32_t n_elements, const float* input, void* output, void* params) override { + m_network->set_params((network_precision_t*)params, (network_precision_t*)params, nullptr, nullptr); + + GPUMatrix input_matrix((float*)input, m_network->input_width(), n_elements); + GPUMatrix output_matrix((network_precision_t*)output, m_network->padded_output_width(), n_elements); + + m_network->inference_mixed_precision(stream, input_matrix, output_matrix); + } + + void forward(cudaStream_t stream, uint32_t n_elements, const float* input, void* output, void* params, bool prepare_input_gradients) override { + m_network->set_params((network_precision_t*)params, (network_precision_t*)params, nullptr, nullptr); + + GPUMatrix input_matrix((float*)input, m_network->input_width(), n_elements); + GPUMatrix output_matrix((network_precision_t*)output, m_network->padded_output_width(), n_elements); + + m_network->forward(stream, input_matrix, &output_matrix, false, prepare_input_gradients); + } + + void backward(cudaStream_t stream, uint32_t n_elements, float* dL_dinput, const void* dL_doutput, void* dL_dparams, const float* input, const void* output, const void* params) override { + m_network->set_params((network_precision_t*)params, (network_precision_t*)params, (network_precision_t*)params, (network_precision_t*)dL_dparams); + + GPUMatrix input_matrix((float*)input, m_network->input_width(), n_elements); + GPUMatrix dL_dinput_matrix(dL_dinput, m_network->input_width(), n_elements); + + GPUMatrix output_matrix((network_precision_t*)output, m_network->padded_output_width(), n_elements); + GPUMatrix dL_doutput_matrix((network_precision_t*)dL_doutput, m_network->padded_output_width(), n_elements); + + m_network->backward(stream, input_matrix, output_matrix, dL_doutput_matrix, dL_dinput ? 
&dL_dinput_matrix : nullptr); + } + + uint32_t n_input_dims() const override { + return m_network->input_width(); + } + + size_t n_params() const override { + return m_network->n_params(); + } + + EPrecision param_precision() const override { + return TCNN_PRECISION; + } + + void initialize_params(size_t seed, float* params_full_precision) override { + pcg32 rng{seed}; + m_network->initialize_params(rng, params_full_precision, nullptr, nullptr, nullptr, nullptr); + } + + uint32_t n_output_dims() const override { + return m_network->padded_output_width(); + } + +private: + std::shared_ptr> m_network; +}; + +class Encoding : public Module { +public: + Encoding(uint32_t n_input_dims, const json& encoding) + : Module{TCNN_PRECISION}, m_encoding{tcnn::create_encoding(n_input_dims, encoding, 0)} + {} + + virtual ~Encoding() {} + + void inference(cudaStream_t stream, uint32_t n_elements, const float* input, void* output, void* params) override { + m_encoding->set_params((network_precision_t*)params, (network_precision_t*)params, nullptr, nullptr); + + PitchedPtr pitched_input(input, m_encoding->num_dims_to_encode()); + PitchedPtr pitched_output((network_precision_t*)output, m_encoding->num_encoded_dims()); + + m_encoding->encode(stream, n_elements, pitched_input, pitched_output, nullptr, true); + } + + void forward(cudaStream_t stream, uint32_t n_elements, const float* input, void* output, void* params, bool prepare_input_gradients) override { + m_encoding->set_params((network_precision_t*)params, (network_precision_t*)params, nullptr, nullptr); + + PitchedPtr pitched_input(input, m_encoding->num_dims_to_encode()); + PitchedPtr pitched_output((network_precision_t*)output, m_encoding->num_encoded_dims()); + + if (prepare_input_gradients) { + m_forward_gradient = prepare_input_gradients ? 
GPUMatrix{m_encoding->num_forward_gradient_dims(), n_elements, stream} : GPUMatrix{}; + } + + m_encoding->encode(stream, n_elements, pitched_input, pitched_output, m_forward_gradient.data(), false); + } + + void backward(cudaStream_t stream, uint32_t n_elements, float* dL_dinput, const void* dL_doutput, void* dL_dparams, const float* input, const void*, const void* params) override { + m_encoding->set_params((network_precision_t*)params, (network_precision_t*)params, (network_precision_t*)params, (network_precision_t*)dL_dparams); + + PitchedPtr pitched_input(input, m_encoding->num_dims_to_encode()); + PitchedPtr pitched_dL_dinput(dL_dinput, m_encoding->num_dims_to_encode()); + PitchedPtr pitched_dL_doutput((network_precision_t*)dL_doutput, m_encoding->num_encoded_dims()); + + if (dL_dinput && !m_forward_gradient.data()) { + throw std::runtime_error{"Encoding: forward(prepare_input_gradients) must be called before backward(dL_dinput)"}; + } + + m_encoding->backward(stream, n_elements, pitched_dL_doutput, m_forward_gradient.data(), pitched_dL_dinput, pitched_input); + + m_forward_gradient = GPUMatrix{}; + } + + uint32_t n_input_dims() const override { + return m_encoding->num_dims_to_encode(); + } + + size_t n_params() const override { + return m_encoding->n_params(); + } + + EPrecision param_precision() const override { + return TCNN_PRECISION; + } + + void initialize_params(size_t seed, float* params_full_precision) override { + pcg32 rng{seed}; + m_encoding->initialize_params(rng, params_full_precision, nullptr, nullptr, nullptr, nullptr); + } + + uint32_t n_output_dims() const override { + return m_encoding->num_encoded_dims(); + } + +private: + std::shared_ptr> m_encoding; + + GPUMatrix m_forward_gradient; +}; + +Module* create_encoding(uint32_t n_input_dims, const json& encoding) { + return new Encoding{n_input_dims, encoding}; +} + +Module* create_network_with_input_encoding(uint32_t n_input_dims, uint32_t n_output_dims, const json& encoding, const json& network) { + return new NetworkWithInputEncoding{n_input_dims, n_output_dims, encoding, network}; +} + +Module* create_network(uint32_t n_input_dims, uint32_t n_output_dims, const json& network) { + return create_network_with_input_encoding(n_input_dims, n_output_dims, {{"otype", "Identity"}}, network); +} + +}} diff --git a/cuda_code/crop_30.cu b/cuda_code/crop_30.cu new file mode 100644 index 0000000000000000000000000000000000000000..e6c4aa9757dd24023b60b4af10226a3aab69de1b --- /dev/null +++ b/cuda_code/crop_30.cu @@ -0,0 +1,222 @@ +// Rutuja Patil +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 + +#include"crop.h" +#include +#include +#include +#include + +#define gpuErrChk(ans) { gpuAssert((ans), __FILE__, __LINE__); } +#define CEIL(num,den) ((num+den-1)/den) + +inline void gpuAssert(cudaError_t code, const char *file, + int line, int abort=1){ + if (code != cudaSuccess){ + fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code),file, line); + if (abort) exit(code); + } +} + +__global__ void crop(float *out_x ,float* out_y ,const float *in_x ,const float* in_y , + int loc_x ,int loc_y ,int halfsz ,int npatches ,int w , int h , + int counter){ + + const int idx = threadIdx.x + blockIdx.x*blockDim.x; + const int idy = threadIdx.y + blockIdx.y*blockDim.y; + const int x_start = loc_x - halfsz; + const int y_start = loc_y - halfsz; + const int x_end = loc_x + halfsz; + const int y_end = loc_y + halfsz; + + const int locx_id = x_start + idx; + const int locy_id = y_start + idy; + const int cropsz = 2*halfsz; + int xlim, ylim, xbeg, ybeg; + + // set the end limits for the crop + if(x_end < w){ + + xlim = x_end; + + }else{ + + xlim = w-1; + + } + + if(y_end < h){ + + ylim = y_end; + + }else{ + + ylim = h-1; + + } + + if(x_start >= 0){ + + xbeg = x_start; + + } else { + + xbeg = 0; + + } + + if(y_start >= 0){ + + ybeg = y_start; + + } else { + + ybeg = 0; + + } + + // crop patch + if(locx_id >= xbeg && locy_id >= ybeg && locx_id < xlim && locy_id < ylim){ + + out_x[(counter*cropsz) + idx + (idy*cropsz*npatches)] = in_x[locx_id + locy_id*w]; + out_y[(counter*cropsz) + idx + (idy*cropsz*npatches)] = in_y[locx_id + locy_id*w]; + + } + +} + +struct workspace{ + + workspace(struct CropContext *crp){ + + gpuErrChk(cudaMalloc(&out_x ,nbytes_cropsz(crp->halfcropsz,crp->crp_params.npatches))); + gpuErrChk(cudaMalloc(&out_y ,nbytes_cropsz(crp->halfcropsz,crp->crp_params.npatches))); + gpuErrChk(cudaMemset(out_x,0,nbytes_cropsz(crp->halfcropsz,crp->crp_params.npatches))); + gpuErrChk(cudaMemset(out_y,0,nbytes_cropsz(crp->halfcropsz,crp->crp_params.npatches))); + + } + + ~workspace(){ + + gpuErrChk(cudaFree(out_x)); + gpuErrChk(cudaFree(out_y)); + + } + + size_t nbytes_cropsz(int halfcropsz,int npatches){ + int cropsz = 2*halfcropsz; + return((cropsz*cropsz*npatches)*sizeof(float)); + } + + + // the size to be passed to this function is twice the total crop size of image + void copy_result(const struct CropContext *crp ,void* buf ,size_t size){ + + int cropsz = 2*crp->halfcropsz; + float* hout_x = (float*)buf; + float* hout_y = (float*)buf + cropsz*cropsz*crp->crp_params.npatches; + gpuErrChk(cudaMemcpy(hout_x ,out_x ,size/2 ,cudaMemcpyDeviceToHost)); + gpuErrChk(cudaMemcpy(hout_y ,out_y , size/2 ,cudaMemcpyDeviceToHost)) + + } + + void output_shape(const struct CropContext *crp ,unsigned *shape){ + + int cropsz = 2*crp->halfcropsz; + shape[0] = cropsz; + shape[1] = crp->crp_params.npatches*cropsz; + + } + + float *out_x; + float *out_y; +}; + + +void cropPatch(const struct CropContext *self ,const float *in_x , + const float *in_y ,int w ,int h){ + + if(!self->workspace) return; + + int cropsz =2*self->halfcropsz; + float* out_x = self->out_x; + float* out_y = self->out_y; + + dim3 block(32,8); + dim3 grid(CEIL(cropsz,block.x),CEIL(cropsz,block.y)); + + // crop for number of side views + for(int i = 0;i < self->crp_params.npatches;i++){ + + crop<<>>(out_x ,out_y ,in_x ,in_y ,self->crp_params.interest_pnts[2*i]-1, + self->crp_params.interest_pnts[2*i+1]-1 , + 
self->halfcropsz,self->crp_params.npatches,w,h,i); + cudaGetLastError(); + } + + cudaDeviceSynchronize(); + +} + +// Initialize params for a crop +struct CropContext CropInit(int cellw,int cellh,const struct CropParams params){ + + assert(cellw==cellh); + int halfcropsz = (params.ncells*cellw)/2; + struct CropContext crp = {0}; + crp.halfcropsz = halfcropsz; + crp.crp_params = params; + workspace *ws = new workspace(&crp); + crp.workspace = ws; + crp.out_x = ws->out_x; + crp.out_y = ws->out_y; + return crp; +} + +//compute the crop +void CropImage(const struct CropContext *self, const float *in_x , + const float *in_y ,int width ,int height){ + + if(!self->workspace) return; + cropPatch(self ,in_x ,in_y ,width ,height); + +} + +//copy the crop output +void CropOutputCopy(const struct CropContext *self ,void *buf ,size_t sz){ + + if(!self->workspace) return; + workspace *ws = (workspace*)self->workspace; + ws->copy_result(self ,buf ,sz); + +} + +// calculate the number of crop output image bytes +size_t CropOutputByteCount(const struct CropContext *self){ + + if(!self->workspace) return 0; + size_t nbytes = ((workspace*)self->workspace)->nbytes_cropsz(self->halfcropsz, self->crp_params.npatches)*2; + return nbytes; + +} + +void CropOutputShape(const struct CropContext *self,unsigned *shape) { + + if(!self->workspace) return; + workspace *ws = (workspace*)self->workspace; + ws->output_shape(self ,shape); + +} + +// delete the crop context +void CropTearDown(const struct CropContext *self){ + + if(!self->workspace) return; + workspace *ws = (workspace*)self->workspace; + delete ws; +} diff --git a/cuda_code/crop_layer_kernels_21.cu b/cuda_code/crop_layer_kernels_21.cu new file mode 100644 index 0000000000000000000000000000000000000000..b5b9f554627f2450e8f82fb16314ae23644eeb84 --- /dev/null +++ b/cuda_code/crop_layer_kernels_21.cu @@ -0,0 +1,225 @@ +#include "cuda_runtime.h" +#include "curand.h" +#include "cublas_v2.h" + +extern "C" { +#include "crop_layer.h" +#include "utils.h" +#include "cuda.h" +#include "image.h" +} + +__device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c) +{ + if(x < 0 || x >= w || y < 0 || y >= h) return 0; + return image[x + w*(y + c*h)]; +} + +__device__ float3 rgb_to_hsv_kernel(float3 rgb) +{ + float r = rgb.x; + float g = rgb.y; + float b = rgb.z; + + float h, s, v; + float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b); + float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b); + float delta = max - min; + v = max; + if(max == 0){ + s = 0; + h = -1; + }else{ + s = delta/max; + if(r == max){ + h = (g - b) / delta; + } else if (g == max) { + h = 2 + (b - r) / delta; + } else { + h = 4 + (r - g) / delta; + } + if (h < 0) h += 6; + } + return make_float3(h, s, v); +} + +__device__ float3 hsv_to_rgb_kernel(float3 hsv) +{ + float h = hsv.x; + float s = hsv.y; + float v = hsv.z; + + float r, g, b; + float f, p, q, t; + + if (s == 0) { + r = g = b = v; + } else { + int index = (int) floorf(h); + f = h - index; + p = v*(1-s); + q = v*(1-s*f); + t = v*(1-s*(1-f)); + if(index == 0){ + r = v; g = t; b = p; + } else if(index == 1){ + r = q; g = v; b = p; + } else if(index == 2){ + r = p; g = v; b = t; + } else if(index == 3){ + r = p; g = q; b = v; + } else if(index == 4){ + r = t; g = p; b = v; + } else { + r = v; g = p; b = q; + } + } + r = (r < 0) ? 0 : ((r > 1) ? 1 : r); + g = (g < 0) ? 0 : ((g > 1) ? 1 : g); + b = (b < 0) ? 0 : ((b > 1) ? 
1 : b); + return make_float3(r, g, b); +} + +__device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c) +{ + int ix = (int) floorf(x); + int iy = (int) floorf(y); + + float dx = x - ix; + float dy = y - iy; + + float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) + + dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) + + (1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) + + dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c); + return val; +} + +__global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift) +{ + int size = batch * w * h; + int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; + if(id >= size) return; + int x = id % w; + id /= w; + int y = id % h; + id /= h; + float rshift = rand[0]; + float gshift = rand[1]; + float bshift = rand[2]; + float r0 = rand[8*id + 0]; + float r1 = rand[8*id + 1]; + float r2 = rand[8*id + 2]; + float r3 = rand[8*id + 3]; + + saturation = r0*(saturation - 1) + 1; + saturation = (r1 > .5f) ? 1.f/saturation : saturation; + exposure = r2*(exposure - 1) + 1; + exposure = (r3 > .5f) ? 1.f/exposure : exposure; + + size_t offset = id * h * w * 3; + image += offset; + float r = image[x + w*(y + h*0)]; + float g = image[x + w*(y + h*1)]; + float b = image[x + w*(y + h*2)]; + float3 rgb = make_float3(r,g,b); + if(train){ + float3 hsv = rgb_to_hsv_kernel(rgb); + hsv.y *= saturation; + hsv.z *= exposure; + rgb = hsv_to_rgb_kernel(hsv); + } else { + shift = 0; + } + image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5f)*shift; + image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5f)*shift; + image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5f)*shift; +} + +__global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output) +{ + int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; + if(id >= size) return; + + float cx = w/2.f; + float cy = h/2.f; + + int count = id; + int j = id % crop_width; + id /= crop_width; + int i = id % crop_height; + id /= crop_height; + int k = id % c; + id /= c; + int b = id; + + float r4 = rand[8*b + 4]; + float r5 = rand[8*b + 5]; + float r6 = rand[8*b + 6]; + float r7 = rand[8*b + 7]; + + float dw = (w - crop_width)*r4; + float dh = (h - crop_height)*r5; + flip = (flip && (r6 > .5f)); + angle = 2*angle*r7 - angle; + if(!train){ + dw = (w - crop_width)/2.f; + dh = (h - crop_height)/2.f; + flip = 0; + angle = 0; + } + + input += w*h*c*b; + + float x = (flip) ? 
w - dw - j - 1 : j + dw; + float y = i + dh; + + float rx = cosf(angle)*(x-cx) - sinf(angle)*(y-cy) + cx; + float ry = sinf(angle)*(x-cx) + cosf(angle)*(y-cy) + cy; + + output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k); +} + +extern "C" void forward_crop_layer_gpu(crop_layer layer, network net) +{ + cuda_random(layer.rand_gpu, layer.batch*8); + + float radians = layer.angle*3.14159265f/180.f; + + float scale = 2; + float translate = -1; + if(layer.noadjust){ + scale = 1; + translate = 0; + } + + int size = layer.batch * layer.w * layer.h; + + levels_image_kernel<<>>(net.input_gpu, layer.rand_gpu, layer.batch, layer.w, layer.h, net.train, layer.saturation, layer.exposure, translate, scale, layer.shift); + check_error(cudaPeekAtLastError()); + + size = layer.batch*layer.c*layer.out_w*layer.out_h; + + forward_crop_layer_kernel<<>>(net.input_gpu, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, net.train, layer.flip, radians, layer.output_gpu); + check_error(cudaPeekAtLastError()); + +/* + cuda_pull_array(layer.output_gpu, layer.output, size); + image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch)); + image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch)); + image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch)); + + translate_image(im, -translate); + scale_image(im, 1/scale); + translate_image(im2, -translate); + scale_image(im2, 1/scale); + translate_image(im3, -translate); + scale_image(im3, 1/scale); + + show_image(im, "cropped"); + show_image(im2, "cropped2"); + show_image(im3, "cropped3"); + cvWaitKey(0); + */ +} + diff --git a/cuda_code/cross_3.cu b/cuda_code/cross_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..d7694641c26154703e96e23c7333e96e562d3daa --- /dev/null +++ b/cuda_code/cross_3.cu @@ -0,0 +1,122 @@ +/******************************************************************************* + * Copyright (c) 2015-2018 Skymind, Inc. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + ******************************************************************************/ + +// +// @author Yurii Shyrma, created on 10.06.2019 +// + + +#include +#include + + +namespace sd { +namespace ops { +namespace helpers { + +////////////////////////////////////////////////////////////////////////// +template +__global__ static void crossCuda(const void* vx, const Nd4jLong* xShapeInfo, + const void* vy, const Nd4jLong* yShapeInfo, + void* vz, const Nd4jLong* zShapeInfo) { + + __shared__ const T* x; + __shared__ const T* y; + __shared__ T* z; + __shared__ int rank, *sharedMem; + __shared__ Nd4jLong lenWithoutLastDim, totalThreads; + + if (threadIdx.x == 0) { + x = reinterpret_cast(vx); + y = reinterpret_cast(vy); + z = reinterpret_cast(vz); + + extern __shared__ unsigned char shmem[]; + sharedMem = reinterpret_cast(shmem); + totalThreads = gridDim.x * blockDim.x; + + rank = shape::rank(xShapeInfo); + lenWithoutLastDim = shape::length(xShapeInfo) / xShapeInfo[rank]; // shape::length(xShapeInfo) / 3; + } + __syncthreads(); + + auto coords = sharedMem + threadIdx.x * rank; + const auto tid = blockIdx.x * blockDim.x + threadIdx.x; + + for (uint i = tid; i < lenWithoutLastDim; i += totalThreads) { + + shape::index2coords(i, rank - 1, xShapeInfo + 1, coords); + + coords[rank - 1] = 0; + + auto xOffset = shape::getOffset(xShapeInfo, coords); + auto yOffset = shape::getOffset(yShapeInfo, coords); + + const auto x0 = x[xOffset]; + const auto y0 = y[yOffset]; + + xOffset += shape::stride(const_cast(xShapeInfo))[rank - 1]; + yOffset += shape::stride(const_cast(yShapeInfo))[rank - 1]; + + const auto x1 = x[xOffset]; + const auto y1 = y[yOffset]; + + xOffset += shape::stride(const_cast(xShapeInfo))[rank - 1]; + yOffset += shape::stride(const_cast(yShapeInfo))[rank - 1]; + + const auto x2 = x[xOffset]; + const auto y2 = y[yOffset]; + + auto zOffset = shape::getOffset(zShapeInfo, coords); + z[zOffset] = x1 * y2 - x2 * y1; + + zOffset += shape::stride(const_cast(zShapeInfo))[rank - 1]; + z[zOffset] = x2 * y0 - x0 * y2; + + zOffset += shape::stride(const_cast(zShapeInfo))[rank - 1]; + z[zOffset] = x0 * y1 - x1 * y0; + } +} + +template +__host__ static void crossCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, + const void* vx, const Nd4jLong* xShapeInfo, + const void* vy, const Nd4jLong* yShapeInfo, + void* vz, const Nd4jLong* zShapeInfo) { + + crossCuda<<>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo); +} +BUILD_SINGLE_TEMPLATE(template void crossCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo), NUMERIC_TYPES); + + +void crossBatched(sd::LaunchContext* context, NDArray *x, NDArray *y, NDArray *z) { + + const int threadsPerBlock = MAX_NUM_THREADS / 4; + const int blocksPerGrid = (x->lengthOf() / x->sizeAt(-1) + threadsPerBlock - 1) / threadsPerBlock; + const int sharedMem = sizeof(int) * threadsPerBlock * x->rankOf() + 128; + + PointersManager manager(context, "cross"); + + NDArray::prepareSpecialUse({z}, {x, y}); + BUILD_SINGLE_SELECTOR(x->dataType(), crossCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), x->getSpecialBuffer(), x->getSpecialShapeInfo(), y->getSpecialBuffer(), y->getSpecialShapeInfo(), z->specialBuffer(), z->specialShapeInfo()), NUMERIC_TYPES); + 
NDArray::registerSpecialUse({z}, {x, y}); + + manager.synchronize(); +} + +} +} +} \ No newline at end of file diff --git a/cuda_code/crypt.config.cu b/cuda_code/crypt.config.cu new file mode 100644 index 0000000000000000000000000000000000000000..405b441bd282ec50017f1cadf560b9f53aa18837 --- /dev/null +++ b/cuda_code/crypt.config.cu @@ -0,0 +1,490 @@ +#include "../common/common.h" +#include +#include +#include +#include + +/* + * The crypt application implements IDEA encryption and decryption of a single + * input file using the secret key provided. + */ + +// Chunking size for IDEA, in bytes +#define CHUNK_SIZE 8 +// Length of the encryption/decryption keys, in bytes +#define KEY_LENGTH 52 +#define BLOCK_SIZE_IN_CHUNKS 1024000 +// Length of the secret key, in bytes +#define USERKEY_LENGTH 8 +#define BITS_PER_BYTE 8 + +typedef enum { ENCRYPT, DECRYPT } action; + +__constant__ int dkey[KEY_LENGTH]; + +/* + * doCrypt implements the core logic of IDEA. It iterates over the byte + * chunks stored in plainList and outputs their encrypted/decrypted form to the + * corresponding element in cryptList using the secret key provided. + */ +__device__ void doCrypt(int chunk, signed char *plain, signed char *crypt) +{ + long x1, x2, x3, x4, t1, t2, ik, r; + + x1 = (((unsigned int)plain[chunk * CHUNK_SIZE]) & 0xff); + x1 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 1]) & 0xff) << + BITS_PER_BYTE); + x2 = (((unsigned int)plain[chunk * CHUNK_SIZE + 2]) & 0xff); + x2 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 3]) & 0xff) << + BITS_PER_BYTE); + x3 = (((unsigned int)plain[chunk * CHUNK_SIZE + 4]) & 0xff); + x3 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 5]) & 0xff) << + BITS_PER_BYTE); + x4 = (((unsigned int)plain[chunk * CHUNK_SIZE + 6]) & 0xff); + x4 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 7]) & 0xff) << + BITS_PER_BYTE); + ik = 0; + r = CHUNK_SIZE; + + do + { + x1 = (int)((((long)x1 * dkey[ik++]) % 0x10001L) & 0xffff); + x2 = ((x2 + dkey[ik++]) & 0xffff); + x3 = ((x3 + dkey[ik++]) & 0xffff); + x4 = (int)((((long)x4 * dkey[ik++]) % 0x10001L) & 0xffff); + + t2 = (x1 ^ x3); + t2 = (int)((((long)t2 * dkey[ik++]) % 0x10001L) & 0xffff); + + t1 = ((t2 + (x2 ^ x4)) & 0xffff); + t1 = (int)((((long)t1 * dkey[ik++]) % 0x10001L) & 0xffff); + t2 = (t1 + t2 & 0xffff); + + x1 = (x1 ^ t1); + x4 = (x4 ^ t2); + t2 = (t2 ^ x2); + x2 = (x3 ^ t1); + x3 = t2; + } + while(--r != 0); + + x1 = (int)((((long)x1 * dkey[ik++]) % 0x10001L) & 0xffff); + x3 = ((x3 + dkey[ik++]) & 0xffff); + x2 = ((x2 + dkey[ik++]) & 0xffff); + x4 = (int)((((long)x4 * dkey[ik++]) % 0x10001L) & 0xffff); + + crypt[chunk * CHUNK_SIZE] = (signed char) x1; + crypt[chunk * CHUNK_SIZE + 1] = (signed char) ((unsigned long)x1 >> + BITS_PER_BYTE); + crypt[chunk * CHUNK_SIZE + 2] = (signed char) x3; + crypt[chunk * CHUNK_SIZE + 3] = (signed char) ((unsigned long)x3 >> + BITS_PER_BYTE); + crypt[chunk * CHUNK_SIZE + 4] = (signed char) x2; + crypt[chunk * CHUNK_SIZE + 5] = (signed char) ((unsigned long)x2 >> + BITS_PER_BYTE); + crypt[chunk * CHUNK_SIZE + 6] = (signed char) x4; + crypt[chunk * CHUNK_SIZE + 7] = (signed char) ((unsigned long)x4 >> + BITS_PER_BYTE); +} + +__global__ void encrypt_decrypt(signed char *plain, signed char *crypt, + int nChunks) +{ + int tid = blockIdx.x * blockDim.x + threadIdx.x; + int nthreads = blockDim.x * gridDim.x; + + for ( ; tid < nChunks; tid += nthreads) + { + doCrypt(tid, plain, crypt); + } +} + +static void encrypt_decrypt_driver(signed char *plain, signed char *crypt, + int *key, + int plainLength, int 
nThreadsPerBlock) +{ + cudaEvent_t start, *finishes; + cudaStream_t *streams; + int nChunks, b, nBlocks; + signed char *dPlain, *dCrypt; + + if (plainLength % CHUNK_SIZE != 0) + { + fprintf(stderr, "Invalid encryption: length of plain must be an even " + "multiple of %d but is %d\n", CHUNK_SIZE, plainLength); + exit(-1); + } + + cudaDeviceProp info; + CHECK(cudaGetDeviceProperties(&info, 0)); + nChunks = plainLength / CHUNK_SIZE; + nBlocks = (nChunks + BLOCK_SIZE_IN_CHUNKS - 1) / BLOCK_SIZE_IN_CHUNKS; + int nThreadBlocks = (nChunks + nThreadsPerBlock - 1) / nThreadsPerBlock; + + if (nThreadBlocks > info.maxGridSize[0]) + { + nThreadBlocks = info.maxGridSize[0]; + } + + CHECK(cudaEventCreate(&start, 0)); + finishes = (cudaEvent_t *)malloc(sizeof(cudaEvent_t) * nBlocks); + streams = (cudaStream_t *)malloc(sizeof(cudaStream_t) * nBlocks); + + for (b = 0; b < nBlocks; b++) + { + CHECK(cudaStreamCreate(streams + b)); + CHECK(cudaEventCreate(finishes + b)); + } + + CHECK(cudaMalloc((void **)&dPlain, + plainLength * sizeof(signed char))); + CHECK(cudaMalloc((void **)&dCrypt, + plainLength * sizeof(signed char))); + + CHECK(cudaEventRecord(start, streams[0])); + CHECK(cudaMemcpyToSymbolAsync(dkey, key, KEY_LENGTH * sizeof(int), 0, + cudaMemcpyHostToDevice, streams[0])); + CHECK(cudaStreamSynchronize(streams[0])); + + for (b = 0; b < nBlocks; b++) + { + int blockOffset = b * BLOCK_SIZE_IN_CHUNKS * CHUNK_SIZE; + int localChunks = BLOCK_SIZE_IN_CHUNKS; + + if (b * BLOCK_SIZE_IN_CHUNKS + localChunks > nChunks) + { + localChunks = nChunks - b * BLOCK_SIZE_IN_CHUNKS; + } + + CHECK(cudaMemcpyAsync(dPlain + blockOffset, plain + blockOffset, + localChunks * CHUNK_SIZE * sizeof(signed char), + cudaMemcpyHostToDevice, streams[b])); + + encrypt_decrypt<<>>( + dPlain + blockOffset, dCrypt + blockOffset, localChunks); + CHECK(cudaMemcpyAsync(crypt + blockOffset, dCrypt + blockOffset, + localChunks * CHUNK_SIZE * sizeof(signed char), + cudaMemcpyDeviceToHost, streams[b])); + CHECK(cudaEventRecord(finishes[b], streams[b])); + } + + CHECK(cudaDeviceSynchronize()); + + float maxElapsed = 0.0; + + for (b = 0; b < nBlocks; b++) + { + float elapsed; + CHECK(cudaEventElapsedTime(&elapsed, start, finishes[b])); + maxElapsed = elapsed > maxElapsed ? elapsed : maxElapsed; + } + + printf("Processed %d bytes in %f ms ( %f KB/ms )\n", plainLength, + maxElapsed, ((float)plainLength / maxElapsed) / 1024.0f); + + for (b = 0; b < nBlocks; b++) + { + CHECK(cudaStreamDestroy(streams[b])); + CHECK(cudaEventDestroy(finishes[b])); + } + + free(streams); + free(finishes); + CHECK(cudaEventDestroy(start)); + + CHECK(cudaFree(dPlain)); + CHECK(cudaFree(dCrypt)); +} + +/* + * Get the length of a file on disk. + */ +static size_t getFileLength(FILE *fp) +{ + fseek(fp, 0L, SEEK_END); + size_t fileLen = ftell(fp); + fseek(fp, 0L, SEEK_SET); + return (fileLen); +} + +/* + * inv is used to generate the key used for decryption from the secret key. + */ +static int inv(int x) +{ + int t0, t1; + int q, y; + + if (x <= 1) // Assumes positive x. + return (x); // 0 and 1 are self-inverse. + + t1 = 0x10001 / x; // (2**16+1)/x; x is >= 2, so fits 16 bits. + y = 0x10001 % x; + + if (y == 1) + return ((1 - t1) & 0xffff); + + t0 = 1; + + do + { + q = x / y; + x = x % y; + t0 += q * t1; + + if (x == 1) return (t0); + + q = y / x; + y = y % x; + t1 += q * t0; + } + while (y != 1); + + return ((1 - t1) & 0xffff); +} + +/* + * Generate the key to be used for encryption, based on the user key read from + * disk. 
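+ *
+ * The KEY_LENGTH (52) 16-bit subkeys are expanded from the USERKEY_LENGTH (8)
+ * 16-bit words of the user key: the first CHUNK_SIZE entries are copied over
+ * directly and the remaining entries come from the shift/OR recurrence below,
+ * which amounts to repeatedly rotating the 128-bit user key (the usual IDEA
+ * key schedule).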
+ */ +static int *generateEncryptKey(int16_t *userkey) +{ + int i, j; + int *key; + + CHECK(cudaMallocHost(&key, KEY_LENGTH * sizeof(int))); + memset(key, 0x00, sizeof(int) * KEY_LENGTH); + + for (i = 0; i < CHUNK_SIZE; i++) + { + key[i] = (userkey[i] & 0xffff); + } + + for (i = CHUNK_SIZE; i < KEY_LENGTH; i++) + { + j = i % CHUNK_SIZE; + + if (j < 6) + { + key[i] = ((key[i - 7] >> 9) | (key[i - 6] << 7)) + & 0xffff; + continue; + } + + if (j == 6) + { + key[i] = ((key[i - 7] >> 9) | (key[i - 14] << 7)) + & 0xffff; + continue; + } + + key[i] = ((key[i - 15] >> 9) | (key[i - 14] << 7)) + & 0xffff; + } + + return (key); +} + +/* + * Generate the key to be used for decryption, based on the user key read from + * disk. + */ +static int *generateDecryptKey(int16_t *userkey) +{ + int *key; + int i, j, k; + int t1, t2, t3; + + CHECK(cudaMallocHost(&key, KEY_LENGTH * sizeof(int))); + int *Z = generateEncryptKey(userkey); + + t1 = inv(Z[0]); + t2 = - Z[1] & 0xffff; + t3 = - Z[2] & 0xffff; + + key[51] = inv(Z[3]); + key[50] = t3; + key[49] = t2; + key[48] = t1; + + j = 47; + k = 4; + + for (i = 0; i < 7; i++) + { + t1 = Z[k++]; + key[j--] = Z[k++]; + key[j--] = t1; + t1 = inv(Z[k++]); + t2 = -Z[k++] & 0xffff; + t3 = -Z[k++] & 0xffff; + key[j--] = inv(Z[k++]); + key[j--] = t2; + key[j--] = t3; + key[j--] = t1; + } + + t1 = Z[k++]; + key[j--] = Z[k++]; + key[j--] = t1; + t1 = inv(Z[k++]); + t2 = -Z[k++] & 0xffff; + t3 = -Z[k++] & 0xffff; + key[j--] = inv(Z[k++]); + key[j--] = t3; + key[j--] = t2; + key[j--] = t1; + + CHECK(cudaFreeHost(Z)); + + return (key); +} + +void readInputData(FILE *in, size_t textLen, signed char **text, + signed char **crypt) +{ + CHECK(cudaMallocHost(text, textLen * sizeof(signed char))); + CHECK(cudaMallocHost(crypt, textLen * sizeof(signed char))); + + if (fread(*text, sizeof(signed char), textLen, in) != textLen) + { + fprintf(stderr, "Failed reading text from input file\n"); + exit(1); + } +} + +void cleanup(signed char *text, signed char *crypt, int *key, + int16_t *userkey) +{ + free(userkey); + CHECK(cudaFreeHost(key)); + CHECK(cudaFreeHost(text)); + CHECK(cudaFreeHost(crypt)); +} + +/* + * Initialize application state by reading inputs from the disk and + * pre-allocating memory. Hand off to encrypt_decrypt to perform the actualy + * encryption or decryption. Then, write the encrypted/decrypted results to + * disk. + */ +int main(int argc, char **argv) +{ + FILE *in, *out, *keyfile; + signed char *text, *crypt; + size_t textLen, keyFileLength; + int16_t *userkey; + int *key; + action a; + + if (argc != 6) + { + printf("usage: %s " + "\n", argv[0]); + return (1); + } + + // Are we encrypting or decrypting? + if (strncmp(argv[1], "encrypt", 7) == 0) + { + a = ENCRYPT; + } + else if (strncmp(argv[1], "decrypt", 7) == 0) + { + a = DECRYPT; + } + else + { + fprintf(stderr, "The action specified ('%s') is not valid. 
Must be " + "either 'encrypt' or 'decrypt'\n", argv[1]); + return (1); + } + + // Input file + in = fopen(argv[2], "r"); + + if (in == NULL) + { + fprintf(stderr, "Unable to open %s for reading\n", argv[2]); + return (1); + } + + // Output file + out = fopen(argv[3], "w"); + + if (out == NULL) + { + fprintf(stderr, "Unable to open %s for writing\n", argv[3]); + return (1); + } + + // Key file + keyfile = fopen(argv[4], "r"); + + if (keyfile == NULL) + { + fprintf(stderr, "Unable to open key file %s for reading\n", argv[4]); + return (1); + } + + int nThreadsPerBlock = atoi(argv[5]); + + keyFileLength = getFileLength(keyfile); + + if (keyFileLength != sizeof(*userkey) * USERKEY_LENGTH) + { + fprintf(stderr, "Invalid user key file length %lu, must be %lu\n", + keyFileLength, sizeof(*userkey) * USERKEY_LENGTH); + return (1); + } + + userkey = (int16_t *)malloc(sizeof(int16_t) * USERKEY_LENGTH); + + if (userkey == NULL) + { + fprintf(stderr, "Error allocating user key\n"); + return (1); + } + + if (fread(userkey, sizeof(*userkey), USERKEY_LENGTH, keyfile) != + USERKEY_LENGTH) + { + fprintf(stderr, "Error reading user key\n"); + return (1); + } + + if (a == ENCRYPT) + { + key = generateEncryptKey(userkey); + } + else + { + key = generateDecryptKey(userkey); + } + + textLen = getFileLength(in); + + if (textLen % CHUNK_SIZE != 0) + { + fprintf(stderr, "Invalid input file length %lu, must be evenly " + "divisible by %d\n", textLen, CHUNK_SIZE); + return (1); + } + + readInputData(in, textLen, &text, &crypt); + fclose(in); + + encrypt_decrypt_driver(text, crypt, key, textLen, nThreadsPerBlock); + + if (fwrite(crypt, sizeof(signed char), textLen, out) != textLen) + { + fprintf(stderr, "Failed writing crypt to %s\n", argv[3]); + return (1); + } + + fclose(out); + + cleanup(text, crypt, key, userkey); + + return (0); +} diff --git a/cuda_code/csr2coo.cu b/cuda_code/csr2coo.cu new file mode 100644 index 0000000000000000000000000000000000000000..a9a4776e3e6d48d0ec328f5c3f5ff1fde60dc3c6 --- /dev/null +++ b/cuda_code/csr2coo.cu @@ -0,0 +1,65 @@ +#include +#include +#include +#include + +#include "utilities.h" +#include + +#include + +int main(int argn, char *argv[]) +{ + + // Host problem definition + + int hCsrRowPtr[] = {0, 0, 1, 2, 3}; + const int nnz = 9; + const int m = 4; + + int hCooRowInd[nnz]; + + int hCooRowInd_result[] = {1, 2, 3, 0, 0, 0, 0, 0, 0}; + + // Device memory management + + int *dCsrRowPtr, *dCooRowInd; + + CHECK_CUDA( cudaMalloc((void**) &dCsrRowPtr, (m + 1) * sizeof(int)) ); + CHECK_CUDA( cudaMalloc((void**) &dCooRowInd, nnz * sizeof(int)) ); + + CHECK_CUDA( cudaMemcpy(dCsrRowPtr, hCsrRowPtr, (m + 1) * sizeof(int), cudaMemcpyHostToDevice) ); + + // CUSPARSE APIs + cusparseHandle_t handle = NULL; + CHECK_CUSPARSE(cusparseCreate(&handle)); + + cusparseStatus_t cs = cusparseXcsr2coo(handle, dCsrRowPtr, nnz, m, dCooRowInd, CUSPARSE_INDEX_BASE_ZERO); + + // device result check + CHECK_CUDA( cudaMemcpy(hCooRowInd, dCooRowInd, nnz * sizeof(int), cudaMemcpyDeviceToHost) ); + + int correct = 1; + for (int i = 0; i < nnz; i++) { + if((fabs(hCooRowInd[i] - hCooRowInd_result[i]) > 0.000001)) { + correct = 0; + break; + } + } + + if (correct) + printf("csr2coo test PASSED\n"); + else + printf("csr2coo test FAILED: wrong result\n"); + + // step 6: free resources + + // device memory deallocation + CHECK_CUDA(cudaFree(dCooRowInd)); + CHECK_CUDA(cudaFree(dCsrRowPtr)); + + // destroy + CHECK_CUSPARSE(cusparseDestroy(handle)); + + return EXIT_SUCCESS; +} \ No newline at end of file diff --git 
a/cuda_code/csr_graph_3.cu b/cuda_code/csr_graph_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..884677c9853c65b4ed5ca7207a62189111598110 --- /dev/null +++ b/cuda_code/csr_graph_3.cu @@ -0,0 +1,321 @@ +/* + csr_graph.cu + + Implements CSR Graph. Part of the GGC source code. + + Copyright (C) 2014--2016, The University of Texas at Austin + + See LICENSE.TXT for copyright license. + + Author: Sreepathi Pai +*/ + +/* -*- mode: c++ -*- */ + +#include "gg.h" +#include "csr_graph.h" + +unsigned CSRGraph::init() { + row_start = edge_dst = NULL; + edge_data = NULL; + node_data = NULL; + nnodes = nedges = 0; + device_graph = false; + + return 0; +} + +unsigned CSRGraph::allocOnHost(bool no_edge_data) { + assert(nnodes > 0); + assert(!device_graph); + + if(row_start != NULL) // already allocated + return true; + + size_t mem_usage = ((nnodes + 1) + nedges) * sizeof(index_type) + + (nnodes) * sizeof(node_data_type); + if (!no_edge_data) mem_usage += (nedges) * sizeof(edge_data_type); + + printf("Host memory for graph: %3u MB\n", mem_usage / 1048756); + + row_start = (index_type *) calloc(nnodes+1, sizeof(index_type)); + edge_dst = (index_type *) calloc(nedges, sizeof(index_type)); + if (!no_edge_data) edge_data = (edge_data_type *) calloc(nedges, sizeof(edge_data_type)); + node_data = (node_data_type *) calloc(nnodes, sizeof(node_data_type)); + + return ((no_edge_data || edge_data) && row_start && edge_dst && node_data); +} + +unsigned CSRGraph::allocOnDevice(bool no_edge_data) { + if(edge_dst != NULL) // already allocated + return true; + + assert(edge_dst == NULL); // make sure not already allocated + + check_cuda(cudaMalloc((void **) &edge_dst, nedges * sizeof(index_type))); + check_cuda(cudaMalloc((void **) &row_start, (nnodes+1) * sizeof(index_type))); + + if (!no_edge_data) check_cuda(cudaMalloc((void **) &edge_data, nedges * sizeof(edge_data_type))); + check_cuda(cudaMalloc((void **) &node_data, nnodes * sizeof(node_data_type))); + + device_graph = true; + + assert(edge_dst && (no_edge_data || edge_data) && row_start && node_data); + return true; +} + +void CSRGraphTex::copy_to_gpu(struct CSRGraphTex ©graph) { + copygraph.nnodes = nnodes; + copygraph.nedges = nedges; + + copygraph.allocOnDevice(edge_data == NULL); + + check_cuda(cudaMemcpy(copygraph.edge_dst, edge_dst, nedges * sizeof(index_type), cudaMemcpyHostToDevice)); + if (edge_data != NULL) check_cuda(cudaMemcpy(copygraph.edge_data, edge_data, nedges * sizeof(edge_data_type), cudaMemcpyHostToDevice)); + check_cuda(cudaMemcpy(copygraph.node_data, node_data, nnodes * sizeof(node_data_type), cudaMemcpyHostToDevice)); + + check_cuda(cudaMemcpy(copygraph.row_start, row_start, (nnodes+1) * sizeof(index_type), cudaMemcpyHostToDevice)); +} + +unsigned CSRGraphTex::allocOnDevice(bool no_edge_data) { + if(CSRGraph::allocOnDevice(no_edge_data)) + { + assert(sizeof(index_type) <= 4); // 32-bit only! + assert(sizeof(node_data_type) <= 4); // 32-bit only! 
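+
+      // Bind the three device arrays (edge_dst, row_start, node_data) to
+      // linear texture objects so kernels can read them through the texture
+      // cache; each buffer is described as single-channel 32-bit unsigned
+      // data, which is why the asserts above require 32-bit index and node
+      // data types.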
+ + cudaResourceDesc resDesc; + + memset(&resDesc, 0, sizeof(resDesc)); + resDesc.resType = cudaResourceTypeLinear; + resDesc.res.linear.desc.f = cudaChannelFormatKindUnsigned; + resDesc.res.linear.desc.x = 32; // bits per channel + + cudaTextureDesc texDesc; + memset(&texDesc, 0, sizeof(texDesc)); + texDesc.readMode = cudaReadModeElementType; + + resDesc.res.linear.devPtr = edge_dst; + resDesc.res.linear.sizeInBytes = nedges*sizeof(index_type); + check_cuda(cudaCreateTextureObject(&edge_dst_tx, &resDesc, &texDesc, NULL)); + + resDesc.res.linear.devPtr = row_start; + resDesc.res.linear.sizeInBytes = (nnodes + 1) * sizeof(index_type); + check_cuda(cudaCreateTextureObject(&row_start_tx, &resDesc, &texDesc, NULL)); + + resDesc.res.linear.devPtr = node_data; + resDesc.res.linear.sizeInBytes = (nnodes) * sizeof(node_data_type); + check_cuda(cudaCreateTextureObject(&node_data_tx, &resDesc, &texDesc, NULL)); + + return 1; + } + + return 0; +} + +unsigned CSRGraph::deallocOnHost() { + if(!device_graph) { + free(row_start); + free(edge_dst); + if (edge_data != NULL) free(edge_data); + free(node_data); + } + + return 0; +} +unsigned CSRGraph::deallocOnDevice() { + if(device_graph) { + cudaFree(edge_dst); + if (edge_data != NULL) cudaFree(edge_data); + cudaFree(row_start); + cudaFree(node_data); + } + + return 0; +} + +CSRGraph::CSRGraph() { + init(); +} + +void CSRGraph::progressPrint(unsigned maxii, unsigned ii) { + const unsigned nsteps = 10; + unsigned ineachstep = (maxii / nsteps); + if(ineachstep == 0) ineachstep = 1; + /*if (ii == maxii) { + printf("\t100%%\n"); + } else*/ if (ii % ineachstep == 0) { + int progress = ((size_t) ii * 100) / maxii + 1; + + printf("\t%3d%%\r", progress); + fflush(stdout); + } +} + +unsigned CSRGraph::readFromGR(char file[], bool read_edge_data) { + std::ifstream cfile; + cfile.open(file); + + // copied from GaloisCpp/trunk/src/FileGraph.h + int masterFD = open(file, O_RDONLY); + if (masterFD == -1) { + printf("FileGraph::structureFromFile: unable to open %s.\n", file); + return 1; + } + + struct stat buf; + int f = fstat(masterFD, &buf); + if (f == -1) { + printf("FileGraph::structureFromFile: unable to stat %s.\n", file); + abort(); + } + size_t masterLength = buf.st_size; + + int _MAP_BASE = MAP_PRIVATE; + //#ifdef MAP_POPULATE + // _MAP_BASE |= MAP_POPULATE; + //#endif + + void* m = mmap(0, masterLength, PROT_READ, _MAP_BASE, masterFD, 0); + if (m == MAP_FAILED) { + m = 0; + printf("FileGraph::structureFromFile: mmap failed.\n"); + abort(); + } + + ggc::Timer t("graphreader"); + t.start(); + + //parse file + uint64_t* fptr = (uint64_t*)m; + __attribute__((unused)) uint64_t version = le64toh(*fptr++); + assert(version == 1); + uint64_t sizeEdgeTy = le64toh(*fptr++); + uint64_t numNodes = le64toh(*fptr++); + uint64_t numEdges = le64toh(*fptr++); + uint64_t *outIdx = fptr; + fptr += numNodes; + uint32_t *fptr32 = (uint32_t*)fptr; + uint32_t *outs = fptr32; + fptr32 += numEdges; + if (numEdges % 2) fptr32 += 1; + edge_data_type *edgeData = (edge_data_type *)fptr32; + + // cuda. 
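+  // Layout of the .gr file parsed above: a four-word 64-bit header
+  // (version, sizeEdgeTy, numNodes, numEdges), then numNodes 64-bit
+  // cumulative out-edge indices, then numEdges 32-bit destination ids
+  // (padded to an even count), then the optional edge data.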
+ nnodes = numNodes; + nedges = numEdges; + + printf("nnodes=%d, nedges=%d, sizeEdge=%d.\n", nnodes, nedges, sizeEdgeTy); + allocOnHost(!read_edge_data); + + row_start[0] = 0; + + for (unsigned ii = 0; ii < nnodes; ++ii) { + row_start[ii+1] = le64toh(outIdx[ii]); + // //noutgoing[ii] = le64toh(outIdx[ii]) - le64toh(outIdx[ii - 1]); + index_type degree = row_start[ii+1] - row_start[ii]; + + for (unsigned jj = 0; jj < degree; ++jj) { + unsigned edgeindex = row_start[ii] + jj; + + unsigned dst = le32toh(outs[edgeindex]); + if (dst >= nnodes) printf("\tinvalid edge from %d to %d at index %d(%d).\n", ii, dst, jj, edgeindex); + + edge_dst[edgeindex] = dst; + + if(sizeEdgeTy && read_edge_data) + edge_data[edgeindex] = edgeData[edgeindex]; + } + + progressPrint(nnodes, ii); + } + + cfile.close(); // probably galois doesn't close its file due to mmap. + t.stop(); + + // TODO: fix MB/s + printf("read %lld bytes in %d ms (%0.2f MB/s)\n\r\n", masterLength, t.duration_ms(), (masterLength / 1000.0) / (t.duration_ms())); + + return 0; +} + +unsigned CSRGraph::read(char file[], bool read_edge_data) { + return readFromGR(file, read_edge_data); +} + +void CSRGraph::dealloc() { + if(device_graph) + deallocOnDevice(); + else + deallocOnHost(); +} + +void CSRGraph::copy_to_gpu(struct CSRGraph ©graph) { + copygraph.nnodes = nnodes; + copygraph.nedges = nedges; + + copygraph.allocOnDevice(edge_data == NULL); + + check_cuda(cudaMemcpy(copygraph.edge_dst, edge_dst, nedges * sizeof(index_type), cudaMemcpyHostToDevice)); + if (edge_data != NULL) check_cuda(cudaMemcpy(copygraph.edge_data, edge_data, nedges * sizeof(edge_data_type), cudaMemcpyHostToDevice)); + check_cuda(cudaMemcpy(copygraph.node_data, node_data, nnodes * sizeof(node_data_type), cudaMemcpyHostToDevice)); + + check_cuda(cudaMemcpy(copygraph.row_start, row_start, (nnodes+1) * sizeof(index_type), cudaMemcpyHostToDevice)); +} + +void CSRGraph::copy_to_cpu(struct CSRGraph ©graph) { + assert(device_graph); + + // cpu graph is not allocated + assert(copygraph.nnodes = nnodes); + assert(copygraph.nedges = nedges); + + check_cuda(cudaMemcpy(copygraph.edge_dst, edge_dst, nedges * sizeof(index_type), cudaMemcpyDeviceToHost)); + if (edge_data != NULL) check_cuda(cudaMemcpy(copygraph.edge_data, edge_data, nedges * sizeof(edge_data_type), cudaMemcpyDeviceToHost)); + check_cuda(cudaMemcpy(copygraph.node_data, node_data, nnodes * sizeof(node_data_type), cudaMemcpyDeviceToHost)); + + check_cuda(cudaMemcpy(copygraph.row_start, row_start, (nnodes+1) * sizeof(index_type), cudaMemcpyDeviceToHost)); +} + +struct EdgeIterator { + CSRGraph *g; + index_type node; + index_type s; + + __device__ + EdgeIterator(CSRGraph& g, index_type node) { + this->g = &g; + this->node = node; + } + + __device__ + index_type size() const { + return g->row_start[node + 1] - g->row_start[node]; + } + + __device__ + index_type start() { + s = g->row_start[node]; + return s; + } + + __device__ + index_type end() const { + return g->row_start[node + 1]; + } + + __device__ + void next() { + s++; + } + + __device__ + index_type dst() const { + return g->edge_dst[s]; + } + + __device__ + edge_data_type data() const { + return g->edge_data[s]; + } +}; + diff --git a/cuda_code/csv_reader_1.cu b/cuda_code/csv_reader_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..f8a9122a8e6bab9537f2899503f6d538d995db77 --- /dev/null +++ b/cuda_code/csv_reader_1.cu @@ -0,0 +1,1777 @@ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file csv-reader.cu code to read csv data + * + * CSV Reader + */ + + +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "type_conversion.cuh" +#include "datetime_parser.cuh" + +#include "cudf.h" +#include "utilities/error_utils.hpp" +#include "utilities/trie.cuh" +#include "utilities/type_dispatcher.hpp" +#include "utilities/cudf_utils.h" + +#include "rmm/rmm.h" +#include "rmm/thrust_rmm_allocator.h" +#include "io/comp/io_uncomp.h" + +constexpr size_t max_chunk_bytes = 64*1024*1024; // 64MB + +using std::vector; +using std::string; + +using cu_reccnt_t = unsigned long long int; +using cu_recstart_t = unsigned long long int; + + +/**---------------------------------------------------------------------------* + * @brief Struct used for internal parsing state + *---------------------------------------------------------------------------**/ +typedef struct raw_csv_ { + char * data; // on-device: the raw unprocessed CSV data - loaded as a large char * array + cu_recstart_t* recStart; // on-device: Starting position of the records. + + ParseOptions opts; // options to control parsing behavior + + long num_bytes; // host: the number of bytes in the data + long num_bits; // host: the number of 64-bit bitmaps (different than valid) + gdf_size_type num_records; // host: number of records loaded into device memory, and then number of records to read + // int num_cols; // host: number of columns + int num_active_cols; // host: number of columns that will be return to user. + int num_actual_cols; // host: number of columns in the file --- based on the number of columns in header + vector dtypes; // host: array of dtypes (since gdf_columns are not created until end) + vector col_names; // host: array of column names + bool* h_parseCol; // host : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out. + bool* d_parseCol; // device : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out. + + long byte_range_offset; // offset into the data to start parsing + long byte_range_size; // length of the data of interest to parse + + gdf_size_type header_row; ///< host: Row index of the header + gdf_size_type nrows; ///< host: Number of rows to read. 
-1 for all rows + gdf_size_type skiprows; ///< host: Number of rows to skip from the start + gdf_size_type skipfooter; ///< host: Number of rows to skip from the end + std::vector header; ///< host: Header row data, for parsing column names + string prefix; ///< host: Prepended to column ID if there is no header or input column names + + rmm::device_vector d_trueTrie; // device: serialized trie of values to recognize as true + rmm::device_vector d_falseTrie;// device: serialized trie of values to recognize as false + rmm::device_vector d_naTrie; // device: serialized trie of NA values +} raw_csv_t; + +typedef struct column_data_ { + unsigned long long countFloat; + unsigned long long countDateAndTime; + unsigned long long countString; + unsigned long long countInt8; + unsigned long long countInt16; + unsigned long long countInt32; + unsigned long long countInt64; + gdf_size_type countNULL; +} column_data_t; + +using string_pair = std::pair; + +// +//---------------create and process --------------------------------------------- +// +gdf_error parseArguments(csv_read_arg *args, raw_csv_t *csv); +// gdf_error getColNamesAndTypes(const char **col_names, const char **dtypes, raw_csv_t *d); +gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type); +gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes, + const string& compression, + vector& h_uncomp_data); +gdf_error uploadDataToDevice(const char* h_uncomp_data, size_t h_uncomp_size, raw_csv_t * raw_csv); +gdf_error allocateGdfDataSpace(gdf_column *); +gdf_dtype convertStringToDtype(std::string &dtype); + +#define checkError(error, txt) if ( error != GDF_SUCCESS) { std::cerr << "ERROR: " << error << " in " << txt << std::endl; return error; } + +// +//---------------CUDA Kernel --------------------------------------------- +// + +__device__ int findSetBit(int tid, long num_bits, uint64_t *f_bits, int x); + +gdf_error launch_countRecords(const char* h_data, size_t h_size, raw_csv_t * raw_csv, gdf_size_type& rec_cnt); +gdf_error launch_storeRecordStart(const char* h_data, size_t h_size, raw_csv_t * csvData); +gdf_error launch_dataConvertColumns(raw_csv_t * raw_csv, void** d_gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes, string_pair **str_cols, unsigned long long *); + +gdf_error launch_dataTypeDetection(raw_csv_t * raw_csv, column_data_t* d_columnData); + +__global__ void countRecords(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, cu_reccnt_t* num_records); +__global__ void storeRecordStart(char *data, size_t chunk_offset, + const char terminator, const char quotechar, bool include_first_row, + long num_bytes, long num_bits, cu_reccnt_t* num_records, + cu_recstart_t* recStart); +__global__ void convertCsvToGdf(char *csv, const ParseOptions opts, + gdf_size_type num_records, int num_columns, bool *parseCol, + cu_recstart_t *recStart, gdf_dtype *dtype, void **gdf_data, gdf_valid_type **valid, + string_pair **str_cols, unsigned long long *num_valid); +__global__ void dataTypeDetection(char *raw_csv, const ParseOptions opts, + gdf_size_type num_records, int num_columns, bool *parseCol, + cu_recstart_t *recStart, column_data_t* d_columnData); + +// +//---------------CUDA Valid (8 blocks of 8-bits) Bitmap Kernels --------------------------------------------- +// +__device__ long whichBitmap(long record) { return (record/8); } +__device__ int whichBit(long record) { return (record % 8); } + +__inline__ __device__ void 
validAtomicOR(gdf_valid_type* address, gdf_valid_type val) +{ + int32_t *base_address = (int32_t*)((gdf_valid_type*)address - ((size_t)address & 3)); + int32_t int_val = (int32_t)val << (((size_t) address & 3) * 8); + + atomicOr(base_address, int_val); +} + +__device__ void setBit(gdf_valid_type* address, int bit) { + gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128}; + validAtomicOR(address, bitMask[bit]); +} + + +/**---------------------------------------------------------------------------* + * @brief Estimates the maximum expected length or a row, based on the number + * of columns + * + * If the number of columns is not available, it will return a value large + * enough for most use cases + * + * @param[in] num_columns Number of columns in the CSV file (optional) + * + * @return Estimated maximum size of a row, in bytes + *---------------------------------------------------------------------------**/ + constexpr size_t calculateMaxRowSize(int num_columns=0) noexcept { + constexpr size_t max_row_bytes = 16*1024; // 16KB + constexpr size_t column_bytes = 64; + constexpr size_t base_padding = 1024; // 1KB + if (num_columns == 0){ + // Use flat size if the number of columns is not known + return max_row_bytes; + } + else { + // Expand the size based on the number of columns, if available + return base_padding + num_columns * column_bytes; + } +} +/** +* @brief Removes the first and Last quote in the string +*/ +string removeQuotes(string str, char quotechar) { + // Exclude first and last quotation char + const size_t first_quote = str.find(quotechar); + if (first_quote != string::npos) { + str.erase(first_quote, 1); + } + const size_t last_quote = str.rfind(quotechar); + if (last_quote != string::npos) { + str.erase(last_quote, 1); + } + + return str; +} + +/** + * @brief Parse the first row to set the column names in the raw_csv parameter + * + * The first row can be either the header row, or the first data row + * + * @param[in,out] raw_csv Structure containing the csv parsing parameters + * and intermediate results + * + * @return gdf_error with error code on failure, otherwise GDF_SUCCESS +*/ +gdf_error setColumnNamesFromCsv(raw_csv_t* raw_csv) { + vector first_row = raw_csv->header; + // No header, read the first data row + if (first_row.empty()) { + cu_recstart_t first_row_len{}; + // If file only contains one row, raw_csv->recStart[1] is not valid + if (raw_csv->num_records > 1) { + CUDA_TRY(cudaMemcpy(&first_row_len, raw_csv->recStart + 1, sizeof(cu_recstart_t), cudaMemcpyDefault)); + } + else { + // File has one row - use the file size for the row size + first_row_len = raw_csv->num_bytes / sizeof(char); + } + first_row.resize(first_row_len); + CUDA_TRY(cudaMemcpy(first_row.data(), raw_csv->data, first_row_len * sizeof(char), cudaMemcpyDefault)); + } + + int num_cols = 0; + + bool quotation = false; + for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) { + // Flip the quotation flag if current character is a quotechar + if(first_row[pos] == raw_csv->opts.quotechar) { + quotation = !quotation; + } + // Check if end of a column/row + else if (pos == first_row.size() - 1 || + (!quotation && first_row[pos] == raw_csv->opts.delimiter)) { + // This is the header, add the column name + if (raw_csv->header_row >= 0) { + // Include the current character, in case the line is not terminated + int col_name_len = pos - prev + 1; + // Exclude the delimiter/terminator is present + if (first_row[pos] == raw_csv->opts.delimiter || first_row[pos] == raw_csv->opts.terminator) { + 
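+            // pos is standing on the delimiter/terminator itself here, so it
+            // must not be counted as part of the column name.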
--col_name_len; + } + // Also exclude '\r' character at the end of the column name if it's part of the terminator + if (col_name_len > 0 && + raw_csv->opts.terminator == '\n' && + first_row[pos] == '\n' && + first_row[pos - 1] == '\r') { + --col_name_len; + } + + const string new_col_name(first_row.data() + prev, col_name_len); + raw_csv->col_names.push_back(removeQuotes(new_col_name, raw_csv->opts.quotechar)); + } + else { + // This is the first data row, add the automatically generated name + raw_csv->col_names.push_back(raw_csv->prefix + std::to_string(num_cols)); + } + num_cols++; + + // Skip adjacent delimiters if delim_whitespace is set + while (raw_csv->opts.multi_delimiter && + pos < first_row.size() && + first_row[pos] == raw_csv->opts.delimiter && + first_row[pos + 1] == raw_csv->opts.delimiter) { + ++pos; + } + prev = pos + 1; + } + } + return GDF_SUCCESS; +} + +/**---------------------------------------------------------------------------* + * @brief Read in a CSV file, extract all fields and return + * a GDF (array of gdf_columns) + * + * @param[in,out] args Structure containing both the the input arguments + * and the returned data + * + * @return gdf_error + *---------------------------------------------------------------------------**/ +gdf_error read_csv(csv_read_arg *args) +{ + gdf_error error = gdf_error::GDF_SUCCESS; + + //----------------------------------------------------------------------------- + // create the CSV data structure - this will be filled in as the CSV data is processed. + // Done first to validate data types + raw_csv_t * raw_csv = new raw_csv_t(); + // error = parseArguments(args, raw_csv); + raw_csv->num_actual_cols = args->num_cols; + raw_csv->num_active_cols = args->num_cols; + raw_csv->num_records = 0; + + raw_csv->header_row = args->header; + raw_csv->skiprows = args->skiprows; + raw_csv->skipfooter = args->skipfooter; + raw_csv->nrows = args->nrows; + raw_csv->prefix = args->prefix == nullptr ? 
"" : string(args->prefix); + + if (args->delim_whitespace) { + raw_csv->opts.delimiter = ' '; + raw_csv->opts.multi_delimiter = true; + } else { + raw_csv->opts.delimiter = args->delimiter; + raw_csv->opts.multi_delimiter = false; + } + if (args->windowslinetermination) { + raw_csv->opts.terminator = '\n'; + } else { + raw_csv->opts.terminator = args->lineterminator; + } + if (args->quotechar != '\0' && args->quoting != QUOTE_NONE) { + raw_csv->opts.quotechar = args->quotechar; + raw_csv->opts.keepquotes = false; + raw_csv->opts.doublequote = args->doublequote; + } else { + raw_csv->opts.quotechar = '\0'; + raw_csv->opts.keepquotes = true; + raw_csv->opts.doublequote = false; + } + raw_csv->opts.skipblanklines = args->skip_blank_lines; + raw_csv->opts.comment = args->comment; + raw_csv->opts.dayfirst = args->dayfirst; + raw_csv->opts.decimal = args->decimal; + raw_csv->opts.thousands = args->thousands; + if (raw_csv->opts.decimal == raw_csv->opts.delimiter) { + checkError(GDF_INVALID_API_CALL, "Decimal point cannot be the same as the delimiter"); + } + if (raw_csv->opts.thousands == raw_csv->opts.delimiter) { + checkError(GDF_INVALID_API_CALL, "Thousands separator cannot be the same as the delimiter"); + } + + string compression_type; + error = inferCompressionType(args->compression, args->filepath_or_buffer, compression_type); + checkError(error, "call to inferCompressionType"); + + raw_csv->byte_range_offset = args->byte_range_offset; + raw_csv->byte_range_size = args->byte_range_size; + if (raw_csv->byte_range_offset > 0 || raw_csv->byte_range_size > 0) { + if (raw_csv->nrows >= 0 || raw_csv->skiprows > 0 || raw_csv->skipfooter > 0) { + checkError(GDF_INVALID_API_CALL, + "Cannot manually limit rows to be read when using the byte range parameter"); + } + if (compression_type != "none") { + checkError(GDF_INVALID_API_CALL, + "Cannot read compressed input when using the byte range parameter"); + } + } + + // Handle user-defined booleans values, whereby field data is substituted + // with true/false values; CUDF booleans are int types of 0 or 1 + vector true_values{"True", "TRUE"}; + if (args->true_values != nullptr && args->num_true_values > 0) { + for (int i = 0; i < args->num_true_values; ++i) { + true_values.emplace_back(args->true_values[i]); + } + } + raw_csv->d_trueTrie = createSerializedTrie(true_values); + raw_csv->opts.trueValuesTrie = raw_csv->d_trueTrie.data().get(); + + vector false_values{"False", "FALSE"}; + if (args->false_values != nullptr && args->num_false_values > 0) { + for (int i = 0; i < args->num_false_values; ++i) { + false_values.emplace_back(args->false_values[i]); + } + } + raw_csv->d_falseTrie = createSerializedTrie(false_values); + raw_csv->opts.falseValuesTrie = raw_csv->d_falseTrie.data().get(); + + if (args->na_filter && + (args->keep_default_na || (args->na_values != nullptr && args->num_na_values > 0))) { + vector na_values{ + "#N/A", "#N/A N/A", "#NA", "-1.#IND", + "-1.#QNAN", "-NaN", "-nan", "1.#IND", + "1.#QNAN", "N/A", "NA", "NULL", + "NaN", "n/a", "nan", "null"}; + if(!args->keep_default_na){ + na_values.clear(); + } + + if (args->na_values != nullptr && args->num_na_values > 0) { + for (int i = 0; i < args->num_na_values; ++i) { + na_values.emplace_back(args->na_values[i]); + } + } + + raw_csv->d_naTrie = createSerializedTrie(na_values); + raw_csv->opts.naValuesTrie = raw_csv->d_naTrie.data().get(); + } + + //----------------------------------------------------------------------------- + // memory map in the data + void * map_data = NULL; + size_t 
map_size = 0; + size_t map_offset = 0; + int fd = 0; + if (args->input_data_form == gdf_csv_input_form::FILE_PATH) + { + fd = open(args->filepath_or_buffer, O_RDONLY ); + if (fd < 0) { close(fd); checkError(GDF_FILE_ERROR, "Error opening file"); } + + struct stat st{}; + if (fstat(fd, &st)) { close(fd); checkError(GDF_FILE_ERROR, "cannot stat file"); } + + const auto file_size = st.st_size; + const auto page_size = sysconf(_SC_PAGESIZE); + + if (args->byte_range_offset >= (size_t)file_size) { + close(fd); + checkError(GDF_INVALID_API_CALL, "The byte_range offset is larger than the file size"); + } + + // Have to align map offset to page size + map_offset = (args->byte_range_offset/page_size)*page_size; + + // Set to rest-of-the-file size, will reduce based on the byte range size + raw_csv->num_bytes = map_size = file_size - map_offset; + + // Include the page padding in the mapped size + const size_t page_padding = args->byte_range_offset - map_offset; + const size_t padded_byte_range_size = raw_csv->byte_range_size + page_padding; + + if (raw_csv->byte_range_size != 0 && padded_byte_range_size < map_size) { + // Need to make sure that w/ padding we don't overshoot the end of file + map_size = min(padded_byte_range_size + calculateMaxRowSize(args->num_cols), map_size); + + } + + // Ignore page padding for parsing purposes + raw_csv->num_bytes = map_size - page_padding; + + map_data = mmap(0, map_size, PROT_READ, MAP_PRIVATE, fd, map_offset); + + if (map_data == MAP_FAILED || map_size==0) { close(fd); checkError(GDF_C_ERROR, "Error mapping file"); } + } + else if (args->input_data_form == gdf_csv_input_form::HOST_BUFFER) + { + map_data = (void *)args->filepath_or_buffer; + raw_csv->num_bytes = map_size = args->buffer_size; + } + else { checkError(GDF_C_ERROR, "invalid input type"); } + + const char* h_uncomp_data; + size_t h_uncomp_size = 0; + // Used when the input data is compressed, to ensure the allocated uncompressed data is freed + vector h_uncomp_data_owner; + if (compression_type == "none") { + // Do not use the owner vector here to avoid copying the whole file to the heap + h_uncomp_data = (const char*)map_data + (args->byte_range_offset - map_offset); + h_uncomp_size = raw_csv->num_bytes; + } + else { + error = getUncompressedHostData( (const char *)map_data, map_size, compression_type, h_uncomp_data_owner); + checkError(error, "call to getUncompressedHostData"); + h_uncomp_data = h_uncomp_data_owner.data(); + h_uncomp_size = h_uncomp_data_owner.size(); + } + assert(h_uncomp_data != nullptr); + assert(h_uncomp_size != 0); + + error = launch_countRecords(h_uncomp_data, h_uncomp_size, raw_csv, raw_csv->num_records); + checkError(error, "call to record number of rows"); + + //----------------------------------------------------------------------------- + //-- Allocate space to hold the record starting points + const bool last_line_terminated = (h_uncomp_data[h_uncomp_size - 1] == raw_csv->opts.terminator); + // If the last line is not terminated, allocate space for the EOF entry (added later) + const gdf_size_type record_start_count = raw_csv->num_records + (last_line_terminated ? 
0 : 1); + RMM_TRY( RMM_ALLOC(&raw_csv->recStart, sizeof(cu_recstart_t) * record_start_count, 0) ); + + //----------------------------------------------------------------------------- + //-- Scan data and set the starting positions + error = launch_storeRecordStart(h_uncomp_data, h_uncomp_size, raw_csv); + checkError(error, "call to record initial position store"); + + // Previous kernel stores the record pinput_file.typeositions as encountered by all threads + // Sort the record positions as subsequent processing may require filtering + // certain rows or other processing on specific records + thrust::sort(rmm::exec_policy()->on(0), raw_csv->recStart, raw_csv->recStart + raw_csv->num_records); + + // Currently, ignoring lineterminations within quotes is handled by recording + // the records of both, and then filtering out the records that is a quotechar + // or a linetermination within a quotechar pair. The future major refactoring + // of csv_reader and its kernels will probably use a different tactic. + if (raw_csv->opts.quotechar != '\0') { + vector h_rec_starts(raw_csv->num_records); + const size_t rec_start_size = sizeof(cu_recstart_t) * (h_rec_starts.size()); + CUDA_TRY( cudaMemcpy(h_rec_starts.data(), raw_csv->recStart, rec_start_size, cudaMemcpyDeviceToHost) ); + + auto recCount = raw_csv->num_records; + + bool quotation = false; + for (gdf_size_type i = 1; i < raw_csv->num_records; ++i) { + if (h_uncomp_data[h_rec_starts[i] - 1] == raw_csv->opts.quotechar) { + quotation = !quotation; + h_rec_starts[i] = raw_csv->num_bytes; + recCount--; + } + else if (quotation) { + h_rec_starts[i] = raw_csv->num_bytes; + recCount--; + } + } + + CUDA_TRY( cudaMemcpy(raw_csv->recStart, h_rec_starts.data(), rec_start_size, cudaMemcpyHostToDevice) ); + thrust::sort(rmm::exec_policy()->on(0), raw_csv->recStart, raw_csv->recStart + raw_csv->num_records); + raw_csv->num_records = recCount; + } + + if (!last_line_terminated){ + // Add the EOF as the last record when the terminator is missing in the last line + const cu_recstart_t eof_offset = h_uncomp_size; + CUDA_TRY(cudaMemcpy(raw_csv->recStart + raw_csv->num_records, &eof_offset, sizeof(cu_recstart_t), cudaMemcpyDefault)); + // Update the record count + ++raw_csv->num_records; + } + + error = uploadDataToDevice(h_uncomp_data, h_uncomp_size, raw_csv); + if (error != GDF_SUCCESS) { + return error; + } + + //----------------------------------------------------------------------------- + //-- Populate the header + + // Check if the user gave us a list of column names + if(args->names == nullptr) { + + error = setColumnNamesFromCsv(raw_csv); + if (error != GDF_SUCCESS) { + return error; + } + const int h_num_cols = raw_csv->col_names.size(); + + // Allocating a boolean array that will use to state if a column needs to read or filtered. 
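+    // One flag per column found in the header; entries set to false further
+    // down (non-mangled duplicate names, or columns excluded via use_cols)
+    // are filtered out by the conversion kernels.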
+ raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (h_num_cols)); + RMM_TRY( RMM_ALLOC((void**)&raw_csv->d_parseCol,(sizeof(bool) * (h_num_cols)),0 ) ); + for (int i = 0; ih_parseCol[i]=true; + + // Rename empty column names to "Unnamed: col_index" + for (size_t col_idx = 0; col_idx < raw_csv->col_names.size(); ++col_idx) { + if (raw_csv->col_names[col_idx].empty()) { + raw_csv->col_names[col_idx] = string("Unnamed: ") + std::to_string(col_idx); + } + } + + int h_dup_cols_removed = 0; + // Looking for duplicates + for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){ + bool found_dupe = false; + for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){ + if (*it==*it2){ + found_dupe=true; + break; + } + } + if(found_dupe){ + int count=1; + for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){ + if (*it==*it2){ + if(args->mangle_dupe_cols){ + // Replace all the duplicates of column X with X.1,X.2,... First appearance stays as X. + std::string newColName = *it2; + newColName += "." + std::to_string(count); + count++; + *it2 = newColName; + } else{ + // All duplicate fields will be ignored. + int pos=std::distance(raw_csv->col_names.begin(), it2); + raw_csv->h_parseCol[pos]=false; + h_dup_cols_removed++; + } + } + } + } + } + + raw_csv->num_actual_cols = h_num_cols; // Actual number of columns in the CSV file + raw_csv->num_active_cols = h_num_cols-h_dup_cols_removed; // Number of fields that need to be processed based on duplicatation fields + + CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (h_num_cols), cudaMemcpyHostToDevice)); + } + else { + raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (args->num_cols)); + RMM_TRY( RMM_ALLOC((void**)&raw_csv->d_parseCol,(sizeof(bool) * (args->num_cols)),0 ) ); + + for (int i = 0; inum_actual_cols; i++){ + raw_csv->h_parseCol[i]=true; + std::string col_name = args->names[i]; + raw_csv->col_names.push_back(col_name); + + } + CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (args->num_cols), cudaMemcpyHostToDevice)); + } + + // User can give + if (args->use_cols_int!=NULL || args->use_cols_char!=NULL){ + if(args->use_cols_int!=NULL){ + for (int i = 0; inum_actual_cols; i++) + raw_csv->h_parseCol[i]=false; + for(int i=0; i < args->use_cols_int_len; i++){ + int pos = args->use_cols_int[i]; + raw_csv->h_parseCol[pos]=true; + } + raw_csv->num_active_cols = args->use_cols_int_len; + }else{ + for (int i = 0; inum_actual_cols; i++) + raw_csv->h_parseCol[i]=false; + int countFound=0; + for(int i=0; i < args->use_cols_char_len; i++){ + std::string colName(args->use_cols_char[i]); + for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){ + if(colName==*it){ + countFound++; + int pos=std::distance(raw_csv->col_names.begin(), it); + raw_csv->h_parseCol[pos]=true; + break; + } + } + } + raw_csv->num_active_cols = countFound; + } + CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (raw_csv->num_actual_cols), cudaMemcpyHostToDevice)); + } + + + //----------------------------------------------------------------------------- + //--- done with host data + if (args->input_data_form == gdf_csv_input_form::FILE_PATH) + { + close(fd); + munmap(map_data, map_size); + } + + + //----------------------------------------------------------------------------- + //--- Auto detect types of the vectors + + if(args->dtype==NULL){ + if (raw_csv->num_records == 0) { + checkError(GDF_INVALID_API_CALL, "read_csv: no data available 
for data type inference"); + } + + column_data_t *d_ColumnData,*h_ColumnData; + + h_ColumnData = (column_data_t*)malloc(sizeof(column_data_t) * (raw_csv->num_active_cols)); + RMM_TRY( RMM_ALLOC((void**)&d_ColumnData,(sizeof(column_data_t) * (raw_csv->num_active_cols)),0 ) ); + + CUDA_TRY( cudaMemset(d_ColumnData, 0, (sizeof(column_data_t) * (raw_csv->num_active_cols)) ) ) ; + + launch_dataTypeDetection(raw_csv, d_ColumnData); + + CUDA_TRY( cudaMemcpy(h_ColumnData,d_ColumnData, sizeof(column_data_t) * (raw_csv->num_active_cols), cudaMemcpyDeviceToHost)); + + vector d_detectedTypes; // host: array of dtypes (since gdf_columns are not created until end) + + raw_csv->dtypes.clear(); + + for(int col = 0; col < raw_csv->num_active_cols; col++){ + unsigned long long countInt = h_ColumnData[col].countInt8+h_ColumnData[col].countInt16+ + h_ColumnData[col].countInt32+h_ColumnData[col].countInt64; + + if (h_ColumnData[col].countNULL == raw_csv->num_records){ + d_detectedTypes.push_back(GDF_INT8); // Entire column is NULL. Allocating the smallest amount of memory + } else if(h_ColumnData[col].countString>0L){ + d_detectedTypes.push_back(GDF_CATEGORY); // For auto-detection, we are currently not supporting strings. + } else if(h_ColumnData[col].countDateAndTime>0L){ + d_detectedTypes.push_back(GDF_DATE64); + } else if(h_ColumnData[col].countFloat > 0L || + (h_ColumnData[col].countFloat==0L && countInt >0L && h_ColumnData[col].countNULL >0L) ) { + // The second condition has been added to conform to PANDAS which states that a colum of + // integers with a single NULL record need to be treated as floats. + d_detectedTypes.push_back(GDF_FLOAT64); + } + else { + d_detectedTypes.push_back(GDF_INT64); + } + } + + raw_csv->dtypes=d_detectedTypes; + + free(h_ColumnData); + RMM_TRY( RMM_FREE( d_ColumnData, 0 ) ); + } + else{ + for ( int x = 0; x < raw_csv->num_actual_cols; x++) { + + std::string temp_type = args->dtype[x]; + gdf_dtype col_dtype = convertStringToDtype( temp_type ); + + if (col_dtype == GDF_invalid) + return GDF_UNSUPPORTED_DTYPE; + + raw_csv->dtypes.push_back(col_dtype); + } + } + + + //----------------------------------------------------------------------------- + //--- allocate space for the results + gdf_column **cols = (gdf_column **)malloc( sizeof(gdf_column *) * raw_csv->num_active_cols); + + void **d_data,**h_data; + gdf_valid_type **d_valid,**h_valid; + unsigned long long *d_valid_count; + gdf_dtype *d_dtypes,*h_dtypes; + + + + + + h_dtypes = (gdf_dtype*)malloc ( sizeof(gdf_dtype)* (raw_csv->num_active_cols)); + h_data = (void**)malloc ( sizeof(void*)* (raw_csv->num_active_cols)); + h_valid = (gdf_valid_type**)malloc ( sizeof(gdf_valid_type*)* (raw_csv->num_active_cols)); + + RMM_TRY( RMM_ALLOC((void**)&d_dtypes, (sizeof(gdf_dtype) * raw_csv->num_active_cols), 0 ) ); + RMM_TRY( RMM_ALLOC((void**)&d_data, (sizeof(void *) * raw_csv->num_active_cols), 0 ) ); + RMM_TRY( RMM_ALLOC((void**)&d_valid, (sizeof(gdf_valid_type *) * raw_csv->num_active_cols), 0 ) ); + RMM_TRY( RMM_ALLOC((void**)&d_valid_count, (sizeof(unsigned long long) * raw_csv->num_active_cols), 0 ) ); + CUDA_TRY( cudaMemset(d_valid_count, 0, (sizeof(unsigned long long) * raw_csv->num_active_cols)) ); + + + int stringColCount=0; + for (int col = 0; col < raw_csv->num_active_cols; col++) { + if(raw_csv->dtypes[col]==gdf_dtype::GDF_STRING) + stringColCount++; + } + + string_pair **h_str_cols = NULL, **d_str_cols = NULL; + + if (stringColCount > 0 ) { + h_str_cols = (string_pair**) malloc ((sizeof(string_pair *) * stringColCount)); 
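+        // One device array of pointer/length index pairs is allocated per
+        // GDF_STRING column; the conversion kernel fills these pairs and
+        // NVStrings::create_from_index() later builds the final string
+        // column from them.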
+ RMM_TRY( RMM_ALLOC((void**)&d_str_cols, (sizeof(string_pair *) * stringColCount), 0) ); + + for (int col = 0; col < stringColCount; col++) { + RMM_TRY( RMM_ALLOC((void**)(h_str_cols + col), sizeof(string_pair) * (raw_csv->num_records), 0) ); + } + + CUDA_TRY(cudaMemcpy(d_str_cols, h_str_cols, sizeof(string_pair *) * stringColCount, cudaMemcpyHostToDevice)); + } + + for (int acol = 0,col=-1; acol < raw_csv->num_actual_cols; acol++) { + if(raw_csv->h_parseCol[acol]==false) + continue; + col++; + + gdf_column *gdf = (gdf_column *)malloc(sizeof(gdf_column) * 1); + + gdf->size = raw_csv->num_records; + gdf->dtype = raw_csv->dtypes[col]; + gdf->null_count = 0; // will be filled in later + + //--- column name + std::string str = raw_csv->col_names[acol]; + int len = str.length() + 1; + gdf->col_name = (char *)malloc(sizeof(char) * len); + memcpy(gdf->col_name, str.c_str(), len); + gdf->col_name[len -1] = '\0'; + + error = allocateGdfDataSpace(gdf); + if (error != GDF_SUCCESS) { + return error; + } + + cols[col] = gdf; + h_dtypes[col] = gdf->dtype; + h_data[col] = gdf->data; + h_valid[col] = gdf->valid; + } + + CUDA_TRY( cudaMemcpy(d_dtypes,h_dtypes, sizeof(gdf_dtype) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice)); + CUDA_TRY( cudaMemcpy(d_data,h_data, sizeof(void*) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice)); + CUDA_TRY( cudaMemcpy(d_valid,h_valid, sizeof(gdf_valid_type*) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice)); + + free(h_dtypes); + free(h_valid); + free(h_data); + free(raw_csv->h_parseCol); + + if (raw_csv->num_records != 0) { + error = launch_dataConvertColumns(raw_csv, d_data, d_valid, d_dtypes, d_str_cols, d_valid_count); + if (error != GDF_SUCCESS) { + return error; + } + // Sync with the default stream, just in case create_from_index() is asynchronous + CUDA_TRY(cudaStreamSynchronize(0)); + } + + // Free buffers that are not used from this point on + RMM_TRY( RMM_FREE( d_data, 0 ) ); + RMM_TRY( RMM_FREE ( raw_csv->recStart, 0) ); + RMM_TRY( RMM_FREE( raw_csv->d_parseCol, 0 ) ); + RMM_TRY( RMM_FREE( d_dtypes, 0 ) ); + RMM_TRY( RMM_FREE( d_valid, 0 ) ); + + if (raw_csv->num_records != 0) { + stringColCount=0; + for (int col = 0; col < raw_csv->num_active_cols; col++) { + + gdf_column *gdf = cols[col]; + + if (gdf->dtype != gdf_dtype::GDF_STRING) + continue; + + NVStrings* const stringCol = NVStrings::create_from_index(h_str_cols[stringColCount],size_t(raw_csv->num_records)); + RMM_TRY( RMM_FREE( h_str_cols [stringColCount], 0 ) ); + + if ((raw_csv->opts.quotechar != '\0') && (raw_csv->opts.doublequote==true)) { + // In PANDAS, default of enabling doublequote for two consecutive + // quotechar in quote fields results in reduction to single + const string quotechar(1, raw_csv->opts.quotechar); + const string doublequotechar(2, raw_csv->opts.quotechar); + gdf->data = stringCol->replace(doublequotechar.c_str(), quotechar.c_str()); + NVStrings::destroy(stringCol); + } + else { + gdf->data = stringCol; + } + + stringColCount++; + } + + vector h_valid_count(raw_csv->num_active_cols); + CUDA_TRY( cudaMemcpy(h_valid_count.data(), d_valid_count, sizeof(unsigned long long) * h_valid_count.size(), cudaMemcpyDeviceToHost)); + + //--- set the null count + for (size_t col = 0; col < h_valid_count.size(); col++) { + cols[col]->null_count = raw_csv->num_records - h_valid_count[col]; + } + } + + // Free up remaining internal buffers + RMM_TRY( RMM_FREE( d_valid_count, 0 ) ); + + RMM_TRY( RMM_FREE ( raw_csv->data, 0) ); + + args->data = cols; + args->num_cols_out = 
raw_csv->num_active_cols; + args->num_rows_out = raw_csv->num_records; + + delete raw_csv; + return error; +} + + + +/* + * What is passed in is the data type as a string, need to convert that into gdf_dtype enum + */ +gdf_dtype convertStringToDtype(std::string &dtype) { + + if (dtype.compare( "str") == 0) return GDF_STRING; + if (dtype.compare( "date") == 0) return GDF_DATE64; + if (dtype.compare( "date32") == 0) return GDF_DATE32; + if (dtype.compare( "date64") == 0) return GDF_DATE64; + if (dtype.compare( "timestamp") == 0) return GDF_TIMESTAMP; + if (dtype.compare( "category") == 0) return GDF_CATEGORY; + if (dtype.compare( "float") == 0) return GDF_FLOAT32; + if (dtype.compare( "float32") == 0) return GDF_FLOAT32; + if (dtype.compare( "float64") == 0) return GDF_FLOAT64; + if (dtype.compare( "double") == 0) return GDF_FLOAT64; + if (dtype.compare( "short") == 0) return GDF_INT16; + if (dtype.compare( "int") == 0) return GDF_INT32; + if (dtype.compare( "int32") == 0) return GDF_INT32; + if (dtype.compare( "int64") == 0) return GDF_INT64; + if (dtype.compare( "long") == 0) return GDF_INT64; + + return GDF_invalid; +} + + +/**---------------------------------------------------------------------------* + * @brief Infer the compression type from the compression parameter and + * the input file name + * + * Returns "none" if the input is not compressed. + * + * @param[in] compression_arg Input string that is potentially describing + * the compression type. Can also be nullptr, "none", or "infer" + * @param[in] filepath path + name of the input file + * @param[out] compression_type String describing the inferred compression type + * + * @return gdf_error with error code on failure, otherwise GDF_SUCCESS + *---------------------------------------------------------------------------**/ +gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type) +{ + if (compression_arg && 0 == strcasecmp(compression_arg, "none")) { + compression_arg = nullptr; + } + if (compression_arg && 0 == strcasecmp(compression_arg, "infer")) + { + const char *file_ext = strrchr(filepath, '.'); + compression_arg = nullptr; + if (file_ext) + { + if (!strcasecmp(file_ext, ".gz")) + compression_arg = "gzip"; + else if (!strcasecmp(file_ext, ".zip")) + compression_arg = "zip"; + else if (!strcasecmp(file_ext, ".bz2")) + compression_arg = "bz2"; + else if (!strcasecmp(file_ext, ".xz")) + compression_arg = "xz"; + else { + // TODO: return error here + } + } + } + compression_type = compression_arg == nullptr? "none":string(compression_arg); + + return GDF_SUCCESS; +} + + +/**---------------------------------------------------------------------------* + * @brief Uncompresses the input data and stores the allocated result into + * a vector. 
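+ *
+ * Recognized compression types are "gzip", "zip", "bz2" and "xz"; any other
+ * value falls back to letting the decompression routine infer the stream type.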
+ * + * @param[in] h_data Pointer to the csv data in host memory + * @param[in] num_bytes Size of the input data, in bytes + * @param[in] compression String describing the compression type + * @param[out] h_uncomp_data Vector containing the output uncompressed data + * + * @return gdf_error with error code on failure, otherwise GDF_SUCCESS + *---------------------------------------------------------------------------**/ +gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes, const string& compression, vector& h_uncomp_data) +{ + int comp_type = IO_UNCOMP_STREAM_TYPE_INFER; + if (compression == "gzip") + comp_type = IO_UNCOMP_STREAM_TYPE_GZIP; + else if (compression == "zip") + comp_type = IO_UNCOMP_STREAM_TYPE_ZIP; + else if (compression == "bz2") + comp_type = IO_UNCOMP_STREAM_TYPE_BZIP2; + else if (compression == "xz") + comp_type = IO_UNCOMP_STREAM_TYPE_XZ; + + return io_uncompress_single_h2d(h_data, num_bytes, comp_type, h_uncomp_data); +} + + +/**---------------------------------------------------------------------------* + * @brief Uploads the relevant segment of the input csv data onto the GPU. + * + * Only rows that need to be read are copied to the GPU, based on parameters + * like nrows, skipheader, skipfooter. + * Also updates the array of record starts to match the device data offset. + * + * @param[in] h_uncomp_data Pointer to the uncompressed csv data in host memory + * @param[in] h_uncomp_size Size of the input data, in bytes + * @param[in,out] raw_csv Structure containing the csv parsing parameters + * and intermediate results + * + * @return gdf_error with error code on failure, otherwise GDF_SUCCESS + *---------------------------------------------------------------------------**/ +gdf_error uploadDataToDevice(const char *h_uncomp_data, size_t h_uncomp_size, + raw_csv_t *raw_csv) { + + // Exclude the rows that are to be skipped from the start + GDF_REQUIRE(raw_csv->num_records > raw_csv->skiprows, GDF_INVALID_API_CALL); + const auto first_row = raw_csv->skiprows; + raw_csv->num_records = raw_csv->num_records - first_row; + + std::vector h_rec_starts(raw_csv->num_records); + CUDA_TRY(cudaMemcpy(h_rec_starts.data(), raw_csv->recStart + first_row, + sizeof(cu_recstart_t) * h_rec_starts.size(), + cudaMemcpyDefault)); + + // Trim lines that are outside range, but keep one greater for the end offset + if (raw_csv->byte_range_size != 0) { + auto it = h_rec_starts.end() - 1; + while (it >= h_rec_starts.begin() && + *it > cu_recstart_t(raw_csv->byte_range_size)) { + --it; + } + if ((it + 2) < h_rec_starts.end()) { + h_rec_starts.erase(it + 2, h_rec_starts.end()); + } + } + + // Discard only blank lines, only fully comment lines, or both. + // If only handling one of them, ensure it doesn't match against \0 as we do + // not want certain scenarios to be filtered out (end-of-file) + if (raw_csv->opts.skipblanklines || raw_csv->opts.comment != '\0') { + const auto match1 = raw_csv->opts.skipblanklines ? raw_csv->opts.terminator + : raw_csv->opts.comment; + const auto match2 = raw_csv->opts.comment != '\0' ? 
raw_csv->opts.comment + : match1; + h_rec_starts.erase( + std::remove_if(h_rec_starts.begin(), h_rec_starts.end(), + [&](cu_recstart_t i) { + return (h_uncomp_data[i] == match1 || + h_uncomp_data[i] == match2); + }), + h_rec_starts.end()); + } + + raw_csv->num_records = h_rec_starts.size(); + + // Exclude the rows before the header row (inclusive) + // But copy the header data for parsing the column names later (if necessary) + if (raw_csv->header_row >= 0) { + raw_csv->header.assign( + h_uncomp_data + h_rec_starts[raw_csv->header_row], + h_uncomp_data + h_rec_starts[raw_csv->header_row + 1]); + h_rec_starts.erase(h_rec_starts.begin(), + h_rec_starts.begin() + raw_csv->header_row + 1); + raw_csv->num_records = h_rec_starts.size(); + } + + // Exclude the rows that exceed past the requested number + if (raw_csv->nrows >= 0 && raw_csv->nrows < raw_csv->num_records) { + h_rec_starts.resize(raw_csv->nrows + 1); // include end offset + raw_csv->num_records = h_rec_starts.size(); + } + + // Exclude the rows that are to be skipped from the end + if (raw_csv->skipfooter > 0) { + h_rec_starts.resize(h_rec_starts.size() - raw_csv->skipfooter); + raw_csv->num_records = h_rec_starts.size(); + } + + // Check that there is actual data to parse + GDF_REQUIRE(raw_csv->num_records > 0, GDF_INVALID_API_CALL); + + const auto start_offset = h_rec_starts.front(); + const auto end_offset = h_rec_starts.back(); + raw_csv->num_bytes = end_offset - start_offset; + assert(raw_csv->num_bytes <= h_uncomp_size); + raw_csv->num_bits = (raw_csv->num_bytes + 63) / 64; + + // Resize and upload the rows of interest + RMM_TRY(RMM_REALLOC(&raw_csv->recStart, + sizeof(cu_recstart_t) * raw_csv->num_records, 0)); + CUDA_TRY(cudaMemcpy(raw_csv->recStart, h_rec_starts.data(), + sizeof(cu_recstart_t) * raw_csv->num_records, + cudaMemcpyDefault)); + + // Upload the raw data that is within the rows of interest + RMM_TRY(RMM_ALLOC(&raw_csv->data, raw_csv->num_bytes, 0)); + CUDA_TRY(cudaMemcpy(raw_csv->data, h_uncomp_data + start_offset, + raw_csv->num_bytes, cudaMemcpyHostToDevice)); + + // Adjust row start positions to account for the data subcopy + thrust::transform(rmm::exec_policy()->on(0), raw_csv->recStart, + raw_csv->recStart + raw_csv->num_records, + thrust::make_constant_iterator(start_offset), + raw_csv->recStart, thrust::minus()); + + // The array of row offsets includes EOF + // reduce the number of records by one to exclude it from the row count + raw_csv->num_records--; + + return GDF_SUCCESS; +} + + +/**---------------------------------------------------------------------------* + * @brief Allocates memory for a column's parsed output and its validity bitmap + * + * Memory for column data is simply based upon number of rows and the size of + * the output data type, regardless of actual validity of the row element. 
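+ *
+ * Sizing sketch (illustrative only, assuming a GDF_INT32 column with 1000
+ * rows; gdf_valid_allocation_size rounds the 1-bit-per-row validity mask up
+ * to its allocation granularity):
+ *
+ *   data  bytes = 1000 * sizeof(int32_t);                            // fixed-width data
+ *   valid bytes = sizeof(gdf_valid_type) * gdf_valid_allocation_size(1000);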
+ * + * @param[in,out] col The column whose memory will be allocated + * + * @return gdf_error GDF_SUCCESS upon completion + *---------------------------------------------------------------------------**/ +gdf_error allocateGdfDataSpace(gdf_column *col) { + // TODO: We should not need to allocate space if there is nothing to parse + // Need to debug/refactor the code to eliminate this requirement + const auto num_rows = std::max(col->size, 1); + const auto num_masks = gdf_valid_allocation_size(num_rows); + + RMM_TRY(RMM_ALLOC(&col->valid, sizeof(gdf_valid_type) * num_masks, 0)); + CUDA_TRY(cudaMemset(col->valid, 0, sizeof(gdf_valid_type) * num_masks)); + + if (col->dtype != gdf_dtype::GDF_STRING) { + int column_byte_width = 0; + checkError(get_column_byte_width(col, &column_byte_width), + "Could not get column width using data type"); + RMM_TRY(RMM_ALLOC(&col->data, num_rows * column_byte_width, 0)); + } + + return GDF_SUCCESS; +} + +//---------------------------------------------------------------------------------------------------------------- +// CUDA Kernels +//---------------------------------------------------------------------------------------------------------------- + + +/**---------------------------------------------------------------------------* + * @brief Counts the number of rows in the input csv file. + * + * Does not load the entire file into the GPU memory at any time, so it can + * be used to parse large files. + * Does not take quotes into consideration, so it will return extra rows + * if the line terminating characters are present within quotes. + * Because of this the result should be postprocessed to remove + * the fake line endings. + * + * @param[in] h_data Pointer to the csv data in host memory + * @param[in] h_size Size of the input data, in bytes + * @param[in] terminator Line terminator character + * @param[in] quote Quote character + * @param[out] rec_cnt The resulting number of rows (records) + * + * @return gdf_error with error code on failure, otherwise GDF_SUCCESS + *---------------------------------------------------------------------------**/ +gdf_error launch_countRecords(const char *h_data, size_t h_size, + raw_csv_t *raw_csv, gdf_size_type &rec_cnt) +{ + const size_t chunk_count = (h_size + max_chunk_bytes - 1) / max_chunk_bytes; + rmm::device_vector d_counts(chunk_count); + + char* d_chunk = nullptr; + RMM_TRY(RMM_ALLOC (&d_chunk, max_chunk_bytes, 0)); + + int blockSize; // suggested thread count to use + int minGridSize; // minimum block count required + CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, countRecords)); + + for (size_t ci = 0; ci < chunk_count; ++ci) { + const auto h_chunk = h_data + ci * max_chunk_bytes; + const auto chunk_bytes = std::min((size_t)(h_size - ci * max_chunk_bytes), max_chunk_bytes); + const auto chunk_bits = (chunk_bytes + 63) / 64; + + // Copy chunk to device + CUDA_TRY(cudaMemcpy(d_chunk, h_chunk, chunk_bytes, cudaMemcpyDefault)); + + const int gridSize = (chunk_bits + blockSize - 1) / blockSize; + countRecords <<< gridSize, blockSize >>> ( + d_chunk, raw_csv->opts.terminator, raw_csv->opts.quotechar, + chunk_bytes, chunk_bits, thrust::raw_pointer_cast(&d_counts[ci]) + ); + } + + RMM_TRY( RMM_FREE(d_chunk, 0) ); + + CUDA_TRY(cudaGetLastError()); + + // Row count is used to allocate/track row start positions + // If not starting at an offset, add an extra row to account for offset=0 + rec_cnt = thrust::reduce(rmm::exec_policy()->on(0), d_counts.begin(), d_counts.end()); + if 
(raw_csv->byte_range_offset == 0) { + rec_cnt++; + } + + return GDF_SUCCESS; +} + + +/**---------------------------------------------------------------------------* + * @brief CUDA kernel that counts the number of rows in the given + * file segment, based on the location of line terminators. + * + * @param[in] data Device memory pointer to the csv data, + * potentially a chunk of the whole file + * @param[in] terminator Line terminator character + * @param[in] quotechar Quote character + * @param[in] num_bytes Number of bytes in the input data + * @param[in] num_bits Number of 'bits' in the input data. Each 'bit' is + * processed by a separate CUDA thread + * @param[in,out] num_records Device memory pointer to the number of found rows + * + * @return gdf_error with error code on failure, otherwise GDF_SUCCESS + *---------------------------------------------------------------------------**/ +__global__ void countRecords(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, + cu_reccnt_t* num_records) { + + // thread IDs range per block, so also need the block id + const long tid = threadIdx.x + (blockDim.x * blockIdx.x); + + if (tid >= num_bits) + return; + + // data ID is a multiple of 64 + const long did = tid * 64L; + + const char *raw = (data + did); + + const long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did); + + // process the data + cu_reccnt_t tokenCount = 0; + for (long x = 0; x < byteToProcess; x++) { + // Scan and log records. If quotations are enabled, then also log quotes + // for a postprocess ignore, as the chunk here has limited visibility. + if ((raw[x] == terminator) || (quotechar != '\0' && raw[x] == quotechar)) { + tokenCount++; + } + } + atomicAdd(num_records, tokenCount); +} + + +/**---------------------------------------------------------------------------* + * @brief Finds the start of each row (record) in the given file, based on + * the location of line terminators. The offset of each found row is stored + * in the recStart data member of the csvData parameter. + * + * Does not load the entire file into the GPU memory at any time, so it can + * be used to parse large files. + * Does not take quotes into consideration, so it will return extra rows + * if the line terminating characters are present within quotes. + * Because of this the result should be postprocessed to remove + * the fake line endings. 
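+ *
+ * Chunking sketch (illustrative; this restates the launch math used in this
+ * file rather than adding anything new): the file is processed in chunks of
+ * at most max_chunk_bytes, and each thread scans a 64-byte slice of a chunk:
+ *
+ *   const auto chunk_bits = (chunk_bytes + 63) / 64;             // threads needed
+ *   const int  gridSize   = (chunk_bits + blockSize - 1) / blockSize;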
+ * + * @param[in] h_data Pointer to the csv data in host memory + * @param[in] h_size Size of the input data, in bytes + * @param[in,out] csvData Structure containing the csv parsing parameters + * and intermediate results + * + * @return gdf_error with error code on failure, otherwise GDF_SUCCESS + *---------------------------------------------------------------------------**/ +gdf_error launch_storeRecordStart(const char *h_data, size_t h_size, + raw_csv_t *csvData) { + + char* d_chunk = nullptr; + RMM_TRY(RMM_ALLOC (&d_chunk, max_chunk_bytes, 0)); + + cu_reccnt_t* d_num_records; + RMM_TRY(RMM_ALLOC((void**)&d_num_records, sizeof(cu_reccnt_t), 0) ); + CUDA_TRY(cudaMemset(d_num_records, 0ull, sizeof(cu_reccnt_t))); + + int blockSize; // suggested thread count to use + int minGridSize; // minimum block count required + CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, storeRecordStart) ); + + const size_t chunk_count = (h_size + max_chunk_bytes - 1) / max_chunk_bytes; + for (size_t ci = 0; ci < chunk_count; ++ci) { + const auto chunk_offset = ci * max_chunk_bytes; + const auto h_chunk = h_data + chunk_offset; + const auto chunk_bytes = std::min((size_t)(h_size - ci * max_chunk_bytes), max_chunk_bytes); + const auto chunk_bits = (chunk_bytes + 63) / 64; + // include_first_row should only apply to the first chunk + const bool cu_include_first_row = (ci == 0) && (csvData->byte_range_offset == 0); + + // Copy chunk to device + CUDA_TRY(cudaMemcpy(d_chunk, h_chunk, chunk_bytes, cudaMemcpyDefault)); + + const int gridSize = (chunk_bits + blockSize - 1) / blockSize; + storeRecordStart <<< gridSize, blockSize >>> ( + d_chunk, chunk_offset, csvData->opts.terminator, csvData->opts.quotechar, cu_include_first_row, + chunk_bytes, chunk_bits, d_num_records, + csvData->recStart + ); + } + + RMM_TRY( RMM_FREE( d_num_records, 0 ) ); + RMM_TRY( RMM_FREE( d_chunk, 0 ) ); + + CUDA_TRY( cudaGetLastError() ); + + return GDF_SUCCESS; +} + + +/**---------------------------------------------------------------------------* + * @brief CUDA kernel that finds the start of each row (record) in the given + * file segment, based on the location of line terminators. + * + * The offset of each found row is stored in a device memory array. + * The kernel operate on a segment (chunk) of the csv file. + * + * @param[in] data Device memory pointer to the csv data, + * potentially a chunk of the whole file + * @param[in] chunk_offset Offset of the data pointer from the start of the file + * @param[in] terminator Line terminator character + * @param[in] quotechar Quote character + * @param[in] num_bytes Number of bytes in the input data + * @param[in] num_bits Number of 'bits' in the input data. 
Each 'bit' is + * processed by a separate CUDA thread + * @param[in,out] num_records Device memory pointer to the number of found rows + * @param[out] recStart device memory array containing the offset of each record + * + * @return void + *---------------------------------------------------------------------------**/ +__global__ void storeRecordStart(char *data, size_t chunk_offset, + const char terminator, const char quotechar, bool include_first_row, + long num_bytes, long num_bits, cu_reccnt_t* num_records, + cu_recstart_t* recStart) { + + // thread IDs range per block, so also need the block id + const long tid = threadIdx.x + (blockDim.x * blockIdx.x); + + if ( tid >= num_bits) + return; + + // data ID - multiple of 64 + const long did = tid * 64L; + + if (did == 0 && include_first_row) { + const auto pos = atomicAdd(num_records, 1ull); + recStart[pos] = 0; + } + + const char *raw = (data + did); + + const long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did); + + // process the data + for (long x = 0; x < byteToProcess; x++) { + // Scan and log records. If quotations are enabled, then also log quotes + // for a postprocess ignore, as the chunk here has limited visibility. + if ((raw[x] == terminator) || (quotechar != '\0' && raw[x] == quotechar)) { + const auto pos = atomicAdd(num_records, 1ull); + recStart[pos] = did + chunk_offset + x + 1; + } + } +} + + +/**---------------------------------------------------------------------------* + * @brief Helper function to setup and launch CSV parsing CUDA kernel. + * + * @param[in,out] raw_csv The metadata for the CSV data + * @param[out] gdf The output column data + * @param[out] valid The bitmaps indicating whether column fields are valid + * @param[out] str_cols The start/end offsets for string data types + * @param[out] num_valid The numbers of valid fields in columns + * + * @return gdf_error GDF_SUCCESS upon completion + *---------------------------------------------------------------------------**/ +gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **gdf, + gdf_valid_type **valid, gdf_dtype *d_dtypes, + string_pair **str_cols, + unsigned long long *num_valid) { + int blockSize; // suggested thread count to use + int minGridSize; // minimum block count required + CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, + convertCsvToGdf)); + + // Calculate actual block count to use based on records count + int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize; + + convertCsvToGdf <<< gridSize, blockSize >>> ( + raw_csv->data, raw_csv->opts, raw_csv->num_records, + raw_csv->num_actual_cols, raw_csv->d_parseCol, raw_csv->recStart, + d_dtypes, gdf, + valid, str_cols, num_valid); + + CUDA_TRY(cudaGetLastError()); + return GDF_SUCCESS; +} + +/**---------------------------------------------------------------------------* + * @brief Functor for converting CSV data to cuDF data type value. + *---------------------------------------------------------------------------**/ +struct ConvertFunctor { + /**---------------------------------------------------------------------------* + * @brief Template specialization for operator() that handles integer types + * that additionally checks whether the parsed data value should be overridden + * with user-specified true/false matches. + * + * It is handled here rather than within convertStrToValue() as that function + * is already used to construct the true/false match list from user-provided + * strings at the start of parsing. 
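+ *
+ * Behaviour sketch (illustrative restatement of the code below, using a
+ * hypothetical int32 field with len = end - start + 1): a field whose text
+ * matches the user-provided true/false sets is stored as 1/0, overriding the
+ * parsed value:
+ *
+ *   int32_t v = convertStrToValue<int32_t>(csvData, start, end, opts);
+ *   if (serializedTrieContains(opts.trueValuesTrie,  csvData + start, len)) v = 1;
+ *   if (serializedTrieContains(opts.falseValuesTrie, csvData + start, len)) v = 0;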
+ *---------------------------------------------------------------------------**/
+  template <typename T,
+            typename std::enable_if_t<std::is_integral<T>::value> * = nullptr>
+  __host__ __device__ __forceinline__ void operator()(
+      const char *csvData, void *gdfColumnData, long rowIndex, long start,
+      long end, const ParseOptions &opts) {
+    T &value{static_cast<T *>(gdfColumnData)[rowIndex]};
+    value = convertStrToValue<T>(csvData, start, end, opts);
+
+    // Check for user-specified true/false values where the output is
+    // replaced with 1/0 respectively
+    const size_t field_len = end - start + 1;
+    if (serializedTrieContains(opts.trueValuesTrie, csvData + start, field_len)) {
+      value = 1;
+    } else if (serializedTrieContains(opts.falseValuesTrie, csvData + start, field_len)) {
+      value = 0;
+    }
+  }
+
+  /**---------------------------------------------------------------------------*
+   * @brief Default template operator() dispatch specialization for all data
+   * types (including wrapper types) that are not covered by the integral
+   * specialization above.
+   *---------------------------------------------------------------------------**/
+  template <typename T,
+            typename std::enable_if_t<!std::is_integral<T>::value> * = nullptr>
+  __host__ __device__ __forceinline__ void operator()(
+      const char *csvData, void *gdfColumnData, long rowIndex, long start,
+      long end, const ParseOptions &opts) {
+    T &value{static_cast<T *>(gdfColumnData)[rowIndex]};
+    value = convertStrToValue<T>(csvData, start, end, opts);
+  }
+};
+
+/**---------------------------------------------------------------------------*
+ * @brief CUDA device function that iterates over the data until the end of
+ * the current field
+ *
+ * Also iterates over (one or more) delimiter characters after the field.
+ *
+ * @param[in] raw_csv The entire CSV data to read
+ * @param[in] opts A set of parsing options
+ * @param[in] pos Offset to start the seeking from
+ * @param[in] stop Offset of the end of the row
+ *
+ * @return long position of the last character in the field, including the
+ * delimiter(s) following the field data
+ *---------------------------------------------------------------------------**/
+__device__
+long seekFieldEnd(const char *raw_csv, const ParseOptions opts, long pos, long stop) {
+  bool quotation = false;
+  while(true){
+    // Use simple logic to ignore control chars between any quote seq
+    // Handles nominal cases including doublequotes within quotes, but
+    // may not output exact failures as PANDAS for malformed fields
+    if(raw_csv[pos] == opts.quotechar){
+      quotation = !quotation;
+    }
+    else if(quotation==false){
+      if(raw_csv[pos] == opts.delimiter){
+        while (opts.multi_delimiter &&
+               pos < stop &&
+               raw_csv[pos + 1] == opts.delimiter) {
+          ++pos;
+        }
+        break;
+      }
+      else if(raw_csv[pos] == opts.terminator){
+        break;
+      }
+      else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){
+        stop--;
+        break;
+      }
+    }
+    if(pos>=stop)
+      break;
+    pos++;
+  }
+  return pos;
+}
+
+/**---------------------------------------------------------------------------*
+ * @brief CUDA kernel that parses and converts CSV data into cuDF column data.
+ * + * Data is processed one record at a time + * + * @param[in] raw_csv The entire CSV data to read + * @param[in] opts A set of parsing options + * @param[in] num_records The number of lines/rows of CSV data + * @param[in] num_columns The number of columns of CSV data + * @param[in] parseCol Whether to parse or skip a column + * @param[in] recStart The start the CSV data of interest + * @param[in] dtype The data type of the column + * @param[out] gdf_data The output column data + * @param[out] valid The bitmaps indicating whether column fields are valid + * @param[out] str_cols The start/end offsets for string data types + * @param[out] num_valid The numbers of valid fields in columns + * + * @return gdf_error GDF_SUCCESS upon completion + *---------------------------------------------------------------------------**/ +__global__ +void convertCsvToGdf(char *raw_csv, + const ParseOptions opts, + gdf_size_type num_records, + int num_columns, + bool *parseCol, + cu_recstart_t *recStart, + gdf_dtype *dtype, + void **gdf_data, + gdf_valid_type **valid, + string_pair **str_cols, + unsigned long long *num_valid) +{ + // thread IDs range per block, so also need the block id + long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array + + // we can have more threads than data, make sure we are not past the end of the data + if ( rec_id >= num_records) + return; + + long start = recStart[rec_id]; + long stop = recStart[rec_id + 1]; + + long pos = start; + int col = 0; + int actual_col = 0; + int stringCol = 0; + + while(colstop) + break; + + pos = seekFieldEnd(raw_csv, opts, pos, stop); + + if(parseCol[col]==true){ + + // check if the entire field is a NaN string - consistent with pandas + const bool is_na = serializedTrieContains(opts.naValuesTrie, raw_csv + start, pos - start); + + // Modify start & end to ignore whitespace and quotechars + long tempPos=pos-1; + if(!is_na && dtype[actual_col] != gdf_dtype::GDF_CATEGORY && dtype[actual_col] != gdf_dtype::GDF_STRING){ + adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos, opts.quotechar); + } + + if(!is_na && start<=(tempPos)) { // Empty fields are not legal values + + // Type dispatcher does not handle GDF_STRINGS + if (dtype[actual_col] == gdf_dtype::GDF_STRING) { + long end = pos; + if(opts.keepquotes==false){ + if((raw_csv[start] == opts.quotechar) && (raw_csv[end-1] == opts.quotechar)){ + start++; + end--; + } + } + str_cols[stringCol][rec_id].first = raw_csv+start; + str_cols[stringCol][rec_id].second = size_t(end-start); + stringCol++; + } else { + cudf::type_dispatcher( + dtype[actual_col], ConvertFunctor{}, raw_csv, + gdf_data[actual_col], rec_id, start, tempPos, opts); + } + + // set the valid bitmap - all bits were set to 0 to start + long bitmapIdx = whichBitmap(rec_id); // which bitmap + long bitIdx = whichBit(rec_id); // which bit - over an 8-bit index + setBit(valid[actual_col]+bitmapIdx, bitIdx); // This is done with atomics + + atomicAdd((unsigned long long int*)&num_valid[actual_col],(unsigned long long int)1); + } + else if(dtype[actual_col]==gdf_dtype::GDF_STRING){ + str_cols[stringCol][rec_id].first = NULL; + str_cols[stringCol][rec_id].second = 0; + stringCol++; + } + actual_col++; + } + pos++; + start=pos; + col++; + + } +} + +/**---------------------------------------------------------------------------* + * @brief Helper function to setup and launch CSV data type detect CUDA kernel. 
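+ *
+ * Launch-configuration sketch (illustrative; this simply restates the grid
+ * sizing used by the launch helpers in this file, one thread per row):
+ *
+ *   int blockSize, minGridSize;
+ *   cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dataTypeDetection);
+ *   const int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize;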
+ *
+ * @param[in] raw_csv The metadata for the CSV data
+ * @param[out] d_columnData The count for each column data type
+ *
+ * @return gdf_error GDF_SUCCESS upon completion
+ *---------------------------------------------------------------------------**/
+gdf_error launch_dataTypeDetection(raw_csv_t *raw_csv,
+                                   column_data_t *d_columnData) {
+  int blockSize;    // suggested thread count to use
+  int minGridSize;  // minimum block count required
+  CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
+                                              dataTypeDetection));
+
+  // Calculate actual block count to use based on records count
+  int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize;
+
+  dataTypeDetection <<< gridSize, blockSize >>> (
+      raw_csv->data, raw_csv->opts, raw_csv->num_records,
+      raw_csv->num_actual_cols, raw_csv->d_parseCol, raw_csv->recStart,
+      d_columnData);
+
+  CUDA_TRY(cudaGetLastError());
+  return GDF_SUCCESS;
+}
+
+/**
+* @brief Returns true if the input character is a valid digit.
+* Supports both decimal and hexadecimal digits (uppercase and lowercase).
+*/
+__device__ __forceinline__
+bool isDigit(char c, bool is_hex){
+  if (c >= '0' && c <= '9') return true;
+  if (is_hex) {
+    if (c >= 'A' && c <= 'F') return true;
+    if (c >= 'a' && c <= 'f') return true;
+  }
+  return false;
+}
+
+/**---------------------------------------------------------------------------*
+ * @brief CUDA kernel that detects the data type of each column of CSV data.
+ *
+ * Data is processed one row/record at a time, so the total number of
+ * threads (tid) is equal to the number of rows.
+ *
+ * @param[in] raw_csv The entire CSV data to read
+ * @param[in] opts A set of parsing options
+ * @param[in] num_records The number of lines/rows of CSV data
+ * @param[in] num_columns The number of columns of CSV data
+ * @param[in] parseCol Whether to parse or skip a column
+ * @param[in] recStart The start of each row (record) within the CSV data
+ * @param[out] d_columnData The count for each column data type
+ *
+ * @return void
+ *---------------------------------------------------------------------------**/
+__global__
+void dataTypeDetection(char *raw_csv,
+                       const ParseOptions opts,
+                       gdf_size_type num_records,
+                       int num_columns,
+                       bool *parseCol,
+                       cu_recstart_t *recStart,
+                       column_data_t *d_columnData)
+{
+  // thread IDs range per block, so also need the block id
+  long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // index of the record (row) this thread processes
+
+  // we can have more threads than data, make sure we are not past the end of the data
+  if ( rec_id >= num_records)
+    return;
+
+  long start = recStart[rec_id];
+  long stop = recStart[rec_id + 1];
+
+  long pos = start;
+  int col = 0;
+  int actual_col = 0;
+
+  // Going through all the columns of a given record
+  while(col < num_columns){
+
+    if(start > stop)
+      break;
+
+    pos = seekFieldEnd(raw_csv, opts, pos, stop);
+
+    // Checking if this is a column that the user wants --- user can filter columns
+    if(parseCol[col]==true){
+
+      long tempPos=pos-1;
+
+      // Checking if the record is NULL
+      if(start>(tempPos)){
+        atomicAdd(& d_columnData[actual_col].countNULL, 1L);
+        pos++;
+        start=pos;
+        col++;
+        actual_col++;
+        continue;
+      }
+
+      long countNumber=0;
+      long countDecimal=0;
+      long countSlash=0;
+      long countDash=0;
+      long countColon=0;
+      long countString=0;
+
+      // Modify start & end to ignore whitespace and quotechars
+      // This could possibly result in additional empty fields
+      adjustForWhitespaceAndQuotes(raw_csv, &start,
&tempPos); + + const long strLen = tempPos - start + 1; + + const bool maybe_hex = ((strLen > 2 && raw_csv[start] == '0' && raw_csv[start + 1] == 'x') || + (strLen > 3 && raw_csv[start] == '-' && raw_csv[start + 1] == '0' && raw_csv[start + 2] == 'x')); + + for(long startPos=start; startPos<=tempPos; startPos++){ + if(isDigit(raw_csv[startPos], maybe_hex)){ + countNumber++; + continue; + } + // Looking for unique characters that will help identify column types. + switch (raw_csv[startPos]){ + case '.': + countDecimal++;break; + case '-': + countDash++; break; + case '/': + countSlash++;break; + case ':': + countColon++;break; + default: + countString++; + break; + } + } + + // Integers have to have the length of the string + long int_req_number_cnt = strLen; + // Off by one if they start with a minus sign + if(raw_csv[start]=='-' && strLen > 1){ + --int_req_number_cnt; + } + // Off by one if they are a hexadecimal number + if(maybe_hex) { + --int_req_number_cnt; + } + + if(strLen==0){ // Removed spaces ' ' in the pre-processing and thus we can have an empty string. + atomicAdd(& d_columnData[actual_col].countNULL, 1L); + } + else if(countNumber==int_req_number_cnt){ + // Checking to see if we the integer value requires 8,16,32,64 bits. + // This will allow us to allocate the exact amount of memory. + const auto value = convertStrToValue(raw_csv, start, tempPos, opts); + const size_t field_len = tempPos - start + 1; + if (serializedTrieContains(opts.trueValuesTrie, raw_csv + start, field_len) || + serializedTrieContains(opts.falseValuesTrie, raw_csv + start, field_len)){ + atomicAdd(& d_columnData[actual_col].countInt8, 1L); + } + else if(value >= (1L<<31)){ + atomicAdd(& d_columnData[actual_col].countInt64, 1L); + } + else if(value >= (1L<<15)){ + atomicAdd(& d_columnData[actual_col].countInt32, 1L); + } + else if(value >= (1L<<7)){ + atomicAdd(& d_columnData[actual_col].countInt16, 1L); + } + else{ + atomicAdd(& d_columnData[actual_col].countInt8, 1L); + } + } + // Floating point numbers are made up of numerical strings, have to have a decimal sign, and can have a minus sign. + else if((countNumber==(strLen-1) && countDecimal==1) || (strLen>2 && countNumber==(strLen-2) && raw_csv[start]=='-')){ + atomicAdd(& d_columnData[actual_col].countFloat, 1L); + } + // The date-time field cannot have more than 3 strings. As such if an entry has more than 3 string characters, it is not + // a data-time field. Also, if a string has multiple decimals, then is not a legit number. + else if(countString > 3 || countDecimal > 1){ + atomicAdd(& d_columnData[actual_col].countString, 1L); + } + else { + // A date field can have either one or two '-' or '\'. A legal combination will only have one of them. + // To simplify the process of auto column detection, we are not covering all the date-time formation permutations. + if((countDash>0 && countDash<=2 && countSlash==0)|| (countDash==0 && countSlash>0 && countSlash<=2) ){ + if((countColon<=2)){ + atomicAdd(& d_columnData[actual_col].countDateAndTime, 1L); + } + else{ + atomicAdd(& d_columnData[actual_col].countString, 1L); + } + } + // Default field is string type. 
+ else{ + atomicAdd(& d_columnData[actual_col].countString, 1L); + } + } + actual_col++; + } + pos++; + start=pos; + col++; + + } +} diff --git a/cuda_code/csv_reader_5.cu b/cuda_code/csv_reader_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..8b8c2176f1b69999c5a2e8b211484e270dc23710 --- /dev/null +++ b/cuda_code/csv_reader_5.cu @@ -0,0 +1,1374 @@ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file csv-reader.cu code to read csv data + * + * CSV Reader + */ + + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "type_conversion.cuh" +#include "datetime_parser.cuh" + +#include "cudf.h" +#include "utilities/error_utils.hpp" +#include "utilities/trie.cuh" +#include "utilities/type_dispatcher.hpp" +#include "utilities/cudf_utils.h" + +#include + +#include "rmm/rmm.h" +#include "rmm/thrust_rmm_allocator.h" +#include "io/comp/io_uncomp.h" + +#include "io/cuio_common.hpp" +#include "io/utilities/parsing_utils.cuh" +#include "io/utilities/wrapper_utils.hpp" + +using std::vector; +using std::string; + +/**---------------------------------------------------------------------------* + * @brief Struct used for internal parsing state + *---------------------------------------------------------------------------**/ +typedef struct raw_csv_ { + device_buffer data; // on-device: the raw unprocessed CSV data - loaded as a large char * array + device_buffer recStart; // on-device: Starting position of the records. + + ParseOptions opts; // options to control parsing behavior + + long num_bytes; // host: the number of bytes in the data + long num_bits; // host: the number of 64-bit bitmaps (different than valid) + gdf_size_type num_records; // host: number of records loaded into device memory, and then number of records to read + int num_active_cols;// host: number of columns that will be return to user. + int num_actual_cols;// host: number of columns in the file --- based on the number of columns in header + vector dtypes; // host: array of dtypes (since gdf_columns are not created until end) + vector col_names; // host: array of column names + + thrust::host_vector h_parseCol; // host : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out. + rmm::device_vector d_parseCol; // device : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out. + + long byte_range_offset; // offset into the data to start parsing + long byte_range_size; // length of the data of interest to parse + + gdf_size_type header_row; ///< host: Row index of the header + gdf_size_type nrows; ///< host: Number of rows to read. 
-1 for all rows + gdf_size_type skiprows; ///< host: Number of rows to skip from the start + gdf_size_type skipfooter; ///< host: Number of rows to skip from the end + std::vector header; ///< host: Header row data, for parsing column names + string prefix; ///< host: Prepended to column ID if there is no header or input column names + + rmm::device_vector d_trueTrie; // device: serialized trie of values to recognize as true + rmm::device_vector d_falseTrie;// device: serialized trie of values to recognize as false + rmm::device_vector d_naTrie; // device: serialized trie of NA values +} raw_csv_t; + +typedef struct column_data_ { + gdf_size_type countFloat; + gdf_size_type countDateAndTime; + gdf_size_type countString; + gdf_size_type countBool; + gdf_size_type countInt8; + gdf_size_type countInt16; + gdf_size_type countInt32; + gdf_size_type countInt64; + gdf_size_type countNULL; +} column_data_t; + +using string_pair = std::pair; + +// +//---------------create and process --------------------------------------------- +// +gdf_error parseArguments(csv_read_arg *args, raw_csv_t *csv); +// gdf_error getColNamesAndTypes(const char **col_names, const char **dtypes, raw_csv_t *d); +gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type); +gdf_error getUncompressedHostData(const char* h_data, size_t num_bytes, + const string& compression, + vector& h_uncomp_data); +gdf_error uploadDataToDevice(const char* h_uncomp_data, size_t h_uncomp_size, raw_csv_t * raw_csv); + +#define checkError(error, txt) if ( error != GDF_SUCCESS) { std::cerr << "ERROR: " << error << " in " << txt << std::endl; return error; } + +// +//---------------CUDA Kernel --------------------------------------------- +// + +gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **d_gdf, + gdf_valid_type **valid, gdf_dtype *d_dtypes, + gdf_size_type *num_valid); +gdf_error launch_dataTypeDetection(raw_csv_t *raw_csv, + column_data_t *d_columnData); + +__global__ void convertCsvToGdf(char *csv, const ParseOptions opts, + gdf_size_type num_records, int num_columns, + bool *parseCol, uint64_t *recStart, + gdf_dtype *dtype, void **gdf_data, + gdf_valid_type **valid, + gdf_size_type *num_valid); +__global__ void dataTypeDetection(char *raw_csv, const ParseOptions opts, + gdf_size_type num_records, int num_columns, + bool *parseCol, uint64_t *recStart, + column_data_t *d_columnData); + +/**---------------------------------------------------------------------------* + * @brief Estimates the maximum expected length or a row, based on the number + * of columns + * + * If the number of columns is not available, it will return a value large + * enough for most use cases + * + * @param[in] num_columns Number of columns in the CSV file (optional) + * + * @return Estimated maximum size of a row, in bytes + *---------------------------------------------------------------------------**/ + constexpr size_t calculateMaxRowSize(int num_columns=0) noexcept { + constexpr size_t max_row_bytes = 16*1024; // 16KB + constexpr size_t column_bytes = 64; + constexpr size_t base_padding = 1024; // 1KB + if (num_columns == 0){ + // Use flat size if the number of columns is not known + return max_row_bytes; + } + else { + // Expand the size based on the number of columns, if available + return base_padding + num_columns * column_bytes; + } +} +/** +* @brief Removes the first and Last quote in the string +*/ +string removeQuotes(string str, char quotechar) { + // Exclude first and last quotation char + 
const size_t first_quote = str.find(quotechar); + if (first_quote != string::npos) { + str.erase(first_quote, 1); + } + const size_t last_quote = str.rfind(quotechar); + if (last_quote != string::npos) { + str.erase(last_quote, 1); + } + + return str; +} + +/** + * @brief Parse the first row to set the column names in the raw_csv parameter + * + * The first row can be either the header row, or the first data row + * + * @param[in,out] raw_csv Structure containing the csv parsing parameters + * and intermediate results + * + * @return gdf_error with error code on failure, otherwise GDF_SUCCESS +*/ +gdf_error setColumnNamesFromCsv(raw_csv_t* raw_csv) { + vector first_row = raw_csv->header; + // No header, read the first data row + if (first_row.empty()) { + uint64_t first_row_len{}; + // If file only contains one row, raw_csv->recStart[1] is not valid + if (raw_csv->num_records > 1) { + CUDA_TRY(cudaMemcpy(&first_row_len, raw_csv->recStart.data() + 1, sizeof(uint64_t), cudaMemcpyDefault)); + } + else { + // File has one row - use the file size for the row size + first_row_len = raw_csv->num_bytes / sizeof(char); + } + first_row.resize(first_row_len); + CUDA_TRY(cudaMemcpy(first_row.data(), raw_csv->data.data(), first_row_len * sizeof(char), cudaMemcpyDefault)); + } + + int num_cols = 0; + + bool quotation = false; + for (size_t pos = 0, prev = 0; pos < first_row.size(); ++pos) { + // Flip the quotation flag if current character is a quotechar + if(first_row[pos] == raw_csv->opts.quotechar) { + quotation = !quotation; + } + // Check if end of a column/row + else if (pos == first_row.size() - 1 || + (!quotation && first_row[pos] == raw_csv->opts.terminator) || + (!quotation && first_row[pos] == raw_csv->opts.delimiter)) { + // This is the header, add the column name + if (raw_csv->header_row >= 0) { + // Include the current character, in case the line is not terminated + int col_name_len = pos - prev + 1; + // Exclude the delimiter/terminator is present + if (first_row[pos] == raw_csv->opts.delimiter || first_row[pos] == raw_csv->opts.terminator) { + --col_name_len; + } + // Also exclude '\r' character at the end of the column name if it's part of the terminator + if (col_name_len > 0 && + raw_csv->opts.terminator == '\n' && + first_row[pos] == '\n' && + first_row[pos - 1] == '\r') { + --col_name_len; + } + + const string new_col_name(first_row.data() + prev, col_name_len); + raw_csv->col_names.push_back(removeQuotes(new_col_name, raw_csv->opts.quotechar)); + + // Stop parsing when we hit the line terminator; relevant when there is a blank line following the header. 
+ // In this case, first_row includes multiple line terminators at the end, as the new recStart belongs + // to a line that comes after the blank line(s) + if (!quotation && first_row[pos] == raw_csv->opts.terminator){ + break; + } + } + else { + // This is the first data row, add the automatically generated name + raw_csv->col_names.push_back(raw_csv->prefix + std::to_string(num_cols)); + } + num_cols++; + + // Skip adjacent delimiters if delim_whitespace is set + while (raw_csv->opts.multi_delimiter && + pos < first_row.size() && + first_row[pos] == raw_csv->opts.delimiter && + first_row[pos + 1] == raw_csv->opts.delimiter) { + ++pos; + } + prev = pos + 1; + } + } + return GDF_SUCCESS; +} + +/**---------------------------------------------------------------------------* + * @brief Updates the raw_csv_t object with the total number of rows and + * quotation characters in the file + * + * Does not count the quotations if quotechar is set to '/0'. + * + * @param[in] h_data Pointer to the csv data in host memory + * @param[in] h_size Size of the input data, in bytes + * @param[in,out] raw_csv Structure containing the csv parsing parameters + * and intermediate results + * + * @return gdf_error + *---------------------------------------------------------------------------**/ +gdf_error countRecordsAndQuotes(const char *h_data, size_t h_size, raw_csv_t *raw_csv) { + vector chars_to_count{raw_csv->opts.terminator}; + if (raw_csv->opts.quotechar != '\0') { + chars_to_count.push_back(raw_csv->opts.quotechar); + } + + raw_csv->num_records = countAllFromSet(h_data, h_size, chars_to_count); + + // If not starting at an offset, add an extra row to account for the first row in the file + if (raw_csv->byte_range_offset == 0) { + ++raw_csv->num_records; + } + + return GDF_SUCCESS; +} + +/**---------------------------------------------------------------------------* + * @brief Updates the raw_csv_t object with the offset of each row in the file + * Also add positions of each quotation character in the file. + * + * Does not process the quotations if quotechar is set to '/0'. + * + * @param[in] h_data Pointer to the csv data in host memory + * @param[in] h_size Size of the input data, in bytes + * @param[in,out] raw_csv Structure containing the csv parsing parameters + * and intermediate results + * + * @return gdf_error + *---------------------------------------------------------------------------**/ +gdf_error setRecordStarts(const char *h_data, size_t h_size, raw_csv_t *raw_csv) { + // Allocate space to hold the record starting points + const bool last_line_terminated = (h_data[h_size - 1] == raw_csv->opts.terminator); + // If the last line is not terminated, allocate space for the EOF entry (added later) + const gdf_size_type record_start_count = raw_csv->num_records + (last_line_terminated ? 
0 : 1);
+  raw_csv->recStart = device_buffer<uint64_t>(record_start_count);
+
+  auto* find_result_ptr = raw_csv->recStart.data();
+  if (raw_csv->byte_range_offset == 0) {
+    find_result_ptr++;
+    CUDA_TRY(cudaMemsetAsync(raw_csv->recStart.data(), 0ull, sizeof(uint64_t)));
+  }
+  vector<char> chars_to_find{raw_csv->opts.terminator};
+  if (raw_csv->opts.quotechar != '\0') {
+    chars_to_find.push_back(raw_csv->opts.quotechar);
+  }
+  // Passing offset = 1 to return positions AFTER the found character
+  findAllFromSet(h_data, h_size, chars_to_find, 1, find_result_ptr);
+
+  // Previous call stores the record positions as encountered by all threads
+  // Sort the record positions as subsequent processing may require filtering
+  // certain rows or other processing on specific records
+  thrust::sort(rmm::exec_policy()->on(0), raw_csv->recStart.data(), raw_csv->recStart.data() + raw_csv->num_records);
+
+  // Currently, ignoring line terminations within quotes is handled by recording
+  // the records of both, and then filtering out the records that are a quotechar
+  // or a line termination within a quotechar pair. The future major refactoring
+  // of csv_reader and its kernels will probably use a different tactic.
+  if (raw_csv->opts.quotechar != '\0') {
+    vector<uint64_t> h_rec_starts(raw_csv->num_records);
+    const size_t rec_start_size = sizeof(uint64_t) * (h_rec_starts.size());
+    CUDA_TRY( cudaMemcpy(h_rec_starts.data(), raw_csv->recStart.data(), rec_start_size, cudaMemcpyDeviceToHost) );
+
+    auto recCount = raw_csv->num_records;
+
+    bool quotation = false;
+    for (gdf_size_type i = 1; i < raw_csv->num_records; ++i) {
+      if (h_data[h_rec_starts[i] - 1] == raw_csv->opts.quotechar) {
+        quotation = !quotation;
+        h_rec_starts[i] = raw_csv->num_bytes;
+        recCount--;
+      }
+      else if (quotation) {
+        h_rec_starts[i] = raw_csv->num_bytes;
+        recCount--;
+      }
+    }
+
+    CUDA_TRY( cudaMemcpy(raw_csv->recStart.data(), h_rec_starts.data(), rec_start_size, cudaMemcpyHostToDevice) );
+    thrust::sort(rmm::exec_policy()->on(0), raw_csv->recStart.data(), raw_csv->recStart.data() + raw_csv->num_records);
+    raw_csv->num_records = recCount;
+  }
+
+  if (!last_line_terminated){
+    // Add the EOF as the last record when the terminator is missing in the last line
+    const uint64_t eof_offset = h_size;
+    CUDA_TRY(cudaMemcpy(raw_csv->recStart.data() + raw_csv->num_records, &eof_offset, sizeof(uint64_t), cudaMemcpyDefault));
+    // Update the record count
+    ++raw_csv->num_records;
+  }
+
+  return GDF_SUCCESS;
+}
+
+/**---------------------------------------------------------------------------*
+ * @brief Reads CSV-structured data and returns an array of gdf_columns.
+ *
+ * @param[in,out] args Structure containing input and output args
+ *
+ * @return gdf_error GDF_SUCCESS if successful, otherwise an error code.
+ *---------------------------------------------------------------------------**/
+gdf_error read_csv(csv_read_arg *args)
+{
+  gdf_error error = gdf_error::GDF_SUCCESS;
+
+  //-----------------------------------------------------------------------------
+  // create the CSV data structure - this will be filled in as the CSV data is processed.
+  // Done first to validate data types
+  raw_csv_t raw_csv{};
+  // error = parseArguments(args, raw_csv);
+  raw_csv.num_actual_cols = args->num_names;
+  raw_csv.num_active_cols = args->num_names;
+  raw_csv.num_records = 0;
+
+  raw_csv.header_row = args->header;
+  raw_csv.skiprows = args->skiprows;
+  raw_csv.skipfooter = args->skipfooter;
+  raw_csv.nrows = args->nrows;
+  raw_csv.prefix = args->prefix == nullptr ?
"" : string(args->prefix); + + if (args->delim_whitespace) { + raw_csv.opts.delimiter = ' '; + raw_csv.opts.multi_delimiter = true; + } else { + raw_csv.opts.delimiter = args->delimiter; + raw_csv.opts.multi_delimiter = false; + } + if (args->windowslinetermination) { + raw_csv.opts.terminator = '\n'; + } else { + raw_csv.opts.terminator = args->lineterminator; + } + if (args->quotechar != '\0' && args->quoting != QUOTE_NONE) { + raw_csv.opts.quotechar = args->quotechar; + raw_csv.opts.keepquotes = false; + raw_csv.opts.doublequote = args->doublequote; + } else { + raw_csv.opts.quotechar = '\0'; + raw_csv.opts.keepquotes = true; + raw_csv.opts.doublequote = false; + } + raw_csv.opts.skipblanklines = args->skip_blank_lines; + raw_csv.opts.comment = args->comment; + raw_csv.opts.dayfirst = args->dayfirst; + raw_csv.opts.decimal = args->decimal; + raw_csv.opts.thousands = args->thousands; + if (raw_csv.opts.decimal == raw_csv.opts.delimiter) { + checkError(GDF_INVALID_API_CALL, "Decimal point cannot be the same as the delimiter"); + } + if (raw_csv.opts.thousands == raw_csv.opts.delimiter) { + checkError(GDF_INVALID_API_CALL, "Thousands separator cannot be the same as the delimiter"); + } + + string compression_type; + error = inferCompressionType(args->compression, args->filepath_or_buffer, compression_type); + checkError(error, "call to inferCompressionType"); + + raw_csv.byte_range_offset = args->byte_range_offset; + raw_csv.byte_range_size = args->byte_range_size; + if (raw_csv.byte_range_offset > 0 || raw_csv.byte_range_size > 0) { + if (raw_csv.nrows >= 0 || raw_csv.skiprows > 0 || raw_csv.skipfooter > 0) { + checkError(GDF_INVALID_API_CALL, + "Cannot manually limit rows to be read when using the byte range parameter"); + } + if (compression_type != "none") { + checkError(GDF_INVALID_API_CALL, + "Cannot read compressed input when using the byte range parameter"); + } + } + + // Handle user-defined booleans values, whereby field data is substituted + // with true/false values; CUDF booleans are int types of 0 or 1 + vector true_values{"True", "TRUE", "true"}; + if (args->true_values != nullptr && args->num_true_values > 0) { + for (int i = 0; i < args->num_true_values; ++i) { + true_values.emplace_back(args->true_values[i]); + } + } + raw_csv.d_trueTrie = createSerializedTrie(true_values); + raw_csv.opts.trueValuesTrie = raw_csv.d_trueTrie.data().get(); + + vector false_values{"False", "FALSE", "false"}; + if (args->false_values != nullptr && args->num_false_values > 0) { + for (int i = 0; i < args->num_false_values; ++i) { + false_values.emplace_back(args->false_values[i]); + } + } + raw_csv.d_falseTrie = createSerializedTrie(false_values); + raw_csv.opts.falseValuesTrie = raw_csv.d_falseTrie.data().get(); + + if (args->na_filter && + (args->keep_default_na || (args->na_values != nullptr && args->num_na_values > 0))) { + vector na_values{ + "#N/A", "#N/A N/A", "#NA", "-1.#IND", + "-1.#QNAN", "-NaN", "-nan", "1.#IND", + "1.#QNAN", "N/A", "NA", "NULL", + "NaN", "n/a", "nan", "null"}; + if(!args->keep_default_na){ + na_values.clear(); + } + + if (args->na_values != nullptr && args->num_na_values > 0) { + for (int i = 0; i < args->num_na_values; ++i) { + na_values.emplace_back(args->na_values[i]); + } + } + + raw_csv.d_naTrie = createSerializedTrie(na_values); + raw_csv.opts.naValuesTrie = raw_csv.d_naTrie.data().get(); + } + args->data = nullptr; + args->num_cols_out = 0; + args->num_rows_out = 0; + + //----------------------------------------------------------------------------- + // 
memory map in the data + void * map_data = NULL; + size_t map_size = 0; + size_t map_offset = 0; + int fd = 0; + if (args->input_data_form == gdf_csv_input_form::FILE_PATH) + { + fd = open(args->filepath_or_buffer, O_RDONLY ); + if (fd < 0) { close(fd); checkError(GDF_FILE_ERROR, "Error opening file"); } + + struct stat st{}; + if (fstat(fd, &st)) { close(fd); checkError(GDF_FILE_ERROR, "cannot stat file"); } + + const auto file_size = st.st_size; + if (args->byte_range_offset > (size_t)file_size) { + close(fd); + CUDF_FAIL("The byte_range offset is larger than the file size"); + } + + // Can't map an empty file, will return an empty dataframe further down + if (file_size != 0) { + // Have to align map offset to page size + const auto page_size = sysconf(_SC_PAGESIZE); + map_offset = (args->byte_range_offset/page_size)*page_size; + + // Set to rest-of-the-file size, will reduce based on the byte range size + raw_csv.num_bytes = map_size = file_size - map_offset; + + // Include the page padding in the mapped size + const size_t page_padding = args->byte_range_offset - map_offset; + const size_t padded_byte_range_size = raw_csv.byte_range_size + page_padding; + + if (raw_csv.byte_range_size != 0 && padded_byte_range_size < map_size) { + // Need to make sure that w/ padding we don't overshoot the end of file + map_size = min(padded_byte_range_size + calculateMaxRowSize(std::max(args->num_names, args->num_dtype)), map_size); + } + + // Ignore page padding for parsing purposes + raw_csv.num_bytes = map_size - page_padding; + + map_data = mmap(0, map_size, PROT_READ, MAP_PRIVATE, fd, map_offset); + if (map_data == MAP_FAILED || map_size==0) { close(fd); CUDF_FAIL("Error mapping file"); } + } + } + else if (args->input_data_form == gdf_csv_input_form::HOST_BUFFER) + { + map_data = (void *)args->filepath_or_buffer; + raw_csv.num_bytes = map_size = args->buffer_size; + } + else { checkError(GDF_C_ERROR, "invalid input type"); } + + // Return an empty dataframe if the input is empty and user did not specify the column names and types + if (raw_csv.num_bytes == 0 && (args->names == nullptr || args->dtype == nullptr)){ + return GDF_SUCCESS; + } + + const char* h_uncomp_data = nullptr; + size_t h_uncomp_size = 0; + // Used when the input data is compressed, to ensure the allocated uncompressed data is freed + vector h_uncomp_data_owner; + // Skip if the input is empty and proceed to set the column names and types based on user's input + if(raw_csv.num_bytes != 0) { + if (compression_type == "none") { + // Do not use the owner vector here to avoid copying the whole file to the heap + h_uncomp_data = (const char*)map_data + (args->byte_range_offset - map_offset); + h_uncomp_size = raw_csv.num_bytes; + } + else { + error = getUncompressedHostData( (const char *)map_data, map_size, compression_type, h_uncomp_data_owner); + checkError(error, "call to getUncompressedHostData"); + h_uncomp_data = h_uncomp_data_owner.data(); + h_uncomp_size = h_uncomp_data_owner.size(); + } + + error = countRecordsAndQuotes(h_uncomp_data, h_uncomp_size, &raw_csv); + checkError(error, "call to count the number of rows"); + + error = setRecordStarts(h_uncomp_data, h_uncomp_size, &raw_csv); + checkError(error, "call to store the row offsets"); + + error = uploadDataToDevice(h_uncomp_data, h_uncomp_size, &raw_csv); + checkError(error, "call to upload the CSV data to the device"); + } + + //----------------------------------------------------------------------------- + //--- done with host data + if (args->input_data_form == 
gdf_csv_input_form::FILE_PATH) + { + close(fd); + if (map_data != nullptr) { + munmap(map_data, map_size); + } + } + + //----------------------------------------------------------------------------- + //-- Populate the header + + // Check if the user gave us a list of column names + if(args->names == nullptr) { + + error = setColumnNamesFromCsv(&raw_csv); + if (error != GDF_SUCCESS) { + return error; + } + raw_csv.num_actual_cols = raw_csv.num_active_cols = raw_csv.col_names.size(); + + // Initialize a boolean array that states if a column needs to read or filtered. + raw_csv.h_parseCol = thrust::host_vector(raw_csv.num_actual_cols, true); + + // Rename empty column names to "Unnamed: col_index" + for (size_t col_idx = 0; col_idx < raw_csv.col_names.size(); ++col_idx) { + if (raw_csv.col_names[col_idx].empty()) { + raw_csv.col_names[col_idx] = string("Unnamed: ") + std::to_string(col_idx); + } + } + + // Looking for duplicates + std::unordered_map col_names_histogram; + for (auto& col_name: raw_csv.col_names){ + // Operator [] inserts a default-initialized value if the given key is not present + if (++col_names_histogram[col_name] > 1){ + if (args->mangle_dupe_cols) { + // Rename duplicates of column X as X.1, X.2, ...; First appearance stays as X + col_name += "." + std::to_string(col_names_histogram[col_name] - 1); + } + else { + // All duplicate columns will be ignored; First appearance is parsed + const auto idx = &col_name - raw_csv.col_names.data(); + raw_csv.h_parseCol[idx] = false; + } + } + } + + // Update the number of columns to be processed, if some might have been removed + if (!args->mangle_dupe_cols) { + raw_csv.num_active_cols = col_names_histogram.size(); + } + } + else { + raw_csv.h_parseCol = thrust::host_vector(args->num_names, true); + + for (int i = 0; inames[i]); + } + } + + // User can give + if (args->use_cols_int!=NULL || args->use_cols_char!=NULL){ + if(args->use_cols_int!=NULL){ + for (int i = 0; iuse_cols_int_len; i++){ + int pos = args->use_cols_int[i]; + raw_csv.h_parseCol[pos]=true; + } + raw_csv.num_active_cols = args->use_cols_int_len; + }else{ + for (int i = 0; iuse_cols_char_len; i++){ + std::string colName(args->use_cols_char[i]); + for (auto it = raw_csv.col_names.begin(); it != raw_csv.col_names.end(); it++){ + if(colName==*it){ + countFound++; + int pos=std::distance(raw_csv.col_names.begin(), it); + raw_csv.h_parseCol[pos]=true; + break; + } + } + } + raw_csv.num_active_cols = countFound; + } + } + raw_csv.d_parseCol = raw_csv.h_parseCol; + + //----------------------------------------------------------------------------- + //--- Auto detect types of the vectors + + if(args->dtype == NULL){ + if (raw_csv.num_records == 0) { + raw_csv.dtypes = vector(raw_csv.num_active_cols, GDF_STRING); + } else { + vector h_ColumnData(raw_csv.num_active_cols); + device_buffer d_ColumnData(raw_csv.num_active_cols); + CUDA_TRY(cudaMemset(d_ColumnData.data(), 0, sizeof(column_data_t) * raw_csv.num_active_cols)); + + launch_dataTypeDetection(&raw_csv, d_ColumnData.data()); + CUDA_TRY(cudaMemcpy(h_ColumnData.data(), d_ColumnData.data(), sizeof(column_data_t) * raw_csv.num_active_cols, cudaMemcpyDeviceToHost)); + + // host: array of dtypes (since gdf_columns are not created until end) + vector d_detectedTypes; + + for(int col = 0; col < raw_csv.num_active_cols; col++){ + unsigned long long countInt = h_ColumnData[col].countInt8 + h_ColumnData[col].countInt16 + + h_ColumnData[col].countInt32 + h_ColumnData[col].countInt64; + + if (h_ColumnData[col].countNULL == 
raw_csv.num_records){ + // Entire column is NULL; allocate the smallest amount of memory + d_detectedTypes.push_back(GDF_INT8); + } else if(h_ColumnData[col].countString > 0L){ + d_detectedTypes.push_back(GDF_STRING); + } else if(h_ColumnData[col].countDateAndTime > 0L){ + d_detectedTypes.push_back(GDF_DATE64); + } else if(h_ColumnData[col].countBool > 0L) { + d_detectedTypes.push_back(GDF_BOOL8); + } else if(h_ColumnData[col].countFloat > 0L || + (h_ColumnData[col].countFloat == 0L && + countInt > 0L && h_ColumnData[col].countNULL > 0L)) { + // The second condition has been added to conform to + // PANDAS which states that a column of integers with + // a single NULL record need to be treated as floats. + d_detectedTypes.push_back(GDF_FLOAT64); + } else { + // All other integers are stored as 64-bit to conform to PANDAS + d_detectedTypes.push_back(GDF_INT64); + } + } + raw_csv.dtypes = d_detectedTypes; + } + } + else { + const bool is_dict = std::all_of(args->dtype, args->dtype + args->num_dtype, + [](const auto& s) { return strchr(s, ':') != nullptr; }); + if (!is_dict) { + CUDF_EXPECTS(args->num_dtype >= raw_csv.num_actual_cols, "Must specify data types for all columns"); + for (int col = 0; col < raw_csv.num_actual_cols; col++) { + if (raw_csv.h_parseCol[col]) { + // dtype is an array of types, assign types to active columns in the given order + raw_csv.dtypes.push_back(convertStringToDtype(args->dtype[col])); + CUDF_EXPECTS(raw_csv.dtypes.back() != GDF_invalid, "Unsupported data type"); + } + } + } else { + // dtype is a column name->type dictionary, create a map from the dtype array to speed up processing + std::unordered_map col_type_map; + for (int dtype_idx = 0; dtype_idx < args->num_dtype; dtype_idx++) { + // Split the dtype elements around the last ':' character + const char* colon = strrchr(args->dtype[dtype_idx], ':'); + const std::string col(args->dtype[dtype_idx], colon); + const std::string type(colon + 1); + + col_type_map[col] = convertStringToDtype(type); + CUDF_EXPECTS(col_type_map[col] != GDF_invalid, "Unsupported data type"); + } + + for (int col = 0; col < raw_csv.num_actual_cols; col++) { + if (raw_csv.h_parseCol[col]) { + CUDF_EXPECTS(col_type_map.find(raw_csv.col_names[col]) != col_type_map.end(), + "Must specify data types for all active columns"); + raw_csv.dtypes.push_back(col_type_map[raw_csv.col_names[col]]); + } + } + } + } + // Alloc output; columns' data memory is still expected for empty dataframe + std::vector columns; + for (int col = 0, active_col = 0; col < raw_csv.num_actual_cols; ++col) { + if (raw_csv.h_parseCol[col]) { + columns.emplace_back(raw_csv.num_records, raw_csv.dtypes[active_col], + gdf_dtype_extra_info{TIME_UNIT_NONE}, + raw_csv.col_names[col]); + CUDF_EXPECTS(columns.back().allocate() == GDF_SUCCESS, "Cannot allocate columns"); + active_col++; + } + } + + // Convert CSV input to cuDF output + if (raw_csv.num_records != 0) { + thrust::host_vector h_dtypes(raw_csv.num_active_cols); + thrust::host_vector h_data(raw_csv.num_active_cols); + thrust::host_vector h_valid(raw_csv.num_active_cols); + + for (int i = 0; i < raw_csv.num_active_cols; ++i) { + h_dtypes[i] = columns[i]->dtype; + h_data[i] = columns[i]->data; + h_valid[i] = columns[i]->valid; + } + + rmm::device_vector d_dtypes = h_dtypes; + rmm::device_vector d_data = h_data; + rmm::device_vector d_valid = h_valid; + rmm::device_vector d_valid_counts(raw_csv.num_active_cols, 0); + + CUDF_EXPECTS( + launch_dataConvertColumns(&raw_csv, d_data.data().get(), + d_valid.data().get(), 
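// For illustration: the dtype-dictionary parsing above splits each "column:type" entry
// at the last ':' so column names that themselves contain ':' still resolve correctly.
// A standalone sketch (hypothetical helper name; convertStringToDtype is the conversion
// actually used above):
//
//   #include <cstring>
//   #include <string>
//   #include <utility>
//
//   std::pair<std::string, std::string> split_dtype_entry(const char* entry) {
//     const char* colon = strrchr(entry, ':');   // last ':' separates name from type
//     return { std::string(entry, colon),        // column name
//              std::string(colon + 1) };         // type name, e.g. "int64"
//   }
//
//   // split_dtype_entry("price:float64") -> {"price", "float64"}
//   // split_dtype_entry("a:b:int32")     -> {"a:b", "int32"}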
d_dtypes.data().get(), + d_valid_counts.data().get()) == GDF_SUCCESS, + "Cannot convert CSV data to cuDF columns"); + CUDA_TRY(cudaStreamSynchronize(0)); + + thrust::host_vector h_valid_counts = d_valid_counts; + for (int i = 0; i < raw_csv.num_active_cols; ++i) { + columns[i]->null_count = columns[i]->size - h_valid_counts[i]; + } + } + + for (int i = 0; i < raw_csv.num_active_cols; ++i) { + if (columns[i]->dtype == GDF_STRING) { + std::unique_ptr str_data( + NVStrings::create_from_index(static_cast(columns[i]->data), columns[i]->size), + &NVStrings::destroy); + RMM_TRY(RMM_FREE(columns[i]->data, 0)); + + // PANDAS' default behavior of enabling doublequote for two consecutive + // quotechars in quoted fields results in reduction to a single quotechar + if ((raw_csv.opts.quotechar != '\0') && + (raw_csv.opts.doublequote == true)) { + const std::string quotechar(1, raw_csv.opts.quotechar); + const std::string doublequotechar(2, raw_csv.opts.quotechar); + columns[i]->data = str_data->replace(doublequotechar.c_str(), quotechar.c_str()); + } + else { + columns[i]->data = str_data.release(); + } + } + } + + // Transfer ownership to raw pointer output arguments + args->data = (gdf_column **)malloc(sizeof(gdf_column *) * raw_csv.num_active_cols); + for (int i = 0; i < raw_csv.num_active_cols; ++i) { + args->data[i] = columns[i].release(); + } + args->num_cols_out = raw_csv.num_active_cols; + args->num_rows_out = raw_csv.num_records; + + return error; +} + +/**---------------------------------------------------------------------------* + * @brief Infer the compression type from the compression parameter and + * the input file name + * + * Returns "none" if the input is not compressed. + * + * @param[in] compression_arg Input string that is potentially describing + * the compression type. Can also be nullptr, "none", or "infer" + * @param[in] filepath path + name of the input file + * @param[out] compression_type String describing the inferred compression type + * + * @return gdf_error with error code on failure, otherwise GDF_SUCCESS + *---------------------------------------------------------------------------**/ +gdf_error inferCompressionType(const char* compression_arg, const char* filepath, string& compression_type) +{ + if (compression_arg && 0 == strcasecmp(compression_arg, "none")) { + compression_arg = nullptr; + } + if (compression_arg && 0 == strcasecmp(compression_arg, "infer")) + { + const char *file_ext = strrchr(filepath, '.'); + compression_arg = nullptr; + if (file_ext) + { + if (!strcasecmp(file_ext, ".gz")) + compression_arg = "gzip"; + else if (!strcasecmp(file_ext, ".zip")) + compression_arg = "zip"; + else if (!strcasecmp(file_ext, ".bz2")) + compression_arg = "bz2"; + else if (!strcasecmp(file_ext, ".xz")) + compression_arg = "xz"; + else { + // TODO: return error here + } + } + } + compression_type = compression_arg == nullptr? "none":string(compression_arg); + + return GDF_SUCCESS; +} + +/**---------------------------------------------------------------------------* + * @brief Uploads the relevant segment of the input csv data onto the GPU. + * + * Only rows that need to be read are copied to the GPU, based on parameters + * like nrows, skipheader, skipfooter. + * Also updates the array of record starts to match the device data offset. 
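 *
 * For example, assuming the row-start offsets have already been copied into a host
 * std::vector<uint64_t> named `starts` (one offset per row plus one final offset that
 * marks the end of the last row), the trimming performed below amounts to the
 * following sketch (hypothetical variable names):
 * @code
 *   starts.erase(starts.begin(), starts.begin() + header_row + 1); // drop header row and rows above it
 *   if (nrows >= 0 && nrows + 1 < (long)starts.size())
 *     starts.resize(nrows + 1);                                    // keep nrows rows plus the end offset
 *   if (skipfooter > 0)
 *     starts.resize(starts.size() - skipfooter);                   // drop trailing rows
 * @endcode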
+ *
+ * @param[in] h_uncomp_data Pointer to the uncompressed csv data in host memory
+ * @param[in] h_uncomp_size Size of the input data, in bytes
+ * @param[in,out] raw_csv Structure containing the csv parsing parameters
+ * and intermediate results
+ *
+ * @return gdf_error with error code on failure, otherwise GDF_SUCCESS
+ *---------------------------------------------------------------------------**/
+gdf_error uploadDataToDevice(const char *h_uncomp_data, size_t h_uncomp_size,
+                             raw_csv_t *raw_csv) {
+
+  // Exclude the rows that are to be skipped from the start
+  GDF_REQUIRE(raw_csv->num_records > raw_csv->skiprows, GDF_INVALID_API_CALL);
+  const auto first_row = raw_csv->skiprows;
+  raw_csv->num_records = raw_csv->num_records - first_row;
+
+  std::vector<uint64_t> h_rec_starts(raw_csv->num_records);
+  CUDA_TRY(cudaMemcpy(h_rec_starts.data(), raw_csv->recStart.data() + first_row,
+                      sizeof(uint64_t) * h_rec_starts.size(),
+                      cudaMemcpyDefault));
+
+  // Trim lines that are outside range, but keep one greater for the end offset
+  if (raw_csv->byte_range_size != 0) {
+    auto it = h_rec_starts.end() - 1;
+    while (it >= h_rec_starts.begin() &&
+           *it > uint64_t(raw_csv->byte_range_size)) {
+      --it;
+    }
+    if ((it + 2) < h_rec_starts.end()) {
+      h_rec_starts.erase(it + 2, h_rec_starts.end());
+    }
+  }
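  // For illustration: the trimming above keeps one offset past the requested byte range
  // so the last row inside the range still has an end position. E.g. with record starts
  // {0, 10, 20, 30, 40} and byte_range_size = 25, the loop stops at 20 (the last offset
  // inside the range), keeps {0, 10, 20, 30} so the row starting at 20 retains its end
  // offset 30, and drops 40. A standalone equivalent (hypothetical name):
  //
  //   void trim_to_byte_range(std::vector<uint64_t>& starts, uint64_t range_size) {
  //     auto it = starts.end() - 1;
  //     while (it >= starts.begin() && *it > range_size) --it;
  //     if (it + 2 < starts.end()) starts.erase(it + 2, starts.end());
  //   }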
+
+  // Discard only blank lines, only fully comment lines, or both.
+  // If only handling one of them, ensure it doesn't match against \0 as we do
+  // not want certain scenarios to be filtered out (end-of-file)
+  if (raw_csv->opts.skipblanklines || raw_csv->opts.comment != '\0') {
+    const auto match_newline = raw_csv->opts.skipblanklines ? raw_csv->opts.terminator
+                                                            : raw_csv->opts.comment;
+    const auto match_comment = raw_csv->opts.comment != '\0' ? raw_csv->opts.comment
+                                                             : match_newline;
+    const auto match_return = (raw_csv->opts.skipblanklines &&
+                               raw_csv->opts.terminator == '\n') ? '\r'
+                                                                 : match_comment;
+    h_rec_starts.erase(
+        std::remove_if(h_rec_starts.begin(), h_rec_starts.end(),
+                       [&](uint64_t i) {
+                         return (h_uncomp_data[i] == match_newline ||
+                                 h_uncomp_data[i] == match_return ||
+                                 h_uncomp_data[i] == match_comment);
+                       }),
+        h_rec_starts.end());
+  }
+
+  raw_csv->num_records = h_rec_starts.size();
+
+  // Exclude the rows before the header row (inclusive)
+  // But copy the header data for parsing the column names later (if necessary)
+  if (raw_csv->header_row >= 0) {
+    raw_csv->header.assign(
+        h_uncomp_data + h_rec_starts[raw_csv->header_row],
+        h_uncomp_data + h_rec_starts[raw_csv->header_row + 1]);
+    h_rec_starts.erase(h_rec_starts.begin(),
+                       h_rec_starts.begin() + raw_csv->header_row + 1);
+    raw_csv->num_records = h_rec_starts.size();
+  }
+
+  // Exclude the rows that exceed past the requested number
+  if (raw_csv->nrows >= 0 && raw_csv->nrows < raw_csv->num_records) {
+    h_rec_starts.resize(raw_csv->nrows + 1);  // include end offset
+    raw_csv->num_records = h_rec_starts.size();
+  }
+
+  // Exclude the rows that are to be skipped from the end
+  if (raw_csv->skipfooter > 0) {
+    h_rec_starts.resize(h_rec_starts.size() - raw_csv->skipfooter);
+    raw_csv->num_records = h_rec_starts.size();
+  }
+
+  // Check that there is actual data to parse
+  GDF_REQUIRE(raw_csv->num_records > 0, GDF_INVALID_API_CALL);
+
+  const auto start_offset = h_rec_starts.front();
+  const auto end_offset = h_rec_starts.back();
+  raw_csv->num_bytes = end_offset - start_offset;
+  assert(raw_csv->num_bytes <= h_uncomp_size);
+  raw_csv->num_bits = (raw_csv->num_bytes + 63) / 64;
+
+  // Resize and upload the rows of interest
+  raw_csv->recStart.resize(raw_csv->num_records);
+  CUDA_TRY(cudaMemcpy(raw_csv->recStart.data(), h_rec_starts.data(),
+                      sizeof(uint64_t) * raw_csv->num_records,
+                      cudaMemcpyDefault));
+
+  // Upload the raw data that is within the rows of interest
+  raw_csv->data = device_buffer<char>(raw_csv->num_bytes);
+  CUDA_TRY(cudaMemcpy(raw_csv->data.data(), h_uncomp_data + start_offset,
+                      raw_csv->num_bytes, cudaMemcpyHostToDevice));
+
+  // Adjust row start positions to account for the data subcopy
+  thrust::transform(rmm::exec_policy()->on(0), raw_csv->recStart.data(),
+                    raw_csv->recStart.data() + raw_csv->num_records,
+                    thrust::make_constant_iterator(start_offset),
+                    raw_csv->recStart.data(), thrust::minus<uint64_t>());
+
+  // The array of row offsets includes the EOF offset;
+  // reduce the number of records by one to exclude it from the row count
+  raw_csv->num_records--;
+
+  return GDF_SUCCESS;
+}
+
+//----------------------------------------------------------------------------------------------------------------
+// CUDA Kernels
+//----------------------------------------------------------------------------------------------------------------
+
+/**---------------------------------------------------------------------------*
+ * @brief Helper function to setup and launch CSV parsing CUDA kernel.
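 *
 * For illustration, a minimal sketch of the occupancy-based launch configuration this
 * helper uses: cudaOccupancyMaxPotentialBlockSize suggests a block size for the kernel,
 * and the grid is then sized so there is one thread per record. `my_kernel`,
 * `num_records`, `d_in` and `d_out` below are placeholder names, not part of this file.
 * @code
 *   int block_size = 0, min_grid_size = 0;
 *   cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, my_kernel);
 *   const int grid_size = (num_records + block_size - 1) / block_size;  // ceil divide
 *   my_kernel<<<grid_size, block_size>>>(d_in, d_out, num_records);
 * @endcode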
+ * + * @param[in,out] raw_csv The metadata for the CSV data + * @param[out] gdf The output column data + * @param[out] valid The bitmaps indicating whether column fields are valid + * @param[out] str_cols The start/end offsets for string data types + * @param[out] num_valid The numbers of valid fields in columns + * + * @return gdf_error GDF_SUCCESS upon completion + *---------------------------------------------------------------------------**/ +gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **gdf, + gdf_valid_type **valid, gdf_dtype *d_dtypes, + gdf_size_type *num_valid) { + int blockSize; // suggested thread count to use + int minGridSize; // minimum block count required + CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, + convertCsvToGdf)); + + // Calculate actual block count to use based on records count + int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize; + + convertCsvToGdf <<< gridSize, blockSize >>> ( + raw_csv->data.data(), raw_csv->opts, raw_csv->num_records, + raw_csv->num_actual_cols, raw_csv->d_parseCol.data().get(), raw_csv->recStart.data(), + d_dtypes, gdf, valid, num_valid); + + CUDA_TRY(cudaGetLastError()); + return GDF_SUCCESS; +} + +/**---------------------------------------------------------------------------* + * @brief Functor for converting CSV data to cuDF data type value. + *---------------------------------------------------------------------------**/ +struct ConvertFunctor { + /**---------------------------------------------------------------------------* + * @brief Template specialization for operator() for types whose values can be + * convertible to a 0 or 1 to represent false/true. The converting is done by + * checking against the default and user-specified true/false values list. + * + * It is handled here rather than within convertStrToValue() as that function + * is used by other types (ex. timestamp) that aren't 'booleable'. + *---------------------------------------------------------------------------**/ + template ::value> * = nullptr> + __host__ __device__ __forceinline__ void operator()( + const char *csvData, void *gdfColumnData, long rowIndex, long start, + long end, const ParseOptions &opts) { + T &value{static_cast(gdfColumnData)[rowIndex]}; + + // Check for user-specified true/false values first, where the output is + // replaced with 1/0 respectively + const size_t field_len = end - start + 1; + if (serializedTrieContains(opts.trueValuesTrie, csvData + start, field_len)) { + value = 1; + } else if (serializedTrieContains(opts.falseValuesTrie, csvData + start, field_len)) { + value = 0; + } else { + value = convertStrToValue(csvData, start, end, opts); + } + } + + /**---------------------------------------------------------------------------* + * @brief Default template operator() dispatch specialization all data types + * (including wrapper types) that is not covered by above. + *---------------------------------------------------------------------------**/ + template ::value> * = nullptr> + __host__ __device__ __forceinline__ void operator()( + const char *csvData, void *gdfColumnData, long rowIndex, long start, + long end, const ParseOptions &opts) { + T &value{static_cast(gdfColumnData)[rowIndex]}; + value = convertStrToValue(csvData, start, end, opts); + } +}; + +/**---------------------------------------------------------------------------* + * @brief CUDA kernel that parses and converts CSV data into cuDF column data. 
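 *
 * For illustration, the integral-type specialization of ConvertFunctor above reduces
 * to the following decision (sketch only; `field` and `field_len` are placeholder
 * names for the current field's start pointer and length):
 * @code
 *   if (serializedTrieContains(opts.trueValuesTrie, field, field_len))       value = 1;
 *   else if (serializedTrieContains(opts.falseValuesTrie, field, field_len)) value = 0;
 *   else value = convertStrToValue<T>(csvData, start, end, opts);
 * @endcode
 * so user-supplied true/false strings (e.g. "yes"/"no") become 1/0 before the generic
 * numeric conversion is attempted.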
+ * + * Data is processed one record at a time + * + * @param[in] raw_csv The entire CSV data to read + * @param[in] opts A set of parsing options + * @param[in] num_records The number of lines/rows of CSV data + * @param[in] num_columns The number of columns of CSV data + * @param[in] parseCol Whether to parse or skip a column + * @param[in] recStart The start the CSV data of interest + * @param[in] dtype The data type of the column + * @param[out] gdf_data The output column data + * @param[out] valid The bitmaps indicating whether column fields are valid + * @param[out] num_valid The numbers of valid fields in columns + * + * @return gdf_error GDF_SUCCESS upon completion + *---------------------------------------------------------------------------**/ +__global__ void convertCsvToGdf(char *raw_csv, const ParseOptions opts, + gdf_size_type num_records, int num_columns, + bool *parseCol, uint64_t *recStart, + gdf_dtype *dtype, void **gdf_data, + gdf_valid_type **valid, + gdf_size_type *num_valid) +{ + // thread IDs range per block, so also need the block id + long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array + + // we can have more threads than data, make sure we are not past the end of the data + if ( rec_id >= num_records) + return; + + long start = recStart[rec_id]; + long stop = recStart[rec_id + 1]; + + long pos = start; + int col = 0; + int actual_col = 0; + + while(colstop) + break; + + pos = seekFieldEnd(raw_csv, opts, pos, stop); + + if(parseCol[col]==true){ + + // check if the entire field is a NaN string - consistent with pandas + const bool is_na = serializedTrieContains(opts.naValuesTrie, raw_csv + start, pos - start); + + // Modify start & end to ignore whitespace and quotechars + long tempPos=pos-1; + if(!is_na && dtype[actual_col] != gdf_dtype::GDF_CATEGORY && dtype[actual_col] != gdf_dtype::GDF_STRING){ + adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos, opts.quotechar); + } + + if(!is_na && start<=(tempPos)) { // Empty fields are not legal values + + // Type dispatcher does not handle GDF_STRINGS + if (dtype[actual_col] == gdf_dtype::GDF_STRING) { + long end = pos; + if(opts.keepquotes==false){ + if((raw_csv[start] == opts.quotechar) && (raw_csv[end-1] == opts.quotechar)){ + start++; + end--; + } + } + auto str_list = static_cast(gdf_data[actual_col]); + str_list[rec_id].first = raw_csv + start; + str_list[rec_id].second = end - start; + } else { + cudf::type_dispatcher( + dtype[actual_col], ConvertFunctor{}, raw_csv, + gdf_data[actual_col], rec_id, start, tempPos, opts); + } + + // set the valid bitmap - all bits were set to 0 to start + setBitmapBit(valid[actual_col], rec_id); + atomicAdd(&num_valid[actual_col], 1); + } + else if(dtype[actual_col]==gdf_dtype::GDF_STRING){ + auto str_list = static_cast(gdf_data[actual_col]); + str_list[rec_id].first = nullptr; + str_list[rec_id].second = 0; + } + actual_col++; + } + pos++; + start=pos; + col++; + + } +} + +/**---------------------------------------------------------------------------* + * @brief Helper function to setup and launch CSV data type detect CUDA kernel. 
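 *
 * For illustration: the conversion kernel above marks a parsed field as valid with
 * setBitmapBit() plus an atomicAdd on the per-column valid count. A generic sketch of
 * setting bit `row` in a byte-addressed bitmap (not the helper used in this file, and
 * assuming the bitmap allocation is 4-byte aligned) would be:
 * @code
 *   __device__ void set_valid_bit(gdf_valid_type* bitmap, long row) {
 *     // gdf_valid_type is byte-sized; atomicOr operates on 32-bit words, so address
 *     // the aligned word that contains this row's bit
 *     unsigned int* word = reinterpret_cast<unsigned int*>(bitmap) + row / 32;
 *     atomicOr(word, 1u << (row % 32));
 *   }
 * @endcode
 * The atomic read-modify-write matters because neighboring rows share a bitmap word.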
+ * + * @param[in] raw_csv The metadata for the CSV data + * @param[out] d_columnData The count for each column data type + * + * @return gdf_error GDF_SUCCESS upon completion + *---------------------------------------------------------------------------**/ +gdf_error launch_dataTypeDetection(raw_csv_t *raw_csv, + column_data_t *d_columnData) { + int blockSize; // suggested thread count to use + int minGridSize; // minimum block count required + CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, + dataTypeDetection)); + + // Calculate actual block count to use based on records count + int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize; + + dataTypeDetection <<< gridSize, blockSize >>> ( + raw_csv->data.data(), raw_csv->opts, raw_csv->num_records, + raw_csv->num_actual_cols, raw_csv->d_parseCol.data().get(), raw_csv->recStart.data(), + d_columnData); + + CUDA_TRY(cudaGetLastError()); + return GDF_SUCCESS; +} + +/**---------------------------------------------------------------------------* + * @brief CUDA kernel that parses and converts CSV data into cuDF column data. + * + * Data is processed in one row/record at a time, so the number of total + * threads (tid) is equal to the number of rows. + * + * @param[in] raw_csv The entire CSV data to read + * @param[in] opts A set of parsing options + * @param[in] num_records The number of lines/rows of CSV data + * @param[in] num_columns The number of columns of CSV data + * @param[in] parseCol Whether to parse or skip a column + * @param[in] recStart The start the CSV data of interest + * @param[out] d_columnData The count for each column data type + * + * @returns GDF_SUCCESS upon successful computation + *---------------------------------------------------------------------------**/ +__global__ +void dataTypeDetection(char *raw_csv, + const ParseOptions opts, + gdf_size_type num_records, + int num_columns, + bool *parseCol, + uint64_t *recStart, + column_data_t *d_columnData) +{ + // thread IDs range per block, so also need the block id + long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is entry into the field array - tid is an elements within the num_entries array + + // we can have more threads than data, make sure we are not past the end of the data + if ( rec_id >= num_records) + return; + + long start = recStart[rec_id]; + long stop = recStart[rec_id + 1]; + + long pos = start; + int col = 0; + int actual_col = 0; + + // Going through all the columns of a given record + while(colstop) + break; + + pos = seekFieldEnd(raw_csv, opts, pos, stop); + + // Checking if this is a column that the user wants --- user can filter columns + if(parseCol[col]==true){ + + long tempPos=pos-1; + + // Checking if the record is NULL + if(start>(tempPos)){ + atomicAdd(& d_columnData[actual_col].countNULL, 1L); + pos++; + start=pos; + col++; + actual_col++; + continue; + } + + long countNumber=0; + long countDecimal=0; + long countSlash=0; + long countDash=0; + long countPlus=0; + long countColon=0; + long countString=0; + long countExponent=0; + + // Modify start & end to ignore whitespace and quotechars + // This could possibly result in additional empty fields + adjustForWhitespaceAndQuotes(raw_csv, &start, &tempPos); + + const long strLen = tempPos - start + 1; + + const bool maybe_hex = ((strLen > 2 && raw_csv[start] == '0' && raw_csv[start + 1] == 'x') || + (strLen > 3 && raw_csv[start] == '-' && raw_csv[start + 1] == '0' && raw_csv[start + 2] == 'x')); + + for(long startPos=start; startPos<=tempPos; 
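        // For illustration: the isDigit(c, maybe_hex) check used in this loop must also
        // accept hexadecimal digits once the field has been recognized as 0x.../-0x....
        // A minimal standalone equivalent (sketch only, not the helper defined elsewhere
        // in this file):
        //
        //   __host__ __device__ bool is_digit(char c, bool as_hex) {
        //     if (c >= '0' && c <= '9') return true;
        //     return as_hex && ((c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F'));
        //   }
        //
        // The 'x' in "0x..." is not counted as a digit; the code below compensates by
        // reducing the required digit count by one for hexadecimal fields.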
startPos++){ + if(isDigit(raw_csv[startPos], maybe_hex)){ + countNumber++; + continue; + } + // Looking for unique characters that will help identify column types. + switch (raw_csv[startPos]){ + case '.': + countDecimal++;break; + case '-': + countDash++; break; + case '+': + countPlus++; break; + case '/': + countSlash++;break; + case ':': + countColon++;break; + case 'e': + case 'E': + if (!maybe_hex && startPos > start && startPos < tempPos) + countExponent++;break; + default: + countString++; + break; + } + } + const int countSign = countDash + countPlus; + + // Integers have to have the length of the string + long int_req_number_cnt = strLen; + // Off by one if they start with a minus sign + if((raw_csv[start]=='-' || raw_csv[start]=='+') && strLen > 1){ + --int_req_number_cnt; + } + // Off by one if they are a hexadecimal number + if(maybe_hex) { + --int_req_number_cnt; + } + + if(strLen==0){ // Removed spaces ' ' in the pre-processing and thus we can have an empty string. + atomicAdd(& d_columnData[actual_col].countNULL, 1L); + } + else if(countNumber==int_req_number_cnt){ + // Checking to see if we the integer value requires 8,16,32,64 bits. + // This will allow us to allocate the exact amount of memory. + const auto value = convertStrToValue(raw_csv, start, tempPos, opts); + const size_t field_len = tempPos - start + 1; + if (serializedTrieContains(opts.trueValuesTrie, raw_csv + start, field_len) || + serializedTrieContains(opts.falseValuesTrie, raw_csv + start, field_len)){ + atomicAdd(& d_columnData[actual_col].countBool, 1); + } + else if(value >= (1L<<31)){ + atomicAdd(& d_columnData[actual_col].countInt64, 1); + } + else if(value >= (1L<<15)){ + atomicAdd(& d_columnData[actual_col].countInt32, 1); + } + else if(value >= (1L<<7)){ + atomicAdd(& d_columnData[actual_col].countInt16, 1); + } + else{ + atomicAdd(& d_columnData[actual_col].countInt8, 1); + } + } + else if(isLikeFloat(strLen, countNumber, countDecimal, countSign, countExponent)){ + atomicAdd(& d_columnData[actual_col].countFloat, 1); + } + // The date-time field cannot have more than 3 strings. As such if an entry has more than 3 string characters, it is not + // a data-time field. Also, if a string has multiple decimals, then is not a legit number. + else if(countString > 3 || countDecimal > 1){ + atomicAdd(& d_columnData[actual_col].countString, 1); + } + else { + // A date field can have either one or two '-' or '\'. A legal combination will only have one of them. + // To simplify the process of auto column detection, we are not covering all the date-time formation permutations. + if((countDash>0 && countDash<=2 && countSlash==0)|| (countDash==0 && countSlash>0 && countSlash<=2) ){ + if((countColon<=2)){ + atomicAdd(& d_columnData[actual_col].countDateAndTime, 1); + } + else{ + atomicAdd(& d_columnData[actual_col].countString, 1); + } + } + // Default field is string type. + else{ + atomicAdd(& d_columnData[actual_col].countString, 1); + } + } + actual_col++; + } + pos++; + start=pos; + col++; + + } +} diff --git a/cuda_code/csv_test_12.cu b/cuda_code/csv_test_12.cu new file mode 100644 index 0000000000000000000000000000000000000000..6ce9f6f142deb6dc7e747ac7754ea6a57220a729 --- /dev/null +++ b/cuda_code/csv_test_12.cu @@ -0,0 +1,829 @@ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +#include +#include + +#include + +#include +#include +#include + +#include + +#include "tests/utilities/cudf_test_fixtures.h" + +TempDirTestEnvironment* const temp_env = static_cast( + ::testing::AddGlobalTestEnvironment(new TempDirTestEnvironment)); + +MATCHER_P(FloatNearPointwise, tolerance, "Out of range") +{ + return (std::get<0>(arg)>std::get<1>(arg)-tolerance && + std::get<0>(arg)(arg)+tolerance) ; +} + +bool checkFile(const std::string fname) +{ + struct stat st; + return (stat(fname.c_str(), &st) ? 0 : 1); +} + +// DESCRIPTION: Simple test internal helper class to transfer cudf column data +// from device to host for test comparisons and debugging/development +template +class gdf_host_column +{ +public: + gdf_host_column() = delete; + explicit gdf_host_column(gdf_column* const col) + { + m_hostdata = std::vector(col->size); + cudaMemcpy(m_hostdata.data(), col->data, sizeof(T) * col->size, cudaMemcpyDeviceToHost); + } + + auto hostdata() const -> const auto& + { + return m_hostdata; + } + void print() const + { + for (size_t i = 0; i < m_hostdata.size(); ++i) + { + std::cout.precision(17); + std::cout << "[" << i << "]: value=" << m_hostdata[i] << "\n"; + } + } + +private: + std::vector m_hostdata; +}; + +TEST(gdf_csv_test, DetectColumns) +{ + const std::string fname = temp_env->get_temp_dir()+"DetectColumnsTest.csv"; + const char* names[] = { "A", "B", "C" }; + const char* use_cols[] = { "A", "C" }; + + // types are { "int", "float64", "int" }; + std::ofstream outfile(fname, std::ofstream::out); + outfile << " 20, 0.40, 100\n"\ + "-21,-0.41, 101\n"\ + " 22, 0.42, 102\n"\ + "-23,-0.43, 103\n"; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.dtype = NULL; + args.delimiter = ','; + args.lineterminator = '\n'; + args.decimal = '.'; + args.skip_blank_lines = true; + args.header = -1; + args.nrows = -1; + args.use_cols_char = use_cols; + args.use_cols_char_len = 2; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + // cudf auto detect type code uses INT64 + ASSERT_EQ( args.data[0]->dtype, GDF_INT64 ); + ASSERT_EQ( args.data[1]->dtype, GDF_INT64 ); + auto ACol = gdf_host_column(args.data[0]); + auto BCol = gdf_host_column(args.data[1]); + EXPECT_THAT( ACol.hostdata(), ::testing::ElementsAre(20, -21, 22, -23) ); + EXPECT_THAT( BCol.hostdata(), ::testing::ElementsAre(100, 101, 102, 103) ); + } +} + +TEST(gdf_csv_test, UseColumns) +{ + const std::string fname = temp_env->get_temp_dir()+"UseColumnsTest.csv"; + const char* names[] = { "A", "B", "C" }; + const char* types[] = { "int", "float64", "int" }; + const char* use_cols[] = { "A", "C" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << " 20, 0.40, 100\n"\ + "-21,-0.41, 101\n"\ + " 22, 0.42, 102\n"\ + "-23,-0.43, 103\n"; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + 
args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = std::extent::value; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = '\n'; + args.decimal = '.'; + args.skip_blank_lines = true; + args.header = -1; + args.nrows = -1; + args.use_cols_char = use_cols; + args.use_cols_char_len = 2; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + ASSERT_EQ( args.data[0]->dtype, GDF_INT32 ); + ASSERT_EQ( args.data[1]->dtype, GDF_INT32 ); + auto ACol = gdf_host_column(args.data[0]); + auto BCol = gdf_host_column(args.data[1]); + EXPECT_THAT( ACol.hostdata(), ::testing::ElementsAre(20, -21, 22, -23) ); + EXPECT_THAT( BCol.hostdata(), ::testing::ElementsAre(100, 101, 102, 103) ); + } +} + +TEST(gdf_csv_test, Numbers) +{ + const std::string fname = temp_env->get_temp_dir()+"CsvNumbersTest.csv"; + const char* names[] = { "A", "B", "C", "D", "E" }; + const char* types[] = { "short", "int", "long", "float64", "float32" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << " 10, 20, 30, 0.40, 50000\n"\ + "-11,-21,-31,-0.41,-51111\n"\ + " 12, 22, 32, 0.42, 52222\n"\ + "-13,-23,-33,-0.43,-53333\n"; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = std::extent::value; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = '\n'; + args.decimal = '.'; + args.skip_blank_lines = true; + args.header = -1; + args.nrows = -1; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + ASSERT_EQ( args.data[0]->dtype, GDF_INT16 ); + ASSERT_EQ( args.data[1]->dtype, GDF_INT32 ); + ASSERT_EQ( args.data[2]->dtype, GDF_INT64 ); + ASSERT_EQ( args.data[3]->dtype, GDF_FLOAT64 ); + ASSERT_EQ( args.data[4]->dtype, GDF_FLOAT32 ); + auto ACol = gdf_host_column(args.data[0]); + auto BCol = gdf_host_column(args.data[1]); + auto CCol = gdf_host_column(args.data[2]); + auto DCol = gdf_host_column(args.data[3]); + auto ECol = gdf_host_column(args.data[4]); + EXPECT_THAT( ACol.hostdata(), ::testing::ElementsAre(10, -11, 12, -13) ); + EXPECT_THAT( BCol.hostdata(), ::testing::ElementsAre(20, -21, 22, -23) ); + EXPECT_THAT( CCol.hostdata(), ::testing::ElementsAre(30, -31, 32, -33) ); + EXPECT_THAT( DCol.hostdata(), + ::testing::Pointwise(FloatNearPointwise(1e-7), + std::vector{ 0.40, -0.41, 0.42, -0.43 }) ); + EXPECT_THAT( ECol.hostdata(), + ::testing::Pointwise(FloatNearPointwise(1e-7), + std::vector{ 50000, -51111, 52222, -53333 }) ); + } +} + +TEST(gdf_csv_test, MortPerf) +{ + gdf_error error = GDF_SUCCESS; + + csv_read_arg args{}; + const int num_cols = 31; + + args.num_names = num_cols; + args.nrows = -1; + + const char ** dnames = new const char *[num_cols] { + "loan_id", + "monthly_reporting_period", + "servicer", + "interest_rate", + "current_actual_upb", + "loan_age", + "remaining_months_to_legal_maturity", + "adj_remaining_months_to_maturity", + "maturity_date", + "msa", + "current_loan_delinquency_status", + "mod_flag", + "zero_balance_code", + "zero_balance_effective_date", + "last_paid_installment_date", + "foreclosed_after", + "disposition_date", + "foreclosure_costs", + "prop_preservation_and_repair_costs", + "asset_recovery_costs", + "misc_holding_expenses", + "holding_taxes", + "net_sale_proceeds", + "credit_enhancement_proceeds", + "repurchase_make_whole_proceeds", + "other_foreclosure_proceeds", + 
"non_interest_bearing_upb", + "principal_forgiveness_upb", + "repurchase_make_whole_proceeds_flag", + "foreclosure_principal_write_off_amount", + "servicing_activity_indicator" + }; + args.names = dnames; + + args.num_dtype = num_cols; + const char ** dtype = new const char *[num_cols] { + "int64", + "date", + "category", + "float64", + "float64", + "float64", + "float64", + "float64", + "date", + "float64", + "category", + "category", + "category", + "date", + "date", + "date", + "date", + "float64", + "float64", + "float64", + "float64", + "float64", + "float64", + "float64", + "float64", + "float64", + "float64", + "float64", + "category", + "float64", + "category" + }; + + args.dtype = dtype; + + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = (char *)("Performance_2000Q1.txt"); + + if ( checkFile(args.filepath_or_buffer)) + { + args.delimiter = '|'; + args.lineterminator = '\n'; + args.delim_whitespace = 0; + args.skipinitialspace = 0; + args.skiprows = 0; + args.skipfooter = 0; + args.dayfirst = 0; + args.mangle_dupe_cols=true; + args.num_cols_out=0; + + args.use_cols_int = NULL; + args.use_cols_char = NULL; + args.use_cols_char_len = 0; + args.use_cols_int_len = 0; + + + args.names = NULL; + args.dtype = NULL; + + + error = read_csv(&args); + } + + EXPECT_TRUE( error == GDF_SUCCESS ); +} + +TEST(gdf_csv_test, Strings) +{ + const std::string fname = temp_env->get_temp_dir()+"CsvStringsTest.csv"; + const char* names[] = { "line", "verse" }; + const char* types[] = { "int32", "str" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << names[0] << ',' << names[1] << ',' << '\n'; + outfile << "10,abc def ghi" << '\n'; + outfile << "20,\"jkl mno pqr\"" << '\n'; + outfile << "30,stu \"\"vwx\"\" yz" << '\n'; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = args.num_names; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = '\n'; + args.skip_blank_lines = true; + args.header = 0; + args.nrows = -1; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + // No filtering of any columns + EXPECT_EQ( args.num_cols_out, args.num_names ); + + // Check the parsed string column metadata + ASSERT_EQ( args.data[1]->dtype, GDF_STRING ); + auto stringList = reinterpret_cast(args.data[1]->data); + + ASSERT_NE( stringList, nullptr ); + auto stringCount = stringList->size(); + ASSERT_EQ( stringCount, 3u ); + auto stringLengths = std::unique_ptr{ new int[stringCount] }; + ASSERT_NE( stringList->byte_count(stringLengths.get(), false), 0u ); + + // Check the actual strings themselves + auto strings = std::unique_ptr{ new char*[stringCount] }; + for (size_t i = 0; i < stringCount; ++i) { + ASSERT_GT( stringLengths[i], 0 ); + strings[i] = new char[stringLengths[i] + 1]; + strings[i][stringLengths[i]] = 0; + } + EXPECT_EQ( stringList->to_host(strings.get(), 0, stringCount), 0 ); + + EXPECT_STREQ( strings[0], "abc def ghi" ); + EXPECT_STREQ( strings[1], "\"jkl mno pqr\"" ); + EXPECT_STREQ( strings[2], "stu \"\"vwx\"\" yz" ); + for (size_t i = 0; i < stringCount; ++i) { + delete[] strings[i]; + } + } +} + +TEST(gdf_csv_test, QuotedStrings) +{ + const std::string fname = temp_env->get_temp_dir()+"CsvQuotedStringsTest.csv"; + const char* names[] = { "line", "verse" }; + const char* types[] = { "int32", "str" }; + + std::ofstream outfile(fname, 
std::ofstream::out); + outfile << names[0] << ',' << names[1] << ',' << '\n'; + outfile << "10,`abc,\ndef, ghi`" << '\n'; + outfile << "20,`jkl, ``mno``, pqr`" << '\n'; + outfile << "30,stu `vwx` yz" << '\n'; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = args.num_names; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = '\n'; + args.quotechar = '`'; + args.quoting = QUOTE_ALL; // enable quoting + args.doublequote = true; // replace double quotechar with single + args.skip_blank_lines = true; + args.header = 0; + args.nrows = -1; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + // No filtering of any columns + EXPECT_EQ( args.num_cols_out, args.num_names ); + + // Check the parsed string column metadata + ASSERT_EQ( args.data[1]->dtype, GDF_STRING ); + auto stringList = reinterpret_cast(args.data[1]->data); + + ASSERT_NE( stringList, nullptr ); + auto stringCount = stringList->size(); + ASSERT_EQ( stringCount, 3u ); + auto stringLengths = std::unique_ptr{ new int[stringCount] }; + ASSERT_NE( stringList->len(stringLengths.get(), false), 0u ); + + // Check the actual strings themselves + auto strings = std::unique_ptr{ new char*[stringCount] }; + for (size_t i = 0; i < stringCount; ++i) { + ASSERT_GT( stringLengths[i], 0 ); + strings[i] = new char[stringLengths[i]+1]; + strings[i][stringLengths[i]] = 0; + } + EXPECT_EQ( stringList->to_host(strings.get(), 0, stringCount), 0 ); + EXPECT_STREQ( strings[0], "abc,\ndef, ghi" ); + EXPECT_STREQ( strings[1], "jkl, `mno`, pqr" ); + EXPECT_STREQ( strings[2], "stu `vwx` yz" ); + for (size_t i = 0; i < stringCount; ++i) { + delete[] strings[i]; + } + } +} + +TEST(gdf_csv_test, IgnoreQuotes) +{ + const std::string fname = temp_env->get_temp_dir()+"CsvIgnoreQuotesTest.csv"; + const char* names[] = { "line", "verse" }; + const char* types[] = { "int32", "str" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << names[0] << ',' << names[1] << ',' << '\n'; + outfile << "10,\"abcdef ghi\"" << '\n'; + outfile << "20,\"jkl \"\"mno\"\" pqr\"" << '\n'; + outfile << "30,stu \"vwx\" yz" << '\n'; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = args.num_names; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = '\n'; + args.quotechar = '\"'; + args.quoting = QUOTE_NONE; // disable quoting + args.doublequote = false; // do not replace double quotechar with single + args.skip_blank_lines = true; + args.header = 0; + args.nrows = -1; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + // No filtering of any columns + EXPECT_EQ( args.num_cols_out, args.num_names ); + + // Check the parsed string column metadata + ASSERT_EQ( args.data[1]->dtype, GDF_STRING ); + auto stringList = reinterpret_cast(args.data[1]->data); + + ASSERT_NE( stringList, nullptr ); + auto stringCount = stringList->size(); + ASSERT_EQ( stringCount, 3u ); + auto stringLengths = std::unique_ptr{ new int[stringCount] }; + ASSERT_NE( stringList->byte_count(stringLengths.get(), false), 0u ); + + // Check the actual strings themselves + auto strings = std::unique_ptr{ new char*[stringCount] }; + for (size_t i = 0; i < stringCount; ++i) 
{ + ASSERT_GT( stringLengths[i], 0 ); + strings[i] = new char[stringLengths[i] + 1]; + strings[i][stringLengths[i]] = 0; + } + EXPECT_EQ( stringList->to_host(strings.get(), 0, stringCount), 0 ); + EXPECT_STREQ( strings[0], "\"abcdef ghi\"" ); + EXPECT_STREQ( strings[1], "\"jkl \"\"mno\"\" pqr\"" ); + EXPECT_STREQ( strings[2], "stu \"vwx\" yz" ); + for (size_t i = 0; i < stringCount; ++i) { + delete[] strings[i]; + } + } +} + +TEST(gdf_csv_test, Booleans) +{ + const std::string fname = temp_env->get_temp_dir() + "CsvBooleansTest.csv"; + const char* names[] = {"A", "B", "C", "D"}; + const char* types[] = {"int32", "int32", "short", "bool"}; + const char* trueValues[] = {"yes", "Yes", "YES", "foo", "FOO"}; + const char* falseValues[] = {"no", "No", "NO", "Bar", "bar"}; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << "YES,1,bar,true\nno,2,FOO,true\nBar,3,yes,false\nNo,4,NO," + "true\nYes,5,foo,false\n"; + outfile.close(); + ASSERT_TRUE(checkFile(fname)); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = args.num_names; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = '\n'; + args.skip_blank_lines = true; + args.true_values = trueValues; + args.num_true_values = std::extent::value; + args.false_values = falseValues; + args.num_false_values = std::extent::value; + args.header = -1; + args.nrows = -1; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + // Booleans are the same (integer) data type, but valued at 0 or 1 + EXPECT_EQ( args.num_cols_out, args.num_names ); + ASSERT_EQ( args.data[0]->dtype, GDF_INT32 ); + ASSERT_EQ( args.data[2]->dtype, GDF_INT16 ); + ASSERT_EQ( args.data[3]->dtype, GDF_BOOL8 ); + + auto firstCol = gdf_host_column(args.data[0]); + EXPECT_THAT(firstCol.hostdata(), ::testing::ElementsAre(1, 0, 0, 0, 1)); + auto thirdCol = gdf_host_column(args.data[2]); + EXPECT_THAT(thirdCol.hostdata(), ::testing::ElementsAre(0, 1, 1, 0, 1)); + auto fourthCol = gdf_host_column(args.data[3]); + EXPECT_THAT( + fourthCol.hostdata(), + ::testing::ElementsAre(cudf::true_v, cudf::true_v, cudf::false_v, + cudf::true_v, cudf::false_v)); + } +} + +TEST(gdf_csv_test, Dates) +{ + const std::string fname = temp_env->get_temp_dir()+"CsvDatesTest.csv"; + const char* names[] = { "A" }; + const char* types[] = { "date" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << "05/03/2001\n31/10/2010\n20/10/1994\n18/10/1990\n1/1/1970\n"; + outfile << "18/04/1995\n14/07/1994\n07/06/2006 11:20:30.400\n"; + outfile << "16/09/2005T1:2:30.400PM\n2/2/1970\n"; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = args.num_names; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = '\n'; + args.dayfirst = true; + args.skip_blank_lines = true; + args.header = -1; + args.nrows = -1; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + EXPECT_EQ( args.num_cols_out, args.num_names ); + ASSERT_EQ( args.data[0]->dtype, GDF_DATE64 ); + + auto ACol = gdf_host_column(args.data[0]); + EXPECT_THAT( ACol.hostdata(), + ::testing::ElementsAre(983750400000, 1288483200000, 782611200000, + 656208000000, 0, 798163200000, 774144000000, + 1149679230400, 1126875750400, 2764800000) ); + } +} + +TEST(gdf_csv_test, 
FloatingPoint) +{ + const std::string fname = temp_env->get_temp_dir()+"CsvFloatingPoint.csv"; + const char* names[] = { "A" }; + const char* types[] = { "float32" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << "5.6;0.5679e2;1.2e10;0.07e1;3000e-3;12.34e0;3.1e-001;-73.98007199999998;"; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = args.num_names; + args.dtype = types; + args.decimal = '.'; + args.delimiter = ','; + args.lineterminator = ';'; + args.skip_blank_lines = true; + args.header = -1; + args.nrows = -1; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + EXPECT_EQ( args.num_cols_out, args.num_names ); + ASSERT_EQ( args.data[0]->dtype, GDF_FLOAT32 ); + + auto ACol = gdf_host_column(args.data[0]); + EXPECT_THAT( ACol.hostdata(), + ::testing::Pointwise(FloatNearPointwise(1e-6), + std::vector{ 5.6, 56.79, 12000000000, 0.7, 3.000, 12.34, 0.31, -73.98007199999998 }) ); + } +} + +TEST(gdf_csv_test, Category) +{ + const std::string fname = temp_env->get_temp_dir()+"CsvCategory.csv"; + const char* names[] = { "UserID" }; + const char* types[] = { "category" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << "HBM0676;KRC0842;ILM1441;EJV0094;"; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = args.num_names; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = ';'; + args.header = -1; + args.nrows = -1; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + EXPECT_EQ( args.num_cols_out, args.num_names ); + ASSERT_EQ( args.data[0]->dtype, GDF_CATEGORY ); + + auto ACol = gdf_host_column(args.data[0]); + EXPECT_THAT( ACol.hostdata(), + ::testing::ElementsAre(2022314536, -189888986, 1512937027, 397836265) ); + } +} + +TEST(gdf_csv_test, SkiprowsNrows) +{ + const std::string fname = temp_env->get_temp_dir()+"CsvSkiprowsNrows.csv"; + const char* names[] = { "A" }; + const char* types[] = { "int32" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << "1\n2\n3\n4\n5\n6\n7\n8\n9\n"; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = args.num_names; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = '\n'; + args.skip_blank_lines = true; + args.header = 1; + args.skiprows = 2; + args.nrows = 2; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + EXPECT_EQ( args.num_cols_out, args.num_names ); + ASSERT_EQ( args.data[0]->dtype, GDF_INT32 ); + + auto ACol = gdf_host_column(args.data[0]); + EXPECT_THAT( ACol.hostdata(), ::testing::ElementsAre(5, 6) ); + } +} + +TEST(gdf_csv_test, ByteRange) +{ + const std::string fname = temp_env->get_temp_dir()+"CsvByteRange.csv"; + const char* names[] = { "A" }; + const char* types[] = { "int32" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << "1000\n2000\n3000\n4000\n5000\n6000\n7000\n8000\n9000\n"; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + 
args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = args.num_names; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = '\n'; + args.skip_blank_lines = true; + args.header = -1; + args.nrows = -1; + args.byte_range_offset = 11; + args.byte_range_size = 15; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + EXPECT_EQ( args.num_cols_out, args.num_names ); + ASSERT_EQ( args.data[0]->dtype, GDF_INT32 ); + + auto ACol = gdf_host_column(args.data[0]); + EXPECT_THAT( ACol.hostdata(), ::testing::ElementsAre(4000, 5000, 6000) ); + } +} + +TEST(gdf_csv_test, BlanksAndComments) +{ + const std::string fname = temp_env->get_temp_dir()+"BlanksAndComments.csv"; + const char* names[] = { "A" }; + const char* types[] = { "int32" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << "1\n#blank\n3\n4\n5\n#blank\n\n\n8\n9\n"; + outfile.close(); + ASSERT_TRUE( checkFile(fname) ); + + { + csv_read_arg args{}; + args.input_data_form = gdf_csv_input_form::FILE_PATH; + args.filepath_or_buffer = fname.c_str(); + args.num_names = std::extent::value; + args.names = names; + args.num_dtype = args.num_names; + args.dtype = types; + args.delimiter = ','; + args.lineterminator = '\n'; + args.skip_blank_lines = true; + args.header = -1; + args.comment = '#'; + args.nrows = -1; + EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); + + EXPECT_EQ( args.num_cols_out, args.num_names ); + ASSERT_EQ( args.data[0]->dtype, GDF_INT32 ); + + auto ACol = gdf_host_column(args.data[0]); + EXPECT_THAT( ACol.hostdata(), ::testing::ElementsAre(1, 3, 4, 5, 8, 9) ); + } +} + +TEST(gdf_csv_test, Writer) +{ + const std::string fname = temp_env->get_temp_dir()+"CsvWriteTest.csv"; + const char* names[] = { "boolean", "integer", "float", "string" }; + const char* types[] = { "bool", "int32", "float32", "str" }; + + std::ofstream outfile(fname, std::ofstream::out); + outfile << "true,1,1.0,one" << '\n'; + outfile << "false,2,2.25,two" << '\n'; + outfile << "false,3,3.50,three" << '\n'; + outfile << "true,4,4.75,four" << '\n'; + outfile << "false,5,5.0,five" << '\n'; + outfile.close(); + + csv_read_arg rargs{}; + rargs.input_data_form = gdf_csv_input_form::FILE_PATH; + rargs.filepath_or_buffer = fname.c_str(); + rargs.num_names = std::extent::value; + rargs.names = names; + rargs.num_dtype = rargs.num_names; + rargs.dtype = types; + rargs.decimal = '.'; + rargs.delimiter = ','; + rargs.lineterminator = '\n'; + rargs.skip_blank_lines = true; + rargs.header = -1; + rargs.nrows = -1; + EXPECT_EQ( read_csv(&rargs), GDF_SUCCESS ); + + const std::string ofname = temp_env->get_temp_dir()+"CsvWriteTestOut.csv"; + csv_write_arg wargs{}; + wargs.columns = rargs.data; // columns from reader above + wargs.filepath = ofname.c_str(); + wargs.num_cols = rargs.num_cols_out; + wargs.delimiter = ','; + wargs.line_terminator = "\n"; + + EXPECT_EQ( write_csv(&wargs), GDF_SUCCESS ); + + std::ifstream infile(ofname); + std::string csv((std::istreambuf_iterator(infile)), std::istreambuf_iterator()); + std::string verify = + "\"boolean\",\"integer\",\"float\",\"string\"\n" + "true,1,1,\"one\"\n" + "false,2,2.25,\"two\"\n" + "false,3,3.5,\"three\"\n" + "true,4,4.75,\"four\"\n" + "false,5,5,\"five\"\n"; + EXPECT_STREQ( csv.c_str(), verify.c_str() ); +} diff --git a/cuda_code/csv_writer_10.cu b/cuda_code/csv_writer_10.cu new file mode 100644 index 0000000000000000000000000000000000000000..770d0d22b9795befab626e7b36be51b6f1a99f91 --- /dev/null +++ b/cuda_code/csv_writer_10.cu @@ -0,0 
+1,391 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +// Functor for type-dispatcher converts columns into strings +struct column_to_strings_fn +{ + const gdf_column* column; + gdf_valid_type* valid; + gdf_size_type row_offset, rows; + const char* true_string; + const char* false_string; + template + NVStrings* operator()() + { + throw std::runtime_error("column type not supported"); + } + + // convert cudf time units to nvstrings timestamp units + NVStrings::timestamp_units cudf2nvs( gdf_time_unit time_unit ) + { + if( time_unit==TIME_UNIT_s ) + return NVStrings::seconds; + if( time_unit==TIME_UNIT_us ) + return NVStrings::us; + if( time_unit==TIME_UNIT_ns ) + return NVStrings::ns; + return NVStrings::ms; + } +}; + +// specialization code for each type +template<> +NVStrings* column_to_strings_fn::operator()() +{ + auto d_src = (static_cast(column->data)) + row_offset; + device_buffer int_buffer(rows); + thrust::transform( rmm::exec_policy()->on(0), d_src, d_src + rows, int_buffer.data(), + [] __device__(const int8_t value) { return int32_t{value}; }); + return NVStrings::itos(int_buffer.data(), rows, valid); +} + +template<> +NVStrings* column_to_strings_fn::operator()() +{ + auto d_src = (static_cast(column->data)) + row_offset; + device_buffer int_buffer(rows); + thrust::transform( rmm::exec_policy()->on(0), d_src, d_src + rows, int_buffer.data(), + [] __device__(const int16_t value) { return int32_t{value}; }); + return NVStrings::itos(int_buffer.data(), rows, valid); +} + +template<> +NVStrings* column_to_strings_fn::operator()() +{ + return NVStrings::itos((static_cast(column->data)) + row_offset, rows, valid); +} + +template<> +NVStrings* column_to_strings_fn::operator()() +{ + return NVStrings::ltos((static_cast(column->data)) + row_offset, rows, valid); +} + +template<> +NVStrings* column_to_strings_fn::operator()() +{ + return NVStrings::ftos((static_cast(column->data)) + row_offset, rows, valid); +} + +template<> +NVStrings* column_to_strings_fn::operator()() +{ + return NVStrings::dtos((static_cast(column->data)) + row_offset, rows, valid); +} + +template<> +NVStrings* column_to_strings_fn::operator()() +{ + if( sizeof(bool) == sizeof(cudf::bool8) ) + return NVStrings::create_from_bools((static_cast(column->data)) + row_offset, rows, true_string, false_string, valid); + else + { + auto d_src = (static_cast(column->data)) + row_offset; + device_buffer bool_buffer(rows); + thrust::transform( rmm::exec_policy()->on(0), d_src, d_src + rows, bool_buffer.data(), + [] __device__(const cudf::bool8 value) { return bool{value}; }); + return NVStrings::create_from_bools(bool_buffer.data(), rows, true_string, false_string, valid); + } +} + +template<> +NVStrings* column_to_strings_fn::operator()() +{ + NVStrings::timestamp_units units = NVStrings::days; + if( column->dtype_info.time_unit != 
TIME_UNIT_NONE ) + units = cudf2nvs(column->dtype_info.time_unit); + auto d_src = (static_cast(column->data)) + row_offset; + device_buffer ulong_buffer(rows); + thrust::transform( rmm::exec_policy()->on(0), d_src, d_src + rows, ulong_buffer.data(), + [] __device__(const cudf::date32 value) { return (unsigned long)(int32_t{value}); }); + return NVStrings::long2timestamp(ulong_buffer.data(), rows, units, nullptr, valid); +} + +template<> +NVStrings* column_to_strings_fn::operator()() +{ + return NVStrings::long2timestamp(static_cast(column->data) + row_offset, rows, + NVStrings::ms, nullptr, valid); +} + +template<> +NVStrings* column_to_strings_fn::operator()() +{ + NVStrings::timestamp_units units = cudf2nvs(column->dtype_info.time_unit); + return NVStrings::long2timestamp(static_cast(column->data) + row_offset, rows, + units, nullptr, valid); +} + +template<> +NVStrings* column_to_strings_fn::operator()() +{ + NVCategory* category = reinterpret_cast(column->dtype_info.category); + CUDF_EXPECTS( category != nullptr, "write_csv: invalid category column"); + return category->gather_strings((static_cast(column->data)) + row_offset, rows); +} + + +// +// This is called by the write_csv method below. +// +// Parameters: +// - column: The column to be converted. +// - row_offset: Number entries from the beginning to skip; must be multiple of 8. +// - rows: Number of rows from the offset that should be converted for this column. +// - delimiter: Separator to append to the column strings +// - null_representation: String to use for null entries +// - true_string: String to use for 'true' values in boolean columns +// - false_string: String to use for 'false' values in boolean columns +// Return: NVStrings instance formated for CSV column output. +// +NVStrings* column_to_strings_csv(const gdf_column* column, gdf_size_type row_offset, gdf_size_type rows, + const char* delimiter, const char* null_representation, + const char* true_string, const char* false_string ) +{ + NVStrings* rtn = nullptr; + // point the null bitmask to the next set of bits associated with this chunk of rows + gdf_valid_type* valid = column->valid; + if( valid ) // normalize row_offset (number of bits here) + valid += (row_offset / GDF_VALID_BITSIZE); // to appropriate pointer for the bitmask + + if( column->dtype == GDF_STRING ) + rtn = (static_cast(column->data))->sublist(row_offset,row_offset+rows); + else + rtn = cudf::type_dispatcher(column->dtype, column_to_strings_fn{column,valid,row_offset,rows,true_string,false_string}); + + CUDF_EXPECTS( rtn != nullptr, "write_csv: unsupported column type"); + + // replace nulls if specified + if( null_representation ) + { + NVStrings* nstr = rtn->fillna(null_representation); + NVStrings::destroy(rtn); + rtn = nstr; + } + + // probably could collapse this more + bool bquoted = (column->dtype==GDF_STRING || column->dtype==GDF_DATE64 || column->dtype==GDF_TIMESTAMP); + // check for delimiters and quotes + bool* bmatches = nullptr; + RMM_TRY( RMM_ALLOC(&bmatches,rows*sizeof(bool),0) ); + if( rtn->contains("\"",bmatches) > 0 ) + { + NVStrings* esc = rtn->replace("\"","\"\""); + NVStrings::destroy(rtn); + rtn = esc; + } + else if( rtn->contains(",",bmatches) > 0 ) + bquoted = true; + RMM_TRY( RMM_FREE( bmatches, 0 ) ); + if( bquoted ) + { + // prepend and append quotes if needed + NVStrings* pre = rtn->slice_replace("\"",0,0); + NVStrings::destroy(rtn); + rtn = pre->slice_replace("\"",-1,-1); + NVStrings::destroy(pre); + } + // append the delimiter last + if( delimiter && *delimiter ) + 
{ + NVStrings* dstr = rtn->slice_replace(delimiter,-1,-1); + NVStrings::destroy(rtn); + rtn = dstr; + } + return rtn; +} + +//--------------------------------------------------------------------------- +// Creates CSV file from array of gdf_columns. +// +// This will create the CSV format by allocating host memory for the +// entire output and determine pointers for each row/column entry. +// Each column is converted to an NVStrings instance and then +// copied into their position in the output memory. This way, +// one column is processed at a time minimizing device memory usage. +// +//--------------------------------------------------------------------------- +gdf_error write_csv(csv_write_arg* args) +{ + // when args becomes a struct/class these can be modified + auto columns = args->columns; + unsigned int count = (unsigned int)args->num_cols; + gdf_size_type total_rows = columns[0]->size; + const char* filepath = args->filepath; + char delimiter[2] = {',','\0'}; + if( args->delimiter ) + delimiter[0] = args->delimiter; + const char* terminator = "\n"; + if( args->line_terminator ) + terminator = args->line_terminator; + const char* narep = ""; + if( args->na_rep ) + narep = args->na_rep; + const char* true_value = (args->true_value ? args->true_value : "true"); + const char* false_value = (args->false_value ? args->false_value : "false"); + bool include_header = args->include_header; + + // check for issues here + CUDF_EXPECTS( filepath!=nullptr, "write_csv: filepath not specified" ); + CUDF_EXPECTS( count!=0, "write_csv: num_cols is required" ); + CUDF_EXPECTS( columns!=0, "write_csv: invalid data values" ); + + // check all columns are the same size + const bool all_sizes_match = std::all_of( columns, columns+count, + [total_rows] (auto col) { + if( col->dtype==GDF_STRING ) + { + NVStrings* strs = (NVStrings*)col->data; + unsigned int elems = strs != nullptr ? strs->size() : 0; + return (total_rows==(gdf_size_type)elems); + } + return (total_rows==col->size); + }); + CUDF_EXPECTS( all_sizes_match, "write_csv: columns sizes do not match" ); + + // check the file can be written + std::ofstream filecsv(filepath,std::ios::out|std::ios::binary|std::ios::trunc); + CUDF_EXPECTS( filecsv.is_open(), "write_csv: file could not be opened"); + + // + // This outputs the CSV in row chunks to save memory. + // Maybe we can use the total_rows*count calculation and a memory threshold + // instead of an arbitrary chunk count. + // The entire CSV chunk must fit in CPU memory before writing it out. + // + gdf_size_type rows_chunk = (args->rows_per_chunk/8)*8; // must be divisible by 8 + CUDF_EXPECTS( rows_chunk>0, "write_csv: invalid chunk_rows; must be at least 8" ); + + gdf_size_type row_offset = 0; + gdf_size_type rows = total_rows; + while( rows > 0 ) + { + if( rows > rows_chunk ) + rows = rows_chunk; + // + // Compute string lengths for each string to go into the CSV output. 
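        // In outline: each column of this row chunk is converted to an NVStrings instance with
        // its separator (the delimiter, or the line terminator for the last column) already
        // appended; byte_count() then records the byte length of every entry into one row-block
        // of the rows x count length matrix declared below, and memsize accumulates the total so
        // the whole chunk can be staged in a single host buffer before being written to the file.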
+ std::unique_ptr pstring_lengths(new int[rows*count]); // matrix of lengths + int* string_lengths = pstring_lengths.get(); // each string length in each row,column + size_t memsize = 0; + for( unsigned int idx=0; idx < count; ++idx ) + { + const gdf_column* col = columns[idx]; + const char* delim = ((idx+1)byte_count(string_lengths + (idx*rows),false); + NVStrings::destroy(strs); + } + + // + // Example string_lengths matrix for 4 columns and 7 rows + // row-sums + // col0: 1, 1, 2, 11, 12, 7, 7 | 41 + // col1: 1, 1, 2, 2, 3, 7, 6 | 22 + // col2: 20, 20, 20, 20, 20, 20, 20 | 140 + // col3: 5, 6, 4, 6, 4, 4, 5 | 34 + // -------------------------------- + // col- 27, 28, 28, 39, 39, 38, 38 = 237 (for reference only) + // sums + // + // Need to convert this into the following -- string_locations (below) + // 0, 27, 55, 83, 122, 161, 199 + // 1, 28, 57, 94, 134, 168, 206 + // 2, 29, 59, 96, 137, 175, 212 + // 22, 49, 79, 116, 157, 195, 232 + // + // This is essentially an exclusive-scan (prefix-sum) across columns. + // Moving left-to-right, add up each column and carry each value to the next column. + // Looks like we could transpose the matrix, scan it, and then untranspose it. + // Should be able to parallelize the math for this -- will look at prefix-sum algorithms. + // + std::vector buffer(memsize+1); + std::vector string_locations(rows*count); // all the memory pointers for each column + string_locations[0] = 0; // first one is always 0 + // compute offsets as described above into locations matrix + size_t offset = 0; + for( gdf_size_type jdx=0; jdx < rows; ++jdx ) + { + // add up column values for each row + // this is essentially an exclusive-scan across columns + string_locations[jdx] = (size_t)(buffer.data() + offset); // initialize first item + for( unsigned int idx=0; idx < count; ++idx ) + { + int* in = string_lengths + (idx*rows); + int len = in[jdx]; + offset += (len > 0 ? 
len:0); + if( (idx+1) < count ) + { + size_t* out = string_locations.data() + ((idx+1)*rows); + out[jdx] = (size_t)(buffer.data() + offset); + } + } + } + // now fill in the memory one column at a time + for( unsigned int idx=0; idx < count; ++idx ) + { + const gdf_column* col = columns[idx]; + const char* delim = ((idx+1)to_host((char**)colptrs,0,rows); + NVStrings::destroy(strs); + } + //buffer[memsize] = 0; // just so we can printf if needed + // now write buffer to file + // first write the header + if(include_header) + { + for( unsigned int idx=0; idx < count; ++idx ) + { + const gdf_column* col = columns[idx]; + const char* delim = ((idx+1)col_name ) + filecsv << "\"" << col->col_name << "\""; + filecsv << delim; + } + } + // now write the data + filecsv.write(buffer.data(),memsize); + + // get ready for the next chunk of rows + row_offset += rows_chunk; + if( row_offset < total_rows ) + rows = total_rows - row_offset; + else + rows = 0; + // prevent header for subsequent chunks + include_header = false; + } + + filecsv.close(); + return gdf_error::GDF_SUCCESS; +} diff --git a/cuda_code/ctc_loss_layer_4.cu b/cuda_code/ctc_loss_layer_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..1ebb64e58287014d971fc26d374c020e8b0728f1 --- /dev/null +++ b/cuda_code/ctc_loss_layer_4.cu @@ -0,0 +1,62 @@ +#include "caffe/layers/ctc_loss_layer.hpp" + +namespace caffe { + template <> + void CtcLossLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + NOT_IMPLEMENTED; + } + + template + void CtcLossLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + cudaDeviceSynchronize(); + auto options = ctcOptions{}; + options.loc = CTC_GPU; + CUDA_CHECK(cudaStreamCreate(&(options.stream))); + options.blank_label = blank_label_; + int mini_batch = bottom[0]->shape()[1]; + int alphabet_size = alphabet_size_; + const Dtype* const activations = bottom[0]->gpu_data(); + Dtype* gradients = bottom[0]->mutable_gpu_diff(); + CHECK(gradients != NULL) << "Oops, gradients is null"; + + FlattenLabels(bottom[1]); + size_t size_bytes; + CHECK_CTC_STATUS(get_workspace_size(label_lengths_.data(), + input_lengths_.data(), alphabet_size, + mini_batch, options, &size_bytes)); + void* workspace; + CUDA_CHECK(cudaMalloc(&workspace, size_bytes)); + vector cost(mini_batch); + CHECK_CTC_STATUS(compute_ctc_loss(activations, gradients, + flat_labels_.data(), + label_lengths_.data(), input_lengths_.data(), + alphabet_size, mini_batch, cost.data(), + workspace, options)); + Dtype loss = std::accumulate(cost.begin(), cost.end(), Dtype(0)); + top[0]->mutable_cpu_data()[0] = loss / mini_batch; + + CUDA_CHECK(cudaFree(workspace)); + CUDA_CHECK(cudaStreamDestroy(options.stream)); + CUDA_POST_KERNEL_CHECK; + } + + template <> + void CtcLossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; + } + + template + void CtcLossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if(propagate_down[0]) { + cudaDeviceSynchronize(); + caffe_gpu_scal(bottom[0]->count(), top[0]->cpu_diff()[0], + bottom[0]->mutable_gpu_diff()); + CUDA_POST_KERNEL_CHECK; + } + } + INSTANTIATE_LAYER_GPU_FUNCS(CtcLossLayer); +} diff --git a/cuda_code/ctrtri_lower_batched_3.cu b/cuda_code/ctrtri_lower_batched_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..9f3cde9b50ddbd6eafcf843ccc31aa3b69fe0008 --- /dev/null +++ b/cuda_code/ctrtri_lower_batched_3.cu @@ -0,0 +1,283 @@ +/* + -- MAGMA 
(version 2.1.0) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date August 2016 + + @generated from magmablas/ztrtri_lower_batched.cu, normal z -> c, Tue Aug 30 09:38:35 2016 + + @author Peng Du + @author Tingxing Dong + @author Mark Gates + @author Azzam Haidar + + This file implements lower case, and is called by ctrtri_kernel.cu. + It's convenient to have separate files for lower & upper, to diff the sources. +*/ + +#include "magma_internal.h" + +#define TRTRI_BATCHED +#include "ctrtri.cuh" +#include "ctrtri_lower_device.cuh" + + + +/******************************************************************************/ +__global__ void +ctrtri_diag_lower_kernel_batched( + magma_diag_t diag, int n, magmaFloatComplex const * const * dA_array, int lda, magmaFloatComplex **dinvA_array) +{ + int batchid = blockIdx.z; + ctrtri_diag_lower_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm16_part1_lower_kernel_batched( + int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + int batchid = blockIdx.z; + triple_cgemm16_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm16_part2_lower_kernel_batched( + int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + int batchid = blockIdx.z; + triple_cgemm16_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm32_part1_lower_kernel_batched( + int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + int batchid = blockIdx.z; + triple_cgemm32_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm32_part2_lower_kernel_batched( + int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + int batchid = blockIdx.z; + triple_cgemm32_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm64_part1_lower_kernel_batched( + int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + int batchid = blockIdx.z; + triple_cgemm64_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm64_part2_lower_kernel_batched( + int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + int batchid = blockIdx.z; + triple_cgemm64_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm_above64_part1_lower_kernel_batched( + int n, magmaFloatComplex const * const * Ain_array, int lda, 
magmaFloatComplex **dinvA_array, int jb, int npages) +{ + int batchid = blockIdx.z; + triple_cgemm_above64_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm_above64_part2_lower_kernel_batched( + int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + int batchid = blockIdx.z; + triple_cgemm_above64_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm_above64_part3_lower_kernel_batched( + int n, magmaFloatComplex const * const * Ain_array, int lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + int batchid = blockIdx.z; + triple_cgemm_above64_part3_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages); +} + + +// ============================================================================= +// vbatched kernels + + +/******************************************************************************/ +__global__ void +ctrtri_diag_lower_kernel_vbatched( + magma_diag_t diag, magma_int_t* n, magmaFloatComplex const * const * dA_array, magma_int_t* lda, magmaFloatComplex **dinvA_array) +{ + const int batchid = blockIdx.z; + const int my_n = (int)n[batchid]; + if(my_n <= 0) return; + + if(blockIdx.x >= magma_ceildiv(my_n, IB)) return; + + ctrtri_diag_lower_device(diag, my_n, dA_array[batchid], (int)lda[batchid], dinvA_array[batchid]); +} + + +// The kernels below have 3D grids +// grid.x and grid.y are independent from my_n +// only grid.y is dependent on my_n, so terminating thread blocks is based on blockIdx.y + + +/******************************************************************************/ +__global__ void +triple_cgemm16_part1_lower_kernel_vbatched( + magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + const int batchid = blockIdx.z; + const int my_n = (int)n[batchid]; + if(my_n <= 0) return; + + const int my_npages = magma_ceildiv(my_n, jb*2); + if(blockIdx.y >= my_npages*(jb/16) ) return; + triple_cgemm16_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm16_part2_lower_kernel_vbatched( + magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + const int batchid = blockIdx.z; + const int my_n = (int)n[batchid]; + if(my_n <= 0) return; + + const int my_npages = magma_ceildiv(my_n, jb*2); + if(blockIdx.y >= my_npages*(jb/16) ) return; + triple_cgemm16_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm32_part1_lower_kernel_vbatched( + magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + const int batchid = blockIdx.z; + const int my_n = (int)n[batchid]; + if(my_n <= 0) return; + + const int my_npages = magma_ceildiv(my_n, jb*2); + if(blockIdx.y >= my_npages*(jb/16) ) return; + triple_cgemm32_part1_lower_device( my_n, 
Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm32_part2_lower_kernel_vbatched( + magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + const int batchid = blockIdx.z; + const int my_n = (int)n[batchid]; + if(my_n <= 0) return; + + const int my_npages = magma_ceildiv(my_n, jb*2); + if(blockIdx.y >= my_npages*(jb/16) ) return; + triple_cgemm32_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm64_part1_lower_kernel_vbatched( + magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + const int batchid = blockIdx.z; + const int my_n = (int)n[batchid]; + if(my_n <= 0) return; + + const int my_npages = magma_ceildiv(my_n, jb*2); + if(blockIdx.y >= my_npages*(jb/16) ) return; + triple_cgemm64_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm64_part2_lower_kernel_vbatched( + magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + const int batchid = blockIdx.z; + const int my_n = (int)n[batchid]; + if(my_n <= 0) return; + + const int my_npages = magma_ceildiv(my_n, jb*2); + if(blockIdx.y >= my_npages*(jb/16) ) return; + triple_cgemm64_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm_above64_part1_lower_kernel_vbatched( + magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + const int batchid = blockIdx.z; + const int my_n = (int)n[batchid]; + if(my_n <= 0) return; + + const int my_npages = magma_ceildiv(my_n, jb*2); + if(blockIdx.y >= my_npages*(jb/16) ) return; + triple_cgemm_above64_part1_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm_above64_part2_lower_kernel_vbatched( + magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + const int batchid = blockIdx.z; + const int my_n = (int)n[batchid]; + if(my_n <= 0) return; + + const int my_npages = magma_ceildiv(my_n, jb*2); + if(blockIdx.y >= my_npages*(jb/16) ) return; + triple_cgemm_above64_part2_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); +} + + +/******************************************************************************/ +__global__ void +triple_cgemm_above64_part3_lower_kernel_vbatched( + magma_int_t* n, magmaFloatComplex const * const * Ain_array, magma_int_t* lda, magmaFloatComplex **dinvA_array, int jb, int npages) +{ + const int batchid = blockIdx.z; + const int my_n = (int)n[batchid]; + if(my_n <= 0) return; + + const int my_npages = magma_ceildiv(my_n, jb*2); + 
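    // The grid of a vbatched launch does not depend on the per-matrix n, so each block
    // recomputes the page count for its own matrix and, as in the kernels above, exits early
    // when its blockIdx.y falls outside the range this matrix actually needs.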
if(blockIdx.y >= my_npages*(jb/16) ) return; + triple_cgemm_above64_part3_lower_device( my_n, Ain_array[batchid], (int)lda[batchid], dinvA_array[batchid], jb, my_npages); +} diff --git a/cuda_code/cuCorrTimeDomain.cu b/cuda_code/cuCorrTimeDomain.cu new file mode 100644 index 0000000000000000000000000000000000000000..1167129867e8e09eb6278790b5b7cd6abf45460b --- /dev/null +++ b/cuda_code/cuCorrTimeDomain.cu @@ -0,0 +1,188 @@ +/* + * @file cuCorrTimetime.cu + * @brief Correlation between two sets of images in time domain + * + * This code is adapted from the nxcor package. + */ + +#include "cuAmpcorUtil.h" + + +// cuda kernel for cuCorrTimeDomain +template +__global__ void cuArraysCorrTime_kernel( + const int nImages, + const float *templateIn, const int templateNX, const int templateNY, const int templateSize, + const float *imageIn, const int imageNX, const int imageNY, const int imageSize, + float *resultOut, const int resultNX, const int resultNY, const int resultSize) +{ + __shared__ float shmem[nthreads*(1+NPT)]; + const int tid = threadIdx.x; + const int bid = blockIdx.x; + const int yc = blockIdx.y*NPT; + + const int imageIdx = bid; + const int imageOffset = imageIdx * imageSize; + const int templateOffset = imageIdx * templateSize; + const int resultOffset = imageIdx * resultSize; + + const float * imageD = imageIn + imageOffset + tid; + const float *templateD = templateIn + templateOffset + tid; + float * resultD = resultOut + resultOffset; + + const int q = min(nthreads/resultNY, 4); + const int nt = nthreads/q; + const int ty = threadIdx.x / nt; + const int tx = threadIdx.x - nt * ty; + + const int templateNYq = templateNY/q; + const int jbeg = templateNYq * ty; + const int jend = ty+1 >= q ? templateNY : templateNYq + jbeg; + + float *shTemplate = shmem; + float *shImage = shmem + nthreads; + float *shImage1 = shImage + tx; + + float corrCoeff[NPT]; + for (int k = 0; k < NPT; k++) + corrCoeff[k] = 0.0f; + + int iaddr = yc*imageNY; + + + float img[NPT]; + for (int k = 0; k < NPT-1; k++, iaddr += imageNY) + img[k] = imageD[iaddr]; + for (int taddr = 0; taddr < templateSize; taddr += templateNY, iaddr += imageNY) + { + shTemplate[tid] = templateD[taddr]; + img [NPT-1] = imageD[iaddr]; + for (int k = 0; k < NPT; k++) + shImage[tid + nthreads*k] = img[k]; + for (int k = 0; k < NPT-1; k++) + img[k] = img[k+1]; + __syncthreads(); + + if (tx < resultNY && ty < q) + { +#pragma unroll 8 + for (int j = jbeg; j < jend; j++) + for (int k = 0; k < NPT; k++) + corrCoeff[k] += shTemplate[j]*shImage1[j + nthreads*k]; + } + __syncthreads(); + } + + for (int k = 0; k < NPT; k++) + shmem[tid + nthreads*k] = corrCoeff[k]; + __syncthreads(); + + for (int j = tx + nt; j < nthreads; j += nt) + for (int k = 0; k < NPT; k++) + corrCoeff[k] += shmem[j + nthreads*k]; + __syncthreads(); + + if (tid < resultNY) + { + int raddr = yc*resultNY + tid; + for (int k = 0; k < NPT; k++, raddr += resultNY) + if (raddr < resultSize) + resultD[raddr] = corrCoeff[k]; + } +} + +/** + * Perform cross correlation in time domain + * @param[in] templates Reference images + * @param[in] images Secondary images + * @param[out] results Output correlation surface + * @param[in] stream cudaStream + */ +void cuCorrTimeDomain(cuArrays *templates, + cuArrays *images, + cuArrays *results, + cudaStream_t stream) +{ + /* compute correlation matrix */ + const int nImages = images->count; + const int imageNY = images->width; + const int NPT = 8; + + + const dim3 grid(nImages, (results->width-1)/NPT+1, 1); + if (imageNY <= 64) { + 
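        // Each branch below instantiates the kernel with a compile-time thread count that covers
        // the (oversampled) window width imageNY, so the shared tile shmem[nthreads*(1+NPT)] is
        // sized statically; the grid.y set up above tiles the correlation surface in chunks of
        // NPT lines per block.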
cuArraysCorrTime_kernel< 64,NPT><<>>(nImages, + templates->devData, templates->height, templates->width, templates->size, + images->devData, images->height, images->width, images->size, + results->devData, results->height, results->width, results->size); + getLastCudaError("cuArraysCorrTime error"); + } + else if (imageNY <= 128) { + cuArraysCorrTime_kernel< 128,NPT><<>>(nImages, + templates->devData, templates->height, templates->width, templates->size, + images->devData, images->height, images->width, images->size, + results->devData, results->height, results->width, results->size); + getLastCudaError("cuArraysCorrTime error"); + } + else if (imageNY <= 192) { + cuArraysCorrTime_kernel< 192,NPT><<>>(nImages, + templates->devData, templates->height, templates->width, templates->size, + images->devData, images->height, images->width, images->size, + results->devData, results->height, results->width, results->size); + getLastCudaError("cuArraysCorrTime error"); + } + else if (imageNY <= 256) { + cuArraysCorrTime_kernel< 256,NPT><<>>(nImages, + templates->devData, templates->height, templates->width, templates->size, + images->devData, images->height, images->width, images->size, + results->devData, results->height, results->width, results->size); + getLastCudaError("cuArraysCorrTime error"); + } + else if (imageNY <= 384) { + cuArraysCorrTime_kernel< 384,NPT><<>>(nImages, + templates->devData, templates->height, templates->width, templates->size, + images->devData, images->height, images->width, images->size, + results->devData, results->height, results->width, results->size); + getLastCudaError("cuArraysCorrTime error"); + } + else if (imageNY <= 512) { + cuArraysCorrTime_kernel< 512,NPT><<>>(nImages, + templates->devData, templates->height, templates->width, templates->size, + images->devData, images->height, images->width, images->size, + results->devData, results->height, results->width, results->size); + getLastCudaError("cuArraysCorrTime error"); + } + else if (imageNY <= 640) { + cuArraysCorrTime_kernel< 640,NPT><<>>(nImages, + templates->devData, templates->height, templates->width, templates->size, + images->devData, images->height, images->width, images->size, + results->devData, results->height, results->width, results->size); + getLastCudaError("cuArraysCorrTime error"); + } + else if (imageNY <= 768) { + cuArraysCorrTime_kernel< 768,NPT><<>>(nImages, + templates->devData, templates->height, templates->width, templates->size, + images->devData, images->height, images->width, images->size, + results->devData, results->height, results->width, results->size); + getLastCudaError("cuArraysCorrTime error"); + } + else if (imageNY <= 896) { + cuArraysCorrTime_kernel< 896,NPT><<>>(nImages, + templates->devData, templates->height, templates->width, templates->size, + images->devData, images->height, images->width, images->size, + results->devData, results->height, results->width, results->size); + getLastCudaError("cuArraysCorrTime error"); + } + else if (imageNY <= 1024) { + cuArraysCorrTime_kernel<1024,NPT><<>>(nImages, + templates->devData, templates->height, templates->width, templates->size, + images->devData, images->height, images->width, images->size, + results->devData, results->height, results->width, results->size); + getLastCudaError("cuArraysCorrTime error"); + } + else { + fprintf(stderr, "The (oversampled) window size along the across direction %d should be smaller than 1024.\n", imageNY); + throw; + } +} +// end of file diff --git a/cuda_code/cuEigh_internal.cu 
b/cuda_code/cuEigh_internal.cu new file mode 100644 index 0000000000000000000000000000000000000000..ede08bf40206f0abba5a73db3efd55597b32d87f --- /dev/null +++ b/cuda_code/cuEigh_internal.cu @@ -0,0 +1,207 @@ +#include "linalg/linalg_internal_gpu/cuEigh_internal.hpp" +#include "cytnx_error.hpp" +#include "Type.hpp" +#include "utils/lapack_wrapper.hpp" + +namespace cytnx{ + + namespace linalg_internal{ + + /// cuEigh + void cuEigh_internal_cd(const boost::intrusive_ptr &in, boost::intrusive_ptr &e, boost::intrusive_ptr &v, const cytnx_int32 &L){ + cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; + if( v->dtype == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; + + // create handles: + cusolverDnHandle_t cusolverH = NULL; + checkCudaErrors(cusolverDnCreate(&cusolverH)); + + + cytnx_complex128 *tA; + if(v!=NULL){ + tA = (cytnx_complex128*)v->Mem; + checkCudaErrors(cudaMemcpy(v->Mem,in->Mem,sizeof(cytnx_complex128)*cytnx_uint64(L)*L,cudaMemcpyDeviceToDevice)); + }else{ + checkCudaErrors(cudaMalloc((void**)&tA,cytnx_uint64(L)*L*sizeof(cytnx_complex128))); + checkCudaErrors(cudaMemcpy(tA,in->Mem,sizeof(cytnx_complex128)*cytnx_uint64(L)*L,cudaMemcpyDeviceToDevice)); + } + + + // query buffer: + cytnx_int32 lwork = 0; + checkCudaErrors(cusolverDnZheevd_bufferSize(cusolverH,jobz,CUBLAS_FILL_MODE_UPPER,L, (cuDoubleComplex*)tA, L,(cytnx_double*)e->Mem,&lwork)); + + // allocate working space: + cytnx_complex128 *work; + checkCudaErrors(cudaMalloc((void**)&work,sizeof(cytnx_complex128)*lwork)); + + + // call : + cytnx_int32 info; + cytnx_int32 *devinfo; + checkCudaErrors(cudaMalloc((void**)&devinfo,sizeof(cytnx_int32))); + checkCudaErrors(cusolverDnZheevd(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, L ,(cuDoubleComplex*)tA, L, (cytnx_double*)e->Mem, (cuDoubleComplex*)work,lwork, devinfo)); + + + // get info + checkCudaErrors(cudaMemcpy(&info,devinfo,sizeof(cytnx_int32),cudaMemcpyDeviceToHost)); + + cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'cusolverDnZheevd': cuBlas INFO = ", info); + + + cudaFree(work); + if(v->dtype==Type.Void) cudaFree(tA); + + cudaFree(devinfo); + cusolverDnDestroy(cusolverH); + + } + void cuEigh_internal_cf(const boost::intrusive_ptr &in, boost::intrusive_ptr &e, boost::intrusive_ptr &v, const cytnx_int32 &L){ + cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; + if( v->dtype == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; + + // create handles: + cusolverDnHandle_t cusolverH = NULL; + checkCudaErrors(cusolverDnCreate(&cusolverH)); + + + cytnx_complex64 *tA; + if(v!=NULL){ + tA = (cytnx_complex64*)v->Mem; + checkCudaErrors(cudaMemcpy(v->Mem,in->Mem,sizeof(cytnx_complex64)*cytnx_uint64(L)*L,cudaMemcpyDeviceToDevice)); + }else{ + checkCudaErrors(cudaMalloc((void**)&tA,cytnx_uint64(L)*L*sizeof(cytnx_complex64))); + checkCudaErrors(cudaMemcpy(tA,in->Mem,sizeof(cytnx_complex64)*cytnx_uint64(L)*L,cudaMemcpyDeviceToDevice)); + } + + + // query buffer: + cytnx_int32 lwork = 0; + checkCudaErrors(cusolverDnCheevd_bufferSize(cusolverH,jobz,CUBLAS_FILL_MODE_UPPER,L, (cuFloatComplex*)tA, L,(cytnx_float*)e->Mem,&lwork)); + + // allocate working space: + cytnx_complex64 *work; + checkCudaErrors(cudaMalloc((void**)&work,sizeof(cytnx_complex64)*lwork)); + + + // call : + cytnx_int32 info; + cytnx_int32 *devinfo; + checkCudaErrors(cudaMalloc((void**)&devinfo,sizeof(cytnx_int32))); + checkCudaErrors(cusolverDnCheevd(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, L ,(cuFloatComplex*)tA, L, (cytnx_float*)e->Mem, (cuFloatComplex*)work,lwork, devinfo)); + + + // get info + 
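        // cusolver writes its solver status to a device-side int; copy it back to the host and
        // treat any non-zero value as a failed eigendecomposition, mirroring the complex-double
        // path above.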
checkCudaErrors(cudaMemcpy(&info,devinfo,sizeof(cytnx_int32),cudaMemcpyDeviceToHost)); + + cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'cusolverDnZheevd': cuBlas INFO = ", info); + + + cudaFree(work); + if(v->dtype==Type.Void) cudaFree(tA); + + cudaFree(devinfo); + cusolverDnDestroy(cusolverH); + } + void cuEigh_internal_d(const boost::intrusive_ptr &in, boost::intrusive_ptr &e, boost::intrusive_ptr &v, const cytnx_int32 &L){ + cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; + if( v->dtype == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; + + // create handles: + cusolverDnHandle_t cusolverH = NULL; + checkCudaErrors(cusolverDnCreate(&cusolverH)); + + + cytnx_double *tA; + if(v->dtype!=Type.Void){ + tA = (cytnx_double*)v->Mem; + checkCudaErrors(cudaMemcpy(v->Mem,in->Mem,sizeof(cytnx_double)*cytnx_uint64(L)*L,cudaMemcpyDeviceToDevice)); + }else{ + checkCudaErrors(cudaMalloc((void**)&tA,cytnx_uint64(L)*L*sizeof(cytnx_double))); + checkCudaErrors(cudaMemcpy(tA,in->Mem,sizeof(cytnx_double)*cytnx_uint64(L)*L,cudaMemcpyDeviceToDevice)); + } + + + // query buffer: + cytnx_int32 lwork = 0; + checkCudaErrors(cusolverDnDsyevd_bufferSize(cusolverH,jobz,CUBLAS_FILL_MODE_UPPER,L, tA, L,(cytnx_double*)e->Mem,&lwork)); + + // allocate working space: + cytnx_double *work; + checkCudaErrors(cudaMalloc((void**)&work,sizeof(cytnx_double)*lwork)); + + + // call : + cytnx_int32 info; + cytnx_int32 *devinfo; + checkCudaErrors(cudaMalloc((void**)&devinfo,sizeof(cytnx_int32))); + checkCudaErrors(cusolverDnDsyevd(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, L ,tA, L, (cytnx_double*)e->Mem, work,lwork, devinfo)); + + + // get info + checkCudaErrors(cudaMemcpy(&info,devinfo,sizeof(cytnx_int32),cudaMemcpyDeviceToHost)); + + cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'cusolverDnDsysevd': cuBlas INFO = ", info); + + + cudaFree(work); + if(v->dtype==Type.Void) cudaFree(tA); + + cudaFree(devinfo); + cusolverDnDestroy(cusolverH); + + } + void cuEigh_internal_f(const boost::intrusive_ptr &in, boost::intrusive_ptr &e, boost::intrusive_ptr &v, const cytnx_int32 &L){ + cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; + if( v->dtype == Type.Void) jobz = CUSOLVER_EIG_MODE_NOVECTOR; + + // create handles: + cusolverDnHandle_t cusolverH = NULL; + checkCudaErrors(cusolverDnCreate(&cusolverH)); + + + cytnx_float *tA; + if(v->dtype!=Type.Void){ + tA = (cytnx_float*)v->Mem; + checkCudaErrors(cudaMemcpy(v->Mem,in->Mem,sizeof(cytnx_float)*cytnx_uint64(L)*L,cudaMemcpyDeviceToDevice)); + }else{ + checkCudaErrors(cudaMalloc((void**)&tA,cytnx_uint64(L)*L*sizeof(cytnx_float))); + checkCudaErrors(cudaMemcpy(tA,in->Mem,sizeof(cytnx_float)*cytnx_uint64(L)*L,cudaMemcpyDeviceToDevice)); + } + + + // query buffer: + cytnx_int32 lwork = 0; + checkCudaErrors(cusolverDnSsyevd_bufferSize(cusolverH,jobz,CUBLAS_FILL_MODE_UPPER,L, tA, L,(cytnx_float*)e->Mem,&lwork)); + + // allocate working space: + cytnx_float *work; + checkCudaErrors(cudaMalloc((void**)&work,sizeof(cytnx_float)*lwork)); + + + // call : + cytnx_int32 info; + cytnx_int32 *devinfo; + checkCudaErrors(cudaMalloc((void**)&devinfo,sizeof(cytnx_int32))); + checkCudaErrors(cusolverDnSsyevd(cusolverH, jobz, CUBLAS_FILL_MODE_UPPER, L ,tA, L, (cytnx_float*)e->Mem, work,lwork, devinfo)); + + + // get info + checkCudaErrors(cudaMemcpy(&info,devinfo,sizeof(cytnx_int32),cudaMemcpyDeviceToHost)); + + cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'cusolverDnDsysevd': cuBlas INFO = ", info); + + + cudaFree(work); + if(v->dtype==Type.Void) 
cudaFree(tA); + + cudaFree(devinfo); + cusolverDnDestroy(cusolverH); + + } + + }//linalg_internal +}//cytnx + + + diff --git a/cuda_code/cu_mandelbrot.cu b/cuda_code/cu_mandelbrot.cu new file mode 100644 index 0000000000000000000000000000000000000000..319f2ed1ec5fab6b19c18eb0cdf9e3ae22aaaf61 --- /dev/null +++ b/cuda_code/cu_mandelbrot.cu @@ -0,0 +1,254 @@ + +//+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+// +// // +// cu_mandelbrot.cu // +// // +// D. C. Groothuizen Dijkema - April, 2020 // +//+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+// + +// CUDA accelerated implementation for producing fractals from the Mandelbrot Set + + +#include + +__device__ int iterate(cuDoubleComplex x, const cuDoubleComplex &c, const int max_itr) +{ + // + // CUDA device to iterate a given number under x^2 + c until its absolute value becomes greater than 2 or the maximum number of + // iterations is reached. + // + // parameters + // ---------- + // x : cuDoubleComplex + // - the starting value of the iteration + // c : const cuDoubleComplex & + // - the constant added at each iteration + // max_itr : const int + // - the maximum number of iterations allowed + // + // returns + // ------- + // int + // - the number of iterations needed for the aboslute value of x became greater than 2 under iteration + // NPP_MAX_32S if the absolute value of x did not become greater in 2 within `max_itr` iterations + // + + for (int itr=0;itr2) { return itr; } + x=cuCadd(cuCmul(x,x),c); + } + return NPP_MAX_32S; +} + +__global__ void compute_mandelbrot(int * const d_iterations, const int max_itr, const int xresolution, const int yresolution + , const double startx, const double starty, const double deltax, const double deltay) +{ + // + // CUDA kernel to determine if a given number of the complex plane is contained within the Mandelbrot Set through iteration + // + // parameters + // ---------- + // d_iterations : int * const + // - 1D flat array representing a 2D array to write out either the number of iterations needed reach a root or a marker that this root + // could not be reached + // max_itr : const int + // - the maximum number of iterations allowed + // xresolution,yresolution : const int + // - the number of steps to take in the x-direction (the real components) and the y-direction (the imaginary components) + // startx,starty : const int + // - the real and imaginary components of the number defining the bottom left corner of the entire space being sampled + // deltax,deltay : const int + // - the size of the step to take in the x- and y-direction + // + + // determine where we are in memory + const int idy=blockIdx.y*blockDim.y+threadIdx.y,idx=blockIdx.x*blockDim.x+threadIdx.x,ind=idy*xresolution+idx; + // check we haven't gone out of bounds + if (idx>=xresolution||idy>=yresolution) { return; } + + // determine the current point + const double imag=starty+deltay*idy,real=startx+deltax*idx; + // determine the number of iterations + d_iterations[ind]=iterate(make_cuDoubleComplex(0.,0.),make_cuDoubleComplex(real,imag),max_itr); +} + +__global__ void compute_julia(int * const d_iterations, const double re, const double im, const int max_itr + , const int xresolution, const int yresolution, const double startx, const double starty, const double deltax, const double deltay) +{ + // + // CUDA kernel to determine if a given number of the complex 
plane is contained within the Julia Set of a given complex number through + // iteration + // + // parameters + // ---------- + // d_iterations : int * const + // - 1D flat array representing a 2D array to write out either the number of iterations needed reach a root or a marker that this root + // could not be reached + // re,im : const double + // - the real and imaginary parts of the complex number to find the Julia Set of + // max_itr : const int + // - the maximum number of iterations allowed + // xresolution,yresolution : const int + // - the number of steps to take in the x-direction (the real components) and the y-direction (the imaginary components) + // startx,starty : const int + // - the real and imaginary components of the number defining the bottom left corner of the entire space being sampled + // deltax,deltay : const int + // - the size of the step to take in the x- and y-direction + // + + // determine where we are in memory + const int idy=blockIdx.y*blockDim.y+threadIdx.y,idx=blockIdx.x*blockDim.x+threadIdx.x,ind=idy*xresolution+idx; + // check we haven't gone out of bounds + if (idx>=xresolution||idy>=yresolution) { return; } + + // determine the current point + const double imag=starty+deltay*idy,real=startx+deltax*idx; + // determine the number of iterations + d_iterations[ind]=iterate(make_cuDoubleComplex(real,imag),make_cuDoubleComplex(re,im),max_itr); +} + +int __declspec(dllexport) sample_mandelbrot(int * const h_itr, const int max_itr, const int xresolution, const int yresolution + , const double startx, const double endx, const double starty, const double endy, const bool verbose) +{ + // + // Determine if numbers in a given subset of the complex plane are contained within the Mandelbrot Set through iteration, with CUDA + // acceleration + // + // parameters + // ---------- + // h_itr : int * const + // - 1D flat array representing a 2D array to write out either the number of iterations needed to diverge or a marker that the number was + // stable + // max_itr : const int + // - the maximum number of iterations allowed + // xresolution,yresolution : const int + // - the number of steps to take in the x- and y-direction (the real and imaginary components) + // startx,endx,starty,endy : const int + // - the first and last values to sample at + // verbose : bool + // - flag to control logging to console + // + // returns + // ------- + // int + // - the value which marks that a starting point could not be shown to not be in the Mandelbrot Set + // + + // computation parameters + const double deltax=(endx-startx)/xresolution,deltay=(endy-starty)/yresolution; + const int total=xresolution*yresolution; + // memory parameters + const int i_size=total*sizeof(int); + + // device memory pointers + int *d_itr=nullptr; + // allocate device memory + CUDA_REQUIRE_SUCCESS(cudaMalloc(reinterpret_cast(&d_itr),static_cast(i_size))); + + // GPU memory setup + const dim3 dim_block(32,32),dim_grid((xresolution+dim_block.x-1)/dim_block.x,(yresolution+dim_block.y-1)/dim_block.y); + // run and time + float elapsed; + cudaEvent_t start,stop; + + CUDA_REQUIRE_SUCCESS(cudaEventCreate(&start)); + CUDA_REQUIRE_SUCCESS(cudaEventCreate(&stop)); + CUDA_REQUIRE_SUCCESS(cudaEventRecord(start,0)); + + compute_mandelbrot<<>>(d_itr,max_itr,xresolution,yresolution,startx,starty,deltax,deltay); + // check for errors + CUDA_REQUIRE_SUCCESS(cudaPeekAtLastError()); + CUDA_REQUIRE_SUCCESS(cudaDeviceSynchronize()); + + CUDA_REQUIRE_SUCCESS(cudaEventRecord(stop,0)); + 
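  // cudaEventElapsedTime reports the start-to-stop gap in milliseconds once the stop event has
  // completed, hence the division by 1000 when the runtime is printed in seconds below.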
CUDA_REQUIRE_SUCCESS(cudaEventSynchronize(stop)); + CUDA_REQUIRE_SUCCESS(cudaEventElapsedTime(&elapsed,start,stop)); + + if (verbose) + { + std::cout << total << " points processed." << std::endl + << "Time taken: " << elapsed/1000 << "s." << std::endl; + } + + // copy back to host + CUDA_REQUIRE_SUCCESS(cudaMemcpy(h_itr,d_itr,static_cast(i_size),cudaMemcpyDeviceToHost)); + + // free GPU memory + CUDA_REQUIRE_SUCCESS(cudaFree(d_itr)); + + return NPP_MAX_32S; +} + +int __declspec(dllexport) sample_julia(int * const h_itr, const double re, const double im, const int max_itr + , const int xresolution, const int yresolution, const double startx, const double endx, const double starty, const double endy + , const bool verbose) +{ + // + // Determine if numbers in a given subset of the complex plane are contained within the Julia Set of a given complex number through + // iteration, with CUDA acceleration + // + // parameters + // ---------- + // h_itr : int * const + // - 1D flat array representing a 2D array to write out either the number of iterations needed to diverge or a marker that the number was + // stable + // max_itr : const int + // - the maximum number of iterations allowed + // xresolution,yresolution : const int + // - the number of steps to take in the x- and y-direction (the real and imaginary components) + // startx,endx,starty,endy : const int + // - the first and last values to sample at + // verbose : bool + // - flag to control logging to console + // + // returns + // ------- + // int + // - the value which marks that a starting point could not be shown to not be in the Mandelbrot Set + // + + // computation parameters + const double deltax=(endx-startx)/xresolution,deltay=(endy-starty)/yresolution; + const int total=xresolution*yresolution; + // memory parameters + const int i_size=total*sizeof(int); + + // device memory pointers + int *d_itr=nullptr; + // allocate device memory + CUDA_REQUIRE_SUCCESS(cudaMalloc(reinterpret_cast(&d_itr),static_cast(i_size))); + + // GPU memory setup + const dim3 dim_block(32,32),dim_grid((xresolution+dim_block.x-1)/dim_block.x,(yresolution+dim_block.y-1)/dim_block.y); + // run and time + float elapsed; + cudaEvent_t start,stop; + + CUDA_REQUIRE_SUCCESS(cudaEventCreate(&start)); + CUDA_REQUIRE_SUCCESS(cudaEventCreate(&stop)); + CUDA_REQUIRE_SUCCESS(cudaEventRecord(start,0)); + + compute_julia<<>>(d_itr,re,im,max_itr,xresolution,yresolution,startx,starty,deltax,deltay); + // check for errors + CUDA_REQUIRE_SUCCESS(cudaPeekAtLastError()); + CUDA_REQUIRE_SUCCESS(cudaDeviceSynchronize()); + + CUDA_REQUIRE_SUCCESS(cudaEventRecord(stop,0)); + CUDA_REQUIRE_SUCCESS(cudaEventSynchronize(stop)); + CUDA_REQUIRE_SUCCESS(cudaEventElapsedTime(&elapsed,start,stop)); + + if (verbose) + { + std::cout << total << " points processed." << std::endl + << "Time taken: " << elapsed/1000 << "s." << std::endl; + } + + // copy back to host + CUDA_REQUIRE_SUCCESS(cudaMemcpy(h_itr,d_itr,static_cast(i_size),cudaMemcpyDeviceToHost)); + + // free GPU memory + CUDA_REQUIRE_SUCCESS(cudaFree(d_itr)); + + return NPP_MAX_32S; +} diff --git a/cuda_code/cu_raycast.cu b/cuda_code/cu_raycast.cu new file mode 100644 index 0000000000000000000000000000000000000000..5d16c30364c78f7625504e1e3477906f53445db2 --- /dev/null +++ b/cuda_code/cu_raycast.cu @@ -0,0 +1,313 @@ +#include "cu_raycast.h" + +#include "MatUtils.h" +#include "launch_utils.h" +#include "InvalidValue.h" + +namespace roo +{ + +////////////////////////////////////////////////////// +// Phong shading. 
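// For a unit surface normal n_c and unit light direction l (both in camera coordinates), the
// shade returned below is approximately
//   ambient + diffuse * dot(l, n_c) + specular * pow(max(0, dot(eye, r)), 10)
// with r = 2*dot(l, n_c)*n_c - l the reflected light direction; the power of 10 is written out
// as a chain of multiplications.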
+////////////////////////////////////////////////////// + +__host__ __device__ inline +float PhongShade(const float3 p_c, const float3 n_c) +{ + const float ambient = 0.4; + const float diffuse = 0.4; + const float specular = 0.2; + const float3 eyedir = -1.0f * p_c / length(p_c); + const float3 _lightdir = make_float3(0.4,0.4,-1); + const float3 lightdir = _lightdir / length(_lightdir); + const float ldotn = dot(lightdir,n_c); + const float3 lightreflect = 2*ldotn*n_c + (-1.0) * lightdir; + const float edotr = fmaxf(0,dot(eyedir,lightreflect)); + const float spec = edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr; + return ambient + diffuse * ldotn + specular * spec; +} + +////////////////////////////////////////////////////// +// Raycast SDF +////////////////////////////////////////////////////// + +__global__ void KernRaycastSdf(Image imgdepth, Image norm, Image img, const BoundedVolume vol, const Mat T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) +{ + const int u = blockIdx.x*blockDim.x + threadIdx.x; + const int v = blockIdx.y*blockDim.y + threadIdx.y; + + if( u < img.w && v < img.h ) { + const float3 c_w = SE3Translation(T_wc); + const float3 ray_c = K.Unproject(u,v); + const float3 ray_w = mulSO3(T_wc, ray_c); + + // Raycast bounding box to find valid ray segment of sdf + // http://www.cs.utah.edu/~awilliam/box/box.pdf + const float3 tminbound = (vol.bbox.Min() - c_w) / ray_w; + const float3 tmaxbound = (vol.bbox.Max() - c_w) / ray_w; + const float3 tmin = fminf(tminbound,tmaxbound); + const float3 tmax = fmaxf(tminbound,tmaxbound); + const float max_tmin = fmaxf(fmaxf(fmaxf(tmin.x, tmin.y), tmin.z), near); + const float min_tmax = fminf(fminf(fminf(tmax.x, tmax.y), tmax.z), far); + + float depth = 0.0f; + + // If ray intersects bounding box + if(max_tmin < min_tmax ) { + // Go between max_tmin and min_tmax + float lambda = max_tmin; + float last_sdf = InvalidValue::Value(); + float min_delta_lambda = vol.VoxelSizeUnits().x; + float delta_lambda = 0; + + // March through space + while(lambda < min_tmax) { + const float3 pos_w = c_w + lambda * ray_w; + const float sdf = vol.GetUnitsTrilinearClamped(pos_w); + + if( sdf <= 0 ) { + if( last_sdf > 0) { + // surface! + if(subpix) { + lambda = lambda + delta_lambda * sdf / (last_sdf - sdf); + } + depth = lambda; + } + break; + } + delta_lambda = sdf > 0 ? fmaxf(sdf, min_delta_lambda) : trunc_dist; + lambda += delta_lambda; + last_sdf = sdf; + } + } + + // Compute normal + const float3 pos_w = c_w + depth * ray_w; + const float3 _n_w = vol.GetUnitsBackwardDiffDxDyDz(pos_w); + const float len_n_w = length(_n_w); + const float3 n_w = len_n_w > 0 ? 
_n_w / len_n_w : make_float3(0,0,1); + const float3 n_c = mulSO3inv(T_wc,n_w); + const float3 p_c = depth * ray_c; + + if(depth > 0 ) { +// img(u,v) = (depth - near) / (far - near); + imgdepth(u,v) = depth; + img(u,v) = PhongShade(p_c, n_c); +// norm(u,v) = make_float4(0.5,0.5,0.5,1) + make_float4(n_c, 0) /2.0f; + norm(u,v) = make_float4(n_c, 1); + }else{ + imgdepth(u,v) = InvalidValue::Value(); + img(u,v) = 0; + norm(u,v) = make_float4(0,0,0,0); + } + } +} + +void RaycastSdf(Image depth, Image norm, Image img, const BoundedVolume vol, const Mat T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) +{ + dim3 blockDim, gridDim; +// InitDimFromOutputImageOver(blockDim, gridDim, img, 16, 16); + InitDimFromOutputImageOver(blockDim, gridDim, img); + KernRaycastSdf<<>>(depth, norm, img, vol, T_wc, K, near, far, trunc_dist, subpix); + GpuCheckErrors(); +} + +////////////////////////////////////////////////////// +// Raycast Color SDF +////////////////////////////////////////////////////// + +__global__ void KernRaycastSdf(Image imgdepth, Image norm, Image img, const BoundedVolume vol, const BoundedVolume colorVol, const Mat T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) +{ + const int u = blockIdx.x*blockDim.x + threadIdx.x; + const int v = blockIdx.y*blockDim.y + threadIdx.y; + + if( u < img.w && v < img.h ) { + const float3 c_w = SE3Translation(T_wc); + const float3 ray_c = K.Unproject(u,v); + const float3 ray_w = mulSO3(T_wc, ray_c); + + // Raycast bounding box to find valid ray segment of sdf + // http://www.cs.utah.edu/~awilliam/box/box.pdf + const float3 tminbound = (vol.bbox.Min() - c_w) / ray_w; + const float3 tmaxbound = (vol.bbox.Max() - c_w) / ray_w; + const float3 tmin = fminf(tminbound,tmaxbound); + const float3 tmax = fmaxf(tminbound,tmaxbound); + const float max_tmin = fmaxf(fmaxf(fmaxf(tmin.x, tmin.y), tmin.z), near); + const float min_tmax = fminf(fminf(fminf(tmax.x, tmax.y), tmax.z), far); + + float depth = 0.0f; + + // If ray intersects bounding box + if(max_tmin < min_tmax ) { + // Go between max_tmin and min_tmax + float lambda = max_tmin; + float last_sdf = InvalidValue::Value(); + float min_delta_lambda = vol.VoxelSizeUnits().x; + float delta_lambda = 0; + + // March through space + while(lambda < min_tmax) { + const float3 pos_w = c_w + lambda * ray_w; + const float sdf = vol.GetUnitsTrilinearClamped(pos_w); + + if( sdf <= 0 ) { + if( last_sdf > 0) { + // surface! + if(subpix) { + lambda = lambda + delta_lambda * sdf / (last_sdf - sdf); + } + depth = lambda; + } + break; + } + delta_lambda = sdf > 0 ? fmaxf(sdf, min_delta_lambda) : trunc_dist; + lambda += delta_lambda; + last_sdf = sdf; + } + } + + // Compute normal + const float3 pos_w = c_w + depth * ray_w; + const float3 _n_w = vol.GetUnitsBackwardDiffDxDyDz(pos_w); + const float c = colorVol.GetUnitsTrilinearClamped(pos_w); + const float len_n_w = length(_n_w); + const float3 n_w = len_n_w > 0 ? 
_n_w / len_n_w : make_float3(0,0,1); + const float3 n_c = mulSO3inv(T_wc,n_w); + + if(depth > 0 ) { + imgdepth(u,v) = depth; + img(u,v) = c; + norm(u,v) = make_float4(n_c, 1); + }else{ + imgdepth(u,v) = InvalidValue::Value(); + img(u,v) = 0; + norm(u,v) = make_float4(0,0,0,0); + } + } +} + +void RaycastSdf(Image depth, Image norm, Image img, const BoundedVolume vol, const BoundedVolume colorVol, const Mat T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) +{ + dim3 blockDim, gridDim; +// InitDimFromOutputImageOver(blockDim, gridDim, img, 16, 16); + InitDimFromOutputImageOver(blockDim, gridDim, img); + KernRaycastSdf<<>>(depth, norm, img, vol, colorVol, T_wc, K, near, far, trunc_dist, subpix); + GpuCheckErrors(); +} + +////////////////////////////////////////////////////// +// Raycast box +////////////////////////////////////////////////////// + +__global__ void KernRaycastBox(Image imgd, const Mat T_wc, ImageIntrinsics K, const BoundingBox bbox ) +{ + const int u = blockIdx.x*blockDim.x + threadIdx.x; + const int v = blockIdx.y*blockDim.y + threadIdx.y; + + if( u < imgd.w && v < imgd.h ) { + const float3 c_w = SE3Translation(T_wc); + const float3 ray_c = K.Unproject(u,v); + const float3 ray_w = mulSO3(T_wc, ray_c); + + // Raycast bounding box to find valid ray segment of sdf + // http://www.cs.utah.edu/~awilliam/box/box.pdf + const float3 tminbound = (bbox.Min() - c_w) / ray_w; + const float3 tmaxbound = (bbox.Max() - c_w) / ray_w; + const float3 tmin = fminf(tminbound,tmaxbound); + const float3 tmax = fmaxf(tminbound,tmaxbound); + const float max_tmin = fmaxf(fmaxf(tmin.x, tmin.y), tmin.z); + const float min_tmax = fminf(fminf(tmax.x, tmax.y), tmax.z); + + float d; + + // If ray intersects bounding box + if(max_tmin < min_tmax ) { + d = max_tmin; + }else{ + d = InvalidValue::Value(); + } + + imgd(u,v) = d; + } +} + +void RaycastBox(Image imgd, const Mat T_wc, ImageIntrinsics K, const BoundingBox bbox ) +{ + dim3 blockDim, gridDim; + InitDimFromOutputImageOver(blockDim, gridDim, imgd); + KernRaycastBox<<>>(imgd, T_wc, K, bbox); + GpuCheckErrors(); +} + +////////////////////////////////////////////////////// +// Raycast sphere +////////////////////////////////////////////////////// + +__global__ void KernRaycastSphere(Image imgd, Image img, ImageIntrinsics K, float3 center_c, float r) +{ + const int u = blockIdx.x*blockDim.x + threadIdx.x; + const int v = blockIdx.y*blockDim.y + threadIdx.y; + + if( u < imgd.w && v < imgd.h ) { + const float3 ray_c = K.Unproject(u,v); + + const float ldotc = dot(ray_c,center_c); + const float lsq = dot(ray_c,ray_c); + const float csq = dot(center_c,center_c); + float depth = (ldotc - sqrt(ldotc*ldotc - lsq*(csq - r*r) )) / lsq; + + const float prev_depth = imgd(u,v); + if(depth > 0 && (depth < prev_depth || !isfinite(prev_depth)) ) { + imgd(u,v) = depth; + if(img.ptr) { + const float3 p_c = depth * ray_c; + const float3 n_c = p_c - center_c; + img(u,v) = PhongShade(p_c, n_c / length(n_c)); + } + } + } +} + +void RaycastSphere(Image imgd, Image img, const Mat T_wc, ImageIntrinsics K, float3 center, float r) +{ + dim3 blockDim, gridDim; + InitDimFromOutputImageOver(blockDim, gridDim, imgd); + const float3 center_c = mulSE3inv(T_wc, center); + KernRaycastSphere<<>>(imgd, img, K, center_c, r); + GpuCheckErrors(); +} + +////////////////////////////////////////////////////// +// Raycast plane +////////////////////////////////////////////////////// + +__global__ void KernRaycastPlane(Image imgd, Image img, ImageIntrinsics K, const 
float3 n_c) +{ + const int u = blockIdx.x*blockDim.x + threadIdx.x; + const int v = blockIdx.y*blockDim.y + threadIdx.y; + + if( u < img.w && v < img.h ) { + const float3 ray_c = K.Unproject(u,v); + const float depth = -1 / dot(n_c, ray_c); + + const float prev_depth = imgd(u,v); + if(depth > 0 && (depth < prev_depth || !isfinite(prev_depth)) ) { + const float3 p_c = depth * ray_c; + img(u,v) = PhongShade(p_c, n_c / length(n_c) ); + imgd(u,v) = depth; + } + } +} + +void RaycastPlane(Image imgd, Image img, const Mat T_wc, ImageIntrinsics K, const float3 n_w ) +{ + const float3 n_c = Plane_b_from_a(T_wc, n_w); + + dim3 blockDim, gridDim; + InitDimFromOutputImageOver(blockDim, gridDim, img); + KernRaycastPlane<<>>(imgd, img, K, n_c ); + GpuCheckErrors(); +} + + +} diff --git a/cuda_code/cu_reduce_1.cu b/cuda_code/cu_reduce_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..36832db78e431725d108d383f31541f955adf67d --- /dev/null +++ b/cuda_code/cu_reduce_1.cu @@ -0,0 +1,129 @@ +#include + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +namespace imp { +namespace cu { + +//----------------------------------------------------------------------------- +template +__global__ void k_reduce(Pixel* d_dst, size_t stride, + std::uint32_t dst_width, std::uint32_t dst_height, + std::uint32_t roi_x, std::uint32_t roi_y, + float sf_x, float sf_y, Texture2D src_tex) +{ + const int x = blockIdx.x*blockDim.x + threadIdx.x + roi_x; + const int y = blockIdx.y*blockDim.y + threadIdx.y + roi_y; + if (x +void reduce(ImageGpu& dst, + const ImageGpu& src, + imp::InterpolationMode interp, bool gauss_prefilter) +{ + imp::Roi2u src_roi = src.roi(); + imp::Roi2u dst_roi = dst.roi(); + + // scale factor for x/y > 0 && < 1 (for multiplication with dst coords in the kernel!) + float sf_x = static_cast(src_roi.width()) / static_cast(dst_roi.width()); + float sf_y = static_cast(src_roi.height()) / static_cast(dst_roi.height()); + + std::unique_ptr> filtered; + if (gauss_prefilter) + { + float sf = .5f*(sf_x+sf_y); + + filtered.reset(new ImageGpu(src.size())); + float sigma = 1/(3*sf) ; // empirical magic + std::uint16_t kernel_size = std::ceil(6.0f*sigma); + if (kernel_size % 2 == 0) + kernel_size++; + + imp::cu::filterGauss(*filtered, src, sigma, kernel_size); + } + + cudaTextureFilterMode tex_filter_mode = (interp == InterpolationMode::linear) ? 
+ cudaFilterModeLinear : cudaFilterModePoint; + if (src.bitDepth() < 32) + tex_filter_mode = cudaFilterModePoint; + + std::shared_ptr src_tex; + if (filtered) + src_tex = filtered->genTexture(false, tex_filter_mode); + else + src_tex = src.genTexture(false, tex_filter_mode); + + + Fragmentation<> dst_frag(dst_roi.size()); + + switch(interp) + { + case InterpolationMode::point: + case InterpolationMode::linear: + // fallthrough intended + k_reduce + <<< + dst_frag.dimGrid, dst_frag.dimBlock/*, 0, stream*/ + >>> (dst.data(), dst.stride(), dst.width(), dst.height(), + dst_roi.x(), dst_roi.y(), sf_x , sf_y, *src_tex); + break; + // case InterpolationMode::cubic: + // cuTransformCubicKernel_32f_C1 + // <<< dimGridOut, dimBlock, 0, stream >>> (dst.data(), dst.stride(), dst.width(), dst.height(), + // sf_x , sf_y); + // break; + // case InterpolationMode::cubicSpline: + // cuTransformCubicSplineKernel_32f_C1 + // <<< dimGridOut, dimBlock, 0, stream >>> (dst.data(), dst.stride(), dst.width(), dst.height(), + // sf_x , sf_y); + // break; + default: + IMP_THROW_EXCEPTION("unsupported interpolation type"); + } + + IMP_CUDA_CHECK(); +} + +//============================================================================== +// +// template instantiations for all our image types +// + +template void reduce(ImageGpu8uC1& dst, const ImageGpu8uC1& src, InterpolationMode interp, bool gauss_prefilter); +template void reduce(ImageGpu8uC2& dst, const ImageGpu8uC2& src, InterpolationMode interp, bool gauss_prefilter); +template void reduce(ImageGpu8uC4& dst, const ImageGpu8uC4& src, InterpolationMode interp, bool gauss_prefilter); + +template void reduce(ImageGpu16uC1& dst, const ImageGpu16uC1& src, InterpolationMode interp, bool gauss_prefilter); +template void reduce(ImageGpu16uC2& dst, const ImageGpu16uC2& src, InterpolationMode interp, bool gauss_prefilter); +template void reduce(ImageGpu16uC4& dst, const ImageGpu16uC4& src, InterpolationMode interp, bool gauss_prefilter); + +template void reduce(ImageGpu32sC1& dst, const ImageGpu32sC1& src, InterpolationMode interp, bool gauss_prefilter); +template void reduce(ImageGpu32sC2& dst, const ImageGpu32sC2& src, InterpolationMode interp, bool gauss_prefilter); +template void reduce(ImageGpu32sC4& dst, const ImageGpu32sC4& src, InterpolationMode interp, bool gauss_prefilter); + +template void reduce(ImageGpu32fC1& dst, const ImageGpu32fC1& src, InterpolationMode interp, bool gauss_prefilter); +template void reduce(ImageGpu32fC2& dst, const ImageGpu32fC2& src, InterpolationMode interp, bool gauss_prefilter); +template void reduce(ImageGpu32fC4& dst, const ImageGpu32fC4& src, InterpolationMode interp, bool gauss_prefilter); + + +} // namespace cu +} // namespace imp + diff --git a/cuda_code/cu_vector_operations.cu b/cuda_code/cu_vector_operations.cu new file mode 100644 index 0000000000000000000000000000000000000000..64ed7f34bc9c105e13617d6b5c1575812f104741 --- /dev/null +++ b/cuda_code/cu_vector_operations.cu @@ -0,0 +1,284 @@ +/* + * SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli + * SPDX-License-Identifier: BSD-3-Clause + * SPDX-FileType: SOURCE + * + * This program is free software: you can redistribute it and/or modify it + * under the terms of the license found in the LICENSE.txt file in the root + * directory of this source tree. 
+ */ + + +// ======= +// Headers +// ======= + +#include "./cu_vector_operations.h" +#include // sqrt +#include // assert +#include "./cublas_interface.h" // cublas_interface + + +// =========== +// copy vector +// =========== + +/// \brief Copies a vector to a new vector. Result is written in-place +/// +/// \param[in] input_vector +/// A 1D array +/// \param[in] vector_size +/// Length of vector array +/// \param[out] output_vector +/// Output vector (written in place). + +template +void cuVectorOperations::copy_vector( + cublasHandle_t cublas_handle, + const DataType* input_vector, + const LongIndexType vector_size, + DataType* output_vector) +{ + int incx = 1; + int incy = 1; + + cublasStatus_t status = cublas_interface::cublasXcopy( + cublas_handle, vector_size, input_vector, incx, output_vector, + incy); + + assert(status == CUBLAS_STATUS_SUCCESS); +} + +// ================== +// copy scaled vector +// ================== + +/// \brief Scales a vector and stores to a new vector. +/// +/// \param[in] input_vector +/// A 1D array +/// \param[in] vector_size +/// Length of vector array +/// \param[in] scale +/// Scale coefficient to the input vector. If this is equal to one, +/// the function effectively becomes the same as \e copy_vector. +/// \param[out] output_vector +/// Output vector (written in place). + +template +void cuVectorOperations::copy_scaled_vector( + cublasHandle_t cublas_handle, + const DataType* input_vector, + const LongIndexType vector_size, + const DataType scale, + DataType* output_vector) +{ + cublasStatus_t status; + int incx = 1; + int incy = 1; + + // Copy input to output vector + status = cublas_interface::cublasXcopy(cublas_handle, vector_size, + input_vector, incx, + output_vector, incy); + + assert(status == CUBLAS_STATUS_SUCCESS); + + // Scale outpu vector + status = cublas_interface::cublasXscal(cublas_handle, vector_size, &scale, + output_vector, incy); + + assert(status == CUBLAS_STATUS_SUCCESS); +} + + +// ====================== +// subtract scaled vector +// ====================== + +/// \brief Subtracts the scaled input vector from the output vector. +/// +/// \details Performs the following operation: +/// \f[ +/// \boldsymbol{b} = \boldsymbol{b} - c \boldsymbol{a}, +/// \f] +/// where +/// * \f$ \boldsymbol{a} \f$ is the input vector, +/// * \f$ c \f$ is a scalar scale to the input vector, and +/// * \f$ \boldsymbol{b} \f$ is the output vector that is +/// written in-place. +/// +/// \param[in] input_vector +/// A 1D array +/// \param[in] vector_size +/// Length of vector array +/// \param[in] scale +/// Scale coefficient to the input vector. +/// \param[in,out] output_vector Output vector (written in place). + +template +void cuVectorOperations::subtract_scaled_vector( + cublasHandle_t cublas_handle, + const DataType* input_vector, + const LongIndexType vector_size, + const DataType scale, + DataType* output_vector) +{ + if (scale == 0.0) + { + return; + } + + int incx = 1; + int incy = 1; + + DataType neg_scale = -scale; + cublasStatus_t status = cublas_interface::cublasXaxpy( + cublas_handle, vector_size, &neg_scale, input_vector, incx, + output_vector, incy); + + assert(status == CUBLAS_STATUS_SUCCESS); +} + + +// ============= +// inner product +// ============= + +/// \brief Computes Euclidean inner product of two vectors. +/// +/// \param[in] vector1 +/// 1D array +/// \param[in] vector2 +/// 1D array +/// \param[in] vector_size Length of array +/// \return Inner product of two vectors. 
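+
+// Usage sketch (illustrative only, not part of this file's API surface): with a
+// valid cublas_handle, device vectors v and w of length n, and v of unit norm,
+// the wrappers above and below compose into one Gram-Schmidt style step,
+//
+//     DataType c = cuVectorOperations<DataType>::inner_product(
+//         cublas_handle, v, w, n);
+//     cuVectorOperations<DataType>::subtract_scaled_vector(
+//         cublas_handle, v, n, c, w);
+//
+// which removes from w its component along v.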
+ +template +DataType cuVectorOperations::inner_product( + cublasHandle_t cublas_handle, + const DataType* vector1, + const DataType* vector2, + const LongIndexType vector_size) +{ + DataType inner_prod; + int incx = 1; + int incy = 1; + + cublasStatus_t status = cublas_interface::cublasXdot( + cublas_handle, vector_size, vector1, incx, vector2, incy, + &inner_prod); + + assert(status == CUBLAS_STATUS_SUCCESS); + + return inner_prod; +} + + +// ============== +// euclidean norm +// ============== + +/// \brief Computes the Euclidean 2-norm of a 1D array. +/// +/// \param[in] vector +/// A pointer to 1D array +/// \param[in] vector_size +/// Length of the array +/// \return Euclidean norm + +template +DataType cuVectorOperations::euclidean_norm( + cublasHandle_t cublas_handle, + const DataType* vector, + const LongIndexType vector_size) +{ + DataType norm; + int incx = 1; + + cublasStatus_t status = cublas_interface::cublasXnrm2( + cublas_handle, vector_size, vector, incx, &norm); + + assert(status == CUBLAS_STATUS_SUCCESS); + + return norm; +} + + +// ========================= +// normalize vector in place +// ========================= + +/// \brief Normalizes a vector based on Euclidean 2-norm. The result +/// is written in-place. +/// +/// \param[in, out] vector +/// Input vector to be normalized in-place. +/// \param[in] vector_size +/// Length of the input vector +/// \return 2-Norm of the input vector (before normalization) + +template +DataType cuVectorOperations::normalize_vector_in_place( + cublasHandle_t cublas_handle, + DataType* vector, + const LongIndexType vector_size) +{ + // Norm of vector + DataType norm = cuVectorOperations::euclidean_norm( + cublas_handle, vector, vector_size); + + // Normalize in place + DataType scale = 1.0 / norm; + int incx = 1; + cublasStatus_t status = cublas_interface::cublasXscal( + cublas_handle, vector_size, &scale, vector, incx); + + assert(status == CUBLAS_STATUS_SUCCESS); + + return norm; +} + + +// ========================= +// normalize vector and copy +// ========================= + +/// \brief Normalizes a vector based on Euclidean 2-norm. The result is +/// written into another vector. +/// +/// \param[in] vector +/// Input vector. +/// \param[in] vector_size +/// Length of the input vector +/// \param[out] output_vector +/// Output vector, which is the normalization of the input vector. 
+/// \return 2-norm of the input vector + +template +DataType cuVectorOperations::normalize_vector_and_copy( + cublasHandle_t cublas_handle, + const DataType* vector, + const LongIndexType vector_size, + DataType* output_vector) +{ + // Norm of vector + DataType norm = cuVectorOperations::euclidean_norm( + cublas_handle, vector, vector_size); + + // Normalize to output + DataType scale = 1.0 / norm; + cuVectorOperations::copy_scaled_vector(cublas_handle, vector, + vector_size, scale, + output_vector); + + return norm; +} + + +// =============================== +// Explicit template instantiation +// =============================== + +template class cuVectorOperations; +template class cuVectorOperations; diff --git a/cuda_code/cuadrado.cu b/cuda_code/cuadrado.cu new file mode 100644 index 0000000000000000000000000000000000000000..2b293de014dea72108e0d25d0338d85cfd7a890f --- /dev/null +++ b/cuda_code/cuadrado.cu @@ -0,0 +1,50 @@ +#include + +__global__ void elevar_al_cuadrado(float * d_salida, float * d_entrada){ + int idx = threadIdx.x; + float f = d_entrada[idx]; + d_salida[idx] = f*f; +} + +int main(int argc, char ** argv) { + + const int TAMANIO_ARREGLO = 100; + const int BYTES_ARREGLO = TAMANIO_ARREGLO * sizeof(float); + + // Generamos el arreglo de entrada en el anfitrion + float h_entrada[TAMANIO_ARREGLO]; + + for (int i = 0; i < TAMANIO_ARREGLO; i++) { + h_entrada[i] = float(i); + } + + float h_salida[TAMANIO_ARREGLO]; + + // Declaramos apuntadores de memoria en GPU + float * d_entrada; + float * d_salida; + + // Reservamos memoria del GPU + cudaMalloc((void**) &d_entrada, BYTES_ARREGLO); + cudaMalloc((void**) &d_salida, BYTES_ARREGLO); + + // Copiamos informacion al GPU + cudaMemcpy(d_entrada, h_entrada, BYTES_ARREGLO, cudaMemcpyHostToDevice); + + // Lanza el kernel + elevar_al_cuadrado<<<1, TAMANIO_ARREGLO>>>(d_salida, d_entrada); + + // Copiamos el arreglo resultante al GPU + cudaMemcpy(h_salida, d_salida, BYTES_ARREGLO, cudaMemcpyDeviceToHost); + + // Imprimimos el arreglo resultante + for (int i =0; i < TAMANIO_ARREGLO; i++) { + printf("%f", h_salida[i]); + printf(((i % 4) != 3) ? "\t" : "\n"); + } + + cudaFree(d_entrada); + cudaFree(d_salida); + + return 0; +} diff --git a/cuda_code/cubSort.cu b/cuda_code/cubSort.cu new file mode 100644 index 0000000000000000000000000000000000000000..55f0ec349b94ea3c996b139a9eb78f8459181aa9 --- /dev/null +++ b/cuda_code/cubSort.cu @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// An example of device radix sort by CUB +#define CUB_STDERR // print CUDA runtime error to console + +#include +#include +//#include +#include + +using namespace cub; + +struct Pair +{ + float key; + int value; + + bool operator<(const Pair &b) const + { + if (key < b.key) + return true; + if (key > b.key) + return false; + + unsigned int key_bits = *reinterpret_cast(const_cast(&key)); + unsigned int b_key_bits = *reinterpret_cast(const_cast(&b.key)); + unsigned int HIGH_BIT = 1u << 31; + return ((key_bits & HIGH_BIT) != 0) && ((b_key_bits & HIGH_BIT) == 0); // true if key == -0 && b.key == +0 + } +}; + +int main() +{ + const int nItem = 150; + float *hKey = new float[nItem]; + float *hKeyCPU = new float[nItem]; + float *hKeyGPU = new float[nItem]; + int *hValue = new int[nItem]; + int *hValueCPU = new int[nItem]; + int *hValueGPU = new int[nItem]; + + // initialization + for (int i = 0; i < nItem; ++i) + { + hKey[i] = (float)rand(); + hValue[i] = i; + } + + // sort by CPU + Pair *hPair = new Pair[nItem]; + for (int i = 0; i < nItem; ++i) + { + hPair[i].key = hKey[i]; + hPair[i].value = hValue[i]; + } + + std::stable_sort(hPair, hPair + nItem); + + for (int i = 0; i < nItem; ++i) + { + hKeyCPU[i] = hPair[i].key; + hValueCPU[i] = hPair[i].value; + } + + delete[] hPair; + + // sort by GPU + DoubleBuffer dKey; // special data structure used by CUB + DoubleBuffer dValue; + cudaMalloc((void**)&dKey.d_buffers[0], sizeof(float) * nItem); + cudaMalloc((void**)&dKey.d_buffers[1], sizeof(float) * nItem); + cudaMalloc((void**)&dValue.d_buffers[0], sizeof(int) * nItem); + cudaMalloc((void**)&dValue.d_buffers[1], sizeof(int) * nItem); + + size_t tempByte = 0; + void *dTemp = NULL; + DeviceRadixSort::SortPairs(dTemp, tempByte, dKey, dValue, nItem); // get temporary workspace, usually small but could not to be ignored + cudaMalloc(&dTemp, tempByte); + + printf("before sort, dKey.selector = %d, dValue.selector = %d\n",dKey.selector, dValue.selector);// valid data locate in dKey[selector] and dValue[] + cudaMemcpy(dKey.Current(), hKey, sizeof(float) * nItem, cudaMemcpyHostToDevice); + cudaMemcpy(dValue.Current(), hValue, sizeof(int) * nItem, cudaMemcpyHostToDevice); + + DeviceRadixSort::SortPairs(dTemp, tempByte, dKey, dValue, nItem); // DeviceRadixSort::SortPairs, SortPairsDescending + + printf("after sort, dKey.selector = %d, dValue.selector = %d\n",dKey.selector, dValue.selector);// selector changed during the sort + cudaMemcpy(hKeyGPU, dKey.Current(), sizeof(float) * nItem, cudaMemcpyDeviceToHost); + cudaMemcpy(hValueGPU, dValue.Current(), sizeof(int) * nItem, cudaMemcpyDeviceToHost); + + // check result + bool pass = true; + for(int i = 0;i < nItem && pass == true; i++) + { + if(hKeyCPU[i] != hKeyGPU[i]) + { + printf("error at i = %d, hKeyCPU[i] = %f, hKeyGPU[i] = %f\n", i, hKeyCPU[i], hKeyGPU[i]); + pass = false; + } + if(hValueCPU[i] != hValueGPU[i]) + { + printf("error at i = %d, hValueCPU[i] = %d, hValueGPU[i] = %d\n", i, hValueCPU[i], hValueGPU[i]); + pass = false; + } + } + printf("Test %s\n", pass?"succeed!":"failed!"); + for(int i = 0;i < nItem; i++) + { + printf("%3d: input(%.4E,%3d), outputCPU(%.4E,%3d), outputGPU(%.4E,%3d)\n", + i, hKey[i], hValue[i], hKeyCPU[i], hValueCPU[i], hKeyGPU[i], hValueGPU[i]); + } + + if (hKey) delete[] hKey; + if (hKeyCPU) delete[] hKeyCPU; + if (hKeyGPU) delete[] hKeyGPU; + if (hValue) delete[] hValue; + if (hValueCPU) delete[] hValueCPU; + if (hValueGPU) delete[] hValueGPU; + if (dKey.d_buffers[0]) cudaFree(dKey.d_buffers[0]); + if (dKey.d_buffers[1]) 
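+    /* Both halves of each DoubleBuffer were cudaMalloc'ed above, so both are
+       freed here regardless of which one 'selector' ends up pointing at. */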
cudaFree(dKey.d_buffers[1]); + if (dValue.d_buffers[0])cudaFree(dValue.d_buffers[0]); + if (dValue.d_buffers[1])cudaFree(dValue.d_buffers[1]); + if (dTemp) cudaFree(dTemp); + + return 0; +} diff --git a/cuda_code/cublas_test_1.cu b/cuda_code/cublas_test_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..6e3f4a37345b5352fc83e689e6ecc6f15b49f52d --- /dev/null +++ b/cuda_code/cublas_test_1.cu @@ -0,0 +1,159 @@ +#include +#include +#include +#include +#include +#include + +using namespace std; + +const char* cublasGetErrorString(cublasStatus_t status) { + switch(status) { + case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; + case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; + case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; + case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; + case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; + case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; + case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; + case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; + } + return "unknown error"; +} + +cublasHandle_t blas_handle() { + static int init[16] = {0}; + static cublasHandle_t handle[16]; + const int n = 0; + //cudaError_t status = cudaGetDevice(&n); + if(!init[n]) { + cublasStatus_t st = cublasCreate(&handle[n]); + if (st != CUBLAS_STATUS_SUCCESS) { + printf("blas_handle create failed! %s:%d, code:%s\n", __FILE__, __LINE__, cublasGetErrorString(st)); + } + init[n] = 1; + } + return handle[n]; +} + +template +void createBatchBuffers(T* buff[], T* data, const size_t len_per_batch, const int batch_num) { + for(int i = 0; i < batch_num; ++i) { + buff[i] = data + len_per_batch * i; + } +} + +void cublas_mat(float* d_C, float* d_A, float *d_B, const int A_ROW, const int A_COL, const int B_COL) { + cublasHandle_t handle = blas_handle(); + float alpha = 1.0, beta = 0.0; + cublasStatus_t st = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, B_COL, A_ROW, A_COL,&alpha, + d_B, B_COL, d_A, A_COL, &beta, d_C, B_COL); + + if (st != CUBLAS_STATUS_SUCCESS) { + printf("cublasSgemm error occurred! %s : %d, error_code:%s\n", __FILE__, __LINE__, + cublasGetErrorString(st)); + exit(-1); + } + +} + +#define MAX_BATCH_SIZE 5 + +void cublas_bmm(float* d_C, float* d_A, float* d_B, + const int A_ROW, const int A_COL, const int B_COL, const int batch_num) { + cublasHandle_t handle = blas_handle(); + float alpha = 1.0, beta = 0.0; + + float* A_buff[MAX_BATCH_SIZE]; + float* B_buff[MAX_BATCH_SIZE]; + float* C_buff[MAX_BATCH_SIZE]; + float** dA_buff, **dB_buff, **dC_buff; + createBatchBuffers(A_buff, d_A, A_ROW * A_COL, batch_num); + createBatchBuffers(B_buff, d_B, A_COL * B_COL, batch_num); + createBatchBuffers(C_buff, d_C, A_ROW * B_COL, batch_num); + + cudaMalloc(&dA_buff, sizeof(float*) * batch_num); + cudaMalloc(&dB_buff, sizeof(float*) * batch_num); + cudaMalloc(&dC_buff, sizeof(float*) * batch_num); + + cudaMemcpy(dA_buff, A_buff, sizeof(float*) * batch_num, cudaMemcpyHostToDevice); + cudaMemcpy(dB_buff, B_buff, sizeof(float*) * batch_num, cudaMemcpyHostToDevice); + cudaMemcpy(dC_buff, C_buff, sizeof(float*) * batch_num, cudaMemcpyHostToDevice); + + cublasStatus_t st = cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, + B_COL, A_ROW, A_COL, &alpha, dB_buff, B_COL, + dA_buff, A_COL, &beta, dC_buff, B_COL, batch_num); + + if (st != CUBLAS_STATUS_SUCCESS) { + printf("cublasSgemm error occurred! 
%s : %d, error_code:%s\n", __FILE__, __LINE__, + cublasGetErrorString(st)); + exit(-1); + } + + cudaFree(dA_buff); + cudaFree(dB_buff); + cudaFree(dC_buff); +} + +int main() { + cudaSetDevice(0); + float* A, *B, *C; + float* d_A, *d_B, *d_C; + float* out; + int A_ROW = 7; + int A_COL = 9; + int B_COL = 10; + std::default_random_engine generator; + std::normal_distribution distribution(10., 2.); + + size_t A_size = A_ROW * A_COL; + size_t B_size = A_COL * B_COL; + size_t C_size = A_ROW * B_COL; + A = new float[A_size]; + B = new float[B_size]; + C = new float[C_size]; + out = new float[C_size]; + + cudaMalloc((void**)&d_A, sizeof(float) * A_size); + cudaMalloc((void**)&d_B, sizeof(float) * B_size); + cudaMalloc((void**)&d_C, sizeof(float) * C_size); + + for(int i = 0; i < A_ROW*A_COL; ++i) { + A[i] = distribution(generator); + } + for(int i = 0; i < A_COL*B_COL; ++i) { + B[i] = distribution(generator); + } + float tmp; + for(int i = 0; i < A_ROW; ++i) { + for(int j = 0; j < B_COL; ++j) { + tmp = 0; + for(int k = 0; k < A_COL; ++k) { + tmp += A[i * A_COL + k] * B[k * B_COL + j]; + } + C[i * B_COL + j] = tmp; + } + } + + cudaMemcpy(d_A, A, sizeof(float) * A_size, cudaMemcpyHostToDevice); + cudaMemcpy(d_B, B, sizeof(float) * B_size, cudaMemcpyHostToDevice); + + //cublas_mat(d_C, d_A, d_B, A_ROW, A_COL, B_COL); + cublas_bmm(d_C, d_A, d_B, A_ROW, A_COL, B_COL, 1); + + cudaMemcpy(out, d_C, sizeof(float) * A_ROW * B_COL, cudaMemcpyDeviceToHost); + /// varification + for(size_t i = 0; i < C_size; ++i) { + float err = fabs(out[i] - C[i]); + if (err > 1e-3) { + cout << "i:" << i << " out:" << out[i] << " c:" << C[i] << " err:" << err << endl; + } + assert(err < 1e-3); + } + cout << "cublas test successfully!" << endl; + + delete [] A; + delete [] B; + delete [] C; + delete [] out; +} diff --git a/cuda_code/cudnn_deconv_layer_15.cu b/cuda_code/cudnn_deconv_layer_15.cu new file mode 100644 index 0000000000000000000000000000000000000000..eb1df32918fb763eb00298cc3b05ed522068ba3a --- /dev/null +++ b/cuda_code/cudnn_deconv_layer_15.cu @@ -0,0 +1,138 @@ +#ifdef USE_CUDNN +#include + +#include "caffe/layers/cudnn_deconv_layer.hpp" + +namespace caffe { + +__global__ void sync_deconv_groups() {} + +template +void CuDNNDeconvolutionLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + const Dtype* weight = this->blobs_[0]->gpu_data(); + for (int i = 0; i < bottom.size(); ++i) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); + + // Forward through cuDNN in parallel over groups. + for (int g = 0; g < this->group_; g++) { + // Filters. + CUDNN_CHECK(cudnnConvolutionBackwardData( + handle_[g], + cudnn::dataType::one, + filter_desc_, + weight + this->weight_offset_ * g, + bottom_descs_[i], + bottom_data + bottom_offset_ * g, + conv_descs_[i], + bwd_data_algo_[i], + workspace[g], + workspace_bwd_data_sizes_[i], + cudnn::dataType::zero, + top_descs_[i], + top_data + top_offset_ * g)); + + // Bias. + if (this->bias_term_) { + const Dtype* bias_data = this->blobs_[1]->gpu_data(); + CUDNN_CHECK(cudnnAddTensor(handle_[g], + cudnn::dataType::one, + bias_desc_, + bias_data + bias_offset_ * g, + cudnn::dataType::one, + top_descs_[i], + top_data + top_offset_ * g)); + } + } + + // Synchronize the work across groups, each of which went into its own + // stream, by launching an empty kernel into the default (null) stream. 
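+    // (The empty kernel acts as a barrier because the legacy default stream does
+    // not run concurrently with other blocking streams: it cannot start until the
+    // per-group streams used above have drained.)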
+ // NOLINT_NEXT_LINE(whitespace/operators) + sync_deconv_groups<<<1, 1>>>(); + } +} + +template +void CuDNNDeconvolutionLayer::Backward_gpu( + const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + const Dtype* weight = NULL; + Dtype* weight_diff = NULL; + if (this->param_propagate_down_[0]) { + weight = this->blobs_[0]->gpu_data(); + weight_diff = this->blobs_[0]->mutable_gpu_diff(); + } + Dtype* bias_diff = NULL; + if (this->bias_term_ && this->param_propagate_down_[1]) { + bias_diff = this->blobs_[1]->mutable_gpu_diff(); + } + for (int i = 0; i < top.size(); ++i) { + const Dtype* top_diff = top[i]->gpu_diff(); + // Backward through cuDNN in parallel over groups and gradients. + for (int g = 0; g < this->group_; g++) { + // Gradient w.r.t. bias. + if (this->bias_term_ && this->param_propagate_down_[1]) { + CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0 * this->group_ + g], + cudnn::dataType::one, + top_descs_[i], + top_diff + top_offset_ * g, + cudnn::dataType::one, + bias_desc_, + bias_diff + bias_offset_ * g)); + } + + // Gradient w.r.t. weights. + if (this->param_propagate_down_[0]) { + const Dtype* bottom_data = bottom[i]->gpu_data(); + CUDNN_CHECK(cudnnConvolutionBackwardFilter( + handle_[1 * this->group_ + g], + cudnn::dataType::one, + top_descs_[i], + top_diff + top_offset_ * g, + bottom_descs_[i], + bottom_data + bottom_offset_ * g, + conv_descs_[i], + bwd_filter_algo_[i], + workspace[1 * this->group_ + g], + workspace_bwd_filter_sizes_[i], + cudnn::dataType::one, + filter_desc_, + weight_diff + this->weight_offset_ * g)); + } + + // Gradient w.r.t. bottom data. + if (propagate_down[i]) { + if (weight == NULL) { + weight = this->blobs_[0]->gpu_data(); + } + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); + CUDNN_CHECK( + cudnnConvolutionForward(handle_[2 * this->group_ + g], + cudnn::dataType::one, + top_descs_[i], + top_diff + top_offset_ * g, + filter_desc_, + weight + this->weight_offset_ * g, + conv_descs_[i], + fwd_algo_[i], + workspace[2 * this->group_ + g], + workspace_fwd_sizes_[i], + cudnn::dataType::zero, + bottom_descs_[i], + bottom_diff + bottom_offset_ * g)); + } + } + + // Synchronize the work across groups, each of which went into its own + // stream, by launching an empty kernel into the default (null) stream. + // NOLINT_NEXT_LINE(whitespace/operators) + sync_deconv_groups<<<1, 1>>>(); + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(CuDNNDeconvolutionLayer); + +} // namespace caffe +#endif diff --git a/cuda_code/cudpp_1.cu b/cuda_code/cudpp_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..f49c5209a23f9dd7792323766e81bfdc22a67c2c --- /dev/null +++ b/cuda_code/cudpp_1.cu @@ -0,0 +1,281 @@ +/** + * k2/csrc/cudpp/cudpp.cu + * + * Copyright (c) 2021 Xiaomi Corporation (authors: Fangjun Kuang) + * + * See LICENSE for clarification regarding multiple authors + */ + +// This file is modified from CUDPP. The code style is kept +// largely the same with CUDPP, which is different from what k2 is using. +// +// *************************************************************** +// cuDPP -- CUDA Data Parallel Primitives library +// ------------------------------------------------------------- +// $Revision: 3505 $ +// $Date: 2007-07-06 09:26:06 -0700 (Fri, 06 Jul 2007) $ +// ------------------------------------------------------------- +// This source code is distributed under the terms of license.txt in +// the root directory of this source distribution. 
+// ------------------------------------------------------------- + +#include +#include +#include + +#include "k2/csrc/array.h" +#include "k2/csrc/context.h" +#include "k2/csrc/cudpp/cudpp_util.h" +#include "k2/csrc/cudpp/segmented_scan_cta.h" +#include "k2/csrc/cudpp/segmented_scan_kernel.h" +#include "k2/csrc/cudpp/vector_kernel.h" +#include "k2/csrc/log.h" + +namespace k2 { + +struct SegmentedScanPlan { + public: + explicit SegmentedScanPlan(int32_t num_elements, + int32_t element_size_in_bytes, ContextPtr c) + : num_elements(num_elements), + element_size_in_bytes(element_size_in_bytes) { + Allocate(c); + } + + void Allocate(ContextPtr c) { + int32_t numElts = num_elements; + + int32_t level = 0; + + do { + int32_t numBlocks = + max(1, (int32_t)ceil((double)numElts / + (SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); + if (numBlocks > 1) { + level++; + } + numElts = numBlocks; + } while (numElts > 1); + + block_sums.reset(new int8_t *[level]); + block_flags.reset(new uint32_t *[level]); + block_indexes.reset(new uint32_t *[level]); + + numElts = num_elements; + + level = 0; + + do { + int32_t numBlocks = + max(1, (int32_t)ceil((double)numElts / + (SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); + if (numBlocks > 1) { + buf_i8.push_back(Array1(c, numBlocks * element_size_in_bytes)); + block_sums[level] = buf_i8.back().Data(); + + buf_ui32.push_back(Array1(c, numBlocks)); + block_flags[level] = buf_ui32.back().Data(); + + buf_ui32.push_back(Array1(c, numBlocks)); + block_indexes[level] = buf_ui32.back().Data(); + + level++; + } + numElts = numBlocks; + } while (numElts > 1); + } + + // Intermediate block sums array + std::unique_ptr block_sums; + + // Intermediate block flags array + std::unique_ptr block_flags; + + // Intermediate block indexes array + std::unique_ptr block_indexes; + + int32_t num_elements; + int32_t element_size_in_bytes; + + std::vector> buf_i8; + std::vector> buf_ui32; +}; + +template +static void SegmentedScanArrayRecursive( + ContextPtr context, T *d_out, const T *d_idata, const uint32_t *d_iflags, + T **d_blockSums, uint32_t **block_flags, uint32_t **block_indexes, + int num_elements, int level, bool sm12OrBetterHw) { + int32_t numBlocks = + max(1, (int32_t)std::ceil((double)num_elements / + (SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); + + // This is the number of elements per block that the + // CTA level API is aware of + uint32_t numEltsPerBlock = SCAN_CTA_SIZE * 2; + + // Space to store flags - we need two sets. 
One gets modified and the + // other doesn't + uint32_t flagSpace = numEltsPerBlock * sizeof(uint32_t); + + // Space to store indexes + uint32_t idxSpace = numEltsPerBlock * sizeof(uint32_t); + + // Total shared memory space + uint32_t sharedMemSize = sizeof(T) * (numEltsPerBlock) + idxSpace + flagSpace; + + // setup execution parameters + dim3 grid(max(1, numBlocks), 1, 1); + dim3 threads(SCAN_CTA_SIZE, 1, 1); + + bool fullBlock = + (num_elements == (numBlocks * SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)); + + uint32_t traitsCode = 0; + if (numBlocks > 1) traitsCode |= 1; + if (fullBlock) traitsCode |= 2; + if (sm12OrBetterHw) traitsCode |= 4; + + cudaStream_t stream = context->GetCudaStream(); + + switch (traitsCode) { + case 0: // single block, single row, non-full last block + K2_CUDA_SAFE_CALL( + segmentedScan4< + T, SegmentedScanTraits> + <<>>(d_out, d_idata, d_iflags, + num_elements, 0, 0, 0)); + break; + case 1: // multi block, single row, non-full last block + K2_CUDA_SAFE_CALL( + segmentedScan4< + T, SegmentedScanTraits> + <<>>( + d_out, d_idata, d_iflags, num_elements, d_blockSums[level], + block_flags[level], block_indexes[level])); + break; + case 2: // single block, single row, full last block + K2_CUDA_SAFE_CALL( + segmentedScan4< + T, SegmentedScanTraits> + <<>>(d_out, d_idata, d_iflags, + num_elements, 0, 0, 0)); + break; + case 3: // multi block, single row, full last block + K2_CUDA_SAFE_CALL( + segmentedScan4< + T, SegmentedScanTraits> + <<>>( + d_out, d_idata, d_iflags, num_elements, d_blockSums[level], + block_flags[level], block_indexes[level])); + break; + case 4: // single block, single row, non-full last block + K2_CUDA_SAFE_CALL( + segmentedScan4< + T, SegmentedScanTraits> + <<>>(d_out, d_idata, d_iflags, + num_elements, 0, 0, 0)); + break; + case 5: // multi block, single row, non-full last block + K2_CUDA_SAFE_CALL( + segmentedScan4< + T, SegmentedScanTraits> + <<>>( + d_out, d_idata, d_iflags, num_elements, d_blockSums[level], + block_flags[level], block_indexes[level])); + break; + case 6: // single block, single row, full last block + K2_CUDA_SAFE_CALL( + segmentedScan4< + T, SegmentedScanTraits> + <<>>(d_out, d_idata, d_iflags, + num_elements, 0, 0, 0)); + break; + case 7: // multi block, single row, full last block + K2_CUDA_SAFE_CALL( + segmentedScan4< + T, SegmentedScanTraits> + <<>>( + d_out, d_idata, d_iflags, num_elements, d_blockSums[level], + block_flags[level], block_indexes[level])); + break; + } + + if (numBlocks > 1) { + // After scanning all the sub-blocks, we are mostly done. But + // now we need to take all of the last values of the + // sub-blocks and segment scan those. This will give us a new value + // that must be sdded to the first segment of each block to get + // the final results. 
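+    //
+    // A small worked example (exclusive sum, one segment spanning two blocks):
+    // block 0 holds [1 2 3] and block 1 holds [4 5 6]; the per-block scans give
+    // [0 1 3] and [0 4 9], the scanned sum of block 0 is 6, and adding 6
+    // uniformly to block 1's still-open first segment yields [6 10 15], i.e. the
+    // exclusive scan of the full array, [0 1 3 6 10 15].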
+ SegmentedScanArrayRecursive( + context, (T *)d_blockSums[level], (const T *)d_blockSums[level], + block_flags[level], (T **)d_blockSums, block_flags, block_indexes, + numBlocks, level + 1, sm12OrBetterHw); + + if (isBackward) { + if (fullBlock) + K2_CUDA_SAFE_CALL(vectorSegmentedAddUniformToRight4 + <<>>( + d_out, d_blockSums[level], block_indexes[level], + num_elements, 0, 0)); + else + K2_CUDA_SAFE_CALL(vectorSegmentedAddUniformToRight4 + <<>>( + d_out, d_blockSums[level], block_indexes[level], + num_elements, 0, 0)); + } else { + if (fullBlock) + K2_CUDA_SAFE_CALL(vectorSegmentedAddUniform4 + <<>>( + d_out, d_blockSums[level], block_indexes[level], + num_elements, 0, 0)); + else + K2_CUDA_SAFE_CALL(vectorSegmentedAddUniform4 + <<>>( + d_out, d_blockSums[level], block_indexes[level], + num_elements, 0, 0)); + } + } +} + +template +void SegmentedExclusiveSum(ContextPtr context, const T *d_in, + int32_t num_elements, const uint32_t *d_iflags, + T *d_out) { + SegmentedScanPlan plan(num_elements, sizeof(T), context); + + SegmentedScanArrayRecursive, false /*isBackward*/, + true /*isExclusive*/, false /*isBackward*/>( + context, d_out, d_in, d_iflags, + reinterpret_cast(plan.block_sums.get()), plan.block_flags.get(), + plan.block_indexes.get(), num_elements, 0, true /*sm12OrBetterHw*/); +} + +template void SegmentedExclusiveSum(ContextPtr context, + const int32_t *d_in, + int32_t num_elements, + const uint32_t *d_iflags, + int32_t *d_out); + +template void SegmentedExclusiveSum(ContextPtr context, + const float *d_in, + int32_t num_elements, + const uint32_t *d_iflags, + float *d_out); + +template void SegmentedExclusiveSum(ContextPtr context, + const double *d_in, + int32_t num_elements, + const uint32_t *d_iflags, + double *d_out); + +} // namespace k2 diff --git a/cuda_code/cufft_calls.cu b/cuda_code/cufft_calls.cu new file mode 100644 index 0000000000000000000000000000000000000000..4a671e0d96cbe0373eb9b32f7a4086594e1cf821 --- /dev/null +++ b/cuda_code/cufft_calls.cu @@ -0,0 +1,84 @@ +/* +Copyright (c) 2022, Mahendra Verma, Manthan verma, Soumyadeep Chatterjee +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/* + \brief ---> Code to compute FFT on multi-node on GPUs Scaling upto 512 GPUs for grid size upto 4096^3 + \author ---> Manthan verma, Soumyadeep Chatterjee, Gaurav Garg, Bharatkumar Sharma, Nishant Arya, Shashi Kumar, Mahendra Verma + \dated --> Feb 2022 + \copyright New BSD License +*/ + +#include "header.cuh" + +// Explicit initialization of CUFFT R2C +template<> void cufft_call_r2c(cufftHandle &plan,cufftReal* input_data,cufftComplex* output_data) +{ + gpuerrcheck_cufft(cufftExecR2C(plan, input_data, output_data), __LINE__); +} + +template<> void cufft_call_r2c(cufftHandle &plan,cufftDoubleReal* input_data,cufftDoubleComplex* output_data) +{ + gpuerrcheck_cufft(cufftExecD2Z(plan, input_data, output_data), __LINE__); +} + + +// Explicit initialization of CUFFT C2C +template<> void cufft_call_c2c(cufftHandle &plan,cufftComplex* input_data, int direction) +{ + gpuerrcheck_cufft(cufftExecC2C(plan, input_data, input_data,direction), __LINE__); +} + +template<> void cufft_call_c2c(cufftHandle &plan,cufftDoubleComplex* input_data,int direction) +{ + gpuerrcheck_cufft(cufftExecZ2Z(plan, input_data, input_data,direction), __LINE__); +} + +// Explicit initialization of CUFFT C2R +template<> void cufft_call_c2r(cufftHandle &plan,cufftComplex* input_data,cufftReal* output_data) +{ + gpuerrcheck_cufft(cufftExecC2R(plan, input_data, output_data), __LINE__); +} + +template<> void cufft_call_c2r(cufftHandle &plan,cufftDoubleComplex* input_data,cufftDoubleReal* output_data) +{ + gpuerrcheck_cufft(cufftExecZ2D(plan, input_data, output_data), __LINE__); +} + +// MPI CALLS datatype + +template<> MPI_Datatype mpi_type_call(float a) +{ + return MPI_CXX_COMPLEX; +} + +template<> MPI_Datatype mpi_type_call(double a) +{ + return MPI_CXX_DOUBLE_COMPLEX; +} \ No newline at end of file diff --git a/cuda_code/cugraph_1.cu b/cuda_code/cugraph_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..66e0fa268a6cea46cbe073a122e02cc52bc29c16 --- /dev/null +++ b/cuda_code/cugraph_1.cu @@ -0,0 +1,538 @@ +// -*-c++-*- + + /* + * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. + * + */ + +// Graph analytics features + +#include +#include "utilities/graph_utils.cuh" +#include "converters/COOtoCSR.cuh" +#include "utilities/error_utils.h" +#include "converters/renumber.cuh" +#include +#include +#include +#include "utilities/cusparse_helper.h" +#include +#include +/* + * cudf has gdf_column_free and using this is, in general, better design than + * creating our own, but we will keep this as cudf is planning to remove the + * function. 
cudf plans to redesign cudf::column to fundamentally solve this + * problem, so once they finished the redesign, we need to update this code to + * use their new features. Until that time, we may rely on this as a temporary + * solution. + */ + +namespace cugraph { +int get_device(const void *ptr) { + cudaPointerAttributes att; + cudaPointerGetAttributes(&att, ptr); + return att.device; +} + +void gdf_col_delete(gdf_column* col) { + if (col != nullptr) { + cudaStream_t stream {nullptr}; + if (col->data != nullptr) { + ALLOC_FREE_TRY(col->data, stream); + } + if (col->valid != nullptr) { + ALLOC_FREE_TRY(col->valid, stream); + } +#if 0 + /* Currently, gdf_column_view does not set col_name, and col_name can have + an arbitrary value, so freeing col_name can lead to freeing a ranodom + address. This problem should be cleaned up once cudf finishes + redesigning cudf::column. */ + if (col->col_name != nullptr) { + free(col->col_name); + } +#endif + delete col; + } +} + +void gdf_col_release(gdf_column* col) { + delete col; +} + +void cpy_column_view(const gdf_column *in, gdf_column *out) { + if (in != nullptr && out !=nullptr) { + gdf_column_view(out, in->data, in->valid, in->size, in->dtype); + } +} + +void transposed_adj_list_view(Graph *graph, const gdf_column *offsets, + const gdf_column *indices, + const gdf_column *edge_data) { + //This function returns an error if this graph object has at least one graph + //representation to prevent a single object storing two different graphs. + CUGRAPH_EXPECTS( ((graph->edgeList == nullptr) && (graph->adjList == nullptr) && (graph->transposedAdjList == nullptr)), + "Invalid API parameter: Graph data is NULL"); + + CUGRAPH_EXPECTS( offsets->null_count == 0 , "Input column has non-zero null count: offsets->null_count is 0"); + CUGRAPH_EXPECTS( indices->null_count == 0 , "Input column has non-zero null count: indices->null_count is 0"); + CUGRAPH_EXPECTS( (offsets->dtype == indices->dtype), "Unsupported data type: graph data type mismatch" ); + CUGRAPH_EXPECTS( ((offsets->dtype == GDF_INT32)), "Unsupported data type: graph is of wrong data type" ); + CUGRAPH_EXPECTS( (offsets->size > 0), "Column is empty"); + + graph->transposedAdjList = new gdf_adj_list; + graph->transposedAdjList->offsets = new gdf_column; + graph->transposedAdjList->indices = new gdf_column; + graph->transposedAdjList->ownership = 0; + + cpy_column_view(offsets, graph->transposedAdjList->offsets); + cpy_column_view(indices, graph->transposedAdjList->indices); + + if (!graph->prop) + graph->prop = new Graph_properties(); + + if (edge_data) { + CUGRAPH_EXPECTS(indices->size == edge_data->size, "Column size mismatch"); + graph->transposedAdjList->edge_data = new gdf_column; + cpy_column_view(edge_data, graph->transposedAdjList->edge_data); + + bool has_neg_val; + + switch (graph->adjList->edge_data->dtype) { + case GDF_INT8: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->transposedAdjList->edge_data->data), + graph->transposedAdjList->edge_data->size); + break; + case GDF_INT16: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->transposedAdjList->edge_data->data), + graph->transposedAdjList->edge_data->size); + break; + case GDF_INT32: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->transposedAdjList->edge_data->data), + graph->transposedAdjList->edge_data->size); + break; + case GDF_INT64: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->transposedAdjList->edge_data->data), + 
graph->transposedAdjList->edge_data->size); + break; + case GDF_FLOAT32: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->transposedAdjList->edge_data->data), + graph->transposedAdjList->edge_data->size); + break; + case GDF_FLOAT64: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->transposedAdjList->edge_data->data), + graph->transposedAdjList->edge_data->size); + break; + default: + has_neg_val = false; + } + graph->prop->has_negative_edges = + (has_neg_val) ? GDF_PROP_TRUE : GDF_PROP_FALSE; + } else { + graph->transposedAdjList->edge_data = nullptr; + graph->prop->has_negative_edges = GDF_PROP_FALSE; + } + + graph->numberOfVertices = graph->transposedAdjList->offsets->size - 1; +} + +void adj_list_view(Graph *graph, const gdf_column *offsets, + const gdf_column *indices, + const gdf_column *edge_data) { + //This function returns an error if this graph object has at least one graph + //representation to prevent a single object storing two different graphs. + CUGRAPH_EXPECTS( ((graph->edgeList == nullptr) && (graph->adjList == nullptr) && + (graph->transposedAdjList == nullptr)), "Invalid API parameter: graph data is NULL"); + CUGRAPH_EXPECTS( offsets->null_count == 0 , "Input column has non-zero null count"); + CUGRAPH_EXPECTS( indices->null_count == 0 , "Input column has non-zero null count"); + CUGRAPH_EXPECTS( (offsets->dtype == indices->dtype), "Unsupported data type" ); + CUGRAPH_EXPECTS( ((offsets->dtype == GDF_INT32)), "Unsupported data type" ); + CUGRAPH_EXPECTS( (offsets->size > 0), "Column is empty"); + + graph->adjList = new gdf_adj_list; + graph->adjList->offsets = new gdf_column; + graph->adjList->indices = new gdf_column; + graph->adjList->ownership = 0; + + cpy_column_view(offsets, graph->adjList->offsets); + cpy_column_view(indices, graph->adjList->indices); + + if (!graph->prop) + graph->prop = new Graph_properties(); + + if (edge_data) { + CUGRAPH_EXPECTS(indices->size == edge_data->size, "Column size mismatch"); + graph->adjList->edge_data = new gdf_column; + cpy_column_view(edge_data, graph->adjList->edge_data); + + bool has_neg_val; + + switch (graph->adjList->edge_data->dtype) { + case GDF_INT8: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->adjList->edge_data->data), + graph->adjList->edge_data->size); + break; + case GDF_INT16: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->adjList->edge_data->data), + graph->adjList->edge_data->size); + break; + case GDF_INT32: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->adjList->edge_data->data), + graph->adjList->edge_data->size); + break; + case GDF_INT64: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->adjList->edge_data->data), + graph->adjList->edge_data->size); + break; + case GDF_FLOAT32: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->adjList->edge_data->data), + graph->adjList->edge_data->size); + break; + case GDF_FLOAT64: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->adjList->edge_data->data), + graph->adjList->edge_data->size); + break; + default: + has_neg_val = false; + } + graph->prop->has_negative_edges = + (has_neg_val) ? 
GDF_PROP_TRUE : GDF_PROP_FALSE; + } else { + graph->adjList->edge_data = nullptr; + graph->prop->has_negative_edges = GDF_PROP_FALSE; + } + + graph->numberOfVertices = graph->adjList->offsets->size - 1; + +} + +void gdf_adj_list::get_vertex_identifiers(gdf_column *identifiers) { + CUGRAPH_EXPECTS( offsets != nullptr , "Invalid API parameter"); + CUGRAPH_EXPECTS( offsets->data != nullptr , "Invalid API parameter"); + cugraph::detail::sequence((int)offsets->size-1, (int*)identifiers->data); + + +} + +void gdf_adj_list::get_source_indices (gdf_column *src_indices) { + CUGRAPH_EXPECTS( offsets != nullptr , "Invalid API parameter"); + CUGRAPH_EXPECTS( offsets->data != nullptr , "Invalid API parameter"); + CUGRAPH_EXPECTS( src_indices->size == indices->size, "Column size mismatch" ); + CUGRAPH_EXPECTS( src_indices->dtype == indices->dtype, "Unsupported data type" ); + CUGRAPH_EXPECTS( src_indices->size > 0, "Column is empty"); + + cugraph::detail::offsets_to_indices((int*)offsets->data, offsets->size-1, (int*)src_indices->data); + + +} + +void edge_list_view(Graph *graph, const gdf_column *src_indices, + const gdf_column *dest_indices, + const gdf_column *edge_data) { + //This function returns an error if this graph object has at least one graph + //representation to prevent a single object storing two different graphs. + + CUGRAPH_EXPECTS( ((graph->edgeList == nullptr) && (graph->adjList == nullptr) && + (graph->transposedAdjList == nullptr)), "Invalid API parameter"); + CUGRAPH_EXPECTS( src_indices->size == dest_indices->size, "Column size mismatch" ); + CUGRAPH_EXPECTS( src_indices->dtype == dest_indices->dtype, "Unsupported data type" ); + CUGRAPH_EXPECTS( src_indices->dtype == GDF_INT32, "Unsupported data type" ); + CUGRAPH_EXPECTS( src_indices->size > 0, "Column is empty"); + CUGRAPH_EXPECTS( src_indices->null_count == 0 , "Input column has non-zero null count"); + CUGRAPH_EXPECTS( dest_indices->null_count == 0 , "Input column has non-zero null count"); + + + graph->edgeList = new gdf_edge_list; + graph->edgeList->src_indices = new gdf_column; + graph->edgeList->dest_indices = new gdf_column; + graph->edgeList->ownership = 0; + + cpy_column_view(src_indices, graph->edgeList->src_indices); + cpy_column_view(dest_indices, graph->edgeList->dest_indices); + + if (!graph->prop) + graph->prop = new Graph_properties(); + + if (edge_data) { + CUGRAPH_EXPECTS(src_indices->size == edge_data->size, "Column size mismatch"); + graph->edgeList->edge_data = new gdf_column; + cpy_column_view(edge_data, graph->edgeList->edge_data); + + bool has_neg_val; + + switch (graph->edgeList->edge_data->dtype) { + case GDF_INT8: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->edgeList->edge_data->data), + graph->edgeList->edge_data->size); + break; + case GDF_INT16: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->edgeList->edge_data->data), + graph->edgeList->edge_data->size); + break; + case GDF_INT32: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->edgeList->edge_data->data), + graph->edgeList->edge_data->size); + break; + case GDF_INT64: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->edgeList->edge_data->data), + graph->edgeList->edge_data->size); + break; + case GDF_FLOAT32: + has_neg_val = cugraph::detail::has_negative_val( + static_cast(graph->edgeList->edge_data->data), + graph->edgeList->edge_data->size); + break; + case GDF_FLOAT64: + has_neg_val = cugraph::detail::has_negative_val( + 
static_cast(graph->edgeList->edge_data->data), + graph->edgeList->edge_data->size); + break; + default: + has_neg_val = false; + } + graph->prop->has_negative_edges = + (has_neg_val) ? GDF_PROP_TRUE : GDF_PROP_FALSE; + + } else { + graph->edgeList->edge_data = nullptr; + graph->prop->has_negative_edges = GDF_PROP_FALSE; + } + + cugraph::detail::indexing_check ( + static_cast(graph->edgeList->src_indices->data), + static_cast(graph->edgeList->dest_indices->data), + graph->edgeList->dest_indices->size); +} + +template +void add_adj_list_impl (Graph *graph) { + if (graph->adjList == nullptr) { + CUGRAPH_EXPECTS( graph->edgeList != nullptr , "Invalid API parameter"); + int nnz = graph->edgeList->src_indices->size; + graph->adjList = new gdf_adj_list; + graph->adjList->offsets = new gdf_column; + graph->adjList->indices = new gdf_column; + graph->adjList->ownership = 1; + + if (graph->edgeList->edge_data!= nullptr) { + graph->adjList->edge_data = new gdf_column; + + CSR_Result_Weighted adj_list; + ConvertCOOtoCSR_weighted((int*)graph->edgeList->src_indices->data, (int*)graph->edgeList->dest_indices->data, (WT*)graph->edgeList->edge_data->data, nnz, adj_list); + + gdf_column_view(graph->adjList->offsets, adj_list.rowOffsets, + nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); + gdf_column_view(graph->adjList->indices, adj_list.colIndices, + nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); + gdf_column_view(graph->adjList->edge_data, adj_list.edgeWeights, + nullptr, adj_list.nnz, graph->edgeList->edge_data->dtype); + } + else { + CSR_Result adj_list; + ConvertCOOtoCSR((int*)graph->edgeList->src_indices->data,(int*)graph->edgeList->dest_indices->data, nnz, adj_list); + gdf_column_view(graph->adjList->offsets, adj_list.rowOffsets, + nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); + gdf_column_view(graph->adjList->indices, adj_list.colIndices, + nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); + } + graph->numberOfVertices = graph->adjList->offsets->size - 1; + } +} + +void add_edge_list (Graph *graph) { + if (graph->edgeList == nullptr) { + CUGRAPH_EXPECTS( graph->adjList != nullptr , "Invalid API parameter"); + int *d_src; + graph->edgeList = new gdf_edge_list; + graph->edgeList->src_indices = new gdf_column; + graph->edgeList->dest_indices = new gdf_column; + graph->edgeList->ownership = 2; + + cudaStream_t stream{nullptr}; + ALLOC_TRY((void**)&d_src, sizeof(int) * graph->adjList->indices->size, stream); + + cugraph::detail::offsets_to_indices((int*)graph->adjList->offsets->data, + graph->adjList->offsets->size-1, + (int*)d_src); + + gdf_column_view(graph->edgeList->src_indices, d_src, + nullptr, graph->adjList->indices->size, graph->adjList->indices->dtype); + cpy_column_view(graph->adjList->indices, graph->edgeList->dest_indices); + + if (graph->adjList->edge_data != nullptr) { + graph->edgeList->edge_data = new gdf_column; + cpy_column_view(graph->adjList->edge_data, graph->edgeList->edge_data); + } + } + +} + + +template +void add_transposed_adj_list_impl (Graph *graph) { + if (graph->transposedAdjList == nullptr ) { + CUGRAPH_EXPECTS( graph->edgeList != nullptr , "Invalid API parameter"); + int nnz = graph->edgeList->src_indices->size; + graph->transposedAdjList = new gdf_adj_list; + graph->transposedAdjList->offsets = new gdf_column; + graph->transposedAdjList->indices = new gdf_column; + graph->transposedAdjList->ownership = 1; + + if (graph->edgeList->edge_data) { + graph->transposedAdjList->edge_data = new gdf_column; + 
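+      // The transposed adjacency list is obtained by running the same COO -> CSR
+      // conversion with the edge endpoints swapped: dest_indices are passed as the
+      // row indices and src_indices as the column indices below, which yields the
+      // CSR of the transposed graph (equivalently, the CSC of the original one).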
CSR_Result_Weighted adj_list; + ConvertCOOtoCSR_weighted( (int*)graph->edgeList->dest_indices->data, (int*)graph->edgeList->src_indices->data, (WT*)graph->edgeList->edge_data->data, nnz, adj_list); + gdf_column_view(graph->transposedAdjList->offsets, adj_list.rowOffsets, + nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); + gdf_column_view(graph->transposedAdjList->indices, adj_list.colIndices, + nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); + gdf_column_view(graph->transposedAdjList->edge_data, adj_list.edgeWeights, + nullptr, adj_list.nnz, graph->edgeList->edge_data->dtype); + } + else { + + CSR_Result adj_list; + ConvertCOOtoCSR((int*)graph->edgeList->dest_indices->data, (int*)graph->edgeList->src_indices->data, nnz, adj_list); + gdf_column_view(graph->transposedAdjList->offsets, adj_list.rowOffsets, + nullptr, adj_list.size+1, graph->edgeList->src_indices->dtype); + gdf_column_view(graph->transposedAdjList->indices, adj_list.colIndices, + nullptr, adj_list.nnz, graph->edgeList->src_indices->dtype); + } + graph->numberOfVertices = graph->transposedAdjList->offsets->size - 1; + } + +} + +void add_adj_list(Graph *graph) { + if (graph->adjList == nullptr) { + CUGRAPH_EXPECTS( graph->edgeList != nullptr , "Invalid API parameter"); + CUGRAPH_EXPECTS( graph->edgeList->src_indices->dtype == GDF_INT32, "Unsupported data type" ); + + if (graph->edgeList->edge_data != nullptr) { + switch (graph->edgeList->edge_data->dtype) { + case GDF_FLOAT32: return cugraph::add_adj_list_impl(graph); + case GDF_FLOAT64: return cugraph::add_adj_list_impl(graph); + default: CUGRAPH_FAIL("Unsupported data type"); + } + } + else { + return cugraph::add_adj_list_impl(graph); + } + } +} + +void add_transposed_adj_list(Graph *graph) { + if (graph->transposedAdjList == nullptr) { + if (graph->edgeList == nullptr) + cugraph::add_edge_list(graph); + + CUGRAPH_EXPECTS(graph->edgeList->src_indices->dtype == GDF_INT32, "Unsupported data type"); + CUGRAPH_EXPECTS(graph->edgeList->dest_indices->dtype == GDF_INT32, "Unsupported data type"); + + if (graph->edgeList->edge_data != nullptr) { + switch (graph->edgeList->edge_data->dtype) { + case GDF_FLOAT32: return cugraph::add_transposed_adj_list_impl(graph); + case GDF_FLOAT64: return cugraph::add_transposed_adj_list_impl(graph); + default: CUGRAPH_FAIL("Unsupported data type"); + } + } + else { + return cugraph::add_transposed_adj_list_impl(graph); + } + } +} + +void delete_adj_list(Graph *graph) { + if (graph->adjList) { + delete graph->adjList; + } + graph->adjList = nullptr; + +} + +void delete_edge_list(Graph *graph) { + if (graph->edgeList) { + delete graph->edgeList; + } + graph->edgeList = nullptr; + +} + +void delete_transposed_adj_list(Graph *graph) { + if (graph->transposedAdjList) { + delete graph->transposedAdjList; + } + graph->transposedAdjList = nullptr; + +} + +void number_of_vertices(Graph *graph) { + if (graph->numberOfVertices != 0) + + + // + // int32_t implementation for now, since that's all that + // is supported elsewhere. 
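+  //
+  // The count is recovered as 1 + max(vertex id) over both the source and
+  // destination columns. cub::DeviceReduce::Max is first invoked with a NULL
+  // workspace purely to size the temporary storage, and then run on each of the
+  // two columns.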
+ // + CUGRAPH_EXPECTS( (graph->edgeList != nullptr), "Invalid API parameter"); + CUGRAPH_EXPECTS( (graph->edgeList->src_indices->dtype == GDF_INT32), "Unsupported data type" ); + + int32_t h_max[2]; + int32_t *d_max; + void *d_temp_storage = nullptr; + size_t temp_storage_bytes = 0; + + ALLOC_TRY(&d_max, sizeof(int32_t), nullptr); + + // + // Compute size of temp storage + // + int32_t *tmp = static_cast(graph->edgeList->src_indices->data); + + cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, tmp, d_max, graph->edgeList->src_indices->size); + + // + // Compute max of src indices and copy to host + // + ALLOC_TRY(&d_temp_storage, temp_storage_bytes, nullptr); + cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, tmp, d_max, graph->edgeList->src_indices->size); + + CUDA_TRY(cudaMemcpy(h_max, d_max, sizeof(int32_t), cudaMemcpyDeviceToHost)); + + // + // Compute max of dest indices and copy to host + // + tmp = static_cast(graph->edgeList->dest_indices->data); + cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, tmp, d_max, graph->edgeList->src_indices->size); + CUDA_TRY(cudaMemcpy(h_max + 1, d_max, sizeof(int32_t), cudaMemcpyDeviceToHost)); + + ALLOC_FREE_TRY(d_temp_storage, nullptr); + ALLOC_FREE_TRY(d_max, nullptr); + + graph->numberOfVertices = 1 + std::max(h_max[0], h_max[1]); + +} + +} //namespace diff --git a/cuda_code/culayer_kernel.cu b/cuda_code/culayer_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..c7ec456c25eac0767bb2bd546bbe78a93091385b --- /dev/null +++ b/cuda_code/culayer_kernel.cu @@ -0,0 +1,113 @@ +#include "culayer.hpp" +#include + +// https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf + +#define DIV_UP(x,y) (1 + ((x - 1) / y)) + + +__constant__ seed_t RNG_G = (seed_t)(6364136223846793005ull); +__constant__ seed_t RNG_C = (seed_t)(1442695040888963407ull); +__constant__ seed_t RNG_P = (seed_t)(1) << 63; + +__device__ __forceinline__ float cu_rnd_real(seed_t* seed) { + float inv_RNG_P = (float)(1) / (float)(RNG_P); + *seed = (RNG_G * *seed + RNG_C) % RNG_P; + return (float)(*seed) * inv_RNG_P; +} + +__global__ void particle_step_kernel(int n, + Particle* particles, + int steps, + float const* const sigs_in, + float const* const absorption_rates_in, + float * const weights_absorbed_out, + int min_index, + int max_index, + float dx) +{ + extern __shared__ float sdata[]; + + int n_cells = max_index-min_index; + + float * const sigs = sdata; + float * const absorption_rates = sdata + n_cells; + float * const weights_absorbed = sdata + 2*n_cells; + + for (int j = 0; j < DIV_UP(n_cells, blockDim.x); j++){ + int cpy_ind = j*blockDim.x + threadIdx.x; + if (cpy_ind < n_cells){ + sigs[cpy_ind] = sigs_in[cpy_ind]; + absorption_rates[cpy_ind] = absorption_rates_in[cpy_ind]; + weights_absorbed[cpy_ind] = 0; + } + } + __syncthreads(); + + int i = blockIdx.x * blockDim.x + threadIdx.x; + + if (i < n){ + Particle particle = particles[i]; + + for (int step = 0; step < steps; step++){ + if (particle.index >= min_index && particle.index < max_index) + { + int local_index = particle.index - min_index; + const float interaction_rate = 1.0 - absorption_rates[local_index]; + const float sig_a = sigs[local_index] * absorption_rates[local_index]; + const float sig_i = sigs[local_index] * interaction_rate; + + // calculate theoretic movement + const float h = cu_rnd_real(&particle.seed); + float di = MAXREAL; + if (sig_i > EPS_PRECISION){ + // This should always be true + di = -log(h) / sig_i; + } + + // -- possible new cell -- 
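+        // The free path di sampled above (inverse-transform sampling of an
+        // exponential with rate sig_i) is now compared against the distance to the
+        // cell face the particle is moving towards: mu_sign picks the neighbouring
+        // cell index, x_new_edge is that face's position, and di_edge the distance
+        // along mu needed to reach it. Whichever of di (collision) and di_edge
+        // (face crossing) is smaller decides what happens in this step.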
+ float mu_sign = copysignf(1.0, particle.mu); + int index_new = __float2int_rn(mu_sign) + particle.index; + float x_new_edge = particle.index * dx; + if (mu_sign == 1){ + x_new_edge += dx; + } + + float di_edge = MAXREAL; + if (particle.mu < -EPS_PRECISION || EPS_PRECISION < particle.mu){ + di_edge = (x_new_edge - particle.x) / particle.mu; + } + + if (di < di_edge) { + /* move inside cell an draw new mu */ + index_new = particle.index; + particle.x += di * particle.mu; + particle.mu = 2 * cu_rnd_real(&particle.seed) - 1; + } else { + /* set position to border */ + di = di_edge; + particle.x = x_new_edge; + } + + // -- Calculate amount of absorbed energy -- + const float dw = (1 - expf(-sig_a * di)) * particle.wmc; + + /* Weight removed from particle is added to the layer */ + particle.wmc -= dw; + atomicAdd(weights_absorbed + local_index, dw); + particle.index = index_new; + + + } + } + particles[i] = particle; + } + + __syncthreads(); + for (int j = 0; j < DIV_UP(n_cells, blockDim.x); j++){ + int cpy_ind = j*blockDim.x + threadIdx.x; + if (cpy_ind < n_cells){ + atomicAdd(weights_absorbed_out + cpy_ind, weights_absorbed[cpy_ind]); + } + } +} \ No newline at end of file diff --git a/cuda_code/curysq_eri_sss01.cu b/cuda_code/curysq_eri_sss01.cu new file mode 100644 index 0000000000000000000000000000000000000000..abe5b76808fbdb8161dbbc42eabedf485a198db0 --- /dev/null +++ b/cuda_code/curysq_eri_sss01.cu @@ -0,0 +1,583 @@ +/** + @file + To do: the file has become very messy needs to be cleaned up a bit + the device memory accesses should be converted to use texture + there is no support for double texture fetches, however the thing can be accomplished + using integer texture fetches and unpacking int4 into two double + The memory allocation and copy should be simplified + + finally, the code needs to be slightly modified to have high functions on center C. + instead of on center D. 
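+
+   For reference, the usual unpacking trick reads the double array through an
+   int2 texture and reassembles each value with the __hiloint2double intrinsic,
+   roughly (the texture reference name here is hypothetical):
+
+       int2 v = tex1Dfetch(tex_int2_alias, i);
+       double d = __hiloint2double(v.y, v.x);
+
+   an int4 fetch yields two doubles the same way; the .x/.y word order assumes
+   the usual little-endian layout.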
+*/ + +#include "eri/rysq_eri_int2d.h" +#include "cuda/curysq_util.h" +#include "vec.h" +#include "util.h" +#include +#include "roots/rysq_roots0.h" +#include "roots/rysq_roots1.h" +#include +#include +#include + +#define pow2(x) ((x)*(x)) + +extern __shared__ double shmem[]; + +/** + @brief + @param a + @param b +*/ +void cuRysq_packExp(Rysq_shell_t a, Rysq_shell_t b, + Rysq_shell_t c, Rysq_shell_t d, + double2 *exp) { + + int Kbra = a.K*b.K; + + for(int j = 0, ij = 0; j < b.K; ++j) { + for(int i = 0; i < a.K; ++i, ++ij) { + exp[ij].x = a.a[i]; + exp[ij].y = b.a[j]; + } + } + + for(int l = 0, kl = 0; l < d.K; ++l) { + for(int k = 0; k < c.K; ++k, ++kl) { + exp[kl+Kbra].x = c.a[k]; + exp[kl+Kbra].y = d.a[l]; + } + } +} + + +/** + @brief + @param +*/ +__global__ void cuRysq_eri_ssss(int flags, double tole, + double2 *dev_AB, + int Kbra, int Kket, double2 *dev_braket, + double *dev_rbra, double *dev_rket, + double scale, double *dev_I) { + + typedef struct { + double rbra[6]; + double rket[6]; + double rij[3]; + double rkl[3]; + double rij2; + double rkl2; + double *eij, *ekl; + double *rA, *rB; + } sh_t; + + short rank = ctaRank(); + short bsize = ctaSize(); + + __shared__ sh_t sh; + + // load bra and ket centers + for(int i = rank; i < 6; i += bsize) { + sh.rbra[i] = dev_rbra[i]; + sh.rket[i] = dev_rket[i]; + } + __syncthreads(); + + // compute bra and ket distances + for(int i = rank; i < 3; i += bsize) { + sh.rij[i] = sh.rbra[i] - sh.rbra[i+3]; + sh.rkl[i] = sh.rket[i] - sh.rket[i+3]; + } + __syncthreads(); + + + if(rank == 0) { + sh.rij2 = 0.0; + sh.rkl2 = 0.0; + for(int i = 0; i < 3; ++i) { + sh.rij2 += pow2(sh.rij[i]); + sh.rkl2 += pow2(sh.rkl[i]); + } + // computed bra values + sh.rA = shmem; + sh.eij = sh.rA + 3*Kbra; + + // computed ket values + sh.rB = sh.eij + Kbra; + sh.ekl = sh.rB + 3*Kket; + } + + __syncthreads(); + + // compute bra values + for(int Kij = rank; Kij < Kbra; Kij += bsize) { + double A1 = 1.0/(dev_braket[Kij].x + dev_braket[Kij].y); + sh.eij[Kij] = exp(-dev_braket[Kij].x*dev_braket[Kij].y*A1*sh.rij2); + for(int i = 0; i<3; ++i) { + sh.rA[i+Kij*3] = A1*(dev_braket[Kij].x*sh.rbra[i] + dev_braket[Kij].y*sh.rbra[i+3]); + } + } + + // compute ket values + for(int Kkl = rank; Kkl < Kket; Kkl += bsize) { + double B1 = 1.0/(dev_braket[Kkl+Kbra].x + dev_braket[Kkl+Kbra].y); + sh.ekl[Kkl] = exp(-dev_braket[Kkl+Kbra].x*dev_braket[Kkl+Kbra].y*B1*sh.rkl2); + for(int i = 0; i<3; ++i) { + sh.rB[i+Kkl*3] = B1*(dev_braket[Kkl+Kbra].x*sh.rket[i] + dev_braket[Kkl+Kbra].y*sh.rket[i+3]); + } + } + __syncthreads(); + + double q0 = 0.0; + + // compute contractions + for(int Kkl = threadIdx.z; Kkl < Kket; Kkl += blockDim.z) { + //the bra Kij contractions are mapped to threadIdx.y + int K = threadIdx.y + Kkl*Kbra; + + double2 AB = dev_AB[K]; + double CAB2e = AB.x*sh.eij[threadIdx.y]*sh.ekl[Kkl]; + + //if (fabs(CAB2e) < tole) continue; + + double X = 0.0; + for(int i = 0; i < 3; ++i) { + double rAB = sh.rA[i+threadIdx.y*3] - sh.rB[i+Kkl*3]; + X += pow2(rAB); + } + X *= AB.y; + + double W = Rysq_roots0(X); + q0 += CAB2e*W; + + } + __syncthreads(); + + // reduce values + q0 = ctaReduce(q0, shmem); + + // put values into shared memory + if(rank == 0) { + // scale and put values into global memory + dev_I[0] = scale*SQRT_4PI5*q0; + } +} + +/** + @brief + @param flags +*/ +__global__ void cuRysq_eri_sssp(int flags, double tole, + double2 *dev_AB2rho, + int Kbra, int Kket, double2 *dev_braket, + double *dev_rbra, double *dev_rket, + double scale, double *dev_I) { + + typedef struct { + double 
rbra[6]; + double rket[6]; + double rij[3]; + double rkl[3]; + double rij2; + double rkl2; + double *eij, *ekl; + double *rA, *rB; + double *B; + double *rBk; + } sh_t; + + short rank = ctaRank(); + short bsize = ctaSize(); + + __shared__ sh_t sh; + + // load bra and ket centers + for(int i = rank; i < 6; i += bsize) { + sh.rbra[i] = dev_rbra[i]; + sh.rket[i] = dev_rket[i]; + } + __syncthreads(); + + // compute bra and ket distances + for(int i = rank; i < 3; i += bsize) { + sh.rij[i] = sh.rbra[i] - sh.rbra[i+3]; + sh.rkl[i] = sh.rket[i] - sh.rket[i+3]; + } + __syncthreads(); + + + if(rank == 0) { + sh.rij2 = 0.0; + sh.rkl2 = 0.0; + for(int i = 0; i < 3; ++i) { + sh.rij2 += pow2(sh.rij[i]); + sh.rkl2 += pow2(sh.rkl[i]); + } + // computed bra values + sh.rA = shmem; + sh.eij = sh.rA + 3*Kbra; + + // computed ket values + sh.rB = sh.eij + Kbra; + sh.ekl = sh.rB + 3*Kket; + + sh.B = sh.ekl + Kket; + sh.rBk = sh.B + Kket; + } + __syncthreads(); + + // compute bra values + for(int Kij = rank; Kij < Kbra; Kij += bsize) { + double A1 = 1.0/(dev_braket[Kij].x + dev_braket[Kij].y); + sh.eij[Kij] = exp(-dev_braket[Kij].x*dev_braket[Kij].y*A1*sh.rij2); + for(int i=0; i<3; ++i) { + sh.rA[i+Kij*3] = A1*(dev_braket[Kij].x*sh.rbra[i] + dev_braket[Kij].y*sh.rbra[i+3]); + } + } + + // compute ket values + for(int Kkl = rank; Kkl < Kket; Kkl += bsize) { + sh.B[Kkl] = dev_braket[Kkl+Kbra].x + dev_braket[Kkl+Kbra].y; + double B1 = 1.0/(dev_braket[Kkl+Kbra].x + dev_braket[Kkl+Kbra].y); + sh.ekl[Kkl] = exp(-dev_braket[Kkl+Kbra].x*dev_braket[Kkl+Kbra].y*B1*sh.rkl2); + for(int i = 0; i < 3; ++i) { + sh.rB[i+Kkl*3] = B1*(dev_braket[Kkl+Kbra].x*sh.rket[i] + dev_braket[Kkl+Kbra].y*sh.rket[i+3]); + sh.rBk[i+Kkl*3] = sh.rB[i+Kkl*3] - sh.rket[i+3]; + } + } + __syncthreads(); + + double q[3] = { 0.0, 0.0, 0.0 }; + + // compute contractions + for(unsigned short int Kkl = threadIdx.z; Kkl < Kket; Kkl += blockDim.z) { + //the bra Kij contractions are mapped to threadIdx.y + const unsigned short Kij = threadIdx.y; + const unsigned short K = Kij + Kkl*Kbra; + + double CAB2e = dev_AB2rho[K].x*sh.eij[Kij]*sh.ekl[Kkl]; + // should be absolute value to compare + //if (fabs(CAB2e) < tole) continue; + + double X = 0.0; + for(int i = 0; i < 3; ++i) { + double rAB = (sh.rA[i+Kij*3]-sh.rB[i+Kkl*3]); + X += pow2(rAB); + } + X *= (sh.B[Kkl])*dev_AB2rho[K].y; + double t2, W; + Rysq_roots1(X, &t2, &W); + + CAB2e *= W; + t2 *= dev_AB2rho[K].y; + + for(int i = 0; i < 3; ++i) { + q[i] += CAB2e*(sh.rBk[i+Kkl*3] + (sh.rA[i+Kij*3]-sh.rB[i+Kkl*3])*t2); + } + } + __syncthreads(); + + // reduce values + q[0] = ctaReduce(q[0], shmem); + q[1] = ctaReduce(q[1], shmem); + q[2] = ctaReduce(q[2], shmem); + + // put values into shared memory + if(rank == 0) { + for(int i = 0; i < 3; ++i) { + shmem[i] = q[i]; + } + } + __syncthreads(); + + // scale and put values into global memory + for(int i = rank; i < 3; i += bsize) { + dev_I[i] = scale*SQRT_4PI5*shmem[i]; + } +} + + +/** + @brief + @param flags + @param tole + @param bra + @param ket + @param dev_rho + @param dev_AB2 + @param dev_rbra + @param dev_rket + @param scale + @param dev_I +*/ +__global__ void cuRysq_eri_ssssp(int flags, double tole, + double2 *dev_AB2rho, double *dev_Csp, + int Kbra, int Kket, double2 *dev_braket, + double *dev_rbra, double *dev_rket, + double scale, double *dev_I) { + + + typedef struct { + double rbra[6]; + double rket[6]; + double rij[3]; + double rkl[3]; + double rij2; + double rkl2; + double *eij, *ekl; + double *rA, *rB; + double *B; + double *rBk; + } sh_t; + + 
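+    // sh_t mirrors the layout used by the other kernels in this file: rbra and
+    // rket hold the bra and ket centers, rij/rkl their displacement vectors,
+    // and the remaining pointers (rA, eij, rB, ekl, B, rBk) are carved out of
+    // the dynamic shared-memory buffer shmem by thread 0 below.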
short rank = ctaRank(); + short bsize = ctaSize(); + + __shared__ sh_t sh; + + // load bra and ket centers + for(int i = rank; i < 6; i += bsize) { + sh.rbra[i] = dev_rbra[i]; + sh.rket[i] = dev_rket[i]; + } + __syncthreads(); + + // compute bra and ket distances + for(int i = rank; i < 3; i += bsize) { + sh.rij[i] = sh.rbra[i] - sh.rbra[i+3]; + sh.rkl[i] = sh.rket[i] - sh.rket[i+3]; + } + __syncthreads(); + + + if(rank == 0) { + sh.rij2 = 0.0; + sh.rkl2 = 0.0; + for(int i = 0; i < 3; ++i) { + sh.rij2 += pow2(sh.rij[i]); + sh.rkl2 += pow2(sh.rkl[i]); + } + // computed bra values + sh.rA = shmem; + sh.eij = sh.rA + 3*Kbra; + + // computed ket values + sh.rB = sh.eij + Kbra; + sh.ekl = sh.rB + 3*Kket; + + sh.B = sh.ekl + Kket; + sh.rBk = sh.B + Kket; + } + __syncthreads(); + + // compute bra values + for(int Kij = rank; Kij < Kbra; Kij += bsize) { + double A1 = 1.0/(dev_braket[Kij].x + dev_braket[Kij].y); + sh.eij[Kij] = exp(-dev_braket[Kij].x*dev_braket[Kij].y*A1*sh.rij2); + for(int i=0; i<3; ++i) { + sh.rA[i+Kij*3] = A1*(dev_braket[Kij].x*sh.rbra[i] + dev_braket[Kij].y*sh.rbra[i+3]); + } + } + + // compute ket values + for(int Kkl = rank; Kkl < Kket; Kkl += bsize) { + sh.B[Kkl] = dev_braket[Kkl+Kbra].x + dev_braket[Kkl+Kbra].y; + double B1 = 1.0/(dev_braket[Kkl+Kbra].x + dev_braket[Kkl+Kbra].y); + sh.ekl[Kkl] = exp(-dev_braket[Kkl+Kbra].x*dev_braket[Kkl+Kbra].y*B1*sh.rkl2); + for(int i = 0; i < 3; ++i) { + sh.rB[i+Kkl*3] = B1*(dev_braket[Kkl+Kbra].x*sh.rket[i] + dev_braket[Kkl+Kbra].y*sh.rket[i+3]); + sh.rBk[i+Kkl*3] = sh.rB[i+Kkl*3] - sh.rket[i+3]; + } + } + __syncthreads(); + + double q[4] = { 0.0, 0.0, 0.0, 0.0 }; + + // compute contractions + for(unsigned short int Kkl = threadIdx.z; Kkl < Kket; Kkl += blockDim.z) { + //the bra Kij contractions are mapped to threadIdx.y + const unsigned short Kij = threadIdx.y; + const unsigned short K = Kij + Kkl*Kbra; + + double CAB2e = dev_AB2rho[K].x*sh.eij[Kij]*sh.ekl[Kkl]; + + //if (fabs(CAB2e) < tole) continue; + + double X = 0.0; + for(int i = 0; i < 3; ++i) { + double rAB = (sh.rA[i+Kij*3]-sh.rB[i+Kkl*3]); + X += pow2(rAB); + } + X *= (sh.B[Kkl])*dev_AB2rho[K].y; + double t2, W; + Rysq_roots1(X, &t2, &W); + + CAB2e *= W; + t2 *= dev_AB2rho[K].y; + + q[0] += CAB2e*dev_Csp[Kkl]; + + for(int i = 0; i < 3; ++i) { + q[i+1] += CAB2e*(sh.rBk[i+Kkl*3] + (sh.rA[i+Kij*3]-sh.rB[i+Kkl*3])*t2); + } + } + __syncthreads(); + + // reduce values + q[0] = ctaReduce(q[0], shmem); + q[1] = ctaReduce(q[1], shmem); + q[2] = ctaReduce(q[2], shmem); + q[3] = ctaReduce(q[3], shmem); + + // put values into shared memory + if(rank == 0) { + for(int i = 0; i < 4; ++i) { + shmem[i] = q[i]; + } + } + __syncthreads(); + + // scale and put values into global memory + for(int i = rank; i < 4; i += bsize) { + dev_I[i] = scale*SQRT_4PI5*shmem[i]; + } +} + +/** + @brief +*/ +int cuRysq_eri1(int flags, double tol, + Rysq_shell_t a, double *ri, + Rysq_shell_t b, double *rj, + Rysq_shell_t c, double *rk, + Rysq_shell_t d, double *rl, + double scale, double *I) { + + int L = a.L + b.L + c.L + d.L; + int mask = mask(0,1,2,3); + if(L == 1) { + if(a.L == 1) mask = mask(2,3,1,0); + else if(b.L == 1) mask = mask(2,3,0,1); + else if(c.L == 1) mask = mask(0,1,3,2); + } + + double *r1 = ri; + double *r2 = rj; + double *r3 = rk; + double *r4 = rl; + shuffle(a,b,c,d,mask); + shuffle(r1,r2,r3,r4,mask); + + bool sp = (d.nc == 2); + + int Kbra = a.K*b.K; + int Kket = c.K*d.K; + + double2 braket[Kbra+Kket]; + cuRysq_packExp(a, b, c, d, braket); + + double2 AB2rho[Kbra*Kket]; + //Csp is used to 
create s contraction coefficients + double Csp[Kket]; + + for(int l = 0, ijkl = 0, kl = 0; l < d.K; ++l) { + for(int k = 0; k < c.K; ++k, ++kl) { + double B = braket[kl+Kbra].x+braket[kl+Kbra].y; + double ckl; + if(sp) { + ckl = c.c[0][k]*d.c[1][l]; + Csp[kl] = d.c[0][l]/d.c[1][l]; + } else { + ckl = c.c[0][k]*d.c[0][l]; + } + for(int j = 0, ij = 0; j < b.K; ++j) { + for(int i = 0; i < a.K; ++i, ++ij, ++ijkl) { + double A = braket[ij].x+braket[ij].y; + //rho doesn't include B multiplication + AB2rho[ijkl].y = A/(A + B); + if(L==0) AB2rho[ijkl].y *= B; + double C = a.c[0][i]*b.c[0][j]*ckl; + AB2rho[ijkl].x = (1.0/(sqrt(A+B)*A*B))*C; + } + } + } + } + + double rbra[6], rket[6]; + + for(int i = 0; i < 3; ++i) { + rbra[i] = r1[i]; + rbra[i+3] = r2[i]; + rket[i] = r3[i]; + rket[i+3] = r4[i]; + } + + double *devPtr; + // make space for I[out] + int sizeI = 0; + if(L == 0) sizeI = 1; + else if(sp) sizeI = 4; + else sizeI = 3; + // size includes: exponents of bra, ket; + // rho and AB2+contractions; Csp; center information; + int size = Kbra*2 + Kket*2 + Kbra*Kket*2 + Kket*sp + 12 + sizeI; + cudaMalloc(&devPtr, size*sizeof(double)); + assertCudaSuccess; + + //put all memory onto the device + double2 *dev_braket = (double2*)(devPtr); + double2 *dev_AB2rho = dev_braket + Kbra + Kket; + double *dev_Csp = (double*)(dev_AB2rho + Kbra*Kket); + double *dev_rbra = dev_Csp + Kket*sp; + double *dev_rket = dev_rbra + 6; + double *dev_I = dev_rket + 6; + + cudaMemcpy(dev_braket, braket, (Kbra + Kket)*sizeof(double2), cudaMemcpyHostToDevice); + assertCudaSuccess; + cudaMemcpy(dev_AB2rho, AB2rho, Kbra*Kket*sizeof(double2), cudaMemcpyHostToDevice); + assertCudaSuccess; + cudaMemcpy(dev_Csp, Csp, sp*Kket*sizeof(double), cudaMemcpyHostToDevice); + assertCudaSuccess; + cudaMemcpy(dev_rbra, rbra, 6*sizeof(double), cudaMemcpyHostToDevice); + assertCudaSuccess; + cudaMemcpy(dev_rket, rket, 6*sizeof(double), cudaMemcpyHostToDevice); + assertCudaSuccess; + + dim3 dimG = dim3(1,1); + // map bra to y dimension and ket to z dimension s.t. threadblock is < 128 + dim3 dimB = dim3(1, Kbra, 1); + int Ns = 0; + Ns += Kbra + Kket; //eij and ekl + Ns += 3*Kbra + 3*Kket; // rA and rB + if(L == 1) { + Ns += Kket; // B + Ns += 3*Kket; // rBk + } + Ns = std::max(Ns , int(dimB.x*dimB.y*dimB.z)); // make sure large enough for reduction + if(L == 0) { // + cuRysq_eri_ssss<<>>(flags, tol, + dev_AB2rho, Kbra, Kket, dev_braket, + dev_rbra, dev_rket, scale, dev_I); + } + else if(sp) { // + cuRysq_eri_ssssp<<>>(flags, tol, + dev_AB2rho, dev_Csp, + Kbra, Kket, dev_braket, + dev_rbra, dev_rket, scale, dev_I); + } else { // + cuRysq_eri_sssp<<>>(flags, tol, + dev_AB2rho, Kbra, Kket, dev_braket, + dev_rbra, dev_rket, scale, dev_I); + } + assertCudaSuccess; + + cudaMemcpy(I, dev_I, sizeI*sizeof(double), cudaMemcpyDeviceToHost); + assertCudaSuccess; + cudaFree(devPtr); + assertCudaSuccess; + + return 0; +} + diff --git a/cuda_code/cutlass_reorder_filter_2.cu b/cuda_code/cutlass_reorder_filter_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..2f296aca3faf93bd693c4f0e39e7fb4eced04599 --- /dev/null +++ b/cuda_code/cutlass_reorder_filter_2.cu @@ -0,0 +1,194 @@ +/** + * \file dnn/src/cuda/conv_bias/cutlass_reorder_filter.cu + * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 
+ * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. + */ + +#include "src/cuda/conv_bias/cutlass_reorder_filter.cuh" +#include "src/cuda/query_blocksize.cuh" +#include "src/cuda/integer_subbyte_utils.cuh" + +using namespace megdnn; +using namespace cuda; +using namespace cutlass_wrapper; + +namespace { +template +__device__ __forceinline__ void reorder_ncxhwx_imma_filter_func( + int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH, + uint32_t FW, uint32_t lane, bool trans_oc) { + static constexpr uint32_t elements_per_lane = 128 / size_bits; + static constexpr uint32_t threads_per_interleaved = + interleaved / elements_per_lane; + static constexpr uint32_t instruction_shape_col = 8; + // 4 threads per Quad + static constexpr uint32_t elements_per_thread = instruction_shape_col / 4; + // 4 threads per Quad + static constexpr uint32_t reordered_elements_per_thread = interleaved / 4; + + uint32_t id = lane / threads_per_interleaved; + uint32_t residue = lane % threads_per_interleaved; + uint32_t ICx = IC / interleaved; + uint32_t row = id / (ICx * FH * FW); + uint32_t col = id - row * ICx * FH * FW; + // transpose ncxhwx to cxhwnx + uint32_t src_offset = id * interleaved + residue * elements_per_lane; + + row = (trans_oc) ? (row / interleaved) * interleaved + + ((row % reordered_elements_per_thread) / + elements_per_thread) * + instruction_shape_col + + ((row % interleaved) / + reordered_elements_per_thread) * + elements_per_thread + + (row % elements_per_thread) + : row; + + uint32_t dst_offset = + (col * OC + row) * interleaved + residue * elements_per_lane; + + *(reinterpret_cast(dst + dst_offset * size_bits / 8)) = + *(reinterpret_cast(src + src_offset * size_bits / 8)); +} + +template +__global__ void reorder_ncxhwx_imma_filter_kernel( + int8_t* __restrict__ dst_filter, const int8_t* __restrict__ src_filter, + uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, bool trans_oc) { + static constexpr uint32_t elements_per_lane = 128 / size_bits; + const uint32_t size = OC * IC * FH * FW / elements_per_lane; + uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x; + if (lane < size) { + reorder_ncxhwx_imma_filter_func( + dst_filter, src_filter, OC, IC, FH, FW, lane, trans_oc); + } +} + +template +__device__ __forceinline__ void reorder_nhwc_imma_filter_func( + int8_t* dst, const int8_t* src, uint32_t OC, uint32_t IC, uint32_t FH, + uint32_t FW, uint32_t lane, bool trans_oc) { + static constexpr uint32_t elements_per_access = alignbits / size_bits; + static constexpr uint32_t instruction_shape_col = 8; + // 4 threads per Quad + static constexpr uint32_t elements_per_thread = instruction_shape_col / 4; + // 4 threads per Quad + static constexpr uint32_t reordered_elements_per_thread = interleaved / 4; + uint32_t ICx = IC / elements_per_access; + uint32_t k = lane / (ICx * FH * FW); + uint32_t cxrs = lane - k * ICx * FH * FW; + uint32_t rs = cxrs / ICx; + uint32_t cx = cxrs - rs * ICx; + // transpose nhwc to ncxhwx + uint32_t src_offset = lane * elements_per_access; + // reorder k + k = (trans_oc) + ? 
(k / interleaved) * interleaved + + ((k % reordered_elements_per_thread) / + elements_per_thread) * + instruction_shape_col + + ((k % interleaved) / reordered_elements_per_thread) * + elements_per_thread + + (k % elements_per_thread) + : k; + uint32_t dst_offset = + (k * ICx * FH * FW + cx * FH * FW + rs) * elements_per_access; + + if (alignbits == 32) { + *(reinterpret_cast(dst + dst_offset * size_bits / 8)) = *( + reinterpret_cast(src + src_offset * size_bits / 8)); + } else if (alignbits == 64) { + *(reinterpret_cast(dst + dst_offset * size_bits / 8)) = + *(reinterpret_cast(src + + src_offset * size_bits / 8)); + } else { + *(reinterpret_cast(dst + dst_offset * size_bits / 8)) = + *(reinterpret_cast(src + + src_offset * size_bits / 8)); + } +} + +template +__global__ void reorder_nhwc_imma_filter_kernel( + int8_t* __restrict__ dst_filter, const int8_t* __restrict__ src_filter, + uint32_t OC, uint32_t IC, uint32_t FH, uint32_t FW, bool trans_oc) { + static constexpr uint32_t elements_per_access = alignbits / size_bits; + const uint32_t size = OC * IC * FH * FW / elements_per_access; + uint32_t lane = threadIdx.x + blockIdx.x * blockDim.x; + if (lane < size) { + reorder_nhwc_imma_filter_func( + dst_filter, src_filter, OC, IC, FH, FW, lane, trans_oc); + } +} +} // namespace + +template +void megdnn::cuda::cutlass_wrapper::reorder_ncxhwx_imma_filter( + int8_t* dst_filter, const int8_t* src_filter, uint32_t OC, uint32_t IC, + uint32_t FH, uint32_t FW, bool trans_oc, cudaStream_t stream) { + static constexpr uint32_t elements_per_lane = 128 / size_bits; + uint32_t nr_threads = + query_blocksize_for_kernel(reinterpret_cast( + reorder_ncxhwx_imma_filter_kernel)); + uint32_t vthreads = DIVUP(OC * IC * FH * FW, elements_per_lane); + nr_threads = std::min(nr_threads, vthreads); + uint32_t nr_blocks = DIVUP(vthreads, nr_threads); + reorder_ncxhwx_imma_filter_kernel + <<>>(dst_filter, src_filter, OC, + IC, FH, FW, trans_oc); + after_kernel_launch(); +} + +template +void megdnn::cuda::cutlass_wrapper::reorder_nhwc_imma_filter( + int8_t* dst_filter, const int8_t* src_filter, uint32_t OC, uint32_t IC, + uint32_t FH, uint32_t FW, bool trans_oc, uint32_t oc_interleaved, + cudaStream_t stream) { + static constexpr uint32_t elements_per_access = alignbits / size_bits; + uint32_t nr_threads = + query_blocksize_for_kernel(reinterpret_cast( + reorder_nhwc_imma_filter_kernel)); + uint32_t vthreads = DIVUP(OC * IC * FH * FW, elements_per_access); + nr_threads = std::min(nr_threads, vthreads); + uint32_t nr_blocks = DIVUP(vthreads, nr_threads); + if (oc_interleaved == 32) { + reorder_nhwc_imma_filter_kernel + <<>>( + dst_filter, src_filter, OC, IC, FH, FW, trans_oc); + } else { + reorder_nhwc_imma_filter_kernel + <<>>( + dst_filter, src_filter, OC, IC, FH, FW, trans_oc); + } + after_kernel_launch(); +} + +#define INST(_size_bits, _interleaved) \ + template void megdnn::cuda::cutlass_wrapper::reorder_ncxhwx_imma_filter< \ + _size_bits, _interleaved>(int8_t * dst_filter, \ + const int8_t* src_filter, uint32_t OC, \ + uint32_t IC, uint32_t FH, uint32_t FW, \ + bool trans_oc, cudaStream_t stream); + +INST(8, 32) +INST(4, 64) +#undef INST + +#define INST(_size_bits, _alignbits) \ + template void megdnn::cuda::cutlass_wrapper::reorder_nhwc_imma_filter< \ + _size_bits, _alignbits>( \ + int8_t * dst_filter, const int8_t* src_filter, uint32_t OC, \ + uint32_t IC, uint32_t FH, uint32_t FW, bool trans_oc, \ + uint32_t oc_interleaved, cudaStream_t stream); +INST(4, 32) +INST(4, 64) +INST(4, 128) +#undef INST + +// vim: 
syntax=cuda.doxygen diff --git a/cuda_code/d_a_star_kernels.cu b/cuda_code/d_a_star_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..ff9de8d9abb535f660114d758768aa8ec6462cb6 --- /dev/null +++ b/cuda_code/d_a_star_kernels.cu @@ -0,0 +1,879 @@ +#ifdef __NVCC__ + +// __device__ volatile int PQ[MAX_NODE]; + + +//K in parallel +template +__global__ void extractMin(unsigned int* PQ, unsigned int* PQ_size, int* expandNodes,int* expandNodes_size,U* Cx,int* openList,int N,int K){ + + int id = blockIdx.x*blockDim.x+threadIdx.x; + + if(id0){ + + //extract min from PQ + int front = id* ( (N+K-1)/K ); + int node = PQ[front]; + + // restructure the heap + PQ[front]=PQ[front+PQ_size[id]-1]; + PQ_size[id]-=1; + int pqIndex = 0; + + while(2*pqIndex+1 < PQ_size[id]){ + if(2*pqIndex+2 >= PQ_size[id]){ + if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){ + int swap = PQ[front + 2*pqIndex+1]; + PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; + PQ[front + pqIndex] = swap; + pqIndex = 2*pqIndex+1; + } + else + break; + } + else{ + if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){ + int swap = PQ[front + 2*pqIndex+1]; + PQ[front + 2*pqIndex+1] = PQ[front +pqIndex]; + PQ[front + pqIndex] = swap; + pqIndex = 2*pqIndex+1; + } + else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){ + int swap = PQ[front + 2*pqIndex+2]; + PQ[front + 2*pqIndex+2] = PQ[front +pqIndex]; + PQ[front + pqIndex] = swap; + pqIndex = 2*pqIndex+2; + } + else{ + break; + } + } + + } + + //removed from openList + openList[node] = -1; + + //added to expand next + int len = atomicAdd(expandNodes_size,1); + expandNodes[len]=node; + } + +} + + +//for K in parallel +template +__global__ void A_star_expand(int* off,int* edge, T* W,U* Hx,int* parent,volatile U* Cx, + int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,int* nVFlag, + int N,int E, int K,int dest, + int flagDiff,int dE, + int* diff_off,int* diff_edge,unsigned int* diff_weight ){ + + int id = blockIdx.x*blockDim.x+threadIdx.x; + + if(id< *expandNodes_size ){ + + int node = expandNodes[id]; + + //reach dest + if(node == dest){ + atomicOr(flagfound,1); + } + + // expand + int start = off[node]; + int end = E; + if(node!=N-1) + end = off[node+1]; + + while(start < end){ + int child = edge[start]; + + //deleted edges + if(child<0){ + start++; + continue; + } + + //array L initilaized with 0 + //get the lock for child to update C(x) + //loop till acquire the lock + bool leaveLoop = false; + + while(leaveLoop==false){ + + if(atomicCAS(&lock[child],0,1)==0){ + //critical section + if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ + Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; + __threadfence(); + parent[child] = node; + + if(openList[child]==-1){ + nVFlag[child]=1; + //add only once + } + } + + //end critical section + leaveLoop = true; + + atomicCAS(&lock[child],1,0); + + } + + __syncthreads(); + + } + + start++; + } + + //diff expand + if(flagDiff){ + + start = diff_off[node]; + end = dE; + if(node!=N-1) + end = diff_off[node+1]; + + while(start (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ + Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; + __threadfence(); + parent[child] = node; + + if(openList[child]==-1){ + nVFlag[child]=1; + //add only once + } + } + + //end critical section + leaveLoop = true; + + atomicCAS(&lock[child],1,0); + + } + + __syncthreads(); + + } + + 
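+            // move on to the next dynamically inserted (diff) edge of this node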
start++; + } + + } + //end diff + + }//end + +} + + +//K in parallel -- O(N) +template +__global__ void keepHeapPQ(unsigned int* PQ,unsigned int* PQ_size,U* Cx,int N,int K){ + int id = blockIdx.x*blockDim.x+threadIdx.x; + if(id < K && PQ_size[id] > 0){ + int front = id*( (N+K-1)/K ); + int size = PQ_size[id]; + + for(int i=front;i costLeft || cost > costRight ){ + int index ; + if(costLeft <= costRight) + index = 2*i+1; + else + index = 2*i+2; + + while(index > front){ + if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){ + int swap = PQ[index]; + PQ[index] = PQ[(index-1)/2]; + PQ[(index-1)/2] = swap; + index = (index-1)/2; + } + else + break; + } + } + } + else if(2*i+1 < front+size){ + if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){ + int index = 2*i+1; + while(index > front){ + if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){ + int swap = PQ[index]; + PQ[index] = PQ[(index-1)/2]; + PQ[(index-1)/2] = swap; + index = (index-1)/2; + } + else + break; + } + } + } + } + } +} + +//N threads +__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){ + int id = blockIdx.x*blockDim.x+threadIdx.x; + if(id < N){ + if(nextFlag[id]==1){ + int index = atomicAdd(nvSize,1); + nextV[index]=id; + } + } +} + + +//for K in parallel +template +__global__ void insertPQ(unsigned int* PQ,unsigned int* PQS,int* nextV,int* nVsize,U* Cx,int K,int N,int* openList){ + int id = blockIdx.x*blockDim.x+threadIdx.x; + if(id < K){ + + int front = id*( (N+K-1)/K ); + int i = id; + + while(i<*nVsize){ + //if not already present + if(openList[nextV[i]]!=-1){ + i+=K; + continue; + } + + PQ[front+PQS[id]]= nextV[i]; + PQS[id]+=1; + + //add in openList + openList[nextV[i]] = id; + + if(PQS[id]>1){ + int index = PQS[id]-1; + while(index>0){ + if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){ + int swap = PQ[front+index]; + PQ[front+index]=PQ[front+ (index-1)/2]; + PQ[front+ (index-1)/2] = swap; + index = (index-1)/2; + } + else + break; + } + } + i += K; + } + } +} + + +//for K in parallel +template +__global__ void checkMIN(unsigned int* PQ, unsigned int* PQ_size,int* flagEnd,U* Cx,int dest,int N,int K){ + int id = blockIdx.x*blockDim.x+threadIdx.x; + + if(id < K && PQ_size[id] > 0 ){ + int front = id* ( (N+K-1)/K ); + int node = PQ[front]; + //check if atleast one min, dont end the a* + if( Cx[node] < Cx[dest] ){ + atomicAnd(flagEnd,0); + } + } +} + +template +__global__ void propogateDel(int* delEdgesV,int delEdge, volatile U* Cx, + int* rev_offset,int* rev_edges,T* rev_weight,int N,int E, + U* Hx,volatile int* parent,int* parent_old,int* addFlag, + int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight,int dE){ + + int id = blockIdx.x*blockDim.x+threadIdx.x; + + if(id0){ + if(ancestor==node){ + flag_cycle = true; + break; + } + ancestor = parent_old[ancestor]; + + } + + + //no need to lock only single parent so only one node in array so one node per thread + if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){ + cost = (Cx[p]-Hx[p] )+weight+Hx[node]; + opt_parent = p; + } + + start++; + } + + start = rev_diff_offset[node]; + end = dE; + if(node!=N-1) + end = rev_diff_offset[node+1]; + + while(start< end){ + int p = rev_diff_edges[start]; + + //del edges + if(p<0 || p==node){ + start++; + continue; + } + + int weight = rev_diff_weight[start]; + int flag_cycle = false; + + //check parent doesn't contain node + int ancestor = parent_old[p]; + + while(ancestor!=-1){ + if(ancestor==node){ + flag_cycle = true; + break; + } + ancestor = parent_old[ancestor]; + + } + + //no need to lock only single parent so only one node 
in array so one node per thread + if(!flag_cycle && Cx[p]!=INT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){ + cost = (Cx[p]-Hx[p] )+weight+Hx[node]; + opt_parent = p; + } + + start++; + } + + //write here + if(cost!=INT_MAX){ + Cx[node]=cost; + parent[node]=opt_parent; + } + + } + +} + +//add inserted edges to propogate +template +__global__ void propogateAdd(int* diff_off, int* diff_edges,T* diff_W,U* Hx,int* addFlag, + volatile U* Cx,int* lock, int* parent, int* parent_old, int N, int dE){ + + int id = blockIdx.x*blockDim.x+threadIdx.x; + + if(id < N){ + int node = id; + + int start = diff_off[node]; + int end = dE; + if(node!=N-1) + end = diff_off[node+1]; + + while(start < end ){ + int child = diff_edges[start]; + + //deleted edges + if(child<0){ + start++; + continue; + } + + //array L initilaized with 0 + //get the lock for child to update C(x) + //loop till acquire the lock + bool leaveLoop = false; + + while(!leaveLoop){ + + if(atomicCAS(&lock[child],0,1)==0){ + //critical section + bool flag_cycle = false; + + int ancestor = node; + while(ancestor > 0){ + if(ancestor==child){ + flag_cycle = true; + break; + } + ancestor = parent_old[ancestor]; + + } + + if(!flag_cycle && Cx[node] != INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){ + + Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]; + + parent[child] = node; + __threadfence(); + + addFlag[child]=1; + + } + + //end critical section + leaveLoop = true; + + atomicCAS(&lock[child],1,0); + } + + __syncthreads(); + } + + start++; + } + } + +} + + +template +__global__ void insert_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx, + int N,int E,volatile U* Cx,int* lock, int* parent,int* addFlag, + int* diff_off,int* diff_edge,T* diff_W,int dE){ + + int id = blockIdx.x*blockDim.x+threadIdx.x; + if(id < *size){ + int node = nodes[id]; + int start = off[node]; + int end = E; + if(node!=N-1) + end = off[node+1]; + while(start < end ){ + int child = edge[start]; + + //deleted edges + if(child<0){ + start++; + continue; + } + bool leaveLoop = false; + + while(!leaveLoop){ + + if(atomicExch(&lock[child],1)==0){ + + if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ + Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; + __threadfence(); + parent[child] = node; + + addFlag[child]=1; + + } + + leaveLoop = true; + atomicExch(&lock[child],0); + } + __syncthreads(); + } + + start++; + } + + start = diff_off[node]; + end = dE; + if(node!=N-1) + end = diff_off[node+1]; + + while(start < end ){ + int child = diff_edge[start]; + + //deleted edges + if(child<0){ + start++; + continue; + } + bool leaveLoop = false; + + while(!leaveLoop){ + + if(atomicCAS(&lock[child],0,1)==0){ + //critical section + + if(Cx[node]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child] ){ + Cx[child] = (Cx[node] - Hx[node])+ diff_W[start]+ Hx[child]; + __threadfence(); + parent[child] = node; + + addFlag[child]=1; + + + } + + //end critical section + leaveLoop = true; + + atomicCAS(&lock[child],1,0); + + } + + __syncthreads(); + + } + + start++; + } + + } + +} + +template +__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,T* W,U* Hx, + int N,int E,volatile U* Cx,int* lock, int* parent,int* parent_old,int* addFlag, + int* diff_off,int* diff_edge,T* diff_W,int dE, + int* rev_offset,int* rev_edges,T* rev_weight, + int* rev_diff_offset,int* rev_diff_edges,T* rev_diff_weight){ + + int id = blockIdx.x*blockDim.x+threadIdx.x; + if(id < *size){ + int 
node = nodes[id]; + int start = off[node]; + int end = E; + if(node!=N-1) + end = off[node+1]; + + while(start < end ){ + int child = edge[start]; + if(child<0){ + start++; + continue; + } + + bool leaveLoop = false; + + while(!leaveLoop){ + + if(atomicExch(&lock[child],1)==0){ + if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ + Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; + __threadfence(); + parent[child] = node; + + addFlag[child]=1; + + } + else + if( (Cx[node]==INT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ W[start]+ Hx[child]) ) ){ + //use back edges + int rstart = rev_offset[child]; + int rend = E; + if(child!=N-1) + rend = rev_offset[child+1]; + + //there is always one parent that is node. + Cx[child] = INT_MAX; + parent[child]=-1; + + while(rstart < rend){ + int p = rev_edges[rstart]; + if(p<0 || p == child){ + rstart++; + continue; + } + + int weight = rev_weight[rstart]; + bool flag_cycle = false; + + //check parent doesn't contain child + + int ancestor = parent_old[p]; + + while(ancestor > 0){ + if(ancestor==child){ + flag_cycle = true; + break; + } + ancestor = parent_old[ancestor]; + } + + if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ + Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; + parent[child] = p; + } + + rstart++; + } + + rstart = rev_diff_offset[child]; + rend = dE; + if(child!=N-1) + rend = rev_diff_offset[child+1]; + + while(rstart < rend){ + int p = rev_diff_edges[rstart]; + + if(p<0 || p==child){ + rstart++; + continue; + } + + int weight = rev_diff_weight[rstart]; + int flag_cycle = false; + + //check parent doesn't contain child + int ancestor = parent_old[p]; + while(ancestor!=-1){ + if(ancestor==child){ + flag_cycle = true; + break; + } + + ancestor = parent_old[ancestor]; + } + + if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ + Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; + parent[child] = p; + } + + rstart++; + } + + addFlag[child]=1; + } + + + leaveLoop = true; + + atomicExch(&lock[child],0); + + } + + __syncthreads(); + } + + start++; + + } + + + start = diff_off[node]; + end = dE; + if(node!=N-1) + end = diff_off[node+1]; + + while(start < end ){ + int child = diff_edge[start]; + + //deleted edges + if(child<0){ + start++; + continue; + } + + //array L initilaized with 0 + //get the lock for child to update C(x) + //loop till acquire the lock + bool leaveLoop = false; + + while(!leaveLoop){ + + if(atomicCAS(&lock[child],0,1)==0){ + if(Cx[node]!=INT_MAX && Cx[child]!=INT_MAX && Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){ + Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child]; + __threadfence(); + parent[child] = node; + + addFlag[child]=1; + + } + else + if((Cx[node]==INT_MAX && parent[child]==node )|| ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ diff_W[start]+ Hx[child]) ) ){ + //use back edges + int rstart = rev_offset[child]; + int rend = E; + if(child!=N-1) + rend = rev_offset[child+1]; + + //there is always one parent that is node. 
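+                        // Invalidate the child and rebuild its best cost from
+                        // the remaining in-edges (original and diff reverse
+                        // adjacency lists), skipping any candidate parent whose
+                        // ancestor chain already contains the child, since that
+                        // would form a cycle.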
+ Cx[child] = INT_MAX; + parent[child]=-1; + + while(rstart < rend){ + int p = rev_edges[rstart]; + + if(p<0 || p ==child){ + rstart++; + continue; + } + + int weight = rev_weight[rstart]; + int flag_cycle = false; + + //check parent doesn't contain child + int ancestor = parent_old[p]; + while(ancestor!=-1){ + if(ancestor==child) + flag_cycle = true; + ancestor = parent_old[ancestor]; + } + + + if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ + Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; + parent[child] = p; + } + + rstart++; + } + + rstart = rev_diff_offset[child]; + rend = dE; + if(child!=N-1) + rend = rev_diff_offset[child+1]; + + while(rstart < rend){ + int p = rev_diff_edges[rstart]; + + if(p<0 || p==child){ + rstart++; + continue; + } + + int weight = rev_diff_weight[rstart]; + int flag_cycle = false; + + //check parent doesn't contain child + int ancestor = parent_old[p]; + while(ancestor!=-1){ + if(ancestor==child){ + flag_cycle = true; + break; + } + + ancestor = parent_old[ancestor]; + } + + if(!flag_cycle && Cx[p]!=INT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){ + Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child]; + parent[child] = p; + } + + rstart++; + } + + addFlag[child]=1; + } + + + //end critical section + leaveLoop = true; + + atomicCAS(&lock[child],1,0); + } + + __syncthreads(); + + } + + start++; + } + + } + +} + +//do in 1 thread +template +__global__ void insertDest(unsigned int* PQ,unsigned int* PQ_size,U* Cx,int dest,int* openList){ + int id = 0; + int front = 0; + if(openList[dest]==-1){ + PQ[front+PQ_size[id]]= dest; + PQ_size[id]+=1; + + //add in openList + openList[dest] = id; + + if(PQ_size[id]>1){ + int index = PQ_size[id]-1; + while(index>0){ + if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){ + int swap = PQ[front+index]; + PQ[front+index]=PQ[front+ (index-1)/2]; + PQ[front+ (index-1)/2] = swap; + index = (index-1)/2; + } + else + break; + } + } + } + +} + +template +__global__ void getCx(U* Cx,int dest,U* val){ + int id = blockIdx.x*blockDim.x+threadIdx.x; + if(id==0){ + *val = Cx[dest]; + } +} + + + +#endif \ No newline at end of file diff --git a/cuda_code/data_normalizer_6.cu b/cuda_code/data_normalizer_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..12f0b0d9813758b95981d5c723c0ab120842b321 --- /dev/null +++ b/cuda_code/data_normalizer_6.cu @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +#include +#include + +namespace nvtext { +namespace detail { +namespace { + +/** + * @brief Bit used to filter out invalid code points. + * + * When normalizing characters to code point values, if this bit is set, + * the code point should be filtered out before returning from the normalizer. + */ +constexpr uint32_t FILTER_BIT = 22; + +/** + * @brief Retrieve new code point from metadata value. + * + * @param metadata Value from the codepoint_metadata table. 
+ * @return The replacement character if appropriate. + */ +__device__ uint32_t get_first_cp(uint32_t metadata) { return metadata & NEW_CP_MASK; } + +/** + * @brief Retrieve token category from the metadata value. + * + * Category values are 0-5: + * 0 - character should be padded + * 1 - pad character if lower-case + * 2 - character should be removed + * 3 - remove character if lower-case + * 4 - whitespace character -- always replace + * 5 - uncategorized + * + * @param metadata Value from the codepoint_metadata table. + * @return Category value. + */ +__device__ uint32_t extract_token_cat(uint32_t metadata) +{ + return (metadata >> TOKEN_CAT_SHIFT) & TOKEN_CAT_MASK; +} + +/** + * @brief Return true if category of metadata value specifies the character should be replaced. + */ +__device__ bool should_remove_cp(uint32_t metadata, bool lower_case) +{ + auto const cat = extract_token_cat(metadata); + return (cat == TOKEN_CAT_REMOVE_CHAR) || (lower_case && (cat == TOKEN_CAT_REMOVE_CHAR_IF_LOWER)); +} + +/** + * @brief Return true if category of metadata value specifies the character should be padded. + */ +__device__ bool should_add_spaces(uint32_t metadata, bool lower_case) +{ + auto const cat = extract_token_cat(metadata); + return (cat == TOKEN_CAT_ADD_SPACE) || (lower_case && (cat == TOKEN_CAT_ADD_SPACE_IF_LOWER)); +} + +/** + * @brief Return true if category of metadata value specifies the character should be replaced. + */ +__device__ bool always_replace(uint32_t metadata) +{ + return extract_token_cat(metadata) == TOKEN_CAT_ALWAYS_REPLACE; +} + +/** + * @brief Returns true if metadata value includes a multi-character transform bit equal to 1. + */ +__device__ bool is_multi_char_transform(uint32_t metadata) +{ + return (metadata >> MULTICHAR_SHIFT) & MULTICHAR_MASK; +} + +/** + * @brief Returns true if the byte passed in could be a valid head byte for + * a utf8 character. That is, not binary `10xxxxxx` + */ +__device__ bool is_head_byte(unsigned char utf8_byte) { return (utf8_byte >> 6) != 2; } + +/** + * @brief Converts a UTF-8 character into a unicode code point value. + * + * If the byte at start_byte_for_thread is the first byte of a UTF-8 character (head byte), + * the UTF-8 character is converted to a unicode code point and returned. + * + * If the byte at start_byte_for_thread is not a head byte, 0 is returned. + * + * All threads start reading bytes from the pointer denoted by strings. + * + * @param strings A pointer to the start of the sequence of characters to be analyzed. + * @param start_byte_for_thread Which byte to start analyzing + * @return New code point value for this byte. + */ +__device__ uint32_t extract_code_points_from_utf8(unsigned char const* strings, + size_t const total_bytes, + uint32_t const start_byte_for_thread) +{ + constexpr uint8_t max_utf8_blocks_for_char = 4; + uint8_t utf8_blocks[max_utf8_blocks_for_char] = {0}; + + for (int i = 0; i < std::min(static_cast(max_utf8_blocks_for_char), + total_bytes - start_byte_for_thread); + ++i) { + utf8_blocks[i] = strings[start_byte_for_thread + i]; + } + + const uint8_t length_encoding_bits = utf8_blocks[0] >> 3; + // UTF-8 format is variable-width character encoding using up to 4 bytes. + // If the first byte is: + // - [x00-x7F] -- beginning of a 1-byte character (ASCII) + // - [xC0-xDF] -- beginning of a 2-byte character + // - [xE0-xEF] -- beginning of a 3-byte character + // - [xF0-xF7] -- beginning of a 3-byte character + // Anything else is an intermediate byte [x80-xBF]. 
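+  // For example, U+00E9 ('é') is encoded as 0xC3 0xA9: the head byte 0xC3
+  // lies in [xC0-xDF] (a 2-byte character) and 0xA9 in [x80-xBF] (a
+  // continuation byte). Heads in [xF0-xF7] begin 4-byte characters
+  // (e.g. U+1F600 is 0xF0 0x9F 0x98 0x80), as the length table below reflects.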
+ // So shifted by 3 bits this becomes + // - [x00-x0F] or leb < 16 + // - [x18-x1B] or 24 <= leb <= 27 + // - [x1C-x1D] or 28 <= leb <= 29 + // - [x1E-x1F] or leb >= 30 + // The remaining bits are part of the value as specified by the mask + // specified by x's below. + // - b0xxxxxxx = x7F + // - b110xxxxx = x1F + // - b1110xxxx = x0F + // - b11110xxx = x07 + using encoding_length_pair = thrust::pair; + // Set the number of characters and the top masks based on the length encoding bits. + encoding_length_pair const char_encoding_length = [length_encoding_bits] { + if (length_encoding_bits < 16) return encoding_length_pair{1, 0x7F}; + if (length_encoding_bits >= 24 && length_encoding_bits <= 27) + return encoding_length_pair{2, 0x1F}; + if (length_encoding_bits == 28 || length_encoding_bits == 29) + return encoding_length_pair{3, 0x0F}; + if (length_encoding_bits == 30) return encoding_length_pair{4, 0x07}; + return encoding_length_pair{0, 0}; + }(); + + // Now pack up the bits into a uint32_t. + // Move the first set of values into bits 19-24 in the 32-bit value. + uint32_t code_point = (utf8_blocks[0] & char_encoding_length.second) << 18; + // Move the remaining values which are 6 bits (mask b10xxxxxx = x3F) + // from the remaining bytes into successive positions in the 32-bit result. + code_point |= ((utf8_blocks[1] & 0x3F) << 12); + code_point |= ((utf8_blocks[2] & 0x3F) << 6); + code_point |= utf8_blocks[3] & 0x3F; + + // Adjust the final result by shifting by the character length. + uint8_t const shift_amt = 24 - 6 * char_encoding_length.first; + code_point >>= shift_amt; + return code_point; +} + +/** + * @brief Normalize the characters for the strings input. + * + * Characters are replaced, padded, or removed depending on the `do_lower_case` input + * as well as the metadata values for each code point found in `cp_metadata`. + * + * First, each character is converted from UTF-8 to a unicode code point value. + * This value is then looked up in the `cp_metadata` table to determine its fate. + * The end result is a set of code point values for each character. + * The normalized set of characters make it easier for the tokenizer to identify + * tokens and match up token ids. + * + * @param[in] strings The input strings with characters to normalize to code point values. + * @param[in] total_bytes Total number of bytes in the input `strings` vector. + * @param[in] cp_metadata The metadata lookup table for every unicode code point value. + * @param[in] aux_table Aux table for mapping some multi-byte code point values. + * @param[in] do_lower_case True if normalization should include lower-casing. + * @param[out] code_points The resulting code point values from normalization. + * @param[out] chars_per_thread Output number of code point values per string. 
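+ *                             (one value is written per input byte/thread;
+ *                             the caller prefix-sums this array afterwards to
+ *                             recover per-string lengths)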
+ */ +__global__ void kernel_data_normalizer(unsigned char const* strings, + size_t const total_bytes, + uint32_t const* cp_metadata, + uint64_t const* aux_table, + bool const do_lower_case, + uint32_t* code_points, + uint32_t* chars_per_thread) +{ + constexpr uint32_t init_val = (1 << FILTER_BIT); + uint32_t replacement_code_points[MAX_NEW_CHARS] = {init_val, init_val, init_val}; + + uint32_t const char_for_thread = blockDim.x * blockIdx.x + threadIdx.x; + uint32_t num_new_chars = 0; + + if (char_for_thread < total_bytes) { + auto const code_point = extract_code_points_from_utf8(strings, total_bytes, char_for_thread); + auto const metadata = cp_metadata[code_point]; + + if (is_head_byte(strings[char_for_thread]) && !should_remove_cp(metadata, do_lower_case)) { + num_new_chars = 1; + // Apply lower cases and accent stripping if necessary + auto const new_cp = + do_lower_case || always_replace(metadata) ? get_first_cp(metadata) : code_point; + replacement_code_points[0] = new_cp == 0 ? code_point : new_cp; + + if (do_lower_case && is_multi_char_transform(metadata)) { + auto const next_cps = aux_table[code_point]; + replacement_code_points[1] = static_cast(next_cps >> 32); + auto const potential_next_cp = static_cast(next_cps); + replacement_code_points[2] = + potential_next_cp != 0 ? potential_next_cp : replacement_code_points[2]; + num_new_chars = 2 + (potential_next_cp != 0); + } + + if (should_add_spaces(metadata, do_lower_case)) { + // Need to shift all existing code-points up one + // This is a rotate right. There is no thrust equivalent at this time. + for (int loc = num_new_chars; loc > 0; --loc) { + replacement_code_points[loc] = replacement_code_points[loc - 1]; + } + + // Write the required spaces at the end + replacement_code_points[0] = SPACE_CODE_POINT; + replacement_code_points[num_new_chars + 1] = SPACE_CODE_POINT; + num_new_chars += 2; + } + } + } + + chars_per_thread[char_for_thread] = num_new_chars; + + typedef cub:: + BlockStore + BlockStore; + __shared__ typename BlockStore::TempStorage temp_storage; + + // Now we perform coalesced writes back to global memory using cub. 
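+  // Every thread stores exactly MAX_NEW_CHARS slots; slots that were never
+  // filled still carry the FILTER_BIT sentinel they were initialized with and
+  // are stripped out by the caller using thrust::remove.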
+ uint32_t* block_base = code_points + blockIdx.x * blockDim.x * MAX_NEW_CHARS; + BlockStore(temp_storage).Store(block_base, replacement_code_points); +} + +} // namespace + +data_normalizer::data_normalizer(cudaStream_t stream, bool do_lower_case) + : do_lower_case(do_lower_case) +{ + d_cp_metadata = detail::get_codepoint_metadata(stream); + d_aux_table = detail::get_aux_codepoint_data(stream); +} + +uvector_pair data_normalizer::normalize(char const* d_strings, + uint32_t const* d_offsets, + uint32_t num_strings, + cudaStream_t stream) +{ + if (num_strings == 0) + return std::make_pair(std::make_unique>(0, stream), + std::make_unique>(0, stream)); + + auto const execpol = rmm::exec_policy(stream); + // copy offsets to working memory + size_t const num_offsets = num_strings + 1; + auto d_strings_offsets = std::make_unique>(num_offsets, stream); + thrust::transform(execpol->on(stream), + thrust::make_counting_iterator(0), + thrust::make_counting_iterator(num_offsets), + d_strings_offsets->begin(), + [d_offsets] __device__(auto idx) { + auto const offset = d_offsets[0]; // adjust for any offset to the offsets + return d_offsets[idx] - offset; + }); + uint32_t const bytes_count = d_strings_offsets->element(num_strings, stream); + if (bytes_count == 0) // if no bytes, nothing to do + return std::make_pair(std::make_unique>(0, stream), + std::make_unique>(0, stream)); + + cudf::detail::grid_1d const grid{static_cast(bytes_count), THREADS_PER_BLOCK, 1}; + size_t const threads_on_device = grid.num_threads_per_block * grid.num_blocks; + size_t const max_new_char_total = MAX_NEW_CHARS * threads_on_device; + + auto d_code_points = std::make_unique>(max_new_char_total, stream); + rmm::device_uvector d_chars_per_thread(threads_on_device, stream); + + kernel_data_normalizer<<>>( + reinterpret_cast(d_strings), + bytes_count, + d_cp_metadata, + d_aux_table, + do_lower_case, + d_code_points->data(), + d_chars_per_thread.data()); + + // Remove the 'empty' code points from the vector + thrust::remove( + execpol->on(stream), d_code_points->begin(), d_code_points->end(), uint32_t{1 << FILTER_BIT}); + + // We also need to prefix sum the number of characters up to an including + // the current character in order to get the new strings lengths. + thrust::inclusive_scan(execpol->on(stream), + d_chars_per_thread.begin(), + d_chars_per_thread.end(), + d_chars_per_thread.begin()); + + // This will reset the offsets to the new generated code point values + thrust::for_each_n( + execpol->on(stream), + thrust::make_counting_iterator(1), + num_strings, + update_strings_lengths_fn{d_chars_per_thread.data(), d_strings_offsets->data()}); + + uint32_t const num_chars = d_strings_offsets->element(num_strings, stream); + d_code_points->resize(num_chars, stream); // should be smaller than original allocated size + + // return the normalized code points and the new offsets + return uvector_pair(std::move(d_code_points), std::move(d_strings_offsets)); +} + +} // namespace detail +} // namespace nvtext diff --git a/cuda_code/dct8x8_kernel2_2.cu b/cuda_code/dct8x8_kernel2_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..4d0d5fb4dd03805a494579bc60e17efcd13b2806 --- /dev/null +++ b/cuda_code/dct8x8_kernel2_2.cu @@ -0,0 +1,445 @@ +/* + * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. + * + * NVIDIA Corporation and its licensors retain all intellectual property and + * proprietary rights in and to this software and related documentation and + * any modifications thereto. 
Any use, reproduction, disclosure, or + * distribution + * of this software and related documentation without an express license + * agreement from NVIDIA Corporation is strictly prohibited. + * + */ + +/** +************************************************************************** +* \file dct8x8_kernel2.cu +* \brief Contains 2nd kernel implementations of DCT and IDCT routines, used in +* JPEG internal data processing. Optimized device code. +* +* This code implements traditional approach to forward and inverse Discrete +* Cosine Transform to blocks of image pixels (of 8x8 size), as in JPEG standard. +* The data processing is done using floating point representation. +* The routine that performs quantization of coefficients can be found in +* dct8x8_kernel_quantization.cu file. +*/ + +#pragma once + +#include "Common.h" + +__constant__ float C_a = 1.387039845322148f; //!< a = (2^0.5) * cos( pi / + //! 16); Used in forward and +//! inverse DCT. +__constant__ float C_b = 1.306562964876377f; //!< b = (2^0.5) * cos( pi / + //! 8); Used in forward and +//! inverse DCT. +__constant__ float C_c = 1.175875602419359f; //!< c = (2^0.5) * cos(3 * pi / + //! 16); Used in forward and +//! inverse DCT. +__constant__ float C_d = 0.785694958387102f; //!< d = (2^0.5) * cos(5 * pi / + //! 16); Used in forward and +//! inverse DCT. +__constant__ float C_e = 0.541196100146197f; //!< e = (2^0.5) * cos(3 * pi / + //! 8); Used in forward and +//! inverse DCT. +__constant__ float C_f = 0.275899379282943f; //!< f = (2^0.5) * cos(7 * pi / + //! 16); Used in forward and +//! inverse DCT. + +/** +* Normalization constant that is used in forward and inverse DCT +*/ +__constant__ float C_norm = 0.3535533905932737f; // 1 / (8^0.5) + +/** +* Width of data block (2nd kernel) +*/ +#define KER2_BLOCK_WIDTH 32 + +/** +* Height of data block (2nd kernel) +*/ +#define KER2_BLOCK_HEIGHT 16 + +/** +* LOG2 of width of data block (2nd kernel) +*/ +#define KER2_BW_LOG2 5 + +/** +* LOG2 of height of data block (2nd kernel) +*/ +#define KER2_BH_LOG2 4 + +/** +* Stride of shared memory buffer (2nd kernel) +*/ +#define KER2_SMEMBLOCK_STRIDE (KER2_BLOCK_WIDTH + 1) + +/** +************************************************************************** +* Performs in-place DCT of vector of 8 elements. 
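+* The transform computed is the orthonormal 8-point DCT-II,
+*   X[k] = s(k) * sum_{n=0..7} x[n] * cos(pi*(2*n+1)*k/16),
+* with s(0) = 1/sqrt(8) and s(k>0) = 1/2; the constants C_norm and C_a..C_f
+* defined above implement this scaling in factored form.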
+* +* \param Vect0 [IN] - Pointer to the first element of vector +* \param Step [IN] - Value to add to ptr to access other +*elements +* +* \return None +*/ +__device__ void CUDAsubroutineInplaceDCTvector(float* Vect0, int Step) { + float* Vect1 = Vect0 + Step; + float* Vect2 = Vect1 + Step; + float* Vect3 = Vect2 + Step; + float* Vect4 = Vect3 + Step; + float* Vect5 = Vect4 + Step; + float* Vect6 = Vect5 + Step; + float* Vect7 = Vect6 + Step; + + float X07P = (*Vect0) + (*Vect7); + float X16P = (*Vect1) + (*Vect6); + float X25P = (*Vect2) + (*Vect5); + float X34P = (*Vect3) + (*Vect4); + + float X07M = (*Vect0) - (*Vect7); + float X61M = (*Vect6) - (*Vect1); + float X25M = (*Vect2) - (*Vect5); + float X43M = (*Vect4) - (*Vect3); + + float X07P34PP = X07P + X34P; + float X07P34PM = X07P - X34P; + float X16P25PP = X16P + X25P; + float X16P25PM = X16P - X25P; + + (*Vect0) = C_norm * (X07P34PP + X16P25PP); + (*Vect2) = C_norm * (C_b * X07P34PM + C_e * X16P25PM); + (*Vect4) = C_norm * (X07P34PP - X16P25PP); + (*Vect6) = C_norm * (C_e * X07P34PM - C_b * X16P25PM); + + (*Vect1) = C_norm * (C_a * X07M - C_c * X61M + C_d * X25M - C_f * X43M); + (*Vect3) = C_norm * (C_c * X07M + C_f * X61M - C_a * X25M + C_d * X43M); + (*Vect5) = C_norm * (C_d * X07M + C_a * X61M + C_f * X25M - C_c * X43M); + (*Vect7) = C_norm * (C_f * X07M + C_d * X61M + C_c * X25M + C_a * X43M); +} + +/** +************************************************************************** +* Performs in-place IDCT of vector of 8 elements. +* +* \param Vect0 [IN] - Pointer to the first element of vector +* \param Step [IN] - Value to add to ptr to access other +*elements +* +* \return None +*/ +__device__ void CUDAsubroutineInplaceIDCTvector(float* Vect0, int Step) { + float* Vect1 = Vect0 + Step; + float* Vect2 = Vect1 + Step; + float* Vect3 = Vect2 + Step; + float* Vect4 = Vect3 + Step; + float* Vect5 = Vect4 + Step; + float* Vect6 = Vect5 + Step; + float* Vect7 = Vect6 + Step; + + float Y04P = (*Vect0) + (*Vect4); + float Y2b6eP = C_b * (*Vect2) + C_e * (*Vect6); + + float Y04P2b6ePP = Y04P + Y2b6eP; + float Y04P2b6ePM = Y04P - Y2b6eP; + float Y7f1aP3c5dPP = + C_f * (*Vect7) + C_a * (*Vect1) + C_c * (*Vect3) + C_d * (*Vect5); + float Y7a1fM3d5cMP = + C_a * (*Vect7) - C_f * (*Vect1) + C_d * (*Vect3) - C_c * (*Vect5); + + float Y04M = (*Vect0) - (*Vect4); + float Y2e6bM = C_e * (*Vect2) - C_b * (*Vect6); + + float Y04M2e6bMP = Y04M + Y2e6bM; + float Y04M2e6bMM = Y04M - Y2e6bM; + float Y1c7dM3f5aPM = + C_c * (*Vect1) - C_d * (*Vect7) - C_f * (*Vect3) - C_a * (*Vect5); + float Y1d7cP3a5fMM = + C_d * (*Vect1) + C_c * (*Vect7) - C_a * (*Vect3) + C_f * (*Vect5); + + (*Vect0) = C_norm * (Y04P2b6ePP + Y7f1aP3c5dPP); + (*Vect7) = C_norm * (Y04P2b6ePP - Y7f1aP3c5dPP); + (*Vect4) = C_norm * (Y04P2b6ePM + Y7a1fM3d5cMP); + (*Vect3) = C_norm * (Y04P2b6ePM - Y7a1fM3d5cMP); + + (*Vect1) = C_norm * (Y04M2e6bMP + Y1c7dM3f5aPM); + (*Vect5) = C_norm * (Y04M2e6bMM - Y1d7cP3a5fMM); + (*Vect2) = C_norm * (Y04M2e6bMM + Y1d7cP3a5fMM); + (*Vect6) = C_norm * (Y04M2e6bMP - Y1c7dM3f5aPM); +} + +/** +************************************************************************** +* Performs 8x8 block-wise Forward Discrete Cosine Transform of the given +* image plane and outputs result to the array of coefficients. 2nd +*implementation. +* This kernel is designed to process image by blocks of blocks8x8 that +* utilizes maximum warps capacity, assuming that it is enough of 8 threads +* per block8x8. 
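+* A launch configuration consistent with the indexing below would be the
+* following (an assumption for illustration: the host-side launch is not part
+* of this file, ImgWidth/ImgHeight/PlaneF are placeholder names, and BLOCK_SIZE
+* from Common.h is taken to be 8 as the commented-out index math suggests):
+*   dim3 block(BLOCK_SIZE, KER2_BLOCK_WIDTH / BLOCK_SIZE, KER2_BLOCK_HEIGHT / BLOCK_SIZE); // (8, 4, 2)
+*   dim3 grid(ImgWidth / KER2_BLOCK_WIDTH, ImgHeight / KER2_BLOCK_HEIGHT);
+*   CUDAkernel2DCT<<<grid, block>>>(PlaneF, ImgStride);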
+* +* \param SrcDst [OUT] - Coefficients +*plane +* \param ImgStride [IN] - Stride of SrcDst +* +* \return None +*/ +__global__ void CUDAkernel2DCT(float* SrcDst, int ImgStride) { + __shared__ float block[KER2_BLOCK_HEIGHT * KER2_SMEMBLOCK_STRIDE]; + + register int reg1, reg2; + + // int OffsThreadInRow = FMUL(threadIdx.y, BLOCK_SIZE) + threadIdx.x; + reg1 = threadIdx.y; + reg1 <<= BLOCK_SIZE_LOG2; + reg1 += threadIdx.x; + int OffsThreadInRow = reg1; + + // int OffsThreadInCol = FMUL(threadIdx.z, BLOCK_SIZE); + reg1 = threadIdx.z; + reg1 <<= BLOCK_SIZE_LOG2; + int OffsThreadInCol = reg1; + + // SrcDst += FMUL(FMUL(blockIdx.y, KER2_BLOCK_HEIGHT) + OffsThreadInCol, + // ImgStride) + FMUL(blockIdx.x, KER2_BLOCK_WIDTH) + OffsThreadInRow; + reg1 = blockIdx.y; + reg1 <<= KER2_BH_LOG2; + reg1 += OffsThreadInCol; + reg1 = FMUL(reg1, ImgStride); + reg2 = blockIdx.x; + reg2 <<= KER2_BW_LOG2; + reg1 += reg2; + reg1 += OffsThreadInRow; + SrcDst += reg1; + + // float *bl_ptr = block + FMUL(OffsThreadInCol, KER2_SMEMBLOCK_STRIDE) + + // OffsThreadInRow; + reg1 = OffsThreadInCol; + reg1 = FMUL(reg1, KER2_SMEMBLOCK_STRIDE); + reg1 += OffsThreadInRow; + float* bl_ptr = block + reg1; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + +#ifdef __DEVICE_EMULATION__ + __syncthreads(); +#endif + + // process columns + CUDAsubroutineInplaceDCTvector( + block + OffsThreadInCol * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow, + KER2_SMEMBLOCK_STRIDE); + +#ifdef __DEVICE_EMULATION__ + __syncthreads(); +#endif + + // process rows + CUDAsubroutineInplaceDCTvector( + block + (OffsThreadInCol + threadIdx.x) * KER2_SMEMBLOCK_STRIDE + + OffsThreadInRow - threadIdx.x, + 1); + +#ifdef __DEVICE_EMULATION__ + __syncthreads(); +#endif + + bl_ptr = block + reg1; + SrcDst -= FMUL(ImgStride, 7); + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); +} + +/** +************************************************************************** +* Performs 8x8 block-wise Inverse Discrete Cosine Transform of the given +* coefficients plane and outputs result to the image. 2nd implementation. +* This kernel is designed to process image by blocks of blocks8x8 that +* utilizes maximum warps capacity, assuming that it is enough of 8 threads +* per block8x8. 
+* +* \param SrcDst [OUT] - Coefficients +*plane +* \param ImgStride [IN] - Stride of SrcDst +* +* \return None +*/ +__global__ void CUDAkernel2IDCT(float* SrcDst, int ImgStride) { + __shared__ float block[KER2_BLOCK_HEIGHT * KER2_SMEMBLOCK_STRIDE]; + + register int reg1, reg2; + + // int OffsThreadInRow = FMUL(threadIdx.y, BLOCK_SIZE) + threadIdx.x; + reg1 = threadIdx.y; + reg1 <<= BLOCK_SIZE_LOG2; + reg1 += threadIdx.x; + int OffsThreadInRow = reg1; + + // int OffsThreadInCol = FMUL(threadIdx.z, BLOCK_SIZE); + reg1 = threadIdx.z; + reg1 <<= BLOCK_SIZE_LOG2; + int OffsThreadInCol = reg1; + + // SrcDst += FMUL(FMUL(blockIdx.y, KER2_BLOCK_HEIGHT) + OffsThreadInCol, + // ImgStride) + FMUL(blockIdx.x, KER2_BLOCK_WIDTH) + OffsThreadInRow; + reg1 = blockIdx.y; + reg1 <<= KER2_BH_LOG2; + reg1 += OffsThreadInCol; + reg1 = FMUL(reg1, ImgStride); + reg2 = blockIdx.x; + reg2 <<= KER2_BW_LOG2; + reg1 += reg2; + reg1 += OffsThreadInRow; + SrcDst += reg1; + + // float *bl_ptr = block + FMUL(OffsThreadInCol, KER2_SMEMBLOCK_STRIDE) + + // OffsThreadInRow; + reg1 = OffsThreadInCol; + reg1 = FMUL(reg1, KER2_SMEMBLOCK_STRIDE); + reg1 += OffsThreadInRow; + float* bl_ptr = block + reg1; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(bl_ptr) = *(SrcDst); + +#ifdef __DEVICE_EMULATION__ + __syncthreads(); +#endif + + // process columns + CUDAsubroutineInplaceIDCTvector( + block + OffsThreadInCol * KER2_SMEMBLOCK_STRIDE + OffsThreadInRow, + KER2_SMEMBLOCK_STRIDE); + +#ifdef __DEVICE_EMULATION__ + __syncthreads(); +#endif + + // process rows + CUDAsubroutineInplaceIDCTvector( + block + (OffsThreadInCol + threadIdx.x) * KER2_SMEMBLOCK_STRIDE + + OffsThreadInRow - threadIdx.x, + 1); + +#ifdef __DEVICE_EMULATION__ + __syncthreads(); +#endif + + bl_ptr = block + reg1; + SrcDst -= FMUL(ImgStride, 7); + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); + SrcDst += ImgStride; + bl_ptr += KER2_SMEMBLOCK_STRIDE; + + *(SrcDst) = *(bl_ptr); +} diff --git a/cuda_code/deform_conv_op_2.cu b/cuda_code/deform_conv_op_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..f0f3987130152bd9de6be866344f5641f580f231 --- /dev/null +++ b/cuda_code/deform_conv_op_2.cu @@ -0,0 +1,630 @@ +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. 
+ * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer ******************** + * + * Copyright (c) 2017 Microsoft + * Licensed under The Apache-2.0 License [see LICENSE for details] + * \file deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. 
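+ * The offsets are supplied per output location and per kernel tap, so each
+ * tap samples the input at a fractional position; the sampled value is
+ * obtained by bilinear interpolation (see deformable_im2col_bilinear below).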
+ * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai + */ + +#include +#include +#include "caffe2/core/common.h" +#include "caffe2/core/context_gpu.h" +#include "caffe2/operators/deform_conv_op.h" +#include "caffe2/operators/deform_conv_op_impl.h" + +namespace caffe2 { + +typedef int64_t index_t; +typedef std::vector TShape; + +template +__device__ DType deformable_im2col_bilinear( + const DType* bottom_data, + const int data_width, + const int height, + const int width, + DType h, + DType w) { + int h_low = floor(h); + int w_low = floor(w); + int h_high; + int w_high; + if (h_low >= height - 1) { + h_high = h_low = height - 1; + h = (DType)h_low; + } else { + h_high = h_low + 1; + } + + if (w_low >= width - 1) { + w_high = w_low = width - 1; + w = (DType)w_low; + } else { + w_high = w_low + 1; + } + + DType lh = h - h_low; + DType lw = w - w_low; + DType hh = 1 - lh, hw = 1 - lw; + + DType v1 = bottom_data[h_low * data_width + w_low]; + DType v2 = bottom_data[h_low * data_width + w_high]; + DType v3 = bottom_data[h_high * data_width + w_low]; + DType v4 = bottom_data[h_high * data_width + w_high]; + DType w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + DType val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ DType get_gradient_weight( + DType argmax_h, + DType argmax_w, + const int h, + const int w, + const int height, + const int width) { + if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { + // empty + return 0; + } + + argmax_h = max(argmax_h, (DType)0.0f); + argmax_w = max(argmax_w, (DType)0.0f); + + int argmax_h_low = (int)argmax_h; + int argmax_w_low = (int)argmax_w; + int argmax_h_high; + int argmax_w_high; + if (argmax_h_low >= height - 1) { + argmax_h_high = argmax_h_low = height - 1; + argmax_h = (DType)argmax_h_low; + } else { + argmax_h_high = argmax_h_low + 1; + } + if (argmax_w_low >= width - 1) { + argmax_w_high = argmax_w_low = width - 1; + argmax_w = (DType)argmax_w_low; + } else { + argmax_w_high = argmax_w_low + 1; + } + DType weight = 0; + if (h == argmax_h_low) { + if (w == argmax_w_low) { + weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); + } else if (w == argmax_w_high) { + weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); + } + } else if (h == argmax_h_high) { + if (w == argmax_w_low) { + weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); + } else if (w == argmax_w_high) { + weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); + } + } + return weight; +} + +template +__device__ DType get_coordinate_weight( + DType argmax_h, + DType argmax_w, + const int height, + const int width, + const DType* im_data, + const int data_width, + const int bp_dir) { + if (argmax_h < 0 || argmax_h > height || argmax_w < 0 || argmax_w > width) { + // empty + return 0; + } + + if (argmax_h < 0) + argmax_h = 0; + if (argmax_w < 0) + argmax_w = 0; + + int argmax_h_low = (int)argmax_h; + int argmax_w_low = (int)argmax_w; + int argmax_h_high; + int argmax_w_high; + if (argmax_h_low >= height - 1) { + argmax_h_high = argmax_h_low = height - 1; + argmax_h = (DType)argmax_h_low; + } else { + argmax_h_high = argmax_h_low + 1; + } + if (argmax_w_low >= width - 1) { + argmax_w_high = argmax_w_low = width - 1; + argmax_w = (DType)argmax_w_low; + } else { + argmax_w_high = argmax_w_low + 1; + } + DType weight = 0; + + if (bp_dir == 0) { + weight += -1 * (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_low * data_width + argmax_w_low]; + weight += -1 * (argmax_w - argmax_w_low) * + 
im_data[argmax_h_low * data_width + argmax_w_high]; + weight += (argmax_w_low + 1 - argmax_w) * + im_data[argmax_h_high * data_width + argmax_w_low]; + weight += (argmax_w - argmax_w_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } else if (bp_dir == 1) { + weight += -1 * (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_low]; + weight += (argmax_h_low + 1 - argmax_h) * + im_data[argmax_h_low * data_width + argmax_w_high]; + weight += -1 * (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_low]; + weight += (argmax_h - argmax_h_low) * + im_data[argmax_h_high * data_width + argmax_w_high]; + } + + return weight; +} + +/*! + * \brief deformable_im2col gpu kernel. + * DO NOT call this directly. Use wrapper function im2col() instead; + */ +template +__global__ void deformable_im2col_gpu_kernel( + const int n, + const DType* data_im, + const DType* data_offset, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int height_col, + const int width_col, + DType* data_col) { + CUDA_1D_KERNEL_LOOP(index, n) { + // index index of output matrix + const int w_col = index % width_col; + const int h_col = (index / width_col) % height_col; + const int c_im = (index / width_col) / height_col; + const int c_col = c_im * kernel_h * kernel_w; + + // compute deformable group index + const int deformable_group_index = c_im / channel_per_deformable_group; + + const int h_in = h_col * stride_h - pad_h; + const int w_in = w_col * stride_w - pad_w; + DType* data_col_ptr = + data_col + (c_col * height_col + h_col) * width_col + w_col; + const DType* data_im_ptr = data_im + (c_im * height + h_in) * width + w_in; + const DType* data_offset_ptr = data_offset + + deformable_group_index * 2 * kernel_h * kernel_w * height_col * + width_col; + + for (int i = 0; i < kernel_h; ++i) { + for (int j = 0; j < kernel_w; ++j) { + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + + w_col; + const DType offset_h = data_offset_ptr[data_offset_h_ptr]; + const DType offset_w = data_offset_ptr[data_offset_w_ptr]; + DType val = static_cast(0); + const DType h_im = h_in + i * dilation_h + offset_h; + const DType w_im = w_in + j * dilation_w + offset_w; + if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { + const DType map_h = i * dilation_h + offset_h; + const DType map_w = j * dilation_w + offset_w; + const int cur_height = height - h_in; + const int cur_width = width - w_in; + val = deformable_im2col_bilinear( + data_im_ptr, width, cur_height, cur_width, map_h, map_w); + } + *data_col_ptr = val; + data_col_ptr += height_col * width_col; + } + } + } +} + +/*!\brief + * cpu function of deformable_im2col algorithm + * \param s device stream + * \param data_im pointer of an image (C, H, W, ...) in the image batch + * \param data_offset pointer of offset (C, H, W, ...) in the offset batch + * \param im_shape input image shape in dimensions (N, C, H, W,) + * \param col_shape column buffer shape (#channels, output_im_height, + * output_im_width, ...) 
\param kernel_shape kernel filter shape \param pad pad + * shape \param stride stride shape \param dilation dilation shape \param + * deformable_group #offset group that deformable convolution use \param + * data_col column buffer pointer + */ +template +void DeformConvOpBase::DeformableIm2col( + const DType* data_im, + const DType* data_offset, + at::IntArrayRef im_shape, + at::IntArrayRef col_shape, + DType* data_col) { + CHECK_LT(2, CAFFE_CUDA_NUM_THREADS); + CAFFE_ENFORCE_EQ(pad_t(), pad_b()); + CAFFE_ENFORCE_EQ(pad_l(), pad_r()); + const int pad_h = pad_t(); + const int pad_w = pad_l(); + index_t channel_per_deformable_group = im_shape[1] / deformable_group_; + index_t num_kernels = im_shape[1] * size_from_dim_(1, col_shape); + deformable_im2col_gpu_kernel + <<>>( + num_kernels, + data_im, + data_offset, + im_shape[2], + im_shape[3], + kernel_h(), + kernel_w(), + pad_h, + pad_w, + stride_h(), + stride_w(), + dilation_h(), + dilation_w(), + channel_per_deformable_group, + col_shape[1], + col_shape[2], + data_col); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +/*! + * \brief deformable_col2im gpu kernel. + * \brief DO NOT call this directly. Use wrapper function deformable_col2im() + * instead; + */ +template +__global__ void deformable_col2im_gpu_kernel( + const int n, + const DType* data_col, + const DType* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int height_col, + const int width_col, + DType* grad_im) { + CUDA_1D_KERNEL_LOOP(index, n) { + const int j = (index / width_col / height_col) % kernel_w; + const int i = (index / width_col / height_col / kernel_w) % kernel_h; + const int c = index / width_col / height_col / kernel_w / kernel_h; + // compute the start and end of the output + + const int deformable_group_index = c / channel_per_deformable_group; + + int w_out = index % width_col; + int h_out = (index / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + + const DType* data_offset_ptr = data_offset + + deformable_group_index * 2 * kernel_h * kernel_w * height_col * + width_col; + const int data_offset_h_ptr = + ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; + const int data_offset_w_ptr = + ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; + const DType offset_h = data_offset_ptr[data_offset_h_ptr]; + const DType offset_w = data_offset_ptr[data_offset_w_ptr]; + const DType cur_inv_h_data = h_in + i * dilation_h + offset_h; + const DType cur_inv_w_data = w_in + j * dilation_w + offset_w; + + const DType cur_top_grad = data_col[index]; + const int cur_h = (int)cur_inv_h_data; + const int cur_w = (int)cur_inv_w_data; + for (int dy = -2; dy <= 2; dy++) { + for (int dx = -2; dx <= 2; dx++) { + if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && + cur_w + dx < width && + c10::cuda::compat::abs(cur_inv_h_data - (cur_h + dy)) < 1 && + c10::cuda::compat::abs(cur_inv_w_data - (cur_w + dx)) < 1) { + int cur_bottom_grad_pos = + (c * height + cur_h + dy) * width + cur_w + dx; + DType weight = get_gradient_weight( + cur_inv_h_data, + cur_inv_w_data, + cur_h + dy, + cur_w + dx, + height, + width); + atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); + } + } + } + } +} + +/*!\brief + * gpu function of deformable_col2im 
algorithm + * \param s device stream + * \param data_col start pointer of the column buffer to be filled + * \param data_offset pointer of offset (C, H, W, ...) in the offset batch + * \param im_shape input image shape in dimensions (N, C, H, W,) + * \param col_shape column buffer shape + * \param kernel_shape kernel filter shape + * \param pad pad shape + * \param stride stride shape + * \param dilation dilation shape + * \param deformable_group #offset group that deformable convolution use + * \param grad_im pointer of a image (C, H, W,...) in the image batch + */ +template +void DeformConvOpBase::DeformableCol2im( + const DType* data_col, + const DType* data_offset, + at::IntArrayRef im_shape, + at::IntArrayRef col_shape, + DType* grad_im) { + CAFFE_ENFORCE_EQ(pad_t(), pad_b()); + CAFFE_ENFORCE_EQ(pad_l(), pad_r()); + const int pad_h = pad_t(); + const int pad_w = pad_l(); + index_t im_size = size_from_dim_(1, im_shape); + index_t channel_per_deformable_group = im_shape[1] / deformable_group_; + index_t num_kernels = size_from_dim_(0, col_shape); + // num_axes should be smaller than block size + CHECK_LT(2, CAFFE_CUDA_NUM_THREADS); + // To avoid involving atomic operations, we will launch one kernel per + // bottom dimension, and then in the kernel add up the top dimensions. + // NOLINT_NEXT_LINE(whitespace/operators) + deformable_col2im_gpu_kernel + <<>>( + num_kernels, + data_col, + data_offset, + im_shape[1], + im_shape[2], + im_shape[3], + kernel_h(), + kernel_w(), + pad_h, + pad_w, + stride_h(), + stride_w(), + dilation_h(), + dilation_w(), + channel_per_deformable_group, + col_shape[1], + col_shape[2], + grad_im); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +/*! + * \brief deformable_col2im_coord gpu kernel. + * \brief DO NOT call this directly. 
Use wrapper function + * deformable_col2im_coord() instead; + */ +template +__global__ void deformable_col2im_coord_gpu_kernel( + const int n, + const DType* data_col, + const DType* data_im, + const DType* data_offset, + const int channels, + const int height, + const int width, + const int kernel_h, + const int kernel_w, + const int pad_h, + const int pad_w, + const int stride_h, + const int stride_w, + const int dilation_h, + const int dilation_w, + const int channel_per_deformable_group, + const int height_col, + const int width_col, + DType* grad_offset) { + CUDA_1D_KERNEL_LOOP(index, n) { + DType val = 0; + int w = index % width_col; + int h = (index / width_col) % height_col; + int c = index / width_col / height_col; + // compute the start and end of the output + + const int deformable_group_index = c / (2 * kernel_h * kernel_w); + const int col_step = kernel_h * kernel_w; + int cnt = 0; + const DType* data_col_ptr = data_col + + deformable_group_index * channel_per_deformable_group * width_col * + height_col; + const DType* data_im_ptr = data_im + + deformable_group_index * channel_per_deformable_group / kernel_h / + kernel_w * height * width; + const DType* data_offset_ptr = data_offset + + deformable_group_index * 2 * kernel_h * kernel_w * height_col * + width_col; + + const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; + + for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; + col_c += col_step) { + const int col_pos = ((col_c * height_col) + h) * width_col + w; + const int bp_dir = offset_c % 2; + + int j = (col_pos / width_col / height_col) % kernel_w; + int i = (col_pos / width_col / height_col / kernel_w) % kernel_h; + int w_out = col_pos % width_col; + int h_out = (col_pos / width_col) % height_col; + int w_in = w_out * stride_w - pad_w; + int h_in = h_out * stride_h - pad_h; + const int data_offset_h_ptr = + (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); + const int data_offset_w_ptr = + (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + + w_out); + const DType offset_h = data_offset_ptr[data_offset_h_ptr]; + const DType offset_w = data_offset_ptr[data_offset_w_ptr]; + DType inv_h = h_in + i * dilation_h + offset_h; + DType inv_w = w_in + j * dilation_w + offset_w; + if (inv_h < 0 || inv_w < 0 || inv_h >= height || inv_w >= width) { + inv_h = inv_w = -1; + } + const DType weight = get_coordinate_weight( + inv_h, + inv_w, + height, + width, + data_im_ptr + cnt * height * width, + width, + bp_dir); + val += weight * data_col_ptr[col_pos]; + cnt += 1; + } + + grad_offset[index] = val; + } +} + +/*!\brief + * gpu function of deformable_col2im_coord algorithm + * \param s device stream + * \param data_col start pointer of the column buffer to be filled + * \param data_im pointer of an image (C, H, W, ...) in the image batch + * \param data_offset pointer of offset (C, H, W, ...) in the offset batch + * \param im_shape input image shape in dimensions (N, C, H, W,) + * \param col_shape column buffer shape + * \param kernel_shape kernel filter shape + * \param pad pad shape + * \param stride stride shape + * \param dilation dilation shape + * \param deformable_group #offset group that deformable convolution use + * \param grad_offset pointer of the offset (C, H, W,...) 
in the offset batch + */ +template +void DeformConvOpBase::DeformableCol2imCoord( + const DType* data_col, + const DType* data_im, + const DType* data_offset, + at::IntArrayRef im_shape, + at::IntArrayRef col_shape, + DType* grad_offset) { + CAFFE_ENFORCE_EQ(pad_t(), pad_b()); + CAFFE_ENFORCE_EQ(pad_l(), pad_r()); + const int pad_h = pad_t(); + const int pad_w = pad_l(); + index_t num_kernels = col_shape[1] * col_shape[2] * 2 * kernel_h() * + kernel_w() * deformable_group_; + index_t channel_per_deformable_group = col_shape[0] / deformable_group_; + // num_axes should be smaller than block size + CHECK_LT(2, CAFFE_CUDA_NUM_THREADS); + // To avoid involving atomic operations, we will launch one kernel per + // bottom dimension, and then in the kernel add up the top dimensions. + // NOLINT_NEXT_LINE(whitespace/operators) + deformable_col2im_coord_gpu_kernel + <<>>( + num_kernels, + data_col, + data_im, + data_offset, + im_shape[1], + im_shape[2], + im_shape[3], + kernel_h(), + kernel_w(), + pad_h, + pad_w, + stride_h(), + stride_w(), + dilation_h(), + dilation_w(), + channel_per_deformable_group, + col_shape[1], + col_shape[2], + grad_offset); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +REGISTER_CUDA_OPERATOR(DeformConv, DeformConvOp); +REGISTER_CUDA_OPERATOR( + DeformConvGradient, + DeformConvGradientOp); + +} // namespace caffe2 diff --git a/cuda_code/delta2bbox_2.cu b/cuda_code/delta2bbox_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..cb5153b99b55f86bbec00baa556ed64e9770343e --- /dev/null +++ b/cuda_code/delta2bbox_2.cu @@ -0,0 +1,209 @@ +#include +#include +#include + +#include +#include + +#include "amir_cuda_util/cuda_util.h" +#include "delta2bbox.h" + +namespace amirstan { +namespace plugin { +using namespace amirstan::cuda; + +struct SMeanStd { + float mean[4]; + float std[4]; +}; + +template +__global__ void delta2bbox_kernel(T *__restrict__ out_bbox, + const T *__restrict__ in_bbox, + const T *__restrict__ anchor, + const int *__restrict__ clip_range, + int batch_size, int num_outbbox, + int num_classes, SMeanStd mean_std) { + extern __shared__ T anchor_cache[][4]; + const T max_ratio = abs(logf(16. 
/ 1000.)); + + const int class_id = blockIdx.y * blockDim.y + threadIdx.y; + const int batch_id = blockIdx.z; + if (class_id > num_classes) { + return; + } + CUDA_KERNEL_LOOP(box_id, num_outbbox) { + // load thread idx + if (threadIdx.y == 0) { + anchor_cache[threadIdx.x][0] = anchor[box_id * 4]; + anchor_cache[threadIdx.x][1] = anchor[box_id * 4 + 1]; + anchor_cache[threadIdx.x][2] = anchor[box_id * 4 + 2]; + anchor_cache[threadIdx.x][3] = anchor[box_id * 4 + 3]; + } + __syncthreads(); + + const int delta_offset = (batch_id * num_outbbox * num_classes + + box_id * num_classes + class_id) * + 4; + const T dx = in_bbox[delta_offset] * mean_std.std[0] + mean_std.mean[0]; + const T dy = in_bbox[delta_offset + 1] * mean_std.std[1] + mean_std.mean[1]; + const T dw = in_bbox[delta_offset + 2] * mean_std.std[2] + mean_std.mean[2]; + const T dh = in_bbox[delta_offset + 3] * mean_std.std[3] + mean_std.mean[3]; + + const T clamp_dw = max(-max_ratio, min(max_ratio, dw)); + const T clamp_dh = max(-max_ratio, min(max_ratio, dh)); + + const int anchor_offset = threadIdx.x; + const T ax1 = anchor_cache[anchor_offset][0]; + const T ay1 = anchor_cache[anchor_offset][1]; + const T ax2 = anchor_cache[anchor_offset][2]; + const T ay2 = anchor_cache[anchor_offset][3]; + const T px = (ax1 + ax2) * 0.5; + const T py = (ay1 + ay2) * 0.5; + const T pw = ax2 - ax1; + const T ph = ay2 - ay1; + + const T gw = pw * exp(dw); + const T gh = ph * exp(dh); + + const T gx = px + pw * dx; + const T gy = py + ph * dy; + + const T x1 = gx - gw * 0.5; + const T y1 = gy - gh * 0.5; + const T x2 = gx + gw * 0.5; + const T y2 = gy + gh * 0.5; + + const int out_offset = delta_offset; + if (clip_range != nullptr) { + out_bbox[out_offset] = max(T(0.), min(x1, T(clip_range[1] - 1))); + out_bbox[out_offset + 1] = max(T(0.), min(y1, T(clip_range[0] - 1))); + out_bbox[out_offset + 2] = max(T(0.), min(x2, T(clip_range[1] - 1))); + out_bbox[out_offset + 3] = max(T(0.), min(y2, T(clip_range[0] - 1))); + } else { + out_bbox[out_offset] = x1; + out_bbox[out_offset + 1] = y1; + out_bbox[out_offset + 2] = x2; + out_bbox[out_offset + 3] = y2; + } + } +} + +template <> +__global__ void delta2bbox_kernel(float *__restrict__ out_bbox, + const float *__restrict__ in_bbox, + const float *__restrict__ anchor, + const int *__restrict__ clip_range, + int batch_size, int num_outbbox, + int num_classes, SMeanStd mean_std) { + extern __shared__ float4 anchor_cache[]; + const float max_ratio = abs(logf(16. 
/ 1000.)); + + const int class_id = blockIdx.y * blockDim.y + threadIdx.y; + const int batch_id = blockIdx.z; + if (class_id > num_classes) { + return; + } + CUDA_KERNEL_LOOP(box_id, num_outbbox) { + // load thread idx + if (threadIdx.y == 0) { + anchor_cache[threadIdx.x] = + reinterpret_cast(anchor)[box_id]; + } + __syncthreads(); + + const int delta_offset = (batch_id * num_outbbox * num_classes + + box_id * num_classes + class_id); + const float4 delta = + reinterpret_cast(in_bbox)[delta_offset]; + + const float &dx = delta.x * mean_std.std[0] + mean_std.mean[0]; + const float &dy = delta.y * mean_std.std[1] + mean_std.mean[1]; + const float &dw = delta.z * mean_std.std[2] + mean_std.mean[2]; + const float &dh = delta.w * mean_std.std[3] + mean_std.mean[3]; + + const float clamp_dw = max(-max_ratio, min(max_ratio, dw)); + const float clamp_dh = max(-max_ratio, min(max_ratio, dh)); + + const int anchor_offset = threadIdx.x; + const float &ax1 = anchor_cache[anchor_offset].x; + const float &ay1 = anchor_cache[anchor_offset].y; + const float &ax2 = anchor_cache[anchor_offset].z; + const float &ay2 = anchor_cache[anchor_offset].w; + const float px = (ax1 + ax2) * 0.5; + const float py = (ay1 + ay2) * 0.5; + const float pw = ax2 - ax1; + const float ph = ay2 - ay1; + + const float gw = pw * exp(dw); + const float gh = ph * exp(dh); + + const float gx = fmaf(pw, dx, px); + const float gy = fmaf(ph, dy, py); + + const float x1 = fmaf(gw, -0.5f, gx); + const float y1 = fmaf(gh, -0.5f, gy); + const float x2 = fmaf(gw, 0.5f, gx); + const float y2 = fmaf(gh, 0.5f, gy); + + const int out_offset = delta_offset; + float4 out_bbox_val; + + if (clip_range != nullptr) { + out_bbox_val.x = max(0.0f, min(x1, float(clip_range[1] - 1))); + out_bbox_val.y = max(0.0f, min(y1, float(clip_range[0] - 1))); + out_bbox_val.z = max(0.0f, min(x2, float(clip_range[1] - 1))); + out_bbox_val.w = max(0.0f, min(y2, float(clip_range[0] - 1))); + } else { + out_bbox_val.x = x1; + out_bbox_val.y = y1; + out_bbox_val.z = x2; + out_bbox_val.w = y2; + } + + reinterpret_cast(out_bbox)[out_offset] = out_bbox_val; + } +} + +template +void fill_zeros(T_cls *out_cls, T_bbox *out_bbox, size_t num_cls_element, + size_t num_bbox_element, cudaStream_t stream) { + thrust::fill_n(thrust::cuda::par.on(stream), (float *)(out_cls), + num_cls_element, 0.0f); + thrust::fill_n(thrust::cuda::par.on(stream), (float *)(out_bbox), + num_bbox_element, 0.0f); +} + +template void fill_zeros(float *out_cls, float *out_bbox, + size_t num_cls_element, + size_t num_bbox_element, + cudaStream_t stream); + +template +void delta2bbox(T *out_bbox, const T *in_bbox, const T *anchor, + const int *clip_range, int batch_size, int num_outbbox, + int num_classes, float *mean, float *std, cudaStream_t stream) { + SMeanStd mean_std; + memcpy(&mean_std.mean[0], mean, sizeof(float) * 4); + memcpy(&mean_std.std[0], std, sizeof(float) * 4); + const int classes_per_block = min(CUDA_NUM_THREADS, num_classes); + const int threads_per_classes = int(CUDA_NUM_THREADS / classes_per_block); + const int blocks_for_classes = DIVUP(num_classes, classes_per_block); + const int blocks_for_bboxes = DIVUP(num_outbbox, threads_per_classes); + const dim3 block_size(blocks_for_bboxes, blocks_for_classes, batch_size); + const dim3 thread_size(threads_per_classes, classes_per_block, 1); + + const int cache_size = threads_per_classes * 4; + delta2bbox_kernel + <<>>( + out_bbox, in_bbox, anchor, clip_range, batch_size, num_outbbox, + num_classes, mean_std); +} + +template void 
delta2bbox(float *out_bbox, const float *in_bbox, + const float *anchor, const int *clip_range, + int batch_size, int num_outbbox, + int num_classes, float *mean, float *std, + cudaStream_t stream); +} // namespace plugin +} // namespace amirstan diff --git a/cuda_code/deskew_deprecate2.cu b/cuda_code/deskew_deprecate2.cu new file mode 100644 index 0000000000000000000000000000000000000000..6efa1bbd3737899092b8462aeb6bb92668d25e28 --- /dev/null +++ b/cuda_code/deskew_deprecate2.cu @@ -0,0 +1,73 @@ +texture shear_tex; +texture rotate_tex; + +__global__ +void shear_kernel( + float *dst, + const float shift, // unit shifts + const unsigned int nu, const unsigned int nv, // output size + const float ratio, // scale + const unsigned int nx, const unsigned int ny, // input size + const unsigned int nz // layers +) { + unsigned int iu = blockIdx.x*blockDim.x+threadIdx.x; + unsigned int iv = blockIdx.y*blockDim.y+threadIdx.y; + if ((iu >= nu) || (iv >= nv)) { + return; + } + + // move origin to center + float u0 = iu - nu/2.; + float v0 = iv - nv/2.; + + // shear + float x0 = u0 - shift*v0; + float y0 = v0; + + // rescale + y0 /= ratio; + + // move origin to corner + float x = x0 + nx/2.; + float y = y0 + ny/2.; + + // write back + for (unsigned int iz = 0; iz < nz; iz++) { + unsigned int i = iz*nv*nu + iv*nu + iu; + dst[i] = tex2DLayered(shear_tex, x+.5f, y+.5f, iz); + } +} + +__global__ +void rotate_kernel( + float *dst, + const float vsin, const float vcos, // rotation matrix + const unsigned int nu, const unsigned int nv, // output size + const float sx, const float sy, // scale + const unsigned int nx, const unsigned int ny // input size +) { + unsigned int iu = blockIdx.x*blockDim.x+threadIdx.x; + unsigned int iv = blockIdx.y*blockDim.y+threadIdx.y; + if ((iu >= nu) || (iv >= nv)) { + return; + } + + // move origin to center + float u0 = iu - nu/2.; + float v0 = iv - nv/2.; + + // rotate + float x0 = u0*vcos + v0*vsin; + float y0 = -u0*vsin + v0*vcos; + + // rescale + x0 /= sx; y0 /= sy; + + // move origin to corner + float x = x0 + nx/2.; + float y = y0 + ny/2.; + + // write back + unsigned int i = iv*nu + iu; + dst[i] = tex2D(rotate_tex, x+.5f, y+.5f); +} \ No newline at end of file diff --git a/cuda_code/dev_R2grid.cu b/cuda_code/dev_R2grid.cu new file mode 100644 index 0000000000000000000000000000000000000000..928430250fc4e76ee81f306810f09f3ee9624169 --- /dev/null +++ b/cuda_code/dev_R2grid.cu @@ -0,0 +1,148 @@ +/* dev_R2grid.cu + * R3 under discretization (discretize functor) to a grid + * Ernest Yeung ernestyalumni@gmail.com + * 2016115 + * + * compilation tip: (compile separately) + * nvcc -std=c++11 -c ./physlib/dev_R2grid.cu -o dev_R2grid.o + * + */ +#include "dev_R2grid.h" + +//__constant__ int dev_Ld[2]; + +// constructor +__host__ dev_Grid2d::dev_Grid2d( dim3 Ld_in) : Ld(Ld_in) +{ + checkCudaErrors( + cudaMalloc((void**)&this->dev_f, this->NFLAT()*sizeof(float)) ); + + checkCudaErrors( + cudaMalloc((void**)&this->dev_f_out, this->NFLAT()*sizeof(float)) ); + + checkCudaErrors( + cudaMalloc((void**)&this->dev_u, this->NFLAT()*sizeof(float2)) ); + + checkCudaErrors( + cudaMalloc((void**)&this->dev_u_out, this->NFLAT()*sizeof(float2)) ); + + + (this->channelDesc_f) = cudaCreateChannelDesc( 32, 0, 0, 0, + cudaChannelFormatKindFloat); + + // 8 bits * 4 bytes in float (sizeof(float)) = 32 +/* (this->channelDesc_f2) = cudaCreateChannelDesc( 32, 32, 0, 0, + cudaChannelFormatKindFloat);*/ // This gave a Segmentation Fault + (this->channelDesc_f2) = cudaCreateChannelDesc(); + + + 
checkCudaErrors( + cudaMallocArray(&(this->cuArr_f), &(this->channelDesc_f), (this->Ld).x, (this->Ld).y, + cudaArraySurfaceLoadStore) ); + + checkCudaErrors( + cudaMallocArray(&(this->cuArr_f_out), &(this->channelDesc_f), (this->Ld).x, (this->Ld).y, + cudaArraySurfaceLoadStore) ); + + checkCudaErrors( + cudaMallocArray(&(this->cuArr_u), &(this->channelDesc_f2), (this->Ld).x, (this->Ld).y, + cudaArraySurfaceLoadStore) ); + + checkCudaErrors( + cudaMallocArray(&(this->cuArr_u_out), &(this->channelDesc_f2), (this->Ld).x, (this->Ld).y, + cudaArraySurfaceLoadStore) ); + +} + +// destructor + +__host__ dev_Grid2d::~dev_Grid2d() { + + checkCudaErrors( + cudaFree( this->dev_f ) ); + + checkCudaErrors( + cudaFree( this->dev_f_out ) ); + + + checkCudaErrors( + cudaFree( this->dev_u ) ); + + checkCudaErrors( + cudaFree( this->dev_u_out ) ); + + + checkCudaErrors( + cudaFreeArray( this->cuArr_f )); + + checkCudaErrors( + cudaFreeArray( this->cuArr_f_out )); + + + checkCudaErrors( + cudaFreeArray( this->cuArr_u )); + + checkCudaErrors( + cudaFreeArray( this->cuArr_u_out )); + +} + + +__host__ int dev_Grid2d :: NFLAT() { + return Ld.x*Ld.y; +} + +__global__ void d_BoundaryConditions( float2 hds, + cudaSurfaceObject_t uSurf , + const int L_x, const int L_y, + const float h_val) { + const int k_x = threadIdx.x + blockIdx.x * blockDim.x ; + const int k_y = threadIdx.y + blockIdx.y * blockDim.y ; + if ((k_x >= L_x) || (k_y >= L_y)) { + return ; } + + // real values on Euclidean space + float2 xreal ; + float2 lreal ; + xreal.x = hds.x * k_x; + xreal.y = hds.y * k_y; + lreal.x = hds.x * L_x; + lreal.y = hds.y * L_y; + + // stencil values, or halo cells + float2 tempu, b, bb; + + // wall boundary condition, y=0 + if (k_y == 0) { + // no slip condition + tempu.x = 0.f; + tempu.y = 0.f; + surf2Dwrite( tempu, uSurf, k_x * 8 , 0) ; } + + // symmetry plane boundary condition, y=H + else if (k_y == (L_y - 1)) { + surf2Dread(&b, uSurf, k_x * 8, L_y - 2) ; + surf2Dread(&bb, uSurf, k_x * 8, L_y - 3) ; + tempu.x = (4.f/3.f) * b.x - (1.f/3.f) * bb.x ; + tempu.y = 0.f ; + surf2Dwrite( tempu, uSurf, k_x * 8, L_y-1) ; } + + // inlet condition at x=0 + else if (k_x == 0) { + tempu.y = 0.f ; + + if ( xreal.y <= (lreal.y -h_val )) { + tempu.x = 0.f ; } + else if (( xreal.y > (lreal.y -h_val)) && (xreal.y <= lreal.y)) { + float tempux {0.f }; + tempux = 1.5f*( 2.f *(xreal.y-lreal.y+h_val)/h_val + -(xreal.y-lreal.y+h_val)*(xreal.y-lreal.y+h_val)/(h_val*h_val)) ; + tempu.x = tempux; + } + surf2Dwrite( tempu, uSurf, 0 , k_y ) ; } + +} + + + + diff --git a/cuda_code/dev_R2grid_2.cu b/cuda_code/dev_R2grid_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..2b92028441f2a15b6dba9acc9106a8248ee39758 --- /dev/null +++ b/cuda_code/dev_R2grid_2.cu @@ -0,0 +1,55 @@ +/* dev_R2grid.cu + * R3 under discretization (discretize functor) to a grid + * Ernest Yeung ernestyalumni@gmail.com + * 20160728 + */ +#include "dev_R2grid.h" + + +__host__ Dev_Grid2d::Dev_Grid2d( dim3 Ld_in) : Ld(Ld_in) +{ + checkCudaErrors( + cudaMalloc((void**)&this->u, this->staggered_NFLAT()*sizeof(float) ) ); + + checkCudaErrors( + cudaMalloc((void**)&this->v, this->staggered_NFLAT()*sizeof(float) ) ); + + checkCudaErrors( + cudaMalloc((void**)&this->F, this->staggered_NFLAT()*sizeof(float) ) ); + + checkCudaErrors( + cudaMalloc((void**)&this->G, this->staggered_NFLAT()*sizeof(float) ) ); + + int size_pres = ((Ld.x / 2 ) + 2 ) * (Ld.y + 2); + + checkCudaErrors( + cudaMalloc((void**)&this->pres_red, size_pres*sizeof(float) ) ); + + checkCudaErrors( 
+ cudaMalloc((void**)&this->pres_black, size_pres*sizeof(float) ) ); + +} + +// destructor +/* +__host__ dev_Grid3d::~dev_Grid3d() { + HANDLE_ERROR( + cudaFree( this->dev_rho ) ); + HANDLE_ERROR( + cudaFree( this->dev_E ) ); + HANDLE_ERROR( + cudaFree( this->dev_u ) ); + +} +* */ + +__host__ int Dev_Grid2d :: NFLAT() { + return Ld.x*Ld.y; +} + +__host__ int Dev_Grid2d :: staggered_NFLAT() { + return (Ld.x+2)*(Ld.y+2); +} + + + diff --git a/cuda_code/dev_approximate_gravity_2.cu b/cuda_code/dev_approximate_gravity_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..eb20783b4a8b40eb55c4e5d15a5bc8cb1e5a693e --- /dev/null +++ b/cuda_code/dev_approximate_gravity_2.cu @@ -0,0 +1,1035 @@ +// #include "support_kernels.cu" +#include +#include "../profiling/bonsai_timing.h" +PROF_MODULE(dev_approximate_gravity); + +#include "node_specs.h" + +#define WARP_SIZE2 5 +#define WARP_SIZE 32 + +#if NTHREAD != 2*WARP_SIZE +#error "NTHREAD in include/node_specs.h must be = 2*WARP_SIZE" +#endif + +#ifdef WIN32 +#define M_PI 3.14159265358979323846264338328 +#endif + +#if 0 +#define tid (threadIdx.x) +#else +#define tid (threadIdx.y*blockDim.x + threadIdx.x) +#endif +#define laneId (threadIdx.x & (WARP_SIZE - 1)) +#define warpId (threadIdx.x >> WARP_SIZE2) + +__forceinline__ __device__ float Wkernel(const float q) +{ + const float sigma = 8.0f/M_PI; + + const float qm = 1.0f - q; + const float f1 = sigma * (1.0f + (-6.0f)*q*q*qm); + const float f2 = sigma * 2.0f*qm*qm*qm; + + return fmaxf(0.0f, fminf(f1, f2)); +} + +__forceinline__ __device__ float interact( + const float3 ipos, + const float h, + const float hinv, + const float3 jpos, + const float jmass) +{ + const float3 dr = make_float3(jpos.x - ipos.x, jpos.y - ipos.y, jpos.z - ipos.z); + const float r2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; + if (r2 >= h*h) return 0.0f; + const float q = sqrtf(r2) * hinv; + const float hinv3 = hinv*hinv*hinv; + + return jmass * Wkernel(q) * hinv3; +} + + +/*** +**** --> prefix calculation via Horn(2005) data-parallel algoritm +***/ +#define BTEST(x) (-(int)(x)) + + template +__device__ int calc_prefix1(int* prefix, int value) +{ + int x; + + const int DIM = 1 << DIM2; + + prefix[tid] = value; + __syncthreads(); + + x = prefix[tid - 1]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 1); __syncthreads(); + x = prefix[tid - 2]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 2); __syncthreads(); + x = prefix[tid - 4]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 4); __syncthreads(); + x = prefix[tid - 8]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 8); __syncthreads(); + x = prefix[tid - 16]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 16); __syncthreads(); + if (DIM2 >= 6) {x = prefix[tid - 32]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 32); __syncthreads();} + if (DIM2 >= 7) {x = prefix[tid - 64]; __syncthreads(); prefix[tid] += x & BTEST(tid >= 64); __syncthreads();} + if (DIM2 >= 8) {x = prefix[tid -128]; __syncthreads(); prefix[tid] += x & BTEST(tid >=128); __syncthreads();} + + x = prefix[DIM - 1]; + __syncthreads(); + return x; +} + +#if 1 +#define _KEPLERCODE_ +#endif + +#ifdef _KEPLERCODE_ + +/****** KEPLER __shfl prefix sum ******/ + +#if 1 /* uses inlined assembly */ +__device__ __forceinline__ uint shfl_scan_add_step(uint partial, uint up_offset) +{ + uint result; + asm( + "{.reg .u32 r0;" + ".reg .pred p;" + "shfl.up.b32 r0|p, %1, %2, 0;" + "@p add.u32 r0, r0, %3;" + "mov.u32 %0, r0;}" + : "=r"(result) : "r"(partial), "r"(up_offset), "r"(partial)); + return result; +} + 
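+/* For reference, a minimal sketch of the scan that shfl_scan_add_step() builds
+ * up, written with the plain __shfl_up() intrinsic instead of inline PTX.  The
+ * block is illustration only (hence the #if 0) and the name
+ * inclusive_scan_warp_sketch is not part of the original code.
+ */
+#if 0
+__device__ __forceinline__ uint inclusive_scan_warp_sketch(uint value)
+{
+  const int lane = threadIdx.x & (WARP_SIZE - 1);
+  for (int offset = 1; offset < WARP_SIZE; offset <<= 1)
+  {
+    const uint partial = __shfl_up(value, offset, WARP_SIZE); // value held by lane - offset
+    if (lane >= offset)                                       // lanes below 'offset' have no source
+      value += partial;
+  }
+  return value; // inclusive prefix sum across the warp
+}
+#endif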
+ template +__device__ __forceinline__ uint inclusive_scan_warp(int mysum) +{ + for(int i = 0; i < levels; ++i) + mysum = shfl_scan_add_step(mysum, 1 << i); + return mysum; +} + +#else /* uses code from CUDA 4.2 Manual Appendix B13 */ + + template +__device__ __forceinline__ int inclusive_scan_warp(int value) +{ + const int BLOCKDIM = 1 << BLOCKDIM2; + for (int i=1; i<=BLOCKDIM-1; i <<= 1) + { +#if 0 /* uses *if* version, generate too manu spills, 240 bytes */ + int n = __shfl_up(value, i, BLOCKDIM); + if (laneId >= i) + value += n; +#else /* uses masking via BTEST, only 32 bytes spills to lmem */ + value += __shfl_up(value, i, BLOCKDIM) & BTEST(laneId >= i); +#endif + } + + return value; +} +#endif /* inlined assebly */ + +template +__device__ __forceinline__ int calc_prefix(int* prefix, int value) +{ + if (DIM2 != 6) /* should never be called */ + return calc_prefix1(prefix, value); + else + { + prefix[tid] = inclusive_scan_warp(value); + __syncthreads(); + + prefix[tid] += prefix[WARP_SIZE - 1] & BTEST(tid >= WARP_SIZE); + __syncthreads(); + + const int x = prefix[(1 << DIM2)- 1]; + __syncthreads(); /* must be here, otherwise the code crashes */ + + return x; + } +} +#else + template +__device__ __forceinline__ int calc_prefix(int* prefix, int value) +{ + return calc_prefix1(prefix, value); +} +#endif + + + template +__device__ int calc_prefix(int N, int* prefix_in) +{ + const int DIM = 1 << DIM2; + + int y = calc_prefix(prefix_in, prefix_in[tid]); + if (N <= DIM) return y; + + for (int p = DIM; p < N; p += DIM) + { + int *prefix = &prefix_in[p]; + const int y1 = calc_prefix(prefix, prefix[tid]); + prefix[tid] += y; + y += y1; + } + __syncthreads(); + + return y; +} + +/************************************/ +/********* SEGMENTED SCAN ***********/ +/************************************/ + + +#ifdef _KEPLERCODE_ + template +__device__ __forceinline__ int inclusive_segscan_warp(int value, const int distance) +{ +#if 0 + const unsigned int laneId = threadIdx.x & (WARP_SIZE - 1); +#endif + + const int SIZE = 1 << SIZE2; + +#if 0 /* this one forces lmem usage to 236 bytes of lmem */ + for (int i = 1; i <= SIZE; i <<= 1) + value += __shfl_up(value, i, SIZE) & BTEST(laneId >= i && i <= distance); +#else /* this one uses only 40 bytes of lmem, any idea why ?!? 
*/ + for (int i = 0; i < SIZE2; i++) + value += __shfl_up(value, 1 << i, SIZE) & BTEST(laneId >= (1< +__device__ __forceinline__ int inclusive_segscan_block64( + int *shmem, const int packed_value, int &dist_block, int &nseg) +{ + const int flag = packed_value < 0; + const int mask = BTEST(flag); + const int value = (mask & (-1-packed_value)) + (~mask & 1); + + int flags = __ballot(flag); +#if 1 /* this uses 36 bytes of lmem */ + shmem[tid] = __popc(flags); + __syncthreads(); + nseg += shmem[0] + shmem[WARP_SIZE]; + __syncthreads(); + + shmem[tid] = __clz(__brev(flags)); + __syncthreads(); + + int dist0 = shmem[WARP_SIZE]; + dist_block = shmem[0] + (BTEST(shmem[0] == WARP_SIZE) & dist0); +#else /* this uses 236 bytes of lmem */ + const int popc1 = __popc(flags); + const int popc2 = __clz(__brev(flags)); + if (tid == 0) {shmem[0] = popc1; shmem[2] = popc2;} + if (tid == 32) {shmem[1] = popc1; shmem[3] = popc2;} + __syncthreads(); + nseg += shmem[0] + shmem[1]; + int dist0 = shmem[3]; + dist_block = shmem[2]; +#endif + + __syncthreads(); + + const int distance = __clz(flags & lanemask_le()) + laneId - 31; + shmem[tid] = inclusive_segscan_warp(value, distance); + __syncthreads(); + + shmem[tid] += shmem[WARP_SIZE - 1] & BTEST(tid >= WARP_SIZE) & BTEST(tid < WARP_SIZE + dist0); + __syncthreads(); + + const int val = shmem[(1 << DIM2) - 1]; + __syncthreads(); + + return val; +} + +/* does not work if segment size > 64 (= thead block size) */ +template +__device__ __forceinline__ int inclusive_segscan_array(int *shmem_in, const int N) +{ + const int DIM = 1 << DIM2; + + int dist, nseg = 0; + int y = inclusive_segscan_block64(shmem_in, shmem_in[tid], dist, nseg); + if (N <= DIM) return nseg; + + for (int p = DIM; p < N; p += DIM) + { + int *shmem = shmem_in + p; + int y1 = inclusive_segscan_block64(shmem, shmem[tid], dist, nseg); + shmem[tid] += y & BTEST(tid < dist); + y = y1; + } + + return nseg; +} + + +/*************** Tree walk ************/ + + + template +__forceinline__ __device__ int ACCS(const int i) +{ + return (i & ((LMEM_STACK_SIZE << SHIFT) - 1))*blockDim.x + threadIdx.x; +} + + +#define BTEST(x) (-(int)(x)) + +texture texNodeSize; +texture texNodeCenter; +texture texMultipole; +texture texBody; + +/*********** Forces *************/ + +__device__ float4 add_acc( + float4 acc, const float4 pos, + const float massj, const float3 posj, + const float eps2) +{ +#if 1 /* to test performance of a tree-walk */ + const float3 dr = make_float3(posj.x - pos.x, posj.y - pos.y, posj.z - pos.z); + + const float r2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z + eps2; + const float rinv = rsqrtf(r2); + const float rinv2 = rinv*rinv; + const float mrinv = massj * rinv; + const float mrinv3 = mrinv * rinv2; + + acc.w -= mrinv; + acc.x += mrinv3 * dr.x; + acc.y += mrinv3 * dr.y; + acc.z += mrinv3 * dr.z; +#endif + + return acc; +} + + +//Improved Barnes Hut criterium +__device__ bool split_node_grav_impbh( + const float4 nodeCOM, + const float4 groupCenter, + const float4 groupSize) +{ + //Compute the distance between the group and the cell + float3 dr = make_float3( + fabsf(groupCenter.x - nodeCOM.x) - (groupSize.x), + fabsf(groupCenter.y - nodeCOM.y) - (groupSize.y), + fabsf(groupCenter.z - nodeCOM.z) - (groupSize.z) + ); + + dr.x += fabsf(dr.x); dr.x *= 0.5f; + dr.y += fabsf(dr.y); dr.y *= 0.5f; + dr.z += fabsf(dr.z); dr.z *= 0.5f; + + //Distance squared, no need to do sqrt since opening criteria has been squared + const float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; + + return (ds2 <= fabsf(nodeCOM.w)); +} 
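+
+/* The criterion above computes the squared distance from the node's centre of
+ * mass to the nearest point of the group's bounding box (the fabsf/0.5f pair
+ * is a branch-free max(dr, 0)) and opens the node when that squared distance
+ * is within fabsf(nodeCOM.w), the squared opening radius carried in the w
+ * component.  A scalar sketch of the same test, for illustration only
+ * (must_open_sketch is not a name used by the original code):
+ */
+#if 0
+__device__ inline bool must_open_sketch(float3 com, float openingSq,
+                                        float3 grpCenter, float3 grpHalfSize)
+{
+  const float dx = fmaxf(fabsf(grpCenter.x - com.x) - grpHalfSize.x, 0.0f);
+  const float dy = fmaxf(fabsf(grpCenter.y - com.y) - grpHalfSize.y, 0.0f);
+  const float dz = fmaxf(fabsf(grpCenter.z - com.z) - grpHalfSize.z, 0.0f);
+  return dx*dx + dy*dy + dz*dz <= openingSq;  // true -> the cell must be split
+}
+#endif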
+ + + +#define TEXTURES + + +template +__device__ __forceinline__ float4 approximate_gravity(int DIM2x, int DIM2y, + int tx, int ty, + int body_i, float4 pos_i, + real4 group_pos, + float eps2, + uint2 node_begend, + real4 *multipole_data, + real4 *body_pos, + int *shmem, + int *lmem, + int &ngb, + int &apprCount, int &direCount, + volatile float4 *boxSizeInfo, + float4 groupSize, + volatile float4 *boxCenterInfo, + float group_eps, + real4 *body_vel) { + + float4 acc_i = {0.0f, 0.0f, 0.0f, 0.0f}; + + + /*********** set necessary thread constants **********/ + + const int DIMx = 1 << DIM2x; + const int DIMy = 1 << DIM2y; + const int DIM = 1 << DIM2; + const int offs = ty << DIM2x; + + /*********** shared memory distribution **********/ + + // begin, end, size + // ----------------------- + const int stack_sz = (LMEM_STACK_SIZE << SHIFT) << DIM2; /* stack allocated per thread-block */ + int *approxL = lmem + stack_sz; + + int *directS = shmem; // 0*DIM, 1*DIM, 1*DIM + int *nodesS = directS + DIM; // 1*DIM, 10*DIM, 9*DIM + int *prefix = nodesS + DIM*8; // 9*DIM, 10*DIM, 1*DIM + + const int NJMAX = DIM*3; + int *body_list = (int* )&nodesS [ DIM]; // 2*DIM, 5*DIM, 2*DIM + float *sh_mass = (float* )&body_list[NJMAX]; // 5*DIM, 6*DIM, 1*DIM + float3 *sh_pos = (float3*)&sh_mass [ DIM]; // 6*DIM, 9*DIM 3*DIM + + int *approxM = approxL; + int *directM = directS; + int * nodesM = nodesS; + + + float *node_mon0 = sh_mass; + float3 *node_mon1 = sh_pos; + + float *sh_pot = sh_mass; + float3 *sh_acc = sh_pos; + + /*********** stack **********/ + + int *nstack = lmem; + + /*********** begin tree-walk **********/ + + int n_approx = 0; + int n_direct = 0; + + + for (int root_node = node_begend.x; root_node < node_begend.y; root_node += DIM) + { + int n_nodes0 = min(node_begend.y - root_node, DIM); + int n_stack0 = 0; + int n_stack_pre = 0; + + { nstack[ACCS(n_stack0)] = root_node + tid; n_stack0++; } + + /*********** walk each level **********/ + while (n_nodes0 > 0) { + + + int n_nodes1 = 0; + int n_offset = 0; + + int n_stack1 = n_stack0; + int c_stack0 = n_stack_pre; + + /*********** walk a level **********/ + while(c_stack0 < n_stack0) + { + + /*** + **** --> fetch the list of nodes rom LMEM + ***/ + bool use_node = tid < n_nodes0; +#if 0 + { prefix[tid] = nstack[ACCS(c_stack0)]; c_stack0++; } + __syncthreads(); + int node = prefix[min(tid, n_nodes0 - 1)]; +#else /* eg: seems to work, but I do not remember if that will *always* work */ + int node; + { node = nstack[ACCS(c_stack0)]; c_stack0++; } +#endif + +#if 0 + if(n_nodes0 > 0){ //Work around pre 4.1 compiler bug + n_nodes0 -= DIM; + } +#else + n_nodes0 -= DIM; +#endif + + /*** + **** --> process each of the nodes in the list in parallel + ***/ + +#ifndef TEXTURES + float4 nodeSize = boxSizeInfo[node]; //Fetch the size of the box. Size.w = child info + float4 node_pos = boxCenterInfo[node]; //Fetch the center of the box. 
center.w = opening info +#else + float4 nodeSize = tex1Dfetch(texNodeSize, node); + float4 node_pos = tex1Dfetch(texNodeCenter, node); +#endif + + int node_data = __float_as_int(nodeSize.w); + + //Check if a cell has to be opened +#ifndef TEXTURES + float4 nodeCOM = multipole_data[node*3]; +#else + float4 nodeCOM = tex1Dfetch(texMultipole,node*3); +#endif + + nodeCOM.w = node_pos.w; + bool split = split_node_grav_impbh(nodeCOM, group_pos, groupSize); + + + bool leaf = node_pos.w <= 0; //Small AND equal incase of a 1 particle cell //Check if it is a leaf + // split = true; + + uint mask = BTEST((split && !leaf) && use_node); // mask = #FFFFFFFF if use_node+split+not_a_leaf==true, otherwise zero + int child = node_data & 0x0FFFFFFF; //Index to the first child of the node + int nchild = (((node_data & 0xF0000000) >> 28)) & mask; //The number of children this node has + + /*** + **** --> calculate prefix + ***/ + + + int n_total = calc_prefix(prefix, nchild); // inclusive scan to compute memory offset of each child (return total # of children) + int offset = prefix[tid]; + offset += n_offset - nchild; // convert inclusive into exclusive scan for referencing purpose + __syncthreads(); // thread barrier + + for (int i = n_offset; i < n_offset + n_total; i += DIM) //nullify part of the array that will be filled with children + nodesM[tid + i] = 0; //but do not touch those parts which has already been filled + __syncthreads(); //Thread barrier to make sure all warps finished writing data + + bool flag = (split && !leaf) && use_node; //Flag = use_node + split + not_a_leaf;Use only non_leaf nodes that are to be split + if (flag) nodesM[offset] = child; //Thread with the node that is about to be split + /*** WARNING: The __syncthreads() must be here to avoid warp race + * conditions. 
If removed, the result will be changing from run to run + * */ + __syncthreads(); + //writes the first child in the array of nodes + /*** in the following 8 lines, we calculate indexes of all the children that have to be walked from the index of the first child***/ + if (flag && nodesM[offset + 1] == 0) nodesM[offset + 1] = child + 1; + __syncthreads(); + if (flag && nodesM[offset + 2] == 0) nodesM[offset + 2] = child + 2; + __syncthreads(); + if (flag && nodesM[offset + 3] == 0) nodesM[offset + 3] = child + 3; + __syncthreads(); + if (flag && nodesM[offset + 4] == 0) nodesM[offset + 4] = child + 4; + __syncthreads(); + if (flag && nodesM[offset + 5] == 0) nodesM[offset + 5] = child + 5; + __syncthreads(); + if (flag && nodesM[offset + 6] == 0) nodesM[offset + 6] = child + 6; + __syncthreads(); + if (flag && nodesM[offset + 7] == 0) nodesM[offset + 7] = child + 7; + __syncthreads(); + + n_offset += n_total; //Increase the offset in the array by the number of newly added nodes + + + /*** + **** --> save list of nodes to LMEM + ***/ + + /*** if half of shared memory or more is filled with the the nodes, dump these into slowmem stack ***/ + while(n_offset >= DIM) + { + n_offset -= DIM; + const int offs1 = ACCS(n_stack1); + nstack[offs1] = nodesM[n_offset + tid]; n_stack1++; + n_nodes1 += DIM; + + if((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT)) + { + //We overwrote our current stack + apprCount = -1; + return acc_i; + } + } + + __syncthreads(); + + + + /******************************/ + /******************************/ + /***** EVALUATION *****/ + /******************************/ + /******************************/ +#if 1 + /***********************************/ + /****** APPROX ******/ + /***********************************/ + + n_total = calc_prefix(prefix, 1 - (split || !use_node)); + offset = prefix[tid]; + + if (!split && use_node) approxM[n_approx + offset - 1] = node; + __syncthreads(); + n_approx += n_total; + + while (n_approx >= DIM) + { + n_approx -= DIM; + const int address = (approxM[n_approx + tid] << 1) + approxM[n_approx + tid]; +#ifndef TEXTURES + const float4 monopole = multipole_data[address ]; +#if 0 + float4 octopole0 = multipole_data[address + 1]; + float4 octopole1 = multipole_data[address + 2]; +#endif +#else + const float4 monopole = tex1Dfetch(texMultipole, address); +#if 0 + float4 octopole0 = tex1Dfetch(texMultipole, address + 1); + float4 octopole1 = tex1Dfetch(texMultipole, address + 2); +#endif +#endif + + node_mon0[tid] = monopole.w; + node_mon1[tid] = make_float3(monopole.x, monopole.y, monopole.z); + __syncthreads(); + +#if 1 +#pragma unroll 16 + for (int i = 0; i < DIMx; i++) + acc_i = add_acc(acc_i, pos_i, node_mon0[offs + i], node_mon1[offs+i], eps2); + apprCount += DIMx; + __syncthreads(); +#endif + } + __syncthreads(); +#endif + +#if 1 + /***********************************/ + /****** DIRECT ******/ + /***********************************/ + + + flag = split && leaf && use_node; //flag = split + leaf + use_node + const int jbody = node_data & BODYMASK; //the first body in the leaf + const int nbody = (((node_data & INVBMASK) >> LEAFBIT)+1) & BTEST(flag); //number of bodies in the leaf masked with the flag + + body_list[tid] = directM[tid]; //copy list of bodies from previous pass to body_list + + // step 1 + calc_prefix(prefix, flag); // inclusive scan on flags to construct array + const int offset1 = prefix[tid]; + + // step 2 + int n_bodies = calc_prefix(prefix, nbody); // inclusive scan to compute memory offset for each body + offset = 
prefix[tid]; + __syncthreads(); + + if (flag) prefix[offset1 - 1] = tid; //with tidś whose leaves have to be opened + __syncthreads(); //thread barrier, make sure all warps completed the job + + directM[tid] = offset; //Store a copy of inclusive scan in direct + offset -= nbody; //convert inclusive int oexclusive scan + offset += 1; //add unity, since later prefix0[tid] == 0 used to check barrier + + int nl_pre = 0; //Number of leaves that have already been processed + + while (n_bodies > 0) + { + int nb = min(n_bodies, NJMAX - n_direct); //Make sure number of bides to be extracted does not exceed + //the amount of allocated shared memory + + // step 0 //nullify part of the body_list that will be filled with bodies + for (int i = n_direct; i < n_direct + nb; i += DIM) //from the leaves that are being processed + body_list[i + tid] = 0; + __syncthreads(); + + //step 1: + if (flag && (directM[tid] <= nb) && (offset > 0)) //make sure that the thread indeed carries a leaf + body_list[n_direct + offset- 1] = -1-jbody; //whose bodies will be extracted + __syncthreads(); + + // step 2: + const int nl = inclusive_segscan_array(&body_list[n_direct], nb); + nb = directM[prefix[nl_pre + nl - 1]]; // number of bodies stored in these leaves + __syncthreads(); + + + /***************************************************************************** + * example of what is accomplished in steps 0-2 * + * --------------------------- * + * step 0: body_list = 000000000000000000000 * + * step 1: body_list = n000m000p000000q00r00 n,m,.. = -1-jbody_n,m... * + * step 2: body_list = n n+1 n+2 n+3 m m+1 m+2 m+3 p p+1 p+2 p+3 p+4 p+5 ... * + *****************************************************************************/ + + n_bodies -= nb; //subtract from n_bodies number of bodies that have been extracted + nl_pre += nl; //increase the number of leaves that where processed + directM[tid] -= nb; //subtract the number of extracted bodies in this pass + offset = max(offset - nb, 0); + n_direct += nb; //increase the number of bodies to be procssed + + while(n_direct >= DIM) + { + n_direct -= DIM; + + + const float4 posj = body_pos[body_list[n_direct + tid]]; +#if 0 + const float4 posj = tex1Dfetch(texBody, body_list[n_direct + tid]); +#endif + sh_mass[tid] = posj.w; + sh_pos [tid] = make_float3(posj.x, posj.y, posj.z); + + __syncthreads(); +#if 1 +#pragma unroll 16 + for (int j = 0; j < DIMx; j++) + acc_i = add_acc(acc_i, pos_i, sh_mass[offs + j], sh_pos[offs + j], eps2); + direCount += DIMx; + __syncthreads(); +#endif + } + + } + directM[tid] = body_list[tid]; + __syncthreads(); +#endif + } //end lvl + + + n_nodes1 += n_offset; + if (n_offset > 0) + { + nstack[ACCS(n_stack1)] = nodesM[tid]; n_stack1++; + if((n_stack1 - c_stack0) >= (LMEM_STACK_SIZE << SHIFT)) + { + //We overwrote our current stack + apprCount = -1; + return acc_i; + } + } + __syncthreads(); + + + /*** + **** --> copy nodes1 to nodes0: done by reassigning the pointers + ***/ + n_nodes0 = n_nodes1; + + n_stack_pre = n_stack0; + n_stack0 = n_stack1; + + }//end while levels + }//end for + + + if(n_approx > 0) + { + if (tid < n_approx) + { + const int address = (approxM[tid] << 1) + approxM[tid]; +#ifndef TEXTURES + float4 monopole = multipole_data[address ]; + float4 octopole0 = multipole_data[address + 1]; + float4 octopole1 = multipole_data[address + 2]; +#else + float4 monopole = tex1Dfetch(texMultipole, address); + float4 octopole0 = tex1Dfetch(texMultipole, address + 1); + float4 octopole1 = tex1Dfetch(texMultipole, address + 2); +#endif + + 
node_mon0[tid] = monopole.w; + node_mon1[tid] = make_float3(monopole.x, monopole.y, monopole.z); + + } else { + + //Set non-active memory locations to zero + node_mon0[tid] = 0.0f; + node_mon1[tid] = make_float3(1.0e10f, 1.0e10f, 1.0e10f); + + } + __syncthreads(); +#pragma unroll + for (int i = 0; i < DIMx; i++) + acc_i = add_acc(acc_i, pos_i, node_mon0[offs + i], node_mon1[offs+i],eps2); + apprCount += DIMx; + + __syncthreads(); + } //if n_approx > 0 + + if(n_direct > 0) + { + if (tid < n_direct) + { + const float4 posj = body_pos[directM[tid]]; +#if 0 + const float4 posj = tex1Dfetch(texBody, direct[tid]); +#endif + sh_mass[tid] = posj.w; + sh_pos [tid] = make_float3(posj.x, posj.y, posj.z); + } else { + sh_mass[tid] = 0.0f; + sh_pos [tid] = make_float3(1.0e10f, 1.0e10f, 1.0e10f); + } + + __syncthreads(); +#pragma unroll + for (int j = 0; j < DIMx; j++) + acc_i = add_acc(acc_i, pos_i, sh_mass[offs + j], sh_pos[offs + j], eps2); + direCount += DIMx; + __syncthreads(); + } + + /*** + **** --> reduce data between threads + ***/ + sh_pot[tid] = acc_i.w; + sh_acc[tid] = make_float3(acc_i.x, acc_i.y, acc_i.z); + __syncthreads(); + + if (ty == 0) +#pragma unroll + for (int i = 1; i < DIMy; i++) + { + const int idx = (i << DIM2x) + tx; + acc_i.w += sh_pot[idx]; + acc_i.x += sh_acc[idx].x; + acc_i.y += sh_acc[idx].y; + acc_i.z += sh_acc[idx].z; + } + __syncthreads(); + + +#if 0 /* below breaks the code */ + //Sum the interaction counters + int *sh_dire = (int*)&sh_pot; + int *sh_appr = (int*)&sh_acc; + sh_dire[tid] = direCount; + sh_appr[tid] = apprCount; + __syncthreads(); + + + if (ty == 0) + { +#pragma unroll + for (int i = 1; i < DIMy; i++) + { + const int idx = (i << DIM2x) + tx; + direCount += sh_dire[idx]; + apprCount += sh_appr[idx]; + } + } + __syncthreads(); +#endif + + return acc_i; +} + + + extern "C" __global__ void +#if 0 +__launch_bounds__(NTHREAD) +#endif + dev_approximate_gravity(const int n_active_groups, + int n_bodies, + float eps2, + uint2 node_begend, + int *active_groups, + real4 *body_pos, + real4 *multipole_data, + float4 *acc_out, + int *ngb_out, + int *active_inout, + int2 *interactions, + float4 *boxSizeInfo, + float4 *groupSizeInfo, + float4 *boxCenterInfo, + float4 *groupCenterInfo, + real4 *body_vel, + int *MEM_BUF) { + // int grpOffset){ + + + const int blockDim2 = NTHREAD2; + __shared__ int shmem[10*(1 << blockDim2)]; + // __shared__ int shmem[24*(1 << blockDim2)]; is possible on FERMI + // int lmem[LMEM_STACK_SIZE]; + + + + /*********** check if this block is linked to a leaf **********/ + + int bid = gridDim.x * blockIdx.y + blockIdx.x; + + while(true) + { + + if(threadIdx.x == 0) + { + bid = atomicAdd(&active_inout[n_bodies], 1); + shmem[0] = bid; + } + __syncthreads(); + + bid = shmem[0]; + + if (bid >= n_active_groups) return; + + +// int tid = threadIdx.y * blockDim.x + threadIdx.x; + + int grpOffset = 0; + + // volatile int *lmem = &MEM_BUF[blockIdx.x*LMEM_STACK_SIZE*blockDim.x + threadIdx.x*LMEM_STACK_SIZE]; + // int *lmem = &MEM_BUF[blockIdx.x*LMEM_STACK_SIZE*blockDim.x + threadIdx.x*LMEM_STACK_SIZE]; + int *lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; + + + /*********** set necessary thread constants **********/ +#ifdef DO_BLOCK_TIMESTEP + real4 curGroupSize = groupSizeInfo[active_groups[bid + grpOffset]]; +#else + real4 curGroupSize = groupSizeInfo[bid + grpOffset]; +#endif + int groupData = __float_as_int(curGroupSize.w); + uint body_i = groupData & CRITMASK; + uint nb_i = ((groupData & INVCMASK) >> CRITBIT) + 1; + 
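    /* Editorial note: at this point the bit-packed descriptor in curGroupSize.w has been
       decoded: body_i holds the index of the first body of this group and nb_i the number
       of bodies it contains (CRITMASK, INVCMASK and CRITBIT select the two bit fields). */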
+#ifdef DO_BLOCK_TIMESTEP + real4 group_pos = groupCenterInfo[active_groups[bid + grpOffset]]; +#else + real4 group_pos = groupCenterInfo[bid + grpOffset]; +#endif + // if(tid == 0) + // printf("[%f %f %f %f ] \n [%f %f %f %f ] %d %d \n", + // curGroupSize.x, curGroupSize.y, curGroupSize.z, curGroupSize.w, + // group_pos.x, group_pos.y, group_pos.z, group_pos.w, body_i, nb_i); + + + int DIM2x = 0; + while (((nb_i - 1) >> DIM2x) > 0) DIM2x++; + + DIM2x = max(DIM2x,4); + int DIM2y = blockDim2 - DIM2x; + + int tx = tid & ((1 << DIM2x) - 1); + int ty = tid >> DIM2x; + + body_i += tx%nb_i; + + //float4 pos_i = tex1Dfetch(bodies_pos_ref, body_i); // texture read: 4 floats + + + float4 pos_i = body_pos[body_i]; + + + int ngb_i; + + float4 acc_i = {0.0f, 0.0f, 0.0f, 0.0f}; + +#ifdef INDSOFT + eps2 = body_vel[body_i].w; + float group_eps = eps2; + + volatile float *reduc = (float*) &shmem[0]; + reduc[threadIdx.x] = eps2; + + //Find the maximum softening value for the particles in this group + __syncthreads(); + // do reduction in shared mem + if(blockDim.x >= 512) if (tid < 256) {reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 256]);} __syncthreads(); + if(blockDim.x >= 256) if (tid < 128) {reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 128]);} __syncthreads(); + if(blockDim.x >= 128) if (tid < 64) {reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 64]);} __syncthreads(); + if(blockDim.x >= 64) if (tid < 32) { reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 32]);} + if(blockDim.x >= 32) if (tid < 16) { reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 16]);} + + if(tid < 8) + { + reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 8]); + reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 4]); + reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 2]); + reduc[threadIdx.x] = group_eps = fmaxf(group_eps, reduc[threadIdx.x + 1]); + } + __syncthreads(); + + group_eps = reduc[0]; +#else + float group_eps = 0; +#endif + + int apprCount = 0; + int direCount = 0; + + + acc_i = approximate_gravity( DIM2x, DIM2y, tx, ty, + body_i, pos_i, group_pos, + eps2, node_begend, + multipole_data, body_pos, + shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, + group_eps, body_vel); + if(apprCount < 0) + { + + //Try to get access to the big stack, only one block per time is allowed + if(threadIdx.x == 0) + { + int res = atomicExch(&active_inout[n_bodies+1], 1); //If the old value (res) is 0 we can go otherwise sleep + int waitCounter = 0; + while(res != 0) + { + //Sleep + for(int i=0; i < (1024); i++) + { + waitCounter += 1; + } + //Test again + shmem[0] = waitCounter; + res = atomicExch(&active_inout[n_bodies+1], 1); + } + } + + __syncthreads(); + + lmem = &MEM_BUF[gridDim.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; //Use the extra large buffer + apprCount = direCount = 0; + acc_i = approximate_gravity( DIM2x, DIM2y, tx, ty, + body_i, pos_i, group_pos, + eps2, node_begend, + multipole_data, body_pos, + shmem, lmem, ngb_i, apprCount, direCount, boxSizeInfo, curGroupSize, boxCenterInfo, + group_eps, body_vel); + lmem = &MEM_BUF[blockIdx.x*(LMEM_STACK_SIZE*blockDim.x + LMEM_EXTRA_SIZE)]; + + if(threadIdx.x == 0) + { + atomicExch(&active_inout[n_bodies+1], 0); //Release the lock + } + }//end if apprCount < 0 + + if (tid < nb_i) { + acc_out [body_i] = acc_i; + ngb_out [body_i] = ngb_i; + active_inout[body_i] = 1; + 
interactions[body_i].x = apprCount; + interactions[body_i].y = direCount ; + } + + + } //end while + } diff --git a/cuda_code/device_15.cu b/cuda_code/device_15.cu new file mode 100644 index 0000000000000000000000000000000000000000..d66761e7543a66113f22980f405583724ec6be28 --- /dev/null +++ b/cuda_code/device_15.cu @@ -0,0 +1,40 @@ +#include +#include + +#include "tensors/device.h" +#include "tensors/gpu/cuda_helpers.h" + +namespace marian { +namespace gpu { + +Device::~Device() { + cudaSetDevice(deviceId_.no); + if(data_) { + CUDA_CHECK(cudaFree(data_)); + } + cudaDeviceSynchronize(); +} + +void Device::reserve(size_t size) { + size = align(size); + cudaSetDevice(deviceId_.no); + + ABORT_IF(size < size_ || size == 0, + "New size must be larger than old size and larger than 0"); + + if(data_) { + // Allocate memory by going through host memory + uint8_t *temp = new uint8_t[size_]; + CUDA_CHECK(cudaMemcpy(temp, data_, size_, cudaMemcpyDeviceToHost)); + CUDA_CHECK(cudaFree(data_)); + CUDA_CHECK(cudaMalloc(&data_, size)); + CUDA_CHECK(cudaMemcpy(data_, temp, size_, cudaMemcpyHostToDevice)); + delete[] temp; + } else { + CUDA_CHECK(cudaMalloc(&data_, size)); + } + + size_ = size; +} +} +} diff --git a/cuda_code/device_8.cu b/cuda_code/device_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..66a7b983bf7a8896392422aeba4c299d03bbb16e --- /dev/null +++ b/cuda_code/device_8.cu @@ -0,0 +1,624 @@ +#include +#include +#include +#include +#define _USE_MATH_DEFINES +#include +#include +#include +#include +#include "device.h" + +/** Good ol' MIN macro */ +#define MIN(a, b) ((a) < (b) ? (a) : (b)) + +static float gaussian_noise(float mean, float sigma, std::mt19937 &rng) +{ + std::normal_distribution gaussian(mean, sigma); + return gaussian(rng); +} + +static float probability_of_value_from_bivariate_gaussian(float x, float y, float mean_x, float mean_y, float sigma_x, float sigma_y) +{ + const float rho = 0.0; // cov / (sig1 * sig2); Covariance of two independent random variables is zero. + float denom = 2.0 * M_PI * sigma_x * sigma_y * sqrt(1.0 - (rho * rho)); + float A = ((x - mean_x) * (x - mean_x)) / (sigma_x * sigma_x); + float B = ((2.0 * rho * (x - mean_x) * (y - mean_y)) / (sigma_x * sigma_y)); + float C = ((y - mean_y) * (y - mean_y)) / (sigma_y * sigma_y); + A /= 1000.0; // For numerical stability + C /= 1000.0; // Ditto + float z = A - B + C; + float a = (-1.0 * z) / (2.0 * (1.0 - rho * rho)); + + return exp(a) / denom; +} + +__global__ void kernel_calculate_likelihood(int *particles_x, int *particles_y, float *weights, unsigned int nparticles, float estimate_x, float estimate_y) +{ + int index = blockIdx.x * blockDim.x + threadIdx.x; + + if (index < nparticles) + { + float x = (float)particles_x[index]; + float y = (float)particles_y[index]; + + const float sigma_x = 2.5; + const float sigma_y = 2.5; + float mean_x = estimate_x; + float mean_y = estimate_y; + + // Compute the probability of getting this x,y combo from a distribution centered at estimate_x, estimte_y. + const float rho = 0.0; // cov / (sig1 * sig2); Covariance of two independent random variables is zero. 
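        /* Editorial note: with rho == 0 the bivariate normal density factorises into two
           independent 1-D Gaussians,
             p(x,y) = exp(-[(x-mean_x)^2/(2*sigma_x^2) + (y-mean_y)^2/(2*sigma_y^2)]) / (2*pi*sigma_x*sigma_y),
           which is what the code below evaluates. A and C are additionally divided by 1000
           (as in the host-side helper above), so the stored weight is proportional to,
           not equal to, the true density. */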
+ float denom = 2.0f * M_PI * sigma_x * sigma_y * sqrt(1.0f - (rho * rho)); + float A = ((x - mean_x) * (x - mean_x)) / (sigma_x * sigma_x); + float B = ((2.0f * rho * (x - mean_x) * (y - mean_y)) / (sigma_x * sigma_y)); + float C = ((y - mean_y) * (y - mean_y)) / (sigma_y * sigma_y); + A /= 1000.0f; // For numerical stability + C /= 1000.0f; // Ditto + float z = A - B + C; + float a = (-1.0f * z) / (2.0f * (1.0f - rho * rho)); + float prob = exp(a) / denom; + weights[index] = prob; + } +} + +int device_calculate_likelihood(const int *particles_x, const int *particles_y, int estimate_x, int estimate_y, float *weights, unsigned int nparticles, int nthreads_per_block) +{ + cudaError_t err; + int *dev_particles_x = nullptr; + int *dev_particles_y = nullptr; + float *dev_weights = nullptr; + + #define CHECK_CUDA_ERR(err) do { if (err != cudaSuccess) { err = (cudaError_t)__LINE__; goto fail; }} while (0) + + /* Malloc all the device memory we need */ + err = cudaMalloc(&dev_particles_x, nparticles * sizeof(int)); + CHECK_CUDA_ERR(err); + + err = cudaMalloc(&dev_particles_y, nparticles * sizeof(int)); + CHECK_CUDA_ERR(err); + + err = cudaMalloc(&dev_weights, nparticles * sizeof(float)); + CHECK_CUDA_ERR(err); + + /* Copy arrays onto device */ + err = cudaMemcpy(dev_particles_x, particles_x, nparticles * sizeof(int), cudaMemcpyHostToDevice); + CHECK_CUDA_ERR(err); + + err = cudaMemcpy(dev_particles_y, particles_y, nparticles * sizeof(int), cudaMemcpyHostToDevice); + CHECK_CUDA_ERR(err); + + err = cudaMemcpy(dev_weights, weights, nparticles * sizeof(float), cudaMemcpyHostToDevice); + CHECK_CUDA_ERR(err); + + /* Call the kernel */ + kernel_calculate_likelihood<<>>(dev_particles_x, dev_particles_y, dev_weights, nparticles, estimate_x, estimate_y); + err = cudaDeviceSynchronize(); + CHECK_CUDA_ERR(err); + + /* Copy array back onto host */ + err = cudaMemcpy(weights, dev_weights, nparticles * sizeof(float), cudaMemcpyDeviceToHost); + CHECK_CUDA_ERR(err); + + /* Deallocate the device arrays */ + err = cudaFree(dev_particles_x); + CHECK_CUDA_ERR(err); + + err = cudaFree(dev_particles_y); + CHECK_CUDA_ERR(err); + + err = cudaFree(dev_weights); + CHECK_CUDA_ERR(err); + + #undef CHECK_CUDA_ERR + +fail: + assert(err == cudaSuccess); + return (int)err; +} + +static void move_particles(int estimated_vx, int estimated_vy, unsigned int nparticles, int *particles_x, int *particles_y, float *particles_weights, std::mt19937 &rng) +{ + for (unsigned int i = 0; i < nparticles; i++) + { + static const float sigma = 2.5; + float vx = gaussian_noise(estimated_vx, sigma, rng); + float vy = gaussian_noise(estimated_vy, sigma, rng); + particles_x[i] += vx; + particles_y[i] += vy; + particles_weights[i] = probability_of_value_from_bivariate_gaussian(vx, vy, estimated_vx, estimated_vy, sigma, sigma); + } +} + +static void sort_particles_by_weight_in_place(unsigned int *indices, unsigned int nparticles, float *particles_weights, int *particles_x, int *particles_y) +{ + // Sort the indices + std::sort(indices, indices + nparticles, SortIndices(particles_weights)); + + // Make copies of the three arrays (gross) + int *xcpy = (int *)malloc(sizeof(int) * nparticles); + int *ycpy = (int *)malloc(sizeof(int) * nparticles); + float *wcpy = (float *)malloc(sizeof(float) * nparticles); + memcpy(xcpy, particles_x, sizeof(int) * nparticles); + memcpy(ycpy, particles_y, sizeof(int) * nparticles); + memcpy(wcpy, particles_weights, sizeof(float) * nparticles); + + // Sort each array according to the sorted indices + for (unsigned int i = 
0; i < nparticles; i++) + { + particles_weights[i] = wcpy[indices[i]]; + particles_x[i] = xcpy[indices[i]]; + particles_y[i] = ycpy[indices[i]]; + } + + free(xcpy); + free(ycpy); + free(wcpy); + xcpy = nullptr; + ycpy = nullptr; + wcpy = nullptr; +} + +static void normalize_weights(unsigned int nparticles, float *particles_weights) +{ + float sum = 0.0; + for (unsigned int i = 0; i < nparticles; i++) + { + sum += particles_weights[i]; + } + + if (sum > 0.0) + { + for (unsigned int i = 0; i < nparticles; i++) + { + particles_weights[i] /= sum; + assert((particles_weights[i] >= 0.0) && (particles_weights[i] <= 1.0)); + } + } +} + +static void complete_resample_and_move_step(unsigned int nparticles, float *particles_weights, std::mt19937 &rng, unsigned int *indices, int *particles_x, int *particles_y, int estimated_vx, int estimated_vy) +{ + // Create a distribution I will need + auto dist = std::uniform_real_distribution(0.0, 1.0); + std::uniform_int_distribution height_distribution; + std::uniform_int_distribution width_distribution; + + // Create the new particles in vectors + std::vector pxs; + std::vector pys; + + // Sort the particles by weight (in reverse - heaviest at the front of the array) + //sort_particles_by_weight_in_place(indices, nparticles, particles_weights, particles_x, particles_y); + + // Align a CMF (cumulative mass function) array, where each bin is the sum of all previous weights + std::vector cmf; + float acc_prob_mass = 0.0; + for (unsigned int i = 0; i < nparticles; i++) + { + acc_prob_mass += particles_weights[i]; + cmf.push_back(acc_prob_mass); + } + + // Do a search into the CMF to find the place where our randomly generated probability (0 to 1) fits + for (unsigned int i = 0; i < nparticles; i++) + { + float p = dist(rng); + assert((p <= 1.0) && (p >= 0.0)); + + int cmf_index = -1; + for (unsigned int j = 0; j < nparticles; j++) + { + // Search for where the generated probability belongs + if (p <= cmf[j]) + { + cmf_index = j; + break; + } + } + + if (cmf_index >= 0) + { + pxs.push_back(particles_x[cmf_index]); + pys.push_back(particles_y[cmf_index]); + } + else + { + // Probabilities are all zero. Resample from uniform. 
+ pxs.push_back(width_distribution(rng)); + pys.push_back(height_distribution(rng)); + } + } + + // Now overwrite the current batch of particles with the new ones + for (unsigned int i = 0; i < nparticles; i++) + { + particles_x[i] = pxs[i]; + particles_y[i] = pys[i]; + } + + // Reset all weights + for (unsigned int i = 0; i < nparticles; i++) + { + particles_weights[i] = 0.0; + } + + // Move particles + for (unsigned int i = 0; i < nparticles; i++) + { + static const float sigma = 2.5; + float vx = gaussian_noise(estimated_vx, sigma, rng); + float vy = gaussian_noise(estimated_vy, sigma, rng); + particles_x[i] += vx; + particles_y[i] += vy; + particles_weights[i] = probability_of_value_from_bivariate_gaussian(vx, vy, estimated_vx, estimated_vy, sigma, sigma); + } +} + +static void resample_particles(unsigned int nparticles, float *particles_weights, std::mt19937 &rng, unsigned int *indices, int *particles_x, int *particles_y) +{ + // Create a distribution I will need + auto dist = std::uniform_real_distribution(0.0, 1.0); + std::uniform_int_distribution height_distribution; + std::uniform_int_distribution width_distribution; + + // Create the new particles in vectors + std::vector pxs; + std::vector pys; + + // Normalize the weights so that each one is between 0 and 1 + normalize_weights(nparticles, particles_weights); + + // Sort the particles by weight (in reverse - heaviest at the front of the array) + sort_particles_by_weight_in_place(indices, nparticles, particles_weights, particles_x, particles_y); + + // Align a CMF (cumulative mass function) array, where each bin is the sum of all previous weights + std::vector cmf; + float acc_prob_mass = 0.0; + for (unsigned int i = 0; i < nparticles; i++) + { + acc_prob_mass += particles_weights[i]; + cmf.push_back(acc_prob_mass); + } + + // Do a search into the CMF to find the place where our randomly generated probability (0 to 1) fits + for (unsigned int i = 0; i < nparticles; i++) + { + float p = dist(rng); + assert((p <= 1.0) && (p >= 0.0)); + + int cmf_index = -1; + for (unsigned int j = 0; j < nparticles; j++) + { + // Search for where the generated probability belongs + if (p <= cmf[j]) + { + cmf_index = j; + break; + } + } + + if (cmf_index >= 0) + { + pxs.push_back(particles_x[cmf_index]); + pys.push_back(particles_y[cmf_index]); + } + else + { + // Probabilities are all zero. Resample from uniform. 
+ pxs.push_back(width_distribution(rng)); + pys.push_back(height_distribution(rng)); + } + } + + // Now overwrite the current batch of particles with the new ones + for (unsigned int i = 0; i < nparticles; i++) + { + particles_x[i] = pxs[i]; + particles_y[i] = pys[i]; + } +} + +__global__ void kernel_normalize_weights_reduction(unsigned int nparticles, float *dev_weights, float *intermediate) +{ + // Dynamically-sized shared memory buffer for the reduction (this should be no smaller than blockDim.x) + extern __shared__ float tmp[]; + + unsigned int index = blockDim.x * blockIdx.x + threadIdx.x; + + // load all weights in this block into temp array + if (index < nparticles) + { + tmp[threadIdx.x] = dev_weights[index]; + } + __syncthreads(); + + // Now do a binary sum tree to reduce to a single accumulated total weight + for (unsigned int stride = 1; stride < nparticles; stride *= 2) + { + if ((index < nparticles) && (threadIdx.x >= stride)) + { + tmp[threadIdx.x] += tmp[threadIdx.x - stride]; + } + __syncthreads(); + } + + // Each block now needs to add its total to its index in intermediate + // So determine which thread should do this, since we only need one + // item from each block + bool lastusefulthread; + if (blockIdx.x == (gridDim.x - 1)) + { + // If my block index is that of the final block, then I am + // the thread responsible for the last useful item if + // my index is that of the final particle + lastusefulthread = (index == (nparticles - 1)); + } + else + { + // If my block is not the final one, then I am + // the thread responsible for the last useful item if + // my index is that of the final item in this block + lastusefulthread = (threadIdx.x == (blockDim.x - 1)); + } + + if (lastusefulthread) + { + intermediate[blockIdx.x] = tmp[threadIdx.x]; + } +} + +__global__ void kernel_normalize_weights_complete(unsigned int nparticles, float *dev_weights, float summed_weights) +{ + int index = blockIdx.x * blockDim.x + threadIdx.x; + + // Divide all weights by sum in parallel + if ((index < nparticles) && summed_weights > 0.0f) + { + dev_weights[index] /= summed_weights; + } +} + +__device__ void kernel_sequential_merge(float *tmpbuf_weights, float *weights_a, float *weights_b, + int *tmpbuf_x, int *x_a, int *x_b, + int *tmpbuf_y, int *y_a, int *y_b, + unsigned int len_arr_a, unsigned int len_arr_b) +{ + // Sorts backwards (largest first) + + unsigned int i = 0; + unsigned int j = 0; + while ((i < len_arr_a) && (j < len_arr_b)) + { + float wa = weights_a[i]; + float wb = weights_b[j]; + if (wa > wb) + { + tmpbuf_weights[i + j] = weights_a[i]; + tmpbuf_x[i + j] = x_a[i]; + tmpbuf_y[i + j] = y_a[i]; + i++; + } + else + { + tmpbuf_weights[i + j] = weights_b[j]; + tmpbuf_x[i + j] = x_b[j]; + tmpbuf_y[i + j] = y_b[j]; + j++; + } + } + + // Now add the rest from whichever array is not done + if (j < len_arr_b) + { + memcpy(&tmpbuf_weights[i + j], &weights_b[j], sizeof(unsigned int) * (len_arr_b - j)); + memcpy(&tmpbuf_x[i + j], &x_b[j], sizeof(int) * (len_arr_b - j)); + memcpy(&tmpbuf_y[i + j], &y_b[j], sizeof(int) * (len_arr_b - j)); + } + else if (i < len_arr_a) + { + memcpy(&tmpbuf_weights[i + j], &weights_a[i], sizeof(unsigned int) * (len_arr_a - i)); + memcpy(&tmpbuf_x[i + j], &x_a[i], sizeof(int) * (len_arr_a - i)); + memcpy(&tmpbuf_y[i + j], &y_b[j], sizeof(int) * (len_arr_b - j)); + } +} + +// The most naive parallel merge sort possible - quite possibly worse than sequential // TODO do better +__global__ void kernel_sort_particles(unsigned int nparticles, int *particles_x, int 
*particles_y, float *particles_weights, + int *tmpbuf_x, int *tmpbuf_y, float *tmpbuf_weights) +{ + int index = blockIdx.x * blockDim.x + threadIdx.x; + + if (index < nparticles) + { + // Every other thread merges their input with their neighbor + // Binary reduction merge + for (unsigned int stride = 1; stride < nparticles; stride *= 2) + { + if (index >= stride) + { + // The first (stride / 2) elements are sorted and the second (stride / 2) elements are sorted + // The second half though may be less than stride / 2, if we are at the end of the reduction. + unsigned int len_arr_a = ceil(stride / 2.0f); + unsigned int len_arr_b = MIN(ceil(stride / 2.0f), nparticles - len_arr_a); + unsigned int start_a = index - stride; + unsigned int start_b = start_a + len_arr_a; + + // Merge + float *weights_a = &particles_weights[start_a]; + float *weights_b = &particles_weights[start_b]; + int *x_a = &particles_x[start_a]; + int *x_b = &particles_x[start_b]; + int *y_a = &particles_y[start_a]; + int *y_b = &particles_y[start_b]; + // Since each thread is writing to the same global array, we need to make sure they are only + // writing to their appropriate subsection. + // The start of each thread's output array should be given by the following formula. + unsigned int tmpbuf_start = (index - 1) * (2 * stride); + kernel_sequential_merge(&tmpbuf_weights[tmpbuf_start], weights_a, weights_b, &tmpbuf_x[tmpbuf_start], x_a, x_b, &tmpbuf_y[tmpbuf_start], y_a, y_b, len_arr_a, len_arr_b); + } + // Since we are doing a reduction, we need to make sure each thread is done before moving on. + __syncthreads(); + } + } +} + +int device_resample_and_move(int estimated_vx, int estimated_vy, unsigned int nparticles, int *particles_x, int *particles_y, float *particles_weights, std::mt19937 &rng, unsigned int *indices, int nthreads_per_block) +{ + #if 1 + cudaError_t err; + int *dev_particles_x = nullptr; + int *dev_particles_y = nullptr; + float *dev_weights = nullptr; + unsigned int *dev_indices = nullptr; + float *dev_sum_tmp = nullptr; // The temporary results from each block during sum + float *dev_sort_weights_tmp = nullptr; + int *dev_sort_x_tmp = nullptr; + int *dev_sort_y_tmp = nullptr; + float *sum_tmp = nullptr; + float summed_weights = 0.0; + int nblocks = ceil(nparticles / (float)nthreads_per_block); + + #define CHECK_CUDA_ERR(err) do { if (err != cudaSuccess) { err = (cudaError_t)__LINE__; goto fail; }} while (0) + + /* Allocate everything we need */ + err = cudaMalloc(&dev_particles_x, nparticles * sizeof(int)); + CHECK_CUDA_ERR(err); + + err = cudaMalloc(&dev_particles_y, nparticles * sizeof(int)); + CHECK_CUDA_ERR(err); + + err = cudaMalloc(&dev_weights, nparticles * sizeof(float)); + CHECK_CUDA_ERR(err); + + err = cudaMalloc(&dev_indices, nparticles * sizeof(unsigned int)); + CHECK_CUDA_ERR(err); + + err = cudaMalloc(&dev_sum_tmp, nblocks * sizeof(float)); + CHECK_CUDA_ERR(err); + + err = cudaMalloc(&dev_sort_weights_tmp, nparticles * sizeof(float)); + CHECK_CUDA_ERR(err); + + err = cudaMalloc(&dev_sort_x_tmp, nparticles * sizeof(int)); + CHECK_CUDA_ERR(err); + + err = cudaMalloc(&dev_sort_y_tmp, nparticles * sizeof(int)); + CHECK_CUDA_ERR(err); + + /* Copy everything to the device */ + err = cudaMemcpy(dev_particles_x, particles_x, nparticles * sizeof(int), cudaMemcpyHostToDevice); + CHECK_CUDA_ERR(err); + + err = cudaMemcpy(dev_particles_y, particles_y, nparticles * sizeof(int), cudaMemcpyHostToDevice); + CHECK_CUDA_ERR(err); + + err = cudaMemcpy(dev_weights, particles_weights, nparticles * 
sizeof(float), cudaMemcpyHostToDevice); + CHECK_CUDA_ERR(err); + + err = cudaMemcpy(dev_indices, indices, nparticles * sizeof(unsigned int), cudaMemcpyHostToDevice); + CHECK_CUDA_ERR(err); + + /* Launch kernels */ + kernel_normalize_weights_reduction<<>>(nparticles, dev_weights, dev_sum_tmp); + err = cudaDeviceSynchronize(); + CHECK_CUDA_ERR(err); + + // Sequential sum of the intermediate results in dev_sum_tmp + sum_tmp = (float *)malloc(nblocks * sizeof(float)); + err = cudaMemcpy(sum_tmp, dev_sum_tmp, nblocks * sizeof(float), cudaMemcpyDeviceToHost); + CHECK_CUDA_ERR(err); + for (unsigned int i = 0; i < nblocks; i++) + { + summed_weights += sum_tmp[i]; + } + free(sum_tmp); + sum_tmp = nullptr; + + kernel_normalize_weights_complete<<>>(nparticles, dev_weights, summed_weights); + err = cudaDeviceSynchronize(); + CHECK_CUDA_ERR(err); + + kernel_sort_particles<<>>(nparticles, dev_particles_x, dev_particles_y, dev_weights, dev_sort_x_tmp, dev_sort_y_tmp, dev_sort_weights_tmp); + err = cudaDeviceSynchronize(); + CHECK_CUDA_ERR(err); + + free(dev_sort_y_tmp); + free(dev_sort_x_tmp); + free(dev_sort_weights_tmp); + dev_sort_y_tmp = nullptr; + dev_sort_x_tmp = nullptr; + dev_sort_weights_tmp = nullptr; + + //kernel_resample_particles + //kernel_reset_all_weights + //kernel_move_particles + + /* Transfer results back to host */ + err = cudaMemcpy(particles_x, dev_particles_x, nparticles * sizeof(int), cudaMemcpyDeviceToHost); + CHECK_CUDA_ERR(err); + + err = cudaMemcpy(particles_y, dev_particles_y, nparticles * sizeof(int), cudaMemcpyDeviceToHost); + CHECK_CUDA_ERR(err); + + err = cudaMemcpy(particles_weights, dev_weights, nparticles * sizeof(float), cudaMemcpyDeviceToHost); + CHECK_CUDA_ERR(err); + + err = cudaMemcpy(indices, dev_indices, nparticles * sizeof(unsigned int), cudaMemcpyDeviceToHost); + CHECK_CUDA_ERR(err); + + //&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&// + //Remove the logic here as you convert it to CUDA + //&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&// + complete_resample_and_move_step(nparticles, particles_weights, rng, indices, particles_x, particles_y, estimated_vx, estimated_vy); + //&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&// + // End + //&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&// + + /* Free up memory */ + err = cudaFree(dev_particles_x); + dev_particles_x = nullptr; + CHECK_CUDA_ERR(err); + + err = cudaFree(dev_particles_y); + dev_particles_y = nullptr; + CHECK_CUDA_ERR(err); + + err = cudaFree(dev_weights); + dev_weights = nullptr; + CHECK_CUDA_ERR(err); + + err = cudaFree(dev_indices); + dev_indices = nullptr; + CHECK_CUDA_ERR(err); + + err = cudaFree(dev_sum_tmp); + dev_sum_tmp = nullptr; + CHECK_CUDA_ERR(err); + + #undef CHECK_CUDA_ERR + +fail: + if (err != cudaSuccess) + { + std::cout << "Error at line " << err << std::endl; + assert(false); + } + return err; +#else + // Resample from weights + resample_particles(nparticles, particles_weights, rng, indices, particles_x, particles_y); + + // Reset all weights + for (unsigned int i = 0; i < nparticles; i++) + { + particles_weights[i] = 0.0; + } + + // Move all particles according to our movement model (plus Gaussian noise) + // Also update weights based on how likely the movements were + move_particles(estimated_vx, estimated_vy, nparticles, particles_x, particles_y, particles_weights, rng); + + return 0; +#endif +} diff --git a/cuda_code/device_atomics_test.cu b/cuda_code/device_atomics_test.cu new file mode 100644 index 
0000000000000000000000000000000000000000..f8c62a08c209ecbd7ef4003c5a65e38be6604dc4 --- /dev/null +++ b/cuda_code/device_atomics_test.cu @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include + +template +__global__ void gpu_atomic_test(T* result, T* data, size_t size) +{ + size_t id = blockIdx.x * blockDim.x + threadIdx.x; + size_t step = blockDim.x * gridDim.x; + + for (; id < size; id += step) { + atomicAdd(&result[0], data[id]); + atomicMin(&result[1], data[id]); + atomicMax(&result[2], data[id]); + cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{}); + cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceMin{}); + cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceMax{}); + } +} + +template +constexpr inline bool is_timestamp_sum() +{ + return cudf::is_timestamp() && std::is_same_v; +} +// Disable SUM of TIMESTAMP types +template ()>* = nullptr> +__device__ T atomic_op(T* addr, T const& value, BinaryOp op) +{ + return {}; +} + +template ()>* = nullptr> +__device__ T atomic_op(T* addr, T const& value, BinaryOp op) +{ + T old_value = *addr; + T assumed; + + do { + assumed = old_value; + T new_value = op(old_value, value); + + old_value = atomicCAS(addr, assumed, new_value); + } while (assumed != old_value); + + return old_value; +} + +template +__global__ void gpu_atomicCAS_test(T* result, T* data, size_t size) +{ + size_t id = blockIdx.x * blockDim.x + threadIdx.x; + size_t step = blockDim.x * gridDim.x; + + for (; id < size; id += step) { + atomic_op(&result[0], data[id], cudf::DeviceSum{}); + atomic_op(&result[1], data[id], cudf::DeviceMin{}); + atomic_op(&result[2], data[id], cudf::DeviceMax{}); + atomic_op(&result[3], data[id], cudf::DeviceSum{}); + atomic_op(&result[4], data[id], cudf::DeviceMin{}); + atomic_op(&result[5], data[id], cudf::DeviceMax{}); + } +} + +template +typename std::enable_if_t(), T> accumulate(cudf::host_span xs) +{ + return std::accumulate(xs.begin(), xs.end(), T{0}); +} + +template +typename std::enable_if_t(), T> accumulate(cudf::host_span xs) +{ + auto ys = std::vector(xs.size()); + std::transform( + xs.begin(), xs.end(), ys.begin(), [](T const& ts) { return ts.time_since_epoch().count(); }); + return T{typename T::duration{std::accumulate(ys.begin(), ys.end(), 0)}}; +} + +template +struct AtomicsTest : public cudf::test::BaseFixture { + void atomic_test(std::vector const& v_input, + bool is_cas_test, + int block_size = 0, + int grid_size = 1) + { + size_t vec_size = v_input.size(); + + // use transform from thrust::host_vector instead. 
+ thrust::host_vector v(vec_size); + std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { + T t = cudf::test::make_type_param_scalar(x); + return t; + }); + + T exact[3]; + exact[0] = accumulate(v); + exact[1] = *(std::min_element(v.begin(), v.end())); + exact[2] = *(std::max_element(v.begin(), v.end())); + + thrust::host_vector result_init(9); // +3 padding for int8 tests + result_init[0] = cudf::test::make_type_param_scalar(0); + result_init[1] = std::numeric_limits::max(); + result_init[2] = std::numeric_limits::min(); + result_init[3] = result_init[0]; + result_init[4] = result_init[1]; + result_init[5] = result_init[2]; + + auto dev_data = cudf::detail::make_device_uvector_sync(v); + auto dev_result = cudf::detail::make_device_uvector_sync(result_init); + + if (block_size == 0) { block_size = vec_size; } + + if (is_cas_test) { + gpu_atomicCAS_test<<>>(dev_result.data(), dev_data.data(), vec_size); + } else { + gpu_atomic_test<<>>(dev_result.data(), dev_data.data(), vec_size); + } + + auto host_result = cudf::detail::make_host_vector_sync(dev_result); + + CHECK_CUDA(rmm::cuda_stream_default.value()); + + if (!is_timestamp_sum()) { + EXPECT_EQ(host_result[0], exact[0]) << "atomicAdd test failed"; + } + EXPECT_EQ(host_result[1], exact[1]) << "atomicMin test failed"; + EXPECT_EQ(host_result[2], exact[2]) << "atomicMax test failed"; + if (!is_timestamp_sum()) { + EXPECT_EQ(host_result[3], exact[0]) << "atomicAdd test(2) failed"; + } + EXPECT_EQ(host_result[4], exact[1]) << "atomicMin test(2) failed"; + EXPECT_EQ(host_result[5], exact[2]) << "atomicMax test(2) failed"; + } +}; + +TYPED_TEST_CASE(AtomicsTest, cudf::test::FixedWidthTypesWithoutFixedPoint); + +// tests for atomicAdd/Min/Max +TYPED_TEST(AtomicsTest, atomicOps) +{ + bool is_cas_test = false; + std::vector input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); + this->atomic_test(input_array, is_cas_test); + + std::vector input_array2({6, -6, 13, 62, -11, -20, 33}); + this->atomic_test(input_array2, is_cas_test); +} + +// tests for atomicCAS +TYPED_TEST(AtomicsTest, atomicCAS) +{ + bool is_cas_test = true; + std::vector input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); + this->atomic_test(input_array, is_cas_test); + + std::vector input_array2({6, -6, 13, 62, -11, -20, 33}); + this->atomic_test(input_array2, is_cas_test); +} + +// tests for atomicAdd/Min/Max +TYPED_TEST(AtomicsTest, atomicOpsGrid) +{ + bool is_cas_test = false; + int block_size = 3; + int grid_size = 4; + + std::vector input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); + this->atomic_test(input_array, is_cas_test, block_size, grid_size); + + std::vector input_array2({6, -6, 13, 62, -11, -20, 33}); + this->atomic_test(input_array2, is_cas_test, block_size, grid_size); +} + +// tests for atomicCAS +TYPED_TEST(AtomicsTest, atomicCASGrid) +{ + bool is_cas_test = true; + int block_size = 3; + int grid_size = 4; + + std::vector input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); + this->atomic_test(input_array, is_cas_test, block_size, grid_size); + + std::vector input_array2({6, -6, 13, 62, -11, -20, 33}); + this->atomic_test(input_array2, is_cas_test, block_size, grid_size); +} + +// tests for large array +TYPED_TEST(AtomicsTest, atomicOpsRandom) +{ + bool is_cas_test = false; + int block_size = 256; + int grid_size = 64; + + std::vector input_array(grid_size * block_size); + + std::default_random_engine engine; + std::uniform_int_distribution<> dist(-10, 10); + std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); }); + + 
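    /* Editorial note: grid_size * block_size threads are launched and the input array has
       exactly grid_size * block_size elements, so the grid-stride loop inside the kernel
       executes one iteration per thread. */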
this->atomic_test(input_array, is_cas_test, block_size, grid_size); +} + +TYPED_TEST(AtomicsTest, atomicCASRandom) +{ + bool is_cas_test = true; + int block_size = 256; + int grid_size = 64; + + std::vector input_array(grid_size * block_size); + + std::default_random_engine engine; + std::uniform_int_distribution<> dist(-10, 10); + std::generate(input_array.begin(), input_array.end(), [&]() { return dist(engine); }); + + this->atomic_test(input_array, is_cas_test, block_size, grid_size); +} + +template +__global__ void gpu_atomic_bitwiseOp_test(T* result, T* data, size_t size) +{ + size_t id = blockIdx.x * blockDim.x + threadIdx.x; + size_t step = blockDim.x * gridDim.x; + + for (; id < size; id += step) { + atomicAnd(&result[0], data[id]); + atomicOr(&result[1], data[id]); + atomicXor(&result[2], data[id]); + cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{}); + cudf::genericAtomicOperation(&result[4], data[id], cudf::DeviceOr{}); + cudf::genericAtomicOperation(&result[5], data[id], cudf::DeviceXor{}); + } +} + +template +struct AtomicsBitwiseOpTest : public cudf::test::BaseFixture { + void atomic_test(std::vector const& v_input, int block_size = 0, int grid_size = 1) + { + size_t vec_size = v_input.size(); + std::vector v(vec_size); + std::transform(v_input.begin(), v_input.end(), v.begin(), [](int x) { + T t(x); + return t; + }); + + thrust::host_vector identity(9, T{0}); // +3 elements padding for int8 tests + identity[0] = T(~0ull); + identity[3] = T(~0ull); + + T exact[3]; + exact[0] = std::accumulate( + v.begin(), v.end(), identity[0], [](T acc, uint64_t i) { return acc & T(i); }); + exact[1] = std::accumulate( + v.begin(), v.end(), identity[1], [](T acc, uint64_t i) { return acc | T(i); }); + exact[2] = std::accumulate( + v.begin(), v.end(), identity[2], [](T acc, uint64_t i) { return acc ^ T(i); }); + + auto dev_result = cudf::detail::make_device_uvector_sync(identity); + auto dev_data = cudf::detail::make_device_uvector_sync(v); + + if (block_size == 0) { block_size = vec_size; } + + gpu_atomic_bitwiseOp_test<<>>( + reinterpret_cast(dev_result.data()), reinterpret_cast(dev_data.data()), vec_size); + + auto host_result = cudf::detail::make_host_vector_sync(dev_result); + + CHECK_CUDA(rmm::cuda_stream_default.value()); + + // print_exact(exact, "exact"); + // print_exact(host_result.data(), "result"); + + EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed"; + EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed"; + EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed"; + EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed"; + EXPECT_EQ(host_result[4], exact[1]) << "atomicOr test(2) failed"; + EXPECT_EQ(host_result[5], exact[2]) << "atomicXor test(2) failed"; + } + + [[maybe_unused]] void print_exact(const T* v, const char* msg) + { + std::cout << std::hex << std::showbase; + std::cout << "The " << msg << " = {" << +v[0] << ", " << +v[1] << ", " << +v[2] << "}" + << std::endl; + } +}; + +using BitwiseOpTestingTypes = + cudf::test::Types; + +TYPED_TEST_CASE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes); + +TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps) +{ + { // test for AND, XOR + std::vector input_array( + {0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc, 0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc}); + this->atomic_test(input_array); + } + { // test for OR, XOR + std::vector input_array( + {0x01, 0xfc02, 0x1dff03, 0x1100a0b0801d0003, 0x8000000000000000, 0x1dff03}); + this->atomic_test(input_array); + } +} + +CUDF_TEST_PROGRAM_MAIN() 
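
The gpu_atomicCAS_test kernel above exercises atomic_op(), which emulates an arbitrary atomic read-modify-write with an atomicCAS retry loop. The stand-alone sketch below (our own illustration, not part of this patch; the names atomicMaxFloat and maxKernel and the test values are hypothetical) applies the same pattern to a float maximum, which CUDA does not provide as a native atomic: the float is reinterpreted as an int so atomicCAS can compare bit patterns, the new value is computed from the assumed old value, and the CAS is retried until no other thread has intervened.

#include <cstdio>
#include <cuda_runtime.h>

// CAS retry loop: keep proposing max(old, value) until no other thread
// has modified *addr between our read and our compare-and-swap.
__device__ float atomicMaxFloat(float* addr, float value) {
    int* addr_as_int = reinterpret_cast<int*>(addr);
    int old = *addr_as_int;
    int assumed;
    do {
        assumed = old;
        float proposed = fmaxf(__int_as_float(assumed), value);
        old = atomicCAS(addr_as_int, assumed, __float_as_int(proposed));
    } while (assumed != old);
    return __int_as_float(old);
}

__global__ void maxKernel(const float* data, float* result, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomicMaxFloat(result, data[i]);
}

int main() {
    const int n = 1024;
    float h_data[n];
    for (int i = 0; i < n; ++i) h_data[i] = (float)((i * 37) % 1000);  // arbitrary test data
    float h_result = -1.0e30f;                                         // identity for max

    float *d_data, *d_result;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMalloc(&d_result, sizeof(float));
    cudaMemcpy(d_data, h_data, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_result, &h_result, sizeof(float), cudaMemcpyHostToDevice);

    maxKernel<<<(n + 255) / 256, 256>>>(d_data, d_result, n);
    cudaMemcpy(&h_result, d_result, sizeof(float), cudaMemcpyDeviceToHost);

    printf("max = %f\n", h_result);  // expect 999.0 for this data
    cudaFree(d_data);
    cudaFree(d_result);
    return 0;
}

The bit-reinterpretation is what makes the loop work: atomicCAS only exists for integer types, but since it compares raw bit patterns, any 32-bit payload can be updated atomically this way, exactly as atomic_op() does for the cudf device operators above.
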
diff --git a/cuda_code/device_atomics_test_24.cu b/cuda_code/device_atomics_test_24.cu new file mode 100644 index 0000000000000000000000000000000000000000..94d437110d62fa77e1709c8f9f3c087dfdcaf5eb --- /dev/null +++ b/cuda_code/device_atomics_test_24.cu @@ -0,0 +1,315 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include + +template +__global__ +void gpu_atomic_test(T *result, T *data, size_t size) +{ + size_t id = blockIdx.x * blockDim.x + threadIdx.x; + size_t step = blockDim.x * gridDim.x; + + for (; id < size; id += step) { + atomicAdd(&result[0], data[id]); + atomicMin(&result[1], data[id]); + atomicMax(&result[2], data[id]); + cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceSum{}); + } +} + +template +__device__ +T atomic_op(T* addr, T const & value, BinaryOp op) +{ + T old_value = *addr; + T assumed; + + do { + assumed = old_value; + const T new_value = op(old_value, value); + + old_value = atomicCAS(addr, assumed, new_value); + } while (assumed != old_value); + + return old_value; +} + +template +__global__ +void gpu_atomicCAS_test(T *result, T *data, size_t size) +{ + size_t id = blockIdx.x * blockDim.x + threadIdx.x; + size_t step = blockDim.x * gridDim.x; + + for (; id < size; id += step) { + atomic_op(&result[0], data[id], cudf::DeviceSum{}); + atomic_op(&result[1], data[id], cudf::DeviceMin{}); + atomic_op(&result[2], data[id], cudf::DeviceMax{}); + atomic_op(&result[3], data[id], cudf::DeviceSum{}); + } +} + +template +__global__ +void gpu_atomic_bitwiseOp_test(T *result, T *data, size_t size) +{ + size_t id = blockIdx.x * blockDim.x + threadIdx.x; + size_t step = blockDim.x * gridDim.x; + + for (; id < size; id += step) { + atomicAnd(&result[0], data[id]); + atomicOr(&result[1], data[id]); + atomicXor(&result[2], data[id]); + cudf::genericAtomicOperation(&result[3], data[id], cudf::DeviceAnd{}); + } +} + +template +struct AtomicsTest : public GdfTest +{ + void atomic_test(std::vector const & v, bool is_cas_test, + int block_size=0, int grid_size=1) + { + int exact[3]; + exact[0] = std::accumulate(v.begin(), v.end(), 0); + exact[1] = *( std::min_element(v.begin(), v.end()) ); + exact[2] = *( std::max_element(v.begin(), v.end()) ); + size_t vec_size = v.size(); + + // std::vector v_type({6, -14, 13, 64, -13, -20, 45})); + // use transform from std::vector instead. 
+ std::vector v_type(vec_size); + std::transform(v.begin(), v.end(), v_type.begin(), + [](int x) { T t(x) ; return t; } ); + + std::vector result_init(4); + result_init[0] = T{0}; + result_init[1] = std::numeric_limits::max(); + result_init[2] = std::numeric_limits::min(); + result_init[3] = T{0}; + + thrust::device_vector dev_result(result_init); + thrust::device_vector dev_data(v_type); + + if( block_size == 0) block_size = vec_size; + + if( is_cas_test ){ + gpu_atomicCAS_test<<>>( + dev_result.data().get(), dev_data.data().get(), vec_size); + }else{ + gpu_atomic_test<<>>( + dev_result.data().get(), dev_data.data().get(), vec_size); + } + + thrust::host_vector host_result(dev_result); + cudaDeviceSynchronize(); + CUDA_CHECK_LAST(); + + EXPECT_EQ(host_result[0], T(exact[0])) << "atomicAdd test failed"; + EXPECT_EQ(host_result[1], T(exact[1])) << "atomicMin test failed"; + EXPECT_EQ(host_result[2], T(exact[2])) << "atomicMax test failed"; + EXPECT_EQ(host_result[3], T(exact[0])) << "atomicAdd test(2) failed"; + } +}; + +using TestingTypes = ::testing::Types< + int8_t, int16_t, int32_t, int64_t, float, double, + cudf::date32, cudf::date64, cudf::timestamp, cudf::category, + cudf::nvstring_category>; + +TYPED_TEST_CASE(AtomicsTest, TestingTypes); + +// tests for atomicAdd/Min/Max +TYPED_TEST(AtomicsTest, atomicOps) +{ + bool is_cas_test = false; + std::vector input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); + this->atomic_test(input_array, is_cas_test); + + std::vector input_array2({6, -6, 13, 62, -11, -20, 33}); + this->atomic_test(input_array2, is_cas_test); +} + +// tests for atomicCAS +TYPED_TEST(AtomicsTest, atomicCAS) +{ + bool is_cas_test = true; + std::vector input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); + this->atomic_test(input_array, is_cas_test); + + std::vector input_array2({6, -6, 13, 62, -11, -20, 33}); + this->atomic_test(input_array2, is_cas_test); +} + +// tests for atomicAdd/Min/Max +TYPED_TEST(AtomicsTest, atomicOpsGrid) +{ + bool is_cas_test = false; + int block_size=3; + int grid_size=4; + + std::vector input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); + this->atomic_test(input_array, is_cas_test, block_size, grid_size); + + std::vector input_array2({6, -6, 13, 62, -11, -20, 33}); + this->atomic_test(input_array2, is_cas_test, block_size, grid_size); +} + +// tests for atomicCAS +TYPED_TEST(AtomicsTest, atomicCASGrid) +{ + bool is_cas_test = true; + int block_size=3; + int grid_size=4; + + std::vector input_array({0, 6, 0, -14, 13, 64, -13, -20, 45}); + this->atomic_test(input_array, is_cas_test, block_size, grid_size); + + std::vector input_array2({6, -6, 13, 62, -11, -20, 33}); + this->atomic_test(input_array2, is_cas_test, block_size, grid_size); +} + +// tests for large array +TYPED_TEST(AtomicsTest, atomicOpsRandom) +{ + bool is_cas_test = false; + int block_size=256; + int grid_size=64; + + std::vector input_array(grid_size * block_size); + + std::default_random_engine engine; + std::uniform_int_distribution<> dist(-10, 10); + std::generate(input_array.begin(), input_array.end(), + [&](){ return dist(engine);} ); + + this->atomic_test(input_array, is_cas_test, block_size, grid_size); +} + +TYPED_TEST(AtomicsTest, atomicCASRandom) +{ + bool is_cas_test = true; + int block_size=256; + int grid_size=64; + + std::vector input_array(grid_size * block_size); + + std::default_random_engine engine; + std::uniform_int_distribution<> dist(-10, 10); + std::generate(input_array.begin(), input_array.end(), + [&](){ return dist(engine);} ); + + 
this->atomic_test(input_array, is_cas_test, block_size, grid_size); +} + +// ------------------------------------------------------------------ + +template +struct AtomicsBitwiseOpTest : public GdfTest +{ + void atomic_test(std::vector const & v, + int block_size=0, int grid_size=1) + { + std::vector identity = {T(~0ull), T(0), T(0), T(~0ull)}; + T exact[4]; + exact[0] = std::accumulate(v.begin(), v.end(), identity[0], + [](T acc, uint64_t i) { return acc & T(i); }); + exact[1] = std::accumulate(v.begin(), v.end(), identity[1], + [](T acc, uint64_t i) { return acc | T(i); }); + exact[2] = std::accumulate(v.begin(), v.end(), identity[2], + [](T acc, uint64_t i) { return acc ^ T(i); }); + exact[3] = exact[0]; + + size_t vec_size = v.size(); + + std::vector v_type(vec_size); + std::transform(v.begin(), v.end(), v_type.begin(), + [](uint64_t x) { T t(x) ; return t; } ); + + std::vector result_init(identity); + + + thrust::device_vector dev_result(result_init); + thrust::device_vector dev_data(v_type); + + if( block_size == 0) block_size = vec_size; + + gpu_atomic_bitwiseOp_test <<>> ( + reinterpret_cast( dev_result.data().get() ), + reinterpret_cast( dev_data.data().get() ), + vec_size); + + thrust::host_vector host_result(dev_result); + cudaDeviceSynchronize(); + CUDA_CHECK_LAST(); + + print_exact(exact, "exact"); + print_exact(host_result.data(), "result"); + + + EXPECT_EQ(host_result[0], exact[0]) << "atomicAnd test failed"; + EXPECT_EQ(host_result[1], exact[1]) << "atomicOr test failed"; + EXPECT_EQ(host_result[2], exact[2]) << "atomicXor test failed"; + EXPECT_EQ(host_result[3], exact[0]) << "atomicAnd test(2) failed"; + } + + void print_exact(const T *v, const char* msg){ + std::cout << std::hex << std::showbase; + std::cout << "The " << msg << " = {" + << +v[0] << ", " + << +v[1] << ", " + << +v[2] << "}" + << std::endl; + } + +}; + +using BitwiseOpTestingTypes = ::testing::Types< + int8_t, int16_t, int32_t, int64_t, + uint8_t, uint16_t, uint32_t, uint64_t + >; + +TYPED_TEST_CASE(AtomicsBitwiseOpTest, BitwiseOpTestingTypes); + +TYPED_TEST(AtomicsBitwiseOpTest, atomicBitwiseOps) +{ + { // test for AND, XOR + std::vector input_array( + {0xfcfcfcfcfcfcfc7f, 0x7f7f7f7f7f7ffc, 0xfffddffddffddfdf, 0x7f7f7f7f7f7ffc}); + this->atomic_test(input_array); + } + { // test for OR, XOR + std::vector input_array( + {0x01, 0xfc02, 0x1dff03, 0x1100a0b0801d0003, 0x8000000000000000, 0x1dff03}); + this->atomic_test(input_array); + } +} + diff --git a/cuda_code/device_initialisation.cu b/cuda_code/device_initialisation.cu new file mode 100644 index 0000000000000000000000000000000000000000..00468b2e8c0bce1f06b3e214f8b695e33f7b9cb9 --- /dev/null +++ b/cuda_code/device_initialisation.cu @@ -0,0 +1,49 @@ +#include "helpers/device_initialisation.h" +#include +#include +#include "flamegpu/flamegpu.h" + +namespace flamegpu { +namespace tests { +namespace { + // Boolean to store the result of the test, in an anonymous namespace (i.e. static) + bool _CUDASimulationContextCreationTime_result = false; +} // namespace +// Set a threshold value, which is large enough to account for context creation +// Experimentally cudaFree(0); takes ~2us (nsys) without context creation, +// while cudaFree(0) including context creation takes ~> 150ms in my linux titan v system. +// This test is a little fluffy. +const double CONTEXT_CREATION_ATLEAST_SECONDS = 0.050; // atleast 50ms? 
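// Editorial sketch (not part of the original patch): the measurement below relies on the
// fact that the first CUDA runtime call in a process lazily creates the primary context.
// The same effect can be observed in isolation with any cheap runtime call, e.g.:
//
//     auto t0 = std::chrono::high_resolution_clock::now();
//     cudaFree(0);                                  // first call: triggers context creation
//     auto t1 = std::chrono::high_resolution_clock::now();
//     double seconds = std::chrono::duration<double>(t1 - t0).count();
//
// A duration well above CONTEXT_CREATION_ATLEAST_SECONDS indicates the context was created
// inside the timed region; a few microseconds indicates it already existed.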
+ +/* Test that CUDASimulation::applyConfig_derived() is invoked prior to any cuda call which will invoke the CUDA +Alternative is to use the driver API, call CuCtxGetCurrent(CuContext* pctx) immediatebly before applyConfig, and if pctx is the nullptr then the context had not yet been initialised? +@note - This needs to be called first, and only once. +*/ +void timeCUDASimulationContextCreationTest() { + // Create a very simple model to enable creation of a CUDASimulation + ModelDescription m("model"); + m.newAgent("agent"); + CUDASimulation c(m); + c.CUDAConfig().device_id = 0; + c.SimulationConfig().steps = 1; + // Time how long applyconfig takes, which should invoke cudaFree as the first cuda command, initialising the context. + auto t0 = std::chrono::high_resolution_clock::now(); + c.applyConfig(); + auto t1 = std::chrono::high_resolution_clock::now(); + auto time_span = std::chrono::duration_cast>(t1 - t0); + // The test fails if applyconfig was too fast. + _CUDASimulationContextCreationTime_result = time_span.count() >= CONTEXT_CREATION_ATLEAST_SECONDS; + // Run the simulation. + c.simulate(); +} + +/** + * Return the value stored in the anonymous namespace. + * @note - there is no way to know if the test has not yet been ran, instead it reports false. + */ +bool getCUDASimulationContextCreationTestResult() { + return _CUDASimulationContextCreationTime_result; +} + +} // namespace tests +} // namespace flamegpu diff --git a/cuda_code/dgeaxpy_1.cu b/cuda_code/dgeaxpy_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..c2ef73c2075359ab6512f99a7c13a846eb4ef7a7 --- /dev/null +++ b/cuda_code/dgeaxpy_1.cu @@ -0,0 +1,102 @@ +/* + -- MAGMA (version 2.5.4) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date October 2020 + + @generated from sparse/blas/zgeaxpy.cu, normal z -> d, Thu Oct 8 23:05:46 2020 + +*/ +#include "magmasparse_internal.h" + +#define BLOCK_SIZE 256 + + +// axpy kernel for matrices stored in the MAGMA format +__global__ void +dgeaxpy_kernel( + int num_rows, + int num_cols, + double alpha, + double * dx, + double beta, + double * dy) +{ + int row = blockIdx.x*blockDim.x+threadIdx.x; + int j; + + if( rowstorage_type == Magma_DENSE ){ + + dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); + magma_int_t threads = BLOCK_SIZE; + dgeaxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>> + ( m, n, alpha, X.dval, beta, Y->dval ); + + } else if( X.storage_type == Magma_CSR && Y->storage_type == Magma_CSR ) { + + magma_dcuspaxpy( &alpha, X, &beta, *Y, &C, queue ); + magma_dmfree( Y, queue ); + magma_dmtransfer( C, Y, Magma_DEV, Magma_DEV, queue ); + magma_dmfree( &C, queue ); + } else { + printf("%% error: matrix addition only supported for DENSE and CSR format.\n"); + } + + return MAGMA_SUCCESS; +} diff --git a/cuda_code/dgecscsyncfreetrsm.cu b/cuda_code/dgecscsyncfreetrsm.cu new file mode 100644 index 0000000000000000000000000000000000000000..46da0375fdd3acefc75b3388a80e66d10534289a --- /dev/null +++ b/cuda_code/dgecscsyncfreetrsm.cu @@ -0,0 +1,242 @@ +/* + -- MAGMA (version 2.5.4) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date October 2020 + + @generated from sparse/blas/zgecscsyncfreetrsm.cu, normal z -> d, Thu Oct 8 23:05:50 2020 + @author Weifeng Liu + +*/ + +// CSC Sync-Free SpTRSM kernel +// see paper by W. Liu, A. Li, J. D. Hogg, I. S. Duff, and B. Vinter. (2016). +// "A Synchronization-Free Algorithm for Parallel Sparse Triangular Solves". 
+// 22nd International European Conference on Parallel and Distributed Computing +// (Euro-Par '16). pp. 617-630. + +#include "magmasparse_internal.h" +#include "atomicopsdouble.h" + +#include // for CUDA_VERSION + +#define MAGMA_CSC_SYNCFREE_WARP_SIZE 32 + +#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD 0 +#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_BACKWARD 1 + +#define MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ 1 +#define MAGMA_CSC_SYNCFREE_OPT_WARP_RHS 2 +#define MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO 3 + +__global__ +void sptrsv_syncfree_analyser(magmaIndex_ptr d_cscRowIdx, + magmaDouble_ptr d_cscVal, + magma_int_t m, + magma_int_t nnz, + magmaIndex_ptr d_graphInDegree) +{ + const int global_id = blockIdx.x * blockDim.x + threadIdx.x; + if (global_id < nnz) + { + atomicAdd(&d_graphInDegree[d_cscRowIdx[global_id]], 1); + } +} + +__global__ +void sptrsm_syncfree_executor(magmaIndex_ptr d_cscColPtr, + magmaIndex_ptr d_cscRowIdx, + magmaDouble_ptr d_cscVal, + magmaIndex_ptr d_graphInDegree, + magma_int_t m, + magma_int_t substitution, + magma_int_t rhs, + magma_int_t opt, + magmaDouble_ptr d_b, + magmaDouble_ptr d_x) +{ + const int global_id = blockIdx.x * blockDim.x + threadIdx.x; + int global_x_id = global_id / MAGMA_CSC_SYNCFREE_WARP_SIZE; + if (global_x_id >= m) return; + + // substitution is forward or backward + global_x_id = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ? + global_x_id : m - 1 - global_x_id; + + // Initialize + const int lane_id = (MAGMA_CSC_SYNCFREE_WARP_SIZE - 1) & threadIdx.x; + + // Prefetch + const int pos = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ? + d_cscColPtr[global_x_id] : d_cscColPtr[global_x_id+1]-1; + const double one = MAGMA_D_MAKE( 1.0, 0.0); + const double coef = one / d_cscVal[pos]; + + /* + // clock_t start; + // Consumer + do { + start = clock(); + } + while (1 != d_graphInDegree[global_x_id]); + + // Consumer + int graphInDegree; + do { + //bypass Tex cache and avoid other mem optimization by nvcc/ptxas + asm("ld.global.u32 %0, [%1];" : "=r"(graphInDegree),"=r"(d_graphInDegree[global_x_id]) :: "memory"); + } + while (1 != graphInDegree ); + */ + + for (int k = lane_id; k < rhs; k += MAGMA_CSC_SYNCFREE_WARP_SIZE) + { + const int pos = global_x_id * rhs + k; + d_x[pos] = (d_b[pos] - d_x[pos]) * coef; + } + + // Producer + const magma_index_t start_ptr = + substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ? + d_cscColPtr[global_x_id]+1 : d_cscColPtr[global_x_id]; + const magma_index_t stop_ptr = + substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ? + d_cscColPtr[global_x_id+1] : d_cscColPtr[global_x_id+1]-1; + + if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ) + { + for (magma_index_t jj = start_ptr + lane_id; + jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE) + { + const magma_index_t j = + substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ? + jj : stop_ptr - 1 - (jj - start_ptr); + const magma_index_t rowIdx = d_cscRowIdx[j]; + for (magma_index_t k = 0; k < rhs; k++) + atomicAdddouble(&d_x[rowIdx * rhs + k], + d_x[global_x_id * rhs + k] * d_cscVal[j]); + __threadfence(); + atomicSub(&d_graphInDegree[rowIdx], 1); + } + } + else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_RHS) + { + for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++) + { + const magma_index_t j = + substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ? 
+ jj : stop_ptr - 1 - (jj - start_ptr); + const magma_index_t rowIdx = d_cscRowIdx[j]; + for (magma_index_t k = lane_id; + k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE) + atomicAdddouble(&d_x[rowIdx * rhs + k], + d_x[global_x_id * rhs + k] * d_cscVal[j]); + __threadfence(); + if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1); + } + } + else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO) + { + const magma_index_t len = stop_ptr - start_ptr; + + if ((len <= rhs || rhs > 8) && len < 2048) + { + for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++) + { + const magma_index_t j = + substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ? + jj : stop_ptr - 1 - (jj - start_ptr); + const magma_index_t rowIdx = d_cscRowIdx[j]; + for (magma_index_t k = lane_id; + k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE) + atomicAdddouble(&d_x[rowIdx * rhs + k], + d_x[global_x_id * rhs + k] * d_cscVal[j]); + __threadfence(); + if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1); + } + } + else + { + for (magma_index_t jj = start_ptr + lane_id; + jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE) + { + const magma_index_t j = + substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ? + jj : stop_ptr - 1 - (jj - start_ptr); + const magma_index_t rowIdx = d_cscRowIdx[j]; + for (magma_index_t k = 0; k < rhs; k++) + atomicAdddouble(&d_x[rowIdx * rhs + k], + d_x[global_x_id * rhs + k] * d_cscVal[j]); + __threadfence(); + atomicSub(&d_graphInDegree[rowIdx], 1); + } + } + } +} + + +extern "C" magma_int_t +magma_dgecscsyncfreetrsm_analysis( + magma_int_t m, + magma_int_t nnz, + magmaDouble_ptr dval, + magmaIndex_ptr dcolptr, + magmaIndex_ptr drowind, + magmaIndex_ptr dgraphindegree, + magmaIndex_ptr dgraphindegree_bak, + magma_queue_t queue ) +{ + int info = MAGMA_SUCCESS; + + int num_threads = 128; + int num_blocks = ceil ((double)nnz / (double)num_threads); + cudaMemset(dgraphindegree, 0, m * sizeof(magma_index_t)); + sptrsv_syncfree_analyser<<< num_blocks, num_threads >>> + (drowind, dval, m, nnz, dgraphindegree); + + // backup in-degree array + cudaMemcpy(dgraphindegree_bak, dgraphindegree, + m * sizeof(int), cudaMemcpyDeviceToDevice); + return info; +} + +extern "C" magma_int_t +magma_dgecscsyncfreetrsm_solve( + magma_int_t m, + magma_int_t nnz, + magmaDouble_ptr dval, + magmaIndex_ptr dcolptr, + magmaIndex_ptr drowind, + magmaIndex_ptr dgraphindegree, + magmaIndex_ptr dgraphindegree_bak, + magmaDouble_ptr dx, + magmaDouble_ptr db, + magma_int_t substitution, + magma_int_t rhs, + magma_queue_t queue ) +{ + int info = MAGMA_SUCCESS; + + // get an unmodified in-degree array, only for benchmarking use + cudaMemcpy(dgraphindegree, dgraphindegree_bak, + m * sizeof(magma_index_t), cudaMemcpyDeviceToDevice); + + // clear d_x for atomic operations + cudaMemset(dx, 0, sizeof(double) * m * rhs); + + int num_threads, num_blocks; + + num_threads = 4 * MAGMA_CSC_SYNCFREE_WARP_SIZE; + num_blocks = ceil ((double)m / + (double)(num_threads/MAGMA_CSC_SYNCFREE_WARP_SIZE)); + sptrsm_syncfree_executor<<< num_blocks, num_threads >>> + (dcolptr, drowind, dval, dgraphindegree, + m, substitution, rhs, MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO, + db, dx); + + return info; +} + + + diff --git a/cuda_code/dgerbt_kernels_3.cu b/cuda_code/dgerbt_kernels_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..4429060fde871ffa1c7e8b8dc21044637e5d3bef --- /dev/null +++ b/cuda_code/dgerbt_kernels_3.cu @@ -0,0 +1,183 @@ +/* + -- MAGMA (version 2.1.0) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. 
of Colorado, Denver + @date August 2016 + + @generated from magmablas/zgerbt_kernels.cu, normal z -> d, Tue Aug 30 09:38:28 2016 + + + @author Adrien REMY +*/ +#include "magma_internal.h" +#include "dgerbt.h" + +#define block_height 32 +#define block_width 4 +#define block_length 256 +#define NB 64 + + +/******************************************************************************/ +static __device__ void +magmablas_delementary_multiplication_devfunc( + magma_int_t n, + double *dA, magma_int_t ldda, + double *du, + double *dv) +{ + magma_int_t idx, idy; + + idx = blockIdx.x * blockDim.x + threadIdx.x; + idy = blockIdx.y * blockDim.y + threadIdx.y; + + if ((idx < n/2) && (idy < n/2)) { + dA += idx + idy * ldda; + + double a00, a10, a01, a11, b1, b2, b3, b4; + __shared__ double u1[block_height], u2[block_height], v1[block_width], v2[block_width]; + + du += idx; + dv += idy; + + u1[threadIdx.x] = du[0]; + u2[threadIdx.x] = du[n/2]; + v1[threadIdx.y] = dv[0]; + v2[threadIdx.y] = dv[n/2]; + + __syncthreads(); + + a00 = dA[0]; + a01 = dA[ldda*n/2]; + a10 = dA[n/2]; + a11 = dA[ldda*n/2+n/2]; + + b1 = a00 + a01; + b2 = a10 + a11; + b3 = a00 - a01; + b4 = a10 - a11; + + dA[0] = u1[threadIdx.x] * v1[threadIdx.y] * (b1 + b2); + dA[ldda*n/2] = u1[threadIdx.x] * v2[threadIdx.y] * (b3 + b4); + dA[n/2] = u2[threadIdx.x] * v1[threadIdx.y] * (b1 - b2); + dA[ldda*n/2+n/2] = u2[threadIdx.x] * v2[threadIdx.y] *(b3 - b4); + } +} + + +/******************************************************************************/ +__global__ void +magmablas_delementary_multiplication_kernel( + magma_int_t n, + double *dA, magma_int_t offsetA, magma_int_t ldda, + double *du, magma_int_t offsetu, + double *dv, magma_int_t offsetv) +{ + magmablas_delementary_multiplication_devfunc( n, dA+offsetA, ldda, du+offsetu, dv+offsetv ); +} + + +/******************************************************************************/ +__global__ void +magmablas_delementary_multiplication_kernel_batched( + magma_int_t n, + double **dA_array, magma_int_t offsetA, magma_int_t ldda, + double *du, magma_int_t offsetu, + double *dv, magma_int_t offsetv) +{ + int batchid = blockIdx.z; + magmablas_delementary_multiplication_devfunc( n, dA_array[batchid]+offsetA, ldda, du+offsetu, dv+offsetv ); +} + + +/******************************************************************************/ +static __device__ void +magmablas_dapply_vector_devfunc( + magma_int_t n, + double *du, double *db) +{ + magma_int_t idx; + + idx = blockIdx.x * blockDim.x + threadIdx.x; + + if (idx < n/2) { + du += idx; + db += idx; + + double a1,a2; + + a1 = du[0]*db[0]; + a2 = du[n/2]*db[n/2]; + + db[0] = a1 + a2; + db[n/2] = a1 -a2; + } +} + + +/******************************************************************************/ +__global__ void +magmablas_dapply_vector_kernel( + magma_int_t n, + double *du, magma_int_t offsetu, double *db, magma_int_t offsetb ) +{ + magmablas_dapply_vector_devfunc(n, du+offsetu, db+offsetb); +} + + +/******************************************************************************/ +__global__ void +magmablas_dapply_vector_kernel_batched( + magma_int_t n, + double *du, magma_int_t offsetu, double **db_array, magma_int_t offsetb ) +{ + int batchid = blockIdx.y; + magmablas_dapply_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb); +} + + +/******************************************************************************/ +static __device__ void +magmablas_dapply_transpose_vector_devfunc( + magma_int_t n, + double *du,double *db ) +{ + magma_int_t idx; + + idx = 
blockIdx.x * blockDim.x + threadIdx.x; + + if (idx < n/2) { + du += idx; + db += idx; + + double a1,a2; + + a1 = db[0] + db[n/2]; + a2 = db[0] - db[n/2]; + + db[0] = du[0]*a1; + db[n/2] = du[n/2]*a2; + } +} + + +/******************************************************************************/ +__global__ void +magmablas_dapply_transpose_vector_kernel( + magma_int_t n, + double *du, magma_int_t offsetu, double *db, magma_int_t offsetb ) +{ + magmablas_dapply_transpose_vector_devfunc(n, du+offsetu, db+offsetb); +} + + +/******************************************************************************/ +__global__ void +magmablas_dapply_transpose_vector_kernel_batched( + magma_int_t n, + double *du, magma_int_t offsetu, double **db_array, magma_int_t offsetb ) +{ + int batchid = blockIdx.y; + magmablas_dapply_transpose_vector_devfunc(n, du+offsetu, db_array[batchid]+offsetb); +} diff --git a/cuda_code/dihedral_force_impl_5.cu b/cuda_code/dihedral_force_impl_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..959bca09d23004d47366a494eea0ef2f92255ff2 --- /dev/null +++ b/cuda_code/dihedral_force_impl_5.cu @@ -0,0 +1,119 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh" +#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh" +#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh" + +__global__ void DihedralForceKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler, + const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d, + const int *ipn, const float *pk, const float *gamc, const float *gams, + const float *pn, VECTOR *frc) { + int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x; + if (dihedral_i < dihedral_numbers) { + int atom_i = atom_a[dihedral_i]; + int atom_j = atom_b[dihedral_i]; + int atom_k = atom_c[dihedral_i]; + int atom_l = atom_d[dihedral_i]; + + int temp_ipn = ipn[dihedral_i]; + + float temp_pk = pk[dihedral_i]; + float temp_pn = pn[dihedral_i]; + float temp_gamc = gamc[dihedral_i]; + float temp_gams = gams[dihedral_i]; + + VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]); + VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]); + VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]); + + VECTOR r1 = drij ^ drkj; + VECTOR r2 = drkl ^ drkj; + + float r1_1 = rnorm3df(r1.x, r1.y, r1.z); + float r2_1 = rnorm3df(r2.x, r2.y, r2.z); + float r1_2 = r1_1 * r1_1; + float r2_2 = r2_1 * r2_1; + float r1_1_r2_1 = r1_1 * r2_1; + + float phi = r1 * r2 * r1_1_r2_1; + phi = fmaxf(-0.999999, fminf(phi, 0.999999)); + phi = acosf(phi); + + float sign = (r2 ^ r1) * drkj; + copysignf(phi, sign); + + phi = CONSTANT_Pi - phi; + + float nphi = temp_pn * phi; + + float cos_phi = cosf(phi); + float sin_phi = sinf(phi); + float cos_nphi = cosf(nphi); + 
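+    // dE/dphi of the periodic torsion term, assembled from the precomputed
+    // gamc/gams coefficients. The general expression below divides by
+    // sin(phi), so the |sin_phi| < 1e-6 branch switches to a limiting form
+    // that stays finite when the four atoms are nearly coplanar.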
float sin_nphi = sinf(nphi); + + float dE_dphi; + if (fabsf(sin_phi) < 1e-6) { + temp_ipn *= temp_ipn % 2; // (((temp_ipn - 1) & 1) ^ 1) + dE_dphi = temp_gamc * (temp_pn - temp_ipn + temp_ipn * cos_phi); + } else { + dE_dphi = temp_pn * (temp_gamc * sin_nphi - temp_gams * cos_nphi) / sin_phi; + } + + VECTOR dphi_dr1 = r1_1_r2_1 * r2 + cos_phi * r1_2 * r1; + VECTOR dphi_dr2 = r1_1_r2_1 * r1 + cos_phi * r2_2 * r2; + + VECTOR dE_dri = dE_dphi * drkj ^ dphi_dr1; + VECTOR dE_drl = dE_dphi * dphi_dr2 ^ drkj; + VECTOR dE_drj_part = dE_dphi * ((drij ^ dphi_dr1) + (drkl ^ dphi_dr2)); + + VECTOR fi = dE_dri; + VECTOR fj = dE_drj_part - dE_dri; + VECTOR fk = -dE_drl - dE_drj_part; + VECTOR fl = dE_drl; + + atomicAdd(&frc[atom_i].x, fi.x); + atomicAdd(&frc[atom_i].y, fi.y); + atomicAdd(&frc[atom_i].z, fi.z); + atomicAdd(&frc[atom_j].x, fj.x); + atomicAdd(&frc[atom_j].y, fj.y); + atomicAdd(&frc[atom_j].z, fj.z); + atomicAdd(&frc[atom_k].x, fk.x); + atomicAdd(&frc[atom_k].y, fk.y); + atomicAdd(&frc[atom_k].z, fk.z); + atomicAdd(&frc[atom_l].x, fl.x); + atomicAdd(&frc[atom_l].y, fl.y); + atomicAdd(&frc[atom_l].z, fl.z); + } +} + +void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, + const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk, + const float *gamc, const float *gams, const float *pn, float *frc_f, cudaStream_t stream) { + size_t thread_per_block = 128; + size_t block_per_grid = ceilf(static_cast(dihedral_numbers) / 128); + UNSIGNED_INT_VECTOR *uint_crd = + const_cast(reinterpret_cast(uint_crd_f)); + VECTOR *frc = const_cast(reinterpret_cast(frc_f)); + VECTOR *scaler = const_cast(reinterpret_cast(scaler_f)); + + DihedralForceKernel<<>>( + dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, frc); + return; +} +void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, + const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk, + const float *gamc, const float *gams, const float *pn, float *frc_f, cudaStream_t stream); diff --git a/cuda_code/direct_product_1.cu b/cuda_code/direct_product_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..d6bcc36e07c7ba41faca51564f093e73c7dfa1a8 --- /dev/null +++ b/cuda_code/direct_product_1.cu @@ -0,0 +1,213 @@ +#include +#include +#include +#include +#include + +constexpr std::size_t block_size = 256; +constexpr unsigned warp_size = 32; + +#ifndef CUDA_ARCH_SM +#define CUDA_ARCH_SM 0 +#endif + +template +__global__ void direct_product(float* const c_ptr, const half* const a_ptr, const half* const b_ptr, unsigned dim); + +template <> +__global__ void direct_product(float* const c_ptr, const half* const a_ptr, const half* const b_ptr, unsigned dim) { + constexpr unsigned FDIM = 16; + __shared__ half A_smem[warp_size]; + __shared__ half B_smem[block_size]; + __shared__ float C_smem[warp_size * block_size]; + + const unsigned warp_id = threadIdx.x >> 5; + const unsigned unique_id = threadIdx.x & 0x1f; + + float* const C_smem_ptr = C_smem + warp_size * warp_size * warp_id; + + nvcuda::wmma::fragment A_frag[2]; + nvcuda::wmma::fragment B_frag[2]; + nvcuda::wmma::fragment C_frag[4]; + + // load A + const unsigned a_start = blockIdx.x * warp_size; + if (warp_id == 0) { + A_smem[unique_id] = a_ptr[a_start + unique_id]; + } + __syncthreads(); + mtk::wmma::load_vector(A_frag[0], A_smem); + mtk::wmma::load_vector(A_frag[1], A_smem + FDIM); + 
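+	// Outer-product layout: each block keeps a fixed 32-element slice of `a`
+	// in the two A fragments loaded above and streams block_size-element
+	// chunks of `b` through shared memory; every warp then accumulates its
+	// 32x32 tile of c = a * b^T with tensor-core mma and writes it back
+	// column-major. mtk::wmma::load_vector comes from the external
+	// wmma-extension helpers and appears to fill only the leading vector of
+	// the fragment, which is all a rank-1 product needs.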
nvcuda::wmma::fill_fragment(B_frag[0], __float2half(0.0f)); + nvcuda::wmma::fill_fragment(B_frag[1], __float2half(0.0f)); + + for (unsigned b_start = 0; b_start < dim; b_start += block_size) { + nvcuda::wmma::fill_fragment(C_frag[0], __float2half(0.0f)); + nvcuda::wmma::fill_fragment(C_frag[1], __float2half(0.0f)); + nvcuda::wmma::fill_fragment(C_frag[2], __float2half(0.0f)); + nvcuda::wmma::fill_fragment(C_frag[3], __float2half(0.0f)); + // load B + B_smem[threadIdx.x] = __ldg(&b_ptr[b_start + threadIdx.x]); + + mtk::wmma::load_vector(B_frag[0], B_smem + warp_size * warp_id, false); + nvcuda::wmma::mma_sync(C_frag[0], A_frag[0], B_frag[0], C_frag[0]); + nvcuda::wmma::store_matrix_sync(C_smem_ptr, C_frag[0], warp_size, nvcuda::wmma::mem_col_major); + nvcuda::wmma::mma_sync(C_frag[1], A_frag[1], B_frag[0], C_frag[1]); + nvcuda::wmma::store_matrix_sync(C_smem_ptr + FDIM, C_frag[1], warp_size, nvcuda::wmma::mem_col_major); + + mtk::wmma::load_vector(B_frag[1], B_smem + warp_size * warp_id + FDIM, false); + nvcuda::wmma::mma_sync(C_frag[2], A_frag[0], B_frag[1], C_frag[2]); + nvcuda::wmma::store_matrix_sync(C_smem_ptr + warp_size * FDIM, C_frag[2], warp_size, nvcuda::wmma::mem_col_major); + nvcuda::wmma::mma_sync(C_frag[3], A_frag[1], B_frag[1], C_frag[3]); + nvcuda::wmma::store_matrix_sync(C_smem_ptr + FDIM + warp_size * FDIM, C_frag[3], warp_size, nvcuda::wmma::mem_col_major); + + for (unsigned i = 0; i < block_size * warp_size; i += block_size) { + c_ptr[a_start + unique_id + (warp_id + i / warp_size + b_start) * dim] = C_smem[i + threadIdx.x]; + } + } +} + +template <> +__global__ void direct_product(float* const c_ptr, const half* const a_ptr, const half* const b_ptr, unsigned dim) { + constexpr unsigned FDIM = 16; + __shared__ half B_smem[block_size * FDIM]; + half* const A_smem = B_smem; + __shared__ float C_smem[warp_size * block_size]; + + const unsigned warp_id = threadIdx.x >> 5; + const unsigned unique_id = threadIdx.x & 0x1f; + const unsigned a_start = blockIdx.x * warp_size; + + float* const C_smem_ptr = C_smem + warp_size * warp_size * warp_id; + + nvcuda::wmma::fragment A_frag[2]; + nvcuda::wmma::fragment B_frag[2]; + nvcuda::wmma::fragment C_frag[4]; + + // load A + for (unsigned i = 0; i < FDIM * warp_size; i += block_size) { + A_smem[i + threadIdx.x] = __float2half(0.0f); + } + if (warp_id == 0) { + A_smem[unique_id] = a_ptr[a_start + unique_id]; + } + __syncthreads(); + nvcuda::wmma::load_matrix_sync(A_frag[0], A_smem, warp_size); + nvcuda::wmma::load_matrix_sync(A_frag[1], A_smem + FDIM, warp_size); + // init B + for (unsigned i = 0; i < FDIM * block_size; i += block_size) { + B_smem[i + threadIdx.x] = __float2half(0.0f); + } + __syncthreads(); + + + for (unsigned b_start = 0; b_start < dim; b_start += block_size) { + nvcuda::wmma::fill_fragment(C_frag[0], __float2half(0.0f)); + nvcuda::wmma::fill_fragment(C_frag[1], __float2half(0.0f)); + nvcuda::wmma::fill_fragment(C_frag[2], __float2half(0.0f)); + nvcuda::wmma::fill_fragment(C_frag[3], __float2half(0.0f)); + // load B + B_smem[threadIdx.x] = __ldg(&b_ptr[b_start + threadIdx.x]); + + nvcuda::wmma::load_matrix_sync(B_frag[0], B_smem + warp_size * warp_id, block_size); + nvcuda::wmma::mma_sync(C_frag[0], A_frag[0], B_frag[0], C_frag[0]); + nvcuda::wmma::store_matrix_sync(C_smem_ptr, C_frag[0], warp_size, nvcuda::wmma::mem_col_major); + nvcuda::wmma::mma_sync(C_frag[1], A_frag[1], B_frag[0], C_frag[1]); + nvcuda::wmma::store_matrix_sync(C_smem_ptr + FDIM, C_frag[1], warp_size, nvcuda::wmma::mem_col_major); + + 
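+		// Same mma/store sequence for the second 16-element half of this warp's
+		// B slice. This fallback path keeps full zero-padded tiles in shared
+		// memory and reads them with plain load_matrix_sync instead of the
+		// vector-load helper used by the other specialization.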
nvcuda::wmma::load_matrix_sync(B_frag[1], B_smem + warp_size * warp_id + FDIM, block_size); + nvcuda::wmma::mma_sync(C_frag[2], A_frag[0], B_frag[1], C_frag[2]); + nvcuda::wmma::store_matrix_sync(C_smem_ptr + warp_size * FDIM, C_frag[2], warp_size, nvcuda::wmma::mem_col_major); + nvcuda::wmma::mma_sync(C_frag[3], A_frag[1], B_frag[1], C_frag[3]); + nvcuda::wmma::store_matrix_sync(C_smem_ptr + FDIM + warp_size * FDIM, C_frag[3], warp_size, nvcuda::wmma::mem_col_major); + + for (unsigned i = 0; i < block_size * warp_size; i += block_size) { + c_ptr[a_start + unique_id + (warp_id + i / warp_size + b_start) * dim] = C_smem[i + threadIdx.x]; + } + } +} + +template +void test_direct_product(const unsigned size_power) { + constexpr std::size_t C = 1lu << 6; + const unsigned DIM = 1lu << size_power; + const std::size_t grid_size = DIM / warp_size; + + half *dA, *dB; + float *dC; + cudaMalloc(&dA, sizeof(half) * DIM); + cudaMalloc(&dB, sizeof(half) * DIM); + cudaMalloc(&dC, sizeof(float) * DIM * DIM); + + half *hA; + float *hC; + cudaMallocHost(&hA, sizeof(half) * DIM); + cudaMallocHost(&hC, sizeof(float) * DIM * DIM); + for (unsigned i = 0; i < DIM; i++) hA[i] = __float2half(static_cast(i) / DIM); + cudaMemcpy(dA, hA, sizeof(half) * DIM, cudaMemcpyDefault); + cudaMemcpy(dB, hA, sizeof(half) * DIM, cudaMemcpyDefault); + + const auto start_clock = std::chrono::system_clock::now(); + for (std::size_t c = 0; c < C; c++) { + direct_product<<>>( + dC, + dA, + dB, + DIM + ); + } + const auto status = cudaGetLastError(); + cudaDeviceSynchronize(); + if (status != 0) { + std::fprintf(stderr, "%s\n", cudaGetErrorString(status)); + } + const auto end_clock = std::chrono::system_clock::now(); + const auto elapsed_time = std::chrono::duration_cast(end_clock - start_clock).count() / 1.e6 / C; + + cudaMemcpy(hC, dC, sizeof(float) * DIM * DIM, cudaMemcpyDefault); + float diff_norm = 0.0f; + float base_norm = 0.0f; + for (std::size_t i = 0; i < DIM ;i++) { + for (std::size_t j = 0; j < DIM; j++) { + const auto corr = __half2float(hA[i]) * __half2float(hA[j]); + const auto diff = hC[i + DIM * j] - corr; + base_norm += corr * corr; + diff_norm += diff * diff; + } + } + std::printf("%u,%u,%u,%e,%e,%e\n", + static_cast(CUDA_ARCH_SM), + DIM, + (UseWMMAe ? 1u : 0u), + elapsed_time, + (2 * DIM * DIM / elapsed_time / (1lu<<40)), + sqrt(diff_norm / base_norm) + ); + + cudaFreeHost(hA); + cudaFree(dA); + cudaFree(dB); + cudaFree(dC); +} + +void test_direct_product(const unsigned min_p, const unsigned max_p) { + std::printf("# %s\n", __func__); + std::printf("-- 1\n"); + for (unsigned i = min_p; i <= max_p; i++) { + test_direct_product(i); + } + for (unsigned i = min_p; i <= max_p; i++) { + test_direct_product(i); + } + std::printf("-- 2\n"); + for (unsigned i = min_p; i <= max_p; i++) { + test_direct_product(i); + } + for (unsigned i = min_p; i <= max_p; i++) { + test_direct_product(i); + } +} + +int main() { + test_direct_product(8, 15); +} diff --git a/cuda_code/disco.cu b/cuda_code/disco.cu new file mode 100644 index 0000000000000000000000000000000000000000..36c6faa09173dec44621b4166d8389ce10c84b7f --- /dev/null +++ b/cuda_code/disco.cu @@ -0,0 +1,131 @@ +#include +#include +#include
+#include + +const int __SIZE = VFB_MAX_SIZE * VFB_MAX_SIZE; +SDL_Surface* screen = NULL; + +void handleExit(bool& running){ + SDL_Event ev; + + while (SDL_PollEvent(&ev)) { + switch (ev.type) { + case SDL_QUIT: + running = false; + return; + } + } +} + +bool getCommandLineArgs(int argc, char** argv, + unsigned& threadCount, unsigned& numBuckets, + unsigned& resX, unsigned& resY, + char* inputFile, char* outputFile) { + unsigned i = 1; + bool fail = true; + + printf("%d", argc); + for(; i < argc;){ + if (strcmp(argv[i], "--threadCount")){ + threadCount = atoi(argv[i + 1]); i += 1; fail = false; + } + else if (strcmp(argv[i], "--inputFile")){ + strcpy(inputFile, argv[i + 1]); i += 2; fail = false; + } + else if (strcmp(argv[i], "--numBuckets")){ + numBuckets = atoi(argv[i + 1]); i += 2; + } + else if (strcmp(argv[i], "--outputFile")){ + strcpy(outputFile, argv[i + 1]); i += 2; + } + else if (strcmp(argv[i], "--resX")){ + resX = atoi(argv[i + 1]); i += 2; + } + else if (strcmp(argv[i], "--resY")){ + resY = atoi(argv[i + 1]); i += 2; + } + else { + printf("Unknown argument!"); fail = true; + return fail; + } + } + + return fail; +} + +int main(int argc, char* argv[]) { + bool running = true; + unsigned threadCount, numBuckets = 0, resX = 0, resY = 0; + char inputFile[1024], outputFile[1024]; + + printf("Starting program..."); + bool args = getCommandLineArgs(argc, argv, + threadCount, numBuckets, + resX, resY, inputFile, outputFile); + if (!args) { + printf("command line options are:\n"); + printf("--threadCount, compulsory\n"); + printf("--inputFile, compulsory, for an example see main/sampleScene.txt\n"); + printf("--outputFile, optional, location to save the file"); + printf("--resX, --resY - resolution, optional"); + } + else { + printf("read args:\n"); + printf("threadCount: %d\n", threadCount); + if (numBuckets == 0) numBuckets = 1; + if (resX == 0) resX = 640; + if (resY == 0) resY = 480; + } + + setBuckets(threadCount, numBuckets); + Color *host_vfb, *device_vfb; + host_vfb = (Color*)malloc(__SIZE * sizeof(Color)); + cudaMalloc((void**)&device_vfb, __SIZE * sizeof(Color)); + cudaMemcpy(device_vfb, host_vfb, __SIZE * sizeof(Color), + cudaMemcpyHostToDevice); + + bool *needsAA; + cudaMalloc((void**)&needsAA, __SIZE * sizeof(bool)); + + Scene *scene = new Scene, *dev_scene; + int3 dirs; float3 shift; shift.x = 0.5; shift.y = 0.1; shift.z = 0.1; + dirs.x = 1; dirs.y = 1; dirs.z = 1; + + if (!initGraphics(&screen, resX, resY)) return -1; + scene->readFromFile("main/sampleScene.txt"); + printf("read file!"); + cudaMalloc((void**)&dev_scene, sizeof(Scene)); + cudaMemcpy(dev_scene, scene, sizeof(Scene), cudaMemcpyHostToDevice); + + Light* host_light = new Light(); + cudaMemcpy(host_light, scene->light, sizeof(Light), cudaMemcpyDeviceToHost); + while (running) { + int lp = 1; + renderScene<<<1, threadCount>>>(dev_scene, device_vfb); + findAA<<<1, threadCount>>>(needsAA, device_vfb); + antialias<<<1, threadCount>>>(dev_scene, needsAA, device_vfb); + cudaError_t cudaerr = cudaDeviceSynchronize(); + + cudaMemcpy(host_vfb, device_vfb, __SIZE * sizeof(Color), + cudaMemcpyDeviceToHost); + displayVFB(screen, host_vfb); + shiftColor(host_light->color, dirs, shift); + host_light->power += 1000 * lp; + if (host_light->power > 100000) lp = -1; + if (host_light->power < 20000) lp = +1; + cudaMemcpy(scene->light, host_light, sizeof(Light), + cudaMemcpyHostToDevice); + handleExit(running); + } + + closeGraphics(); + + scene->cleanUp(); + free(scene); cudaFree(dev_scene); + free(host_vfb); + 
cudaFree(device_vfb); + cudaFree(needsAA); + + return 0; +} diff --git a/cuda_code/dist_3d.cu b/cuda_code/dist_3d.cu new file mode 100644 index 0000000000000000000000000000000000000000..0c2763ab128bce715f712cb675b5fc916e42bf3b --- /dev/null +++ b/cuda_code/dist_3d.cu @@ -0,0 +1,53 @@ +/* dist_3d.cu + * + * based on code from CUDA for Engineers (cudaforengineers) + * Ernest Yeung ernestyalumni@gmail.com + * 20160625 + */ +#include +#define Nthreads_x 32 // total number of threads in x-direction +#define Nthreads_y 32 // total number of threads in y-direction +#define Nthreads_z 32 // total number of threads in z-direction + +#define M_x 8 // number of threads per block in x-direction +#define M_y 8 // number of threads per block in y-direction +#define M_z 8 // number of threads per block in z-direction + +int blocksNeeded(int N_i, int M_i) { return (N_i+M_i-1)/M_i; } + +__device__ float distance(int k_x, int k_y, int k_z, float3 x_0) { + return sqrtf((k_x - x_0.x)*(k_x - x_0.x) + (k_y - x_0.y)*(k_y - x_0.y) + + (k_z - x_0.z)*(k_z - x_0.z)); + } + +__global__ void distance(float *d_out, int L_x, int L_y, int L_z, float3 x_0) { + // map from threadIdx/blockIdx to (k_x,k_y,k_z) grid position + const int k_x = threadIdx.x + blockIdx.x*blockDim.x; + const int k_y = threadIdx.y + blockIdx.y*blockDim.y; + const int k_z = threadIdx.z + blockIdx.z*blockDim.z; + const int offset = k_x + k_y*L_x + k_z*L_x*L_y; + if ((k_x >= L_x) || (k_y >= L_y) || (k_z >= L_z)) return; + d_out[offset] = distance( k_x, k_y, k_z, x_0); // compute and store result +} + +int main() { + float *out = (float *)malloc(Nthreads_x*Nthreads_y*Nthreads_z*sizeof(float)); + float *d_out = 0; + cudaMalloc(&d_out, Nthreads_x*Nthreads_y*Nthreads_z*sizeof(float)); + + const float3 x_0 = { 0.0f, 0.0f, 0.0f }; // set reference position x_0 + const dim3 blockSize( M_x, M_y, M_z); + const dim3 gridSize( blocksNeeded(Nthreads_x, M_x), blocksNeeded(Nthreads_y, M_y), + blocksNeeded(Nthreads_z, M_z)); + distance<<>>(d_out, Nthreads_x, Nthreads_y, Nthreads_z, x_0); + cudaMemcpy(out, d_out, Nthreads_x*Nthreads_y*Nthreads_z*sizeof(float), cudaMemcpyDeviceToHost); + cudaFree(d_out); + + // sanity check + int testx = (int) 7*Nthreads_x*Nthreads_y*Nthreads_z/10; + printf("At %d the distance is %f \n", testx, out[testx]); + + free(out); + return 0; +} + diff --git a/cuda_code/dist_coo_spmv_6.cu b/cuda_code/dist_coo_spmv_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..f65791d2a22b3240acebaf649d60c561ce04d508 --- /dev/null +++ b/cuda_code/dist_coo_spmv_6.cu @@ -0,0 +1,636 @@ +/* + * Copyright (c) 2018-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +namespace raft { +namespace sparse { +namespace distance { + +using namespace raft; +using namespace raft::sparse; + +template +struct SparseDistanceCOOSPMVInputs { + value_idx n_cols; + + std::vector indptr_h; + std::vector indices_h; + std::vector data_h; + + std::vector out_dists_ref_h; + + raft::distance::DistanceType metric; + + float metric_arg = 0.0; +}; + +template +::std::ostream &operator<<( + ::std::ostream &os, + const SparseDistanceCOOSPMVInputs &dims) { + return os; +} + +template +class SparseDistanceCOOSPMVTest + : public ::testing::TestWithParam< + SparseDistanceCOOSPMVInputs> { + public: + template + void compute_dist(reduce_f reduce_func, accum_f accum_func, + write_f write_func, bool rev = true) { + raft::mr::device::buffer coo_rows( + dist_config.allocator, dist_config.stream, + max(dist_config.b_nnz, dist_config.a_nnz)); + + raft::sparse::convert::csr_to_coo(dist_config.b_indptr, dist_config.b_nrows, + coo_rows.data(), dist_config.b_nnz, + dist_config.stream); + + balanced_coo_pairwise_generalized_spmv( + out_dists, dist_config, coo_rows.data(), reduce_func, accum_func, + write_func); + + if (rev) { + raft::sparse::convert::csr_to_coo(dist_config.a_indptr, + dist_config.a_nrows, coo_rows.data(), + dist_config.a_nnz, dist_config.stream); + + balanced_coo_pairwise_generalized_spmv_rev( + out_dists, dist_config, coo_rows.data(), reduce_func, accum_func, + write_func); + } + } + + void run_spmv() { + switch (params.metric) { + case raft::distance::DistanceType::InnerProduct: + compute_dist(Product(), Sum(), AtomicAdd(), true); + break; + case raft::distance::DistanceType::L2Unexpanded: + compute_dist(SqDiff(), Sum(), AtomicAdd()); + break; + case raft::distance::DistanceType::Canberra: + compute_dist( + [] __device__(value_t a, value_t b) { + return fabsf(a - b) / (fabsf(a) + fabsf(b)); + }, + Sum(), AtomicAdd()); + break; + case raft::distance::DistanceType::L1: + compute_dist(AbsDiff(), Sum(), AtomicAdd()); + break; + case raft::distance::DistanceType::Linf: + compute_dist(AbsDiff(), Max(), AtomicMax()); + break; + case raft::distance::DistanceType::LpUnexpanded: { + compute_dist(PDiff(params.metric_arg), Sum(), AtomicAdd()); + float p = 1.0f / params.metric_arg; + raft::linalg::unaryOp( + out_dists, out_dists, dist_config.a_nrows * dist_config.b_nrows, + [=] __device__(value_t input) { return powf(input, p); }, + dist_config.stream); + + } break; + default: + throw raft::exception("Unknown distance"); + } + } + + protected: + void make_data() { + std::vector indptr_h = params.indptr_h; + std::vector indices_h = params.indices_h; + std::vector data_h = params.data_h; + + allocate(indptr, indptr_h.size()); + allocate(indices, indices_h.size()); + allocate(data, data_h.size()); + + update_device(indptr, indptr_h.data(), indptr_h.size(), stream); + update_device(indices, indices_h.data(), indices_h.size(), stream); + update_device(data, data_h.data(), data_h.size(), stream); + + std::vector out_dists_ref_h = params.out_dists_ref_h; + + allocate(out_dists_ref, (indptr_h.size() - 1) * (indptr_h.size() - 1)); + + update_device(out_dists_ref, out_dists_ref_h.data(), out_dists_ref_h.size(), + stream); + } + + void SetUp() override { + params = ::testing::TestWithParam< + SparseDistanceCOOSPMVInputs>::GetParam(); + std::shared_ptr alloc( + new raft::mr::device::default_allocator); + CUDA_CHECK(cudaStreamCreate(&stream)); + + 
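+    // The same CSR arrays are wired into both the "a" and "b" sides of
+    // dist_config below, so run_spmv() produces an n x n self-distance
+    // matrix that compare() checks against out_dists_ref.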
CUSPARSE_CHECK(cusparseCreate(&cusparseHandle)); + CUSPARSE_CHECK(cusparseSetStream(cusparseHandle, stream)); + + make_data(); + + dist_config.b_nrows = params.indptr_h.size() - 1; + dist_config.b_ncols = params.n_cols; + dist_config.b_nnz = params.indices_h.size(); + dist_config.b_indptr = indptr; + dist_config.b_indices = indices; + dist_config.b_data = data; + dist_config.a_nrows = params.indptr_h.size() - 1; + dist_config.a_ncols = params.n_cols; + dist_config.a_nnz = params.indices_h.size(); + dist_config.a_indptr = indptr; + dist_config.a_indices = indices; + dist_config.a_data = data; + dist_config.handle = cusparseHandle; + dist_config.allocator = alloc; + dist_config.stream = stream; + + int out_size = dist_config.a_nrows * dist_config.b_nrows; + + allocate(out_dists, out_size); + + ML::Logger::get().setLevel(CUML_LEVEL_DEBUG); + + run_spmv(); + + CUDA_CHECK(cudaStreamSynchronize(stream)); + } + + void TearDown() override { + CUDA_CHECK(cudaStreamSynchronize(stream)); + CUDA_CHECK(cudaFree(indptr)); + CUDA_CHECK(cudaFree(indices)); + CUDA_CHECK(cudaFree(data)); + CUDA_CHECK(cudaFree(out_dists)); + CUDA_CHECK(cudaFree(out_dists_ref)); + } + + void compare() { + raft::print_device_vector("expected: ", out_dists_ref, + params.out_dists_ref_h.size(), std::cout); + raft::print_device_vector("out_dists: ", out_dists, + params.out_dists_ref_h.size(), std::cout); + ASSERT_TRUE(devArrMatch(out_dists_ref, out_dists, + params.out_dists_ref_h.size(), + CompareApprox(1e-3))); + } + + protected: + cudaStream_t stream; + cusparseHandle_t cusparseHandle; + + // input data + value_idx *indptr, *indices; + value_t *data; + + // output data + value_t *out_dists, *out_dists_ref; + + raft::sparse::distance::distances_config_t dist_config; + + SparseDistanceCOOSPMVInputs params; +}; + +const std::vector> inputs_i32_f = { + {2, + {0, 2, 4, 6, 8}, + {0, 1, 0, 1, 0, 1, 0, 1}, + {1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f}, + {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, + 5.0}, + raft::distance::DistanceType::InnerProduct, + 0.0}, + {2, + {0, 2, 4, 6, 8}, + {0, 1, 0, 1, 0, 1, 0, 1}, // indices + {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}, + { + // dense output + 0.0, + 4.0, + 3026.0, + 226.0, + 4.0, + 0.0, + 2930.0, + 234.0, + 3026.0, + 2930.0, + 0.0, + 1832.0, + 226.0, + 234.0, + 1832.0, + 0.0, + }, + raft::distance::DistanceType::L2Unexpanded, + 0.0}, + + {10, + {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, + {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, + 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, + 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices + {0.5438, 0.2695, 0.4377, 0.7174, 0.9251, 0.7648, 0.3322, 0.7279, 0.4131, + 0.5167, 0.8655, 0.0730, 0.0291, 0.9036, 0.7988, 0.5019, 0.7663, 0.2190, + 0.8206, 0.3625, 0.0411, 0.3995, 0.5688, 0.7028, 0.8706, 0.3199, 0.4431, + 0.0535, 0.2225, 0.8853, 0.1932, 0.3761, 0.3379, 0.1771, 0.2107, 0.228, + 0.5279, 0.4885, 0.3495, 0.5079, 0.2325, 0.2331, 0.3018, 0.6231, 0.2645, + 0.8429, 0.6625, 0.0797, 0.2724, 0.4218}, + {0.0, + 3.3954660629919076, + 5.6469232737388815, + 6.373112846266441, + 4.0212880272531715, + 6.916281504639404, + 5.741508386786526, + 5.411470999663036, + 9.0, + 4.977014354725805, + 3.3954660629919076, + 0.0, + 7.56256082439209, + 5.540261147481582, + 4.832322929216881, + 4.62003193872216, + 6.498056792320361, + 4.309846252268695, + 6.317531174829905, + 6.016362684141827, + 5.6469232737388815, + 7.56256082439209, + 0.0, + 5.974878731322299, + 4.898357301336036, + 6.442097410320605, + 
5.227077347287883, + 7.134101195584642, + 5.457753923371659, + 7.0, + 6.373112846266441, + 5.540261147481582, + 5.974878731322299, + 0.0, + 5.5507273748583, + 4.897749658726415, + 9.0, + 8.398776718824767, + 3.908281400328807, + 4.83431066343688, + 4.0212880272531715, + 4.832322929216881, + 4.898357301336036, + 5.5507273748583, + 0.0, + 6.632989819428174, + 7.438852294822894, + 5.6631570310967465, + 7.579428202635459, + 6.760811985364303, + 6.916281504639404, + 4.62003193872216, + 6.442097410320605, + 4.897749658726415, + 6.632989819428174, + 0.0, + 5.249404187382862, + 6.072559523278559, + 4.07661278488929, + 6.19678948003145, + 5.741508386786526, + 6.498056792320361, + 5.227077347287883, + 9.0, + 7.438852294822894, + 5.249404187382862, + 0.0, + 3.854811639654704, + 6.652724827169063, + 5.298236851430971, + 5.411470999663036, + 4.309846252268695, + 7.134101195584642, + 8.398776718824767, + 5.6631570310967465, + 6.072559523278559, + 3.854811639654704, + 0.0, + 7.529184598969917, + 6.903282911791188, + 9.0, + 6.317531174829905, + 5.457753923371659, + 3.908281400328807, + 7.579428202635459, + 4.07661278488929, + 6.652724827169063, + 7.529184598969917, + 0.0, + 7.0, + 4.977014354725805, + 6.016362684141827, + 7.0, + 4.83431066343688, + 6.760811985364303, + 6.19678948003145, + 5.298236851430971, + 6.903282911791188, + 7.0, + 0.0}, + raft::distance::DistanceType::Canberra, + 0.0}, + + {10, + {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, + {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, + 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, + 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices + {0.5438, 0.2695, 0.4377, 0.7174, 0.9251, 0.7648, 0.3322, 0.7279, 0.4131, + 0.5167, 0.8655, 0.0730, 0.0291, 0.9036, 0.7988, 0.5019, 0.7663, 0.2190, + 0.8206, 0.3625, 0.0411, 0.3995, 0.5688, 0.7028, 0.8706, 0.3199, 0.4431, + 0.0535, 0.2225, 0.8853, 0.1932, 0.3761, 0.3379, 0.1771, 0.2107, 0.228, + 0.5279, 0.4885, 0.3495, 0.5079, 0.2325, 0.2331, 0.3018, 0.6231, 0.2645, + 0.8429, 0.6625, 0.0797, 0.2724, 0.4218}, + {0.0, + 1.31462855332296, + 1.3690307816129905, + 1.698603990921237, + 1.3460470789553531, + 1.6636670712582544, + 1.2651744044972217, + 1.1938329352055201, + 1.8811409082590185, + 1.3653115050624267, + 1.31462855332296, + 0.0, + 1.9447722703291133, + 1.42818777206562, + 1.4685491458946494, + 1.3071999866010466, + 1.4988622861692171, + 0.9698559287406783, + 1.4972023224597841, + 1.5243383567266802, + 1.3690307816129905, + 1.9447722703291133, + 0.0, + 1.2748400840107568, + 1.0599569946448246, + 1.546591282841402, + 1.147526531928459, + 1.447002179128145, + 1.5982242387673176, + 1.3112533607072414, + 1.698603990921237, + 1.42818777206562, + 1.2748400840107568, + 0.0, + 1.038121552545461, + 1.011788365364402, + 1.3907391109256988, + 1.3128200942311496, + 1.19595706584447, + 1.3233328139624725, + 1.3460470789553531, + 1.4685491458946494, + 1.0599569946448246, + 1.038121552545461, + 0.0, + 1.3642741698145529, + 1.3493868683808095, + 1.394942694628328, + 1.572881849642552, + 1.380122665319464, + 1.6636670712582544, + 1.3071999866010466, + 1.546591282841402, + 1.011788365364402, + 1.3642741698145529, + 0.0, + 1.018961640373018, + 1.0114394258945634, + 0.8338711034820684, + 1.1247823842299223, + 1.2651744044972217, + 1.4988622861692171, + 1.147526531928459, + 1.3907391109256988, + 1.3493868683808095, + 1.018961640373018, + 0.0, + 0.7701238110357329, + 1.245486437864406, + 0.5551259549534626, + 1.1938329352055201, + 0.9698559287406783, + 1.447002179128145, + 1.3128200942311496, + 1.394942694628328, + 
1.0114394258945634, + 0.7701238110357329, + 0.0, + 1.1886800117391216, + 1.0083692448135637, + 1.8811409082590185, + 1.4972023224597841, + 1.5982242387673176, + 1.19595706584447, + 1.572881849642552, + 0.8338711034820684, + 1.245486437864406, + 1.1886800117391216, + 0.0, + 1.3661374102525012, + 1.3653115050624267, + 1.5243383567266802, + 1.3112533607072414, + 1.3233328139624725, + 1.380122665319464, + 1.1247823842299223, + 0.5551259549534626, + 1.0083692448135637, + 1.3661374102525012, + 0.0}, + raft::distance::DistanceType::LpUnexpanded, + 2.0}, + + {10, + {0, 5, 11, 15, 20, 27, 32, 36, 43, 47, 50}, + {0, 1, 3, 6, 8, 0, 1, 2, 3, 5, 6, 1, 2, 4, 8, 0, 2, + 3, 4, 7, 0, 1, 2, 3, 4, 6, 8, 0, 1, 2, 5, 7, 1, 5, + 8, 9, 0, 1, 2, 5, 6, 8, 9, 2, 4, 5, 7, 0, 3, 9}, // indices + {0.5438, 0.2695, 0.4377, 0.7174, 0.9251, 0.7648, 0.3322, 0.7279, 0.4131, + 0.5167, 0.8655, 0.0730, 0.0291, 0.9036, 0.7988, 0.5019, 0.7663, 0.2190, + 0.8206, 0.3625, 0.0411, 0.3995, 0.5688, 0.7028, 0.8706, 0.3199, 0.4431, + 0.0535, 0.2225, 0.8853, 0.1932, 0.3761, 0.3379, 0.1771, 0.2107, 0.228, + 0.5279, 0.4885, 0.3495, 0.5079, 0.2325, 0.2331, 0.3018, 0.6231, 0.2645, + 0.8429, 0.6625, 0.0797, 0.2724, 0.4218}, + {0.0, + 0.9251771844789913, + 0.9036452083899731, + 0.9251771844789913, + 0.8706483735804971, + 0.9251771844789913, + 0.717493881903289, + 0.6920214832303888, + 0.9251771844789913, + 0.9251771844789913, + 0.9251771844789913, + 0.0, + 0.9036452083899731, + 0.8655339692155823, + 0.8706483735804971, + 0.8655339692155823, + 0.8655339692155823, + 0.6329837991017668, + 0.8655339692155823, + 0.8655339692155823, + 0.9036452083899731, + 0.9036452083899731, + 0.0, + 0.7988276152181608, + 0.7028075145996631, + 0.9036452083899731, + 0.9036452083899731, + 0.9036452083899731, + 0.8429599432532096, + 0.9036452083899731, + 0.9251771844789913, + 0.8655339692155823, + 0.7988276152181608, + 0.0, + 0.48376552205293305, + 0.8206394616536681, + 0.8206394616536681, + 0.8206394616536681, + 0.8429599432532096, + 0.8206394616536681, + 0.8706483735804971, + 0.8706483735804971, + 0.7028075145996631, + 0.48376552205293305, + 0.0, + 0.8706483735804971, + 0.8706483735804971, + 0.8706483735804971, + 0.8429599432532096, + 0.8706483735804971, + 0.9251771844789913, + 0.8655339692155823, + 0.9036452083899731, + 0.8206394616536681, + 0.8706483735804971, + 0.0, + 0.8853924473642432, + 0.535821510936138, + 0.6497196601457607, + 0.8853924473642432, + 0.717493881903289, + 0.8655339692155823, + 0.9036452083899731, + 0.8206394616536681, + 0.8706483735804971, + 0.8853924473642432, + 0.0, + 0.5279604218147174, + 0.6658348373853169, + 0.33799874888632914, + 0.6920214832303888, + 0.6329837991017668, + 0.9036452083899731, + 0.8206394616536681, + 0.8706483735804971, + 0.535821510936138, + 0.5279604218147174, + 0.0, + 0.662579808115858, + 0.5079750812968089, + 0.9251771844789913, + 0.8655339692155823, + 0.8429599432532096, + 0.8429599432532096, + 0.8429599432532096, + 0.6497196601457607, + 0.6658348373853169, + 0.662579808115858, + 0.0, + 0.8429599432532096, + 0.9251771844789913, + 0.8655339692155823, + 0.9036452083899731, + 0.8206394616536681, + 0.8706483735804971, + 0.8853924473642432, + 0.33799874888632914, + 0.5079750812968089, + 0.8429599432532096, + 0.0}, + raft::distance::DistanceType::Linf, + 0.0}, + + {4, + {0, 1, 1, 2, 4}, + {3, 2, 0, 1}, // indices + {0.99296, 0.42180, 0.11687, 0.305869}, + { + // dense output + 0.0, + 0.99296, + 1.41476, + 1.415707, + 0.99296, + 0.0, + 0.42180, + 0.42274, + 1.41476, + 0.42180, + 0.0, + 0.84454, + 1.41570, + 0.42274, + 
0.84454, + 0.0, + }, + raft::distance::DistanceType::L1, + 0.0} + +}; + +typedef SparseDistanceCOOSPMVTest SparseDistanceCOOSPMVTestF; +TEST_P(SparseDistanceCOOSPMVTestF, Result) { compare(); } +INSTANTIATE_TEST_CASE_P(SparseDistanceCOOSPMVTests, SparseDistanceCOOSPMVTestF, + ::testing::ValuesIn(inputs_i32_f)); + +}; // namespace distance +}; // end namespace sparse +}; // end namespace raft diff --git a/cuda_code/dist_hellinger_1.cu b/cuda_code/dist_hellinger_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..39dc7aaeff7fd316df576eed6302b2a7e7d6f412 --- /dev/null +++ b/cuda_code/dist_hellinger_1.cu @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "../test_utils.h" +#include "distance_base.cuh" + +namespace raft { +namespace distance { + +template +class DistanceHellingerExp + : public DistanceTest {}; + +const std::vector> inputsf = { + {0.001f, 1024, 1024, 32, true, 1234ULL}, + {0.001f, 1024, 32, 1024, true, 1234ULL}, + {0.001f, 32, 1024, 1024, true, 1234ULL}, + {0.003f, 1024, 1024, 1024, true, 1234ULL}, + {0.001f, 1024, 1024, 32, false, 1234ULL}, + {0.001f, 1024, 32, 1024, false, 1234ULL}, + {0.001f, 32, 1024, 1024, false, 1234ULL}, + {0.003f, 1024, 1024, 1024, false, 1234ULL}, +}; +typedef DistanceHellingerExp DistanceHellingerExpF; +TEST_P(DistanceHellingerExpF, Result) { + int m = params.isRowMajor ? params.m : params.n; + int n = params.isRowMajor ? params.n : params.m; + ASSERT_TRUE(raft::devArrMatch(dist_ref, dist, m, n, + raft::CompareApprox(params.tolerance))); +} +INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceHellingerExpF, + ::testing::ValuesIn(inputsf)); + +const std::vector> inputsd = { + {0.001, 1024, 1024, 32, true, 1234ULL}, + {0.001, 1024, 32, 1024, true, 1234ULL}, + {0.001, 32, 1024, 1024, true, 1234ULL}, + {0.003, 1024, 1024, 1024, true, 1234ULL}, + {0.001, 1024, 1024, 32, false, 1234ULL}, + {0.001, 1024, 32, 1024, false, 1234ULL}, + {0.001, 32, 1024, 1024, false, 1234ULL}, + {0.003, 1024, 1024, 1024, false, 1234ULL}, +}; +typedef DistanceHellingerExp DistanceHellingerExpD; +TEST_P(DistanceHellingerExpD, Result) { + int m = params.isRowMajor ? params.m : params.n; + int n = params.isRowMajor ? params.n : params.m; + ASSERT_TRUE(raft::devArrMatch(dist_ref, dist, m, n, + raft::CompareApprox(params.tolerance))); +} +INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceHellingerExpD, + ::testing::ValuesIn(inputsd)); + +} // end namespace distance +} // end namespace raft diff --git a/cuda_code/distance_12.cu b/cuda_code/distance_12.cu new file mode 100644 index 0000000000000000000000000000000000000000..3260497eb9d24b3507289ed2a3c9927e2f9affcd --- /dev/null +++ b/cuda_code/distance_12.cu @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2018-2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include + +#include +#include +#include + +#include + +#include + +#include + +namespace MLCommon { +namespace Sparse { +namespace Selection { + +using namespace raft; + +template +struct SparseDistanceInputs { + value_idx n_cols; + + std::vector indptr_h; + std::vector indices_h; + std::vector data_h; + + std::vector out_dists_ref_h; + + raft::distance::DistanceType metric; +}; + +template +::std::ostream &operator<<( + ::std::ostream &os, const SparseDistanceInputs &dims) { + return os; +} + +template +class SparseDistanceTest + : public ::testing::TestWithParam> { + protected: + void make_data() { + std::vector indptr_h = params.indptr_h; + std::vector indices_h = params.indices_h; + std::vector data_h = params.data_h; + + allocate(indptr, indptr_h.size()); + allocate(indices, indices_h.size()); + allocate(data, data_h.size()); + + update_device(indptr, indptr_h.data(), indptr_h.size(), stream); + update_device(indices, indices_h.data(), indices_h.size(), stream); + update_device(data, data_h.data(), data_h.size(), stream); + + std::vector out_dists_ref_h = params.out_dists_ref_h; + + allocate(out_dists_ref, (indptr_h.size() - 1) * (indptr_h.size() - 1)); + + update_device(out_dists_ref, out_dists_ref_h.data(), out_dists_ref_h.size(), + stream); + } + + void SetUp() override { + params = ::testing::TestWithParam< + SparseDistanceInputs>::GetParam(); + std::shared_ptr alloc( + new raft::mr::device::default_allocator); + CUDA_CHECK(cudaStreamCreate(&stream)); + + CUSPARSE_CHECK(cusparseCreate(&cusparseHandle)); + + make_data(); + + Distance::distances_config_t dist_config; + dist_config.b_nrows = params.indptr_h.size() - 1; + dist_config.b_ncols = params.n_cols; + dist_config.b_nnz = params.indices_h.size(); + dist_config.b_indptr = indptr; + dist_config.b_indices = indices; + dist_config.b_data = data; + dist_config.a_nrows = params.indptr_h.size() - 1; + dist_config.a_ncols = params.n_cols; + dist_config.a_nnz = params.indices_h.size(); + dist_config.a_indptr = indptr; + dist_config.a_indices = indices; + dist_config.a_data = data; + dist_config.handle = cusparseHandle; + dist_config.allocator = alloc; + dist_config.stream = stream; + + int out_size = dist_config.a_nrows * dist_config.b_nrows; + + allocate(out_dists, out_size); + + ML::Logger::get().setLevel(CUML_LEVEL_INFO); + + pairwiseDistance(out_dists, dist_config, params.metric); + + CUDA_CHECK(cudaStreamSynchronize(stream)); + } + + void TearDown() override { + CUDA_CHECK(cudaStreamSynchronize(stream)); + CUDA_CHECK(cudaFree(indptr)); + CUDA_CHECK(cudaFree(indices)); + CUDA_CHECK(cudaFree(data)); + CUDA_CHECK(cudaFree(out_dists)); + CUDA_CHECK(cudaFree(out_dists_ref)); + } + + void compare() { + ASSERT_TRUE(devArrMatch(out_dists_ref, out_dists, 16, Compare())); + } + + protected: + cudaStream_t stream; + cusparseHandle_t cusparseHandle; + + // input data + value_idx *indptr, *indices; + value_t *data; + + // output data + value_t *out_dists, *out_dists_ref; + + SparseDistanceInputs params; +}; + +const std::vector> inputs_i32_f = { + {2, + {0, 2, 4, 6, 8}, + {0, 1, 0, 1, 0, 1, 0, 1}, // 
indices + {1.0f, 3.0f, 1.0f, 5.0f, 50.0f, 28.0f, 16.0f, 2.0f}, + { + // dense output + 0.0, + 4.0, + 3026.0, + 226.0, + 4.0, + 0.0, + 2930.0, + 234.0, + 3026.0, + 2930.0, + 0.0, + 1832.0, + 226.0, + 234.0, + 1832.0, + 0.0, + }, + raft::distance::DistanceType::EucExpandedL2}, + {2, + {0, 2, 4, 6, 8}, + {0, 1, 0, 1, 0, 1, 0, 1}, + {1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f}, + {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, + 5.0}, + raft::distance::DistanceType::InnerProduct + + }}; +typedef SparseDistanceTest SparseDistanceTestF; +TEST_P(SparseDistanceTestF, Result) { compare(); } +INSTANTIATE_TEST_CASE_P(SparseDistanceTests, SparseDistanceTestF, + ::testing::ValuesIn(inputs_i32_f)); + +}; // end namespace Selection +}; // end namespace Sparse +}; // end namespace MLCommon diff --git a/cuda_code/distributed_ivfpq_topk.cu b/cuda_code/distributed_ivfpq_topk.cu new file mode 100644 index 0000000000000000000000000000000000000000..8ca6e10363f205c191e2925ba4e6a1731d84167f --- /dev/null +++ b/cuda_code/distributed_ivfpq_topk.cu @@ -0,0 +1,1208 @@ +#define _VOLATILE_ +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#define load(x) __ldcg(x) +#define store(x, value) __stcs(x, value) +#ifndef INFINITY +#define INFINITY __int_as_float(0x7f800000) +#endif + +typedef unsigned char uint8_t; +typedef long long ll_t; + +typedef struct __builtin_align__(8) +{ + float value; + float index; +} pair; + +typedef struct __device_builtin__ __builtin_align__(_NCS_) +{ + uint8_t _VARNAMES_; +} _uint8n_t; + +typedef union { + _uint8n_t u8n; + uint8_t val[_NCS_]; +} uint8n_t; + +__device__ __forceinline__ float atomicMax(float *address, float val) +{ + int ret = __float_as_int(*address); + while(val > __int_as_float(ret)) + { + int old = ret; + if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old) + break; + } + return __int_as_float(ret); +} + +__device__ __forceinline__ unsigned int bfe( + unsigned int source, + unsigned int bitIndex +) { + unsigned int bit; + asm volatile("bfe.u32 %0, %1, %2, %3;" : "=r"(bit) : "r"((unsigned int) source), "r"(bitIndex), "r"(1)); + return bit; +} + +__device__ __forceinline__ void warp_comparator( + float &value, + float &index, + const int stride, + const int direction +){ + const float otherValue = __shfl_xor_sync(0xFFFFFFFF, value, stride); + const float otherIndex = __shfl_xor_sync(0xFFFFFFFF, index, stride); + bool condition = value < otherValue == direction; + index = condition ? otherIndex : index; + value = condition ? otherValue : value; +} + +__device__ __forceinline__ void block_comparator( + float &value, + float &index, + const int stride, + const int direction, + const int laneID, + _VOLATILE_ float sMem[] +){ + float tempPrecomputed1 = sMem[laneID]; + float tempPrecomputed2 = sMem[_TPB_ + laneID]; + __syncthreads(); + + sMem[laneID] = value; + sMem[_TPB_ + laneID] = index; + __syncthreads(); + + float otherValue = sMem[laneID ^ stride]; + float otherIndex = sMem[_TPB_ + laneID ^ stride]; + __syncthreads(); + + sMem[laneID] = tempPrecomputed1; + sMem[_TPB_ + laneID] = tempPrecomputed2; + __syncthreads(); + + bool condition = value < otherValue == direction; + value = condition ? otherValue : value; + index = condition ? 
otherIndex : index; + /* + */ +} + +__device__ __forceinline__ void block_comparator_noop( +){ + __syncthreads(); + __syncthreads(); + __syncthreads(); + __syncthreads(); +} + +__device__ __forceinline__ void thread_comparator( + float &value, + float &index, + float otherValue, + float otherIndex, + const int direction +){ + bool condition = value > otherValue == direction; + if (condition){ + value = otherValue; + index = otherIndex; + } +} + +__device__ void bitonic_sort_2( + float &value, + float &index, + int laneID +){ + warp_comparator(value, index, 1, bfe(laneID, 1) ^ bfe(laneID, 0)); +} + +__device__ void bitonic_sort_4( + float &value, + float &index, + int laneID +){ + bitonic_sort_2(value, index, laneID); + warp_comparator(value, index, 2, bfe(laneID, 2) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 2) ^ bfe(laneID, 0)); +} + +__device__ void bitonic_sort_8( + float &value, + float &index, + int laneID +){ + bitonic_sort_4(value, index, laneID); + warp_comparator(value, index, 4, bfe(laneID, 3) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 3) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 3) ^ bfe(laneID, 0)); +} + +__device__ void bitonic_sort_16( + float &value, + float &index, + int laneID +){ + bitonic_sort_8(value, index, laneID); + warp_comparator(value, index, 8, bfe(laneID, 4) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 4) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 4) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 4) ^ bfe(laneID, 0)); +} + +__device__ void bitonic_sort_32( + float &value, + float &index, + int laneID +){ + bitonic_sort_16(value, index, laneID); + warp_comparator(value, index, 16, bfe(laneID, 5) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 5) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 5) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 5) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 5) ^ bfe(laneID, 0)); +} + +__device__ void bitonic_sort_global_2( + float &value, + float &index, + float otherValue, + float otherIndex, + int laneID +) { + if (_TPB_ - 32 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } +} + +__device__ void bitonic_sort_global_4( + float &value, + float &index, + float otherValue, + float otherIndex, + int laneID +) { + if (_TPB_ - 32 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } +} + +__device__ void bitonic_sort_global_8( + float &value, + float &index, + float otherValue, + float otherIndex, + int laneID +) { + if (_TPB_ - 32 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } +} + +__device__ void bitonic_sort_global_16( + float &value, + float &index, + float otherValue, + float otherIndex, + int laneID +) { + if (_TPB_ - 32 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } +} + 
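+// Bitonic-merge building blocks used throughout this file:
+//  * warp_comparator exchanges (value, index) with the lane `stride` away via
+//    __shfl_xor_sync and keeps the smaller or larger element depending on
+//    `direction`.
+//  * bfe(laneID, k) extracts bit k of the lane id; XOR-ing two of these bits
+//    produces the alternating ascending/descending pattern that a bitonic
+//    merge network needs.
+//  * the bitonic_sort_global_* variants run only in the last warp(s) of the
+//    block: thread_comparator folds the freshly sorted candidates into the
+//    (finalValue, finalIndex) registers, and the comparator cascade that
+//    follows re-sorts that tail so it stays ordered for the next round.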
+__device__ void bitonic_sort_global_32( + float &value, + float &index, + float otherValue, + float otherIndex, + int laneID +) { + if (_TPB_ - 32 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } +} + +#if _TPB_ >= 64 +__device__ void bitonic_sort_64( + float &value, + float &index, + _VOLATILE_ float sMem[], + int laneID +){ + bitonic_sort_32(value, index, laneID); + block_comparator(value, index, 32, bfe(laneID, 6) ^ bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, bfe(laneID, 6) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 6) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 6) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 6) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 6) ^ bfe(laneID, 0)); +} +#endif +__device__ void bitonic_sort_global_64( + float &value, + float &index, + float otherValue, + float otherIndex, + _VOLATILE_ float sMem[], + int laneID +) { + if (_TPB_ - 64 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } else { + block_comparator_noop(); + } +} + +#if _TPB_ >= 128 +__device__ void bitonic_sort_128( + float &value, + float &index, + _VOLATILE_ float sMem[], + int laneID +){ + bitonic_sort_64(value, index, sMem, laneID); + block_comparator(value, index, 64, bfe(laneID, 7) ^ bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, bfe(laneID, 7) ^ bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, bfe(laneID, 7) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 7) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 7) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 7) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 7) ^ bfe(laneID, 0)); +} +#endif +__device__ void bitonic_sort_global_128( + float &value, + float &index, + float otherValue, + float otherIndex, + _VOLATILE_ float sMem[], + int laneID +) { + if (_TPB_ - 128 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } else { + block_comparator_noop(); + block_comparator_noop(); + } +} + +#if _TPB_ >= 256 +__device__ void bitonic_sort_256( + float &value, + float &index, + _VOLATILE_ float sMem[], + int laneID +){ + bitonic_sort_128(value, index, sMem, laneID); + block_comparator(value, index, 128, bfe(laneID, 8) ^ bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, bfe(laneID, 8) ^ bfe(laneID, 6), laneID, sMem); + 
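+    // Strides of 32 and above pair lanes in different warps, so the exchange
+    // goes through shared memory (block_comparator); strides of 16 and below
+    // stay inside one warp and use register shuffles (warp_comparator).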
block_comparator(value, index, 32, bfe(laneID, 8) ^ bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, bfe(laneID, 8) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 8) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 8) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 8) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 8) ^ bfe(laneID, 0)); +} +#endif +__device__ void bitonic_sort_global_256( + float &value, + float &index, + float otherValue, + float otherIndex, + _VOLATILE_ float sMem[], + int laneID +) { + if (_TPB_ - 256 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + block_comparator(value, index, 128, !bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } else { + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + } +} + +#if _TPB_ >= 512 +__device__ void bitonic_sort_512( + float &value, + float &index, + _VOLATILE_ float sMem[], + int laneID +){ + bitonic_sort_256(value, index, sMem, laneID); + block_comparator(value, index, 256, bfe(laneID, 9) ^ bfe(laneID, 8), laneID, sMem); + block_comparator(value, index, 128, bfe(laneID, 9) ^ bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, bfe(laneID, 9) ^ bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, bfe(laneID, 9) ^ bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, bfe(laneID, 9) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 9) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 9) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 9) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 9) ^ bfe(laneID, 0)); +} +#endif +__device__ void bitonic_sort_global_512( + float &value, + float &index, + float otherValue, + float otherIndex, + _VOLATILE_ float sMem[], + int laneID +) { + if (_TPB_ - 512 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + block_comparator(value, index, 256, !bfe(laneID, 8), laneID, sMem); + block_comparator(value, index, 128, !bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } else { + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + } +} + + +#if _TPB_ >= 1024 +__device__ void bitonic_sort_1024( + float &value, + float &index, + _VOLATILE_ float sMem[], + int laneID +){ + bitonic_sort_512(value, index, sMem, laneID); + block_comparator(value, index, 512, bfe(laneID, 10) ^ bfe(laneID, 9), laneID, sMem); + block_comparator(value, index, 256, bfe(laneID, 10) ^ bfe(laneID, 8), laneID, sMem); + block_comparator(value, index, 128, bfe(laneID, 10) ^ bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, 
bfe(laneID, 10) ^ bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, bfe(laneID, 10) ^ bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, bfe(laneID, 10) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 10) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 10) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 10) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 10) ^ bfe(laneID, 0)); +} +#endif +__device__ void bitonic_sort_global_1024( + float &value, + float &index, + float otherValue, + float otherIndex, + _VOLATILE_ float sMem[], + int laneID +) { + if (_TPB_ - 1024 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + block_comparator(value, index, 512, !bfe(laneID, 9), laneID, sMem); + block_comparator(value, index, 256, !bfe(laneID, 8), laneID, sMem); + block_comparator(value, index, 128, !bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } else { + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + } +} + +__device__ void load_precomputed_v1( + const float *precomputed, + _VOLATILE_ float *sMem, + int nQuery +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + int adr = (i * nQuery * _K_) + (qid * _K_) + (tid); + sMem[i * _K_ + tid] = precomputed[adr]; + + #else + #pragma unroll + for (int j = 0; j < _K_ / _TPB_; j++){ + int adr = (i * nQuery * _K_) + (qid * _K_) + (j * _TPB_ + tid); + sMem[i * _K_ + j * _TPB_ + tid] = precomputed[adr]; + } + #endif + } + } + __syncthreads(); +} + +__device__ void load_precomputed_v2( + const float *precomputed, + _VOLATILE_ float *sMem, + int iProbe, int nProbe +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + // int adr = (i * nQuery * _K_) + (qid * _K_) + (tid); + int adr = + (qid) * nProbe * _M_ * _K_ +\ + (iProbe) * _M_ * _K_ +\ + (i) * _K_ +\ + (tid); + sMem[i * _K_ + tid] = precomputed[adr]; + + #else + #pragma unroll + for (int j = 0; j < _K_ / _TPB_; j++){ + int adr = (qid) * nProbe * _M_ * _K_ +\ + (iProbe) * _M_ * _K_ +\ + (i) * _K_ +\ + (j * _TPB_ + tid); + sMem[i * _K_ + j * _TPB_ + tid] = precomputed[adr]; + } + #endif + } + } + __syncthreads(); +} + +__device__ void load_precomputed_v3( + const float* part1, + const float* part2, + _VOLATILE_ float *sMem, + int iCell +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + // int adr = (i * nQuery * _K_) + (qid * _K_) + (tid); + int adr1 =\ + (qid) * _M_ * _K_ +\ + (i) * _K_ +\ + (tid); + float precomputedValue = part1[adr1]; + + int adr2 =\ + (iCell) * _M_ * _K_ +\ + (i) * _K_ +\ + (tid); + sMem[i * _K_ + tid] = precomputedValue + part2[adr2]; + + #else + #pragma unroll + for (int j = 0; j < _K_ / _TPB_; j++){ + int adr1 =\ + (qid) * _M_ * _K_ +\ + (i) * _K_ +\ + (j * _TPB_ + tid); + float 
precomputedValue = part1[adr1]; + + int adr2 =\ + (iCell) * _M_ * _K_ +\ + (i) * _K_ +\ + (j * _TPB_ + tid); + sMem[i * _K_ + j * _TPB_ + tid] = precomputedValue + part2[adr2]; + } + #endif + } + } + __syncthreads(); +} + +__device__ void load_part1_to_cache( + const float* part1, + float part1Cache[_M_] +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + int adr1 =\ + (qid) * _M_ * _K_ +\ + (i) * _K_ +\ + (tid); + part1Cache[i] = part1[adr1]; + #endif + } + } +} + +__device__ void load_part2_to_cache( + const float* part2, + float part2Cache[_M_], + int iCell +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + int adr2 =\ + (iCell) * _M_ * _K_ +\ + (i) * _K_ +\ + (tid); + part2Cache[i] = part2[adr2]; + #endif + } + } +} + +__device__ void store_precomputed_to_smem( + float part1Cache[_M_], + float part2Cache[_M_], + _VOLATILE_ float *sMem +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + __syncthreads(); + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + float part1Value = part1Cache[i]; + float part2Value = part2Cache[i]; + sMem[i * _K_ + tid] = part1Value + part2Value; + #endif + } + } + __syncthreads(); +} + +__device__ void load_consume_data( + const uint8n_t* data, + _VOLATILE_ float sMem[], + float &value, + int iN, int nData +){ + #pragma unroll + for (int i = 0; i < _M_ / _NCS_; i++){ + uint8n_t threadData = data[(i * nData) + iN]; + float pre0 = sMem[(i * _NCS_ + 0) * _K_ + int(threadData.val[0]) ]; + float pre1 = sMem[(i * _NCS_ + 1) * _K_ + int(threadData.val[1]) ]; + float pre2 = sMem[(i * _NCS_ + 2) * _K_ + int(threadData.val[2]) ]; + float pre3 = sMem[(i * _NCS_ + 3) * _K_ + int(threadData.val[3]) ]; + value += pre0; + value += pre1; + value += pre2; + value += pre3; + } +} + +__device__ void load_data( + const uint8n_t* data, + uint8n_t dataCache[_M_ / _NCS_], + int iN, int nData +){ + #pragma unroll + for (int i = 0; i < _M_ / _NCS_; i++){ + uint8n_t threadData = data[(i * nData) + iN]; + dataCache[i] = threadData; + } +} + +__device__ void consume_data( + _VOLATILE_ float sMem[], + uint8n_t dataCache[_M_ / _NCS_], + float &value +){ + #pragma unroll + for (int i = 0; i < _M_ / _NCS_; i++){ + uint8n_t threadData = dataCache[i]; + float pre0 = sMem[(i * _NCS_ + 0) * _K_ + int(threadData.val[0]) ]; + float pre1 = sMem[(i * _NCS_ + 1) * _K_ + int(threadData.val[1]) ]; + float pre2 = sMem[(i * _NCS_ + 2) * _K_ + int(threadData.val[2]) ]; + float pre3 = sMem[(i * _NCS_ + 3) * _K_ + int(threadData.val[3]) ]; + value += pre0; + value += pre1; + value += pre2; + value += pre3; + } +} + +__device__ void sort( + float &finalValue, + float &finalIndex, + float value, + float index, + _VOLATILE_ float sMem[], + int nCandidates +){ + const int tid = threadIdx.x; + #if _TPB_ == 32 + bitonic_sort_32(value, index, tid); + + #elif _TPB_ == 64 + bitonic_sort_64(value, index, sMem, tid); + + #elif _TPB_ == 128 + bitonic_sort_128(value, index, sMem, tid); + + #elif _TPB_ == 256 + bitonic_sort_256(value, index, sMem, tid); + + #elif _TPB_ == 512 + bitonic_sort_512(value, index, sMem, tid); + + #elif _TPB_ == 1024 + bitonic_sort_1024(value, index, sMem, tid); + #endif + + switch (nCandidates){ + case 2: + bitonic_sort_global_2( + finalValue, finalIndex, value, index, + tid); + break; + case 4: + bitonic_sort_global_4( + finalValue, 
finalIndex, value, index, + tid); + break; + case 8: + bitonic_sort_global_8( + finalValue, finalIndex, value, index, + tid); + break; + case 16: + bitonic_sort_global_16( + finalValue, finalIndex, value, index, + tid); + break; + case 32: + bitonic_sort_global_32( + finalValue, finalIndex, value, index, + tid); + break; + case 64: + bitonic_sort_global_64( + finalValue, finalIndex, value, index, + sMem, tid); + break; + case 128: + bitonic_sort_global_128( + finalValue, finalIndex, value, index, + sMem, tid); + break; + case 256: + bitonic_sort_global_256( + finalValue, finalIndex, value, index, + sMem, tid); + break; + case 512: + bitonic_sort_global_512( + finalValue, finalIndex, value, index, + sMem, tid); + break; + case 1024: + bitonic_sort_global_1024( + finalValue, finalIndex, value, index, + sMem, tid); + break; + } +} + +__device__ bool is_stack_empty( + int stackSize +){ + return stackSize <= 0; +} + +__device__ bool is_stack_full( + int stackSize +){ + return stackSize >= _STACKCAP_ - 1; +} + +__device__ void push_stack( + pair stack[_STACKCAP_], + pair newPair, + int &stackSize +) { + if (is_stack_full(stackSize)){ + return; + } else { + #pragma unroll + for (int i = _STACKCAP_ - 1; i >= 1; i--){ + stack[i] = stack[i - 1]; + } + stack[0] = newPair; + stackSize ++; + } +} + +__device__ void pop_stack( + pair stack[_STACKCAP_], + pair &outPair, + int &stackSize +) { + if (is_stack_empty(stackSize)){ + return; + } else { + outPair = stack[0]; + #pragma unroll + for (int i=0; i<_STACKCAP_-1; i++){ + stack[i] = stack[i+1]; + } + stackSize--; + } +} + +__device__ void init_stack( + pair stack[_STACKCAP_] +){ + pair emptyPair; + emptyPair.value = -INFINITY; + emptyPair.index = -1; + #pragma unroll + for (int i=0; i < _STACKCAP_; i++){ + stack[i] = emptyPair; + } +} + + +extern "C" +__global__ void ivfpq_topk( + const uint8n_t* __restrict__ data, + const float* __restrict__ precomputed, + const uint8_t* __restrict__ isEmpty, + const ll_t* __restrict__ cellStart, + const ll_t* __restrict__ cellSize, + const ll_t* __restrict__ totalSize, + const ll_t* __restrict__ nProbeList, + float* __restrict__ gValue, + ll_t* __restrict__ gIndex, + int nData, int nQuery, int maxNProbe, int nCandidates +) { + const int tid = threadIdx.x; // thread ID + const int qid = blockIdx.x; // query ID + const int nProbe = nProbeList[qid]; + + pair stack[_STACKCAP_]; + int stackSize = 0; + init_stack(stack); + + extern __shared__ _VOLATILE_ float sMem[]; // M * K + load_precomputed_v1(precomputed, sMem, nQuery); + float finalValue = -INFINITY; + float finalIndex = -1; + float minValue = -INFINITY; + const ll_t threadTotalSize = totalSize[qid]; + const int nIter = (threadTotalSize + _TPB_ - 1) / _TPB_; + int cCell = 0; + int cCellStart = cellStart[qid * maxNProbe + cCell]; + int cCellSize = cellSize[qid * maxNProbe + cCell]; + int cCellEnd = cCellStart + cCellSize; + int iN = cCellStart + tid; + + for (int i = 0; i < nIter; i++){ + while (iN >= cCellEnd){ + cCell ++; // increment cell index by 1 + if (cCell >= nProbe) + break; + int pCellEnd = cCellEnd; + int pCellStart = cCellStart; + cCellStart = cellStart[qid * maxNProbe + cCell]; + if (cCellStart == pCellStart){ + continue; + } + cCellSize = cellSize[qid * maxNProbe + cCell]; + cCellEnd = cCellStart + cCellSize; + iN = iN - pCellEnd + cCellStart; + } + pair newPair; + newPair.value = -INFINITY; + newPair.index = -1; + int cIsEmpty = 0; + if (likely(iN < cCellEnd)){ + newPair.value = 0.f; + newPair.index = iN; + cIsEmpty = isEmpty[iN]; + uint8n_t 
dataCache[_M_ / _NCS_]; + load_data(data, dataCache, iN, nData); + consume_data(sMem, dataCache, newPair.value); + } + newPair.value = cIsEmpty == 0 ? newPair.value : -INFINITY; + newPair.index = cIsEmpty == 0 ? newPair.index : -1; + + __syncthreads(); + float temp1, temp2; + if (tid == 0){ + temp1 = sMem[0]; + temp2 = sMem[1]; + sMem[0] = 0; + } + __syncthreads(); + + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (is_stack_full(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + + if (newPair.value > minValue){ + push_stack(stack, newPair, stackSize); + } + __syncthreads(); + + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + if (tid == 0){ + sMem[0] = temp1; + sMem[1] = temp2; + } + __syncthreads(); + iN += _TPB_; + } + + sMem[0] = 0; + __syncthreads(); + #pragma unroll + for (int i=0; i<_STACKCAP_; i++){ + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (!is_stack_empty(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + __syncthreads(); + + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + sMem[0] = 0; + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + } + + if (_TPB_ - nCandidates <= tid){ + const int writeAddress = (qid * nCandidates) + tid - ( _TPB_ - nCandidates); + gValue[writeAddress] = finalValue; + gIndex[writeAddress] = finalIndex; + } +} + +extern "C" +__global__ void ivfpq_topk_residual( + const uint8n_t* __restrict__ data, + const float* __restrict__ precomputed, + const float* __restrict__ baseSims, + const uint8_t* __restrict__ isEmpty, + const ll_t* __restrict__ cellStart, + const ll_t* __restrict__ cellSize, + const ll_t* __restrict__ totalSize, + const ll_t* __restrict__ nProbeList, + float* __restrict__ gValue, + ll_t* __restrict__ gIndex, + int nData, int nQuery, int maxNProbe, int nCandidates +) { + const int tid = threadIdx.x; // thread ID + const int qid = blockIdx.x; // query ID + const int nProbe = nProbeList[qid]; + + extern __shared__ _VOLATILE_ float sMem[]; // M * K + const ll_t threadTotalSize = totalSize[qid]; + float finalValue = -INFINITY; + float finalIndex = -1; + int cCellStart = -1; + for (int cCell = 0; cCell < nProbe; cCell++){ + int pCellStart = cCellStart; + cCellStart = cellStart[qid * maxNProbe + cCell]; + if (cCellStart == pCellStart){ + continue; + } + int cCellSize = cellSize[qid * maxNProbe + cCell]; + load_precomputed_v2(precomputed, sMem, cCell, maxNProbe); + float cBaseSim = baseSims[qid * maxNProbe + cCell]; + int cCellEnd = cCellStart + cCellSize; + int nIter = (cCellSize + _TPB_ - 1) / _TPB_; + for (int iter = 0; iter < nIter; iter++ ){ + int iN = cCellStart + iter * _TPB_ + tid; + float value; + float index = iN; + int cIsEmpty = 0; + if (cCellStart <= iN && iN < cCellEnd){ + value = cBaseSim; + cIsEmpty = isEmpty[iN]; + uint8n_t dataCache[_M_ / _NCS_]; + load_data(data, dataCache, iN, nData); + consume_data(sMem, dataCache, value); + } else { + value = -INFINITY; + } + value = cIsEmpty == 0 ? value : -INFINITY; + index = cIsEmpty == 0 ? 
index : -1; + + sort( + finalValue, finalIndex, + value, index, + sMem, nCandidates + ); + } + } + + if (_TPB_ - nCandidates <= tid){ + const int writeAddress = (qid * nCandidates) + tid - ( _TPB_ - nCandidates); + gValue[writeAddress] = finalValue; + gIndex[writeAddress] = finalIndex; + } +} + +extern "C" +__global__ void ivfpq_topk_residual_precomputed( + const uint8n_t* __restrict__ data, + const float* __restrict__ part1, + const float* __restrict__ part2, + const ll_t* __restrict__ cells, + const float* __restrict__ baseSims, + const uint8_t* __restrict__ isEmpty, + const ll_t* __restrict__ cellStart, + const ll_t* __restrict__ cellSize, + const ll_t* __restrict__ totalSize, + const ll_t* __restrict__ nProbeList, + float* __restrict__ gValue, + ll_t* __restrict__ gIndex, + int nData, int nQuery, int maxNProbe, int nCandidates +) { + const int tid = threadIdx.x; // thread ID + const int qid = blockIdx.x; // query ID + const int nProbe = nProbeList[qid]; + + pair stack[_STACKCAP_]; + int stackSize = 0; + init_stack(stack); + + extern __shared__ _VOLATILE_ float sMem[]; // M * K + const ll_t threadTotalSize = totalSize[qid]; + float finalValue = -INFINITY; + float finalIndex = -1; + float minValue = -INFINITY; + float part1Cache[_M_]; + float part2Cache[_M_]; + load_part1_to_cache(part1, part1Cache); + + int nCellStart = cellStart[qid * maxNProbe]; + int nCellSize = cellSize[qid * maxNProbe]; + int nCellEnd = nCellStart + nCellSize; + int iCell = cells[qid * maxNProbe]; + bool nCellRepeated = false; + bool cCellRepeated = false; + load_part2_to_cache(part2, part2Cache, iCell); + + for (int cCell = 0; cCell < nProbe; cCell++){ + int cCellStart = nCellStart; + int cCellSize = nCellSize; + int cCellEnd = nCellEnd; + if (!cCellRepeated){ + store_precomputed_to_smem(part1Cache, part2Cache, sMem); + } + + if (cCell < nProbe - 1){ + int tCellStart = cellStart[qid * maxNProbe + cCell + 1]; + if (tCellStart != cCellStart){ + nCellStart = tCellStart; + nCellSize = cellSize[qid * maxNProbe + cCell + 1]; + nCellEnd = nCellStart + nCellSize; + iCell = cells[qid * maxNProbe + cCell + 1]; + load_part2_to_cache(part2, part2Cache, iCell); + nCellRepeated = false; + } else { + nCellRepeated = true; + } + } + if (cCellRepeated){ + cCellRepeated = nCellRepeated; + continue; + } + cCellRepeated = nCellRepeated; + float cBaseSim = baseSims[qid * maxNProbe + cCell]; + int nIter = (cCellSize + _TPB_ - 1) / _TPB_; + for (int iter = 0; iter < nIter; iter++ ){ + int iN = cCellStart + iter * _TPB_ + tid; + pair newPair; + newPair.value = -INFINITY; + newPair.index = -1; + int cIsEmpty = 0; + if (iN < cCellEnd){ + newPair.value = cBaseSim; + newPair.index = iN; + cIsEmpty = isEmpty[iN]; + uint8n_t dataCache[_M_ / _NCS_]; + load_data(data, dataCache, iN, nData); + consume_data(sMem, dataCache, newPair.value); + } + + newPair.value = cIsEmpty == 0 ? newPair.value : -INFINITY; + newPair.index = cIsEmpty == 0 ? 
newPair.index : -1; + + __syncthreads(); + float temp1, temp2; + if (tid == 0){ + temp1 = sMem[0]; + temp2 = sMem[1]; + sMem[0] = 0; + } + __syncthreads(); + + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (is_stack_full(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + if (newPair.value > minValue){ + push_stack(stack, newPair, stackSize); + } + __syncthreads(); + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + if (tid == 0){ + sMem[0] = temp1; + sMem[1] = temp2; + } + __syncthreads(); + } + } + + sMem[0] = 0; + __syncthreads(); + for (int i=0; i < _STACKCAP_; i++){ + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (!is_stack_empty(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + __syncthreads(); + + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + sMem[0] = 0; + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + } + + if (_TPB_ - nCandidates <= tid){ + const int writeAddress = (qid * nCandidates) + tid - ( _TPB_ - nCandidates); + gValue[writeAddress] = finalValue; + gIndex[writeAddress] = finalIndex; + } +} \ No newline at end of file diff --git a/cuda_code/distributed_join_1.cu b/cuda_code/distributed_join_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..2cb788c3d53c2cbafb711ae1cdf57fb3db9c6591 --- /dev/null +++ b/cuda_code/distributed_join_1.cu @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* +This benchmark runs distributed join on random keys. Both the left and the right tables contain two +columns. The key column consists of random integers and the payload column consists of row ids. + +Parameters: + +**--key-type {int32_t,int64_t}** + +Data type for the key columns. Default: `int64_t`. + +**--payload-type {int32_t,int64_t}** + +Data type for the payload columns. Default: `int64_t`. + +**--build-table-nrows [INTEGER]** + +Number of rows in the build table per GPU. Default: `100'000'000`. + +**--probe-table-nrows [INTEGER]** + +Number of rows in the probe table per GPU. Default: `100'000'000`. + +**--selectivity [FLOAT]** + +The probability (in range 0.0 - 1.0) of each probe table row has matches in the build table. +Default: `0.3`. + +**--duplicate-build-keys** + +If specified, key columns of the build table are allowed to have duplicates. 
+ +**--over-decomposition-factor [INTEGER]** + +Partition the input tables into (over decomposition factor) * (number of GPUs) buckets, which is +used for computation-communication overlap. This argument has to be an integer >= 1. Higher number +means smaller batch size. `1` means no overlap. Default: `1`. + +**--communicator [STR]** + +This option can be either "UCX" or "NCCL", which controls what communicator to use. Default: `UCX`. + +**--registration-method [STR]** + +If the UCX communicator is selected, this option can be either "none", "preregistered" or "buffer", +to control how registration is performed for GPUDirect RDMA. +- "none": No preregistration. +- "preregistered": The whole RMM memory pool will be preregistered. +- "buffer": Preregister a set of communication buffers. The communication in distributed join will +go through these buffers. +*/ + +#include "../src/communicator.hpp" +#include "../src/compression.hpp" +#include "../src/distributed_join.hpp" +#include "../src/error.hpp" +#include "../src/generate_table.cuh" +#include "../src/registered_memory_resource.hpp" +#include "../src/setup.hpp" + +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static std::string key_type = "int64_t"; +static std::string payload_type = "int64_t"; + +static cudf::size_type BUILD_TABLE_NROWS_EACH_RANK = 100'000'000; +static cudf::size_type PROBE_TABLE_NROWS_EACH_RANK = 100'000'000; +static double SELECTIVITY = 0.3; +static bool IS_BUILD_TABLE_KEY_UNIQUE = true; +static int OVER_DECOMPOSITION_FACTOR = 1; +static std::string COMMUNICATOR_NAME = "UCX"; +static std::string REGISTRATION_METHOD = "preregistered"; +static int64_t COMMUNICATOR_BUFFER_SIZE = 1'600'000'000LL; +static bool COMPRESSION = false; +static int NVLINK_DOMAIN_SIZE = 1; +static bool REPORT_TIMING = false; + +void parse_command_line_arguments(int argc, char *argv[]) +{ + for (int iarg = 0; iarg < argc; iarg++) { + if (!strcmp(argv[iarg], "--key-type")) { key_type = argv[iarg + 1]; } + + if (!strcmp(argv[iarg], "--payload-type")) { payload_type = argv[iarg + 1]; } + + if (!strcmp(argv[iarg], "--build-table-nrows")) { + BUILD_TABLE_NROWS_EACH_RANK = atoi(argv[iarg + 1]); + } + + if (!strcmp(argv[iarg], "--probe-table-nrows")) { + PROBE_TABLE_NROWS_EACH_RANK = atoi(argv[iarg + 1]); + } + + if (!strcmp(argv[iarg], "--selectivity")) { SELECTIVITY = atof(argv[iarg + 1]); } + + if (!strcmp(argv[iarg], "--duplicate-build-keys")) { IS_BUILD_TABLE_KEY_UNIQUE = false; } + + if (!strcmp(argv[iarg], "--over-decomposition-factor")) { + OVER_DECOMPOSITION_FACTOR = atoi(argv[iarg + 1]); + } + + if (!strcmp(argv[iarg], "--communicator")) { COMMUNICATOR_NAME = argv[iarg + 1]; } + + if (!strcmp(argv[iarg], "--compression")) { COMPRESSION = true; } + + if (!strcmp(argv[iarg], "--registration-method")) { REGISTRATION_METHOD = argv[iarg + 1]; } + + if (!strcmp(argv[iarg], "--nvlink-domain-size")) { NVLINK_DOMAIN_SIZE = atoi(argv[iarg + 1]); } + + if (!strcmp(argv[iarg], "--report-timing")) { REPORT_TIMING = true; } + } +} + +void report_configuration() +{ + MPI_CALL(MPI_Barrier(MPI_COMM_WORLD)); + + int mpi_rank; + int mpi_size; + MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)); + MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)); + if (mpi_rank != 0) return; + + std::cout << "========== Parameters ==========" << std::endl; + std::cout << std::boolalpha; + std::cout << "Key type: " << key_type << std::endl; + std::cout << 
"Payload type: " << payload_type << std::endl; + std::cout << "Number of rows in the build table: " + << static_cast(BUILD_TABLE_NROWS_EACH_RANK) * mpi_size / 1e6 << " million" + << std::endl; + std::cout << "Number of rows in the probe table: " + << static_cast(PROBE_TABLE_NROWS_EACH_RANK) * mpi_size / 1e6 << " million" + << std::endl; + std::cout << "Selectivity: " << SELECTIVITY << std::endl; + std::cout << "Keys in build table are unique: " << IS_BUILD_TABLE_KEY_UNIQUE << std::endl; + std::cout << "Over-decomposition factor: " << OVER_DECOMPOSITION_FACTOR << std::endl; + std::cout << "Communicator: " << COMMUNICATOR_NAME << std::endl; + if (COMMUNICATOR_NAME == "UCX") + std::cout << "Registration method: " << REGISTRATION_METHOD << std::endl; + std::cout << "Compression: " << COMPRESSION << std::endl; + std::cout << "NVLink domain size: " << NVLINK_DOMAIN_SIZE << std::endl; + std::cout << "================================" << std::endl; +} + +int main(int argc, char *argv[]) +{ + MPI_CALL(MPI_Init(&argc, &argv)); + set_cuda_device(); + + /* Parse command line arguments */ + + parse_command_line_arguments(argc, argv); + report_configuration(); + + cudf::size_type RAND_MAX_VAL = + std::max(BUILD_TABLE_NROWS_EACH_RANK, PROBE_TABLE_NROWS_EACH_RANK) * 2; + + /* Initialize communicator and memory pool */ + + int mpi_rank; + int mpi_size; + MPI_CALL(MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)); + MPI_CALL(MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)); + + Communicator *communicator{nullptr}; + // `registered_mr` holds reference to the registered memory resource, and *nullptr* if registered + // memory resource is not used. + registered_memory_resource *registered_mr{nullptr}; + // pool_mr need to live on heap because for registered memory resources, the memory pool needs + // to deallocated before UCX cleanup, which can be achieved by calling the destructor of + // `poll_mr`. 
+ rmm::mr::pool_memory_resource *pool_mr{nullptr}; + + setup_memory_pool_and_communicator(communicator, + registered_mr, + pool_mr, + COMMUNICATOR_NAME, + REGISTRATION_METHOD, + COMMUNICATOR_BUFFER_SIZE); + + void *preallocated_pinned_buffer; + CUDA_RT_CALL(cudaMallocHost(&preallocated_pinned_buffer, mpi_size * sizeof(size_t))); + + /* Warmup nvcomp */ + + if (COMPRESSION) { warmup_nvcomp(); } + + /* Generate build table and probe table on each rank */ + + std::unique_ptr left; + std::unique_ptr right; + +#define generate_tables(KEY_T, PAYLOAD_T) \ + { \ + std::tie(left, right) = \ + generate_tables_distributed(BUILD_TABLE_NROWS_EACH_RANK, \ + PROBE_TABLE_NROWS_EACH_RANK, \ + SELECTIVITY, \ + RAND_MAX_VAL, \ + IS_BUILD_TABLE_KEY_UNIQUE, \ + communicator); \ + } + +#define generate_tables_key_type(KEY_T) \ + { \ + if (payload_type == "int64_t") { \ + generate_tables(KEY_T, int64_t) \ + } else if (payload_type == "int32_t") { \ + generate_tables(KEY_T, int32_t) \ + } else { \ + throw std::runtime_error("Unknown payload type"); \ + } \ + } + + if (key_type == "int64_t") { + generate_tables_key_type(int64_t) + } else if (key_type == "int32_t") { + generate_tables_key_type(int32_t) + } else { + throw std::runtime_error("Unknown key type"); + } + + /* Generate compression options */ + + std::vector left_compression_options = + generate_compression_options_distributed(left->view(), COMPRESSION); + std::vector right_compression_options = + generate_compression_options_distributed(right->view(), COMPRESSION); + + /* Distributed join */ + + CUDA_RT_CALL(cudaDeviceSynchronize()); + + MPI_Barrier(MPI_COMM_WORLD); + cudaProfilerStart(); + double start = MPI_Wtime(); + + std::unique_ptr join_result = distributed_inner_join(left->view(), + right->view(), + {0}, + {0}, + communicator, + left_compression_options, + right_compression_options, + OVER_DECOMPOSITION_FACTOR, + REPORT_TIMING, + preallocated_pinned_buffer, + NVLINK_DOMAIN_SIZE); + + MPI_Barrier(MPI_COMM_WORLD); + double stop = MPI_Wtime(); + cudaProfilerStop(); + + if (mpi_rank == 0) { std::cout << "Elasped time (s) " << stop - start << std::endl; } + + /* Cleanup */ + left.reset(); + right.reset(); + join_result.reset(); + CUDA_RT_CALL(cudaFreeHost(preallocated_pinned_buffer)); + CUDA_RT_CALL(cudaDeviceSynchronize()); + + destroy_memory_pool_and_communicator( + communicator, registered_mr, pool_mr, COMMUNICATOR_NAME, REGISTRATION_METHOD); + + MPI_CALL(MPI_Finalize()); + + return 0; +} diff --git a/cuda_code/div_scalar_2.cu b/cuda_code/div_scalar_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..c26867a20b3ca09ee5c2956fb66a0a4f8b5f453f --- /dev/null +++ b/cuda_code/div_scalar_2.cu @@ -0,0 +1,12 @@ +#include "scalar.h" + +__device__ double op(double d1,double d2,double *params) { + return d2 / d1; +} + +extern "C" +__global__ void div_scalar_double(int n, int idx,double dx,double *dy,int incy,double *params,double *result,int blockSize) { + transform(n,idx,dx,dy,incy,params,result,blockSize); + } + + diff --git a/cuda_code/div_scalar_3.cu b/cuda_code/div_scalar_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..51e35c3ed75878820d6ce242459f518106a63992 --- /dev/null +++ b/cuda_code/div_scalar_3.cu @@ -0,0 +1,12 @@ +#include "scalar.h" + +__device__ float op(float d1,float d2,float *params) { + return d2 / d1; +} + +extern "C" +__global__ void div_scalar_float(int n, int idx,float dx,float *dy,int incy,float *params,float *result,int blockSize) { + transform(n,idx,dx,dy,incy,params,result,blockSize); + 
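+ // transform() is provided by scalar.h (not shown in this patch); judging from
+ // its parameter names it applies op(dx, dy[i*incy]) element-wise over the n
+ // inputs and writes the results to result -- an inference, not a verified API.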
} + + diff --git a/cuda_code/dlarfg_1.cu b/cuda_code/dlarfg_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..7000ab753d6e7686ec29be2420b52121088aeda2 --- /dev/null +++ b/cuda_code/dlarfg_1.cu @@ -0,0 +1,176 @@ +/* + -- MAGMA (version 2.5.4) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date October 2020 + + @generated from magmablas/zlarfg.cu, normal z -> d, Thu Oct 8 23:05:34 2020 + + @author Mark Gates +*/ +#include "magma_internal.h" +#include "magma_templates.h" + +#define REAL + +// 512 is maximum number of threads for CUDA capability 1.x +#define NB 512 + + +/******************************************************************************/ +// kernel for magma_dlarfg. +// Uses one block of NB (currently 512) threads. +// Each thread sums dx[ tx + k*NB ]^2 for k = 0, 1, ..., +// then does parallel sum reduction to get norm-squared. +// +// Currently setup to use NB threads, no matter how small dx is. +// This was slightly faster (5%) than passing n to magma_sum_reduce. +// To use number of threads = min( NB, max( 1, n-1 )), pass n as +// argument to magma_sum_reduce, rather than as template parameter. +__global__ void +dlarfg_kernel( + int n, + double* dalpha, + double* dx, int incx, + double* dtau ) +{ + const int tx = threadIdx.x; + __shared__ double swork[ NB ]; + // TODO is it faster for each thread to have its own scale (register)? + // if so, communicate it via swork[0] + __shared__ double sscale; + __shared__ double sscale2; + double tmp; + + // find max of [dalpha, dx], to use as scaling to avoid unnecesary under- and overflow + if ( tx == 0 ) { + tmp = *dalpha; + #ifdef COMPLEX + swork[tx] = max( fabs( MAGMA_D_REAL(tmp)), fabs( MAGMA_D_IMAG(tmp)) ); + #else + swork[tx] = fabs(tmp); + #endif + } + else { + swork[tx] = 0; + } + for( int j = tx; j < n-1; j += NB ) { + tmp = dx[j*incx]; + #ifdef COMPLEX + swork[tx] = max( swork[tx], max( fabs( MAGMA_D_REAL(tmp)), fabs( MAGMA_D_IMAG(tmp)) )); + #else + swork[tx] = max( swork[tx], fabs(tmp) ); + #endif + } + magma_max_reduce< NB >( tx, swork ); + if ( tx == 0 ) + sscale = swork[0]; + __syncthreads(); + + // sum norm^2 of dx/sscale + // dx has length n-1 + swork[tx] = 0; + if ( sscale > 0 ) { + for( int j = tx; j < n-1; j += NB ) { + tmp = dx[j*incx] / sscale; + swork[tx] += MAGMA_D_REAL(tmp)*MAGMA_D_REAL(tmp) + MAGMA_D_IMAG(tmp)*MAGMA_D_IMAG(tmp); + } + magma_sum_reduce< NB >( tx, swork ); + //magma_sum_reduce( blockDim.x, tx, swork ); + } + + if ( tx == 0 ) { + double alpha = *dalpha; + if ( swork[0] == 0 && MAGMA_D_IMAG(alpha) == 0 ) { + // H = I + *dtau = MAGMA_D_ZERO; + } + else { + // beta = norm( [dalpha, dx] ) + double beta; + tmp = alpha / sscale; + beta = sscale * sqrt( MAGMA_D_REAL(tmp)*MAGMA_D_REAL(tmp) + MAGMA_D_IMAG(tmp)*MAGMA_D_IMAG(tmp) + swork[0] ); + beta = -copysign( beta, MAGMA_D_REAL(alpha) ); + // todo: deal with badly scaled vectors (see lapack's larfg) + *dtau = MAGMA_D_MAKE( (beta - MAGMA_D_REAL(alpha)) / beta, -MAGMA_D_IMAG(alpha) / beta ); + *dalpha = MAGMA_D_MAKE( beta, 0 ); + sscale2 = 1 / (alpha - beta); + } + } + + // scale x (if norm was not 0) + __syncthreads(); + if ( swork[0] != 0 ) { + for( int j = tx; j < n-1; j += NB ) { + dx[j*incx] *= sscale2; + } + } +} + + +/***************************************************************************//** + Purpose + ------- + DLARFG generates a real elementary reflector (Householder matrix) + H of order n, such that + + H * ( alpha ) = ( beta ), H**H * H = I. 
+ ( x ) ( 0 ) + + where alpha and beta are scalars, with beta real and beta = ±norm([alpha, x]), + and x is an (n-1)-element real vector. H is represented in the form + + H = I - tau * ( 1 ) * ( 1 v**H ), + ( v ) + + where tau is a real scalar and v is a real (n-1)-element vector. + Note that H is not symmetric. + + If the elements of x are all zero and dalpha is real, then tau = 0 + and H is taken to be the unit matrix. + + Otherwise 1 <= real(tau) <= 2 and abs(tau-1) <= 1. + + Arguments + --------- + @param[in] + n INTEGER + The order of the elementary reflector. + + @param[in,out] + dalpha DOUBLE PRECISION* on the GPU. + On entry, pointer to the value alpha, i.e., the first entry of the vector. + On exit, it is overwritten with the value beta. + + @param[in,out] + dx DOUBLE PRECISION array, dimension (1+(N-2)*abs(INCX)), on the GPU + On entry, the (n-1)-element vector x. + On exit, it is overwritten with the vector v. + + @param[in] + incx INTEGER + The increment between elements of X. INCX > 0. + + @param[out] + dtau DOUBLE PRECISION* on the GPU. + Pointer to the value tau. + + @param[in] + queue magma_queue_t + Queue to execute in. + + @ingroup magma_larfg +*******************************************************************************/ +extern "C" +void magmablas_dlarfg( + magma_int_t n, + magmaDouble_ptr dalpha, + magmaDouble_ptr dx, magma_int_t incx, + magmaDouble_ptr dtau, + magma_queue_t queue ) +{ + dim3 threads( NB ); + dim3 blocks( 1 ); + dlarfg_kernel<<< blocks, threads, 0, queue->cuda_stream() >>>( n, dalpha, dx, incx, dtau ); +} diff --git a/cuda_code/dmdotc_3.cu b/cuda_code/dmdotc_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..c74e55e8699bb627776dae843c8429849ed5579b --- /dev/null +++ b/cuda_code/dmdotc_3.cu @@ -0,0 +1,1098 @@ +/* + -- MAGMA (version 2.1.0) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date August 2016 + + @generated from sparse-iter/blas/zmdotc.cu, normal z -> d, Tue Aug 30 09:38:43 2016 + @author Hartwig Anzt + +*/ +#include "magmasparse_internal.h" + +#define BLOCK_SIZE 256 + +#define REAL + + + +// dot product for multiple vectors +__global__ void +magma_dmdotc1_kernel_1( + int Gs, + int n, + double * v0, + double * w0, + double * vtmp) +{ + extern __shared__ double temp[]; + int Idx = threadIdx.x; + int i = blockIdx.x * blockDim.x + Idx; + + // 1 vectors v(i)/w(i) + + temp[ Idx ] = ( i < n ) ? 
+ v0[ i ] * w0[ i ] : MAGMA_D_ZERO; + + __syncthreads(); + if ( Idx < 128 ){ + temp[ Idx ] += temp[ Idx + 128 ]; + } + __syncthreads(); + if ( Idx < 64 ){ + temp[ Idx ] += temp[ Idx + 64 ]; + } + __syncthreads(); + #ifdef COMPLEX + if( Idx < 32 ){ + temp[ Idx ] += temp[ Idx + 32 ]; + __syncthreads(); + temp[ Idx ] += temp[ Idx + 16 ]; + __syncthreads(); + temp[ Idx ] += temp[ Idx + 8 ]; + __syncthreads(); + temp[ Idx ] += temp[ Idx + 4 ]; + __syncthreads(); + temp[ Idx ] += temp[ Idx + 2 ]; + __syncthreads(); + temp[ Idx ] += temp[ Idx + 1 ]; + __syncthreads(); + } + #endif + #ifdef REAL + if( Idx < 32 ){ + volatile double *temp2 = temp; + temp2[ Idx ] += temp2[ Idx + 32 ]; + temp2[ Idx ] += temp2[ Idx + 16 ]; + temp2[ Idx ] += temp2[ Idx + 8 ]; + temp2[ Idx ] += temp2[ Idx + 4 ]; + temp2[ Idx ] += temp2[ Idx + 2 ]; + temp2[ Idx ] += temp2[ Idx + 1 ]; + } + #endif + + if ( Idx == 0 ){ + vtmp[ blockIdx.x ] = temp[ 0 ]; + } +} + + + +// block reduction for 1 vectors +__global__ void +magma_dmdotc1_kernel_2( + int Gs, + int n, + double * vtmp, + double * vtmp2 ) +{ + extern __shared__ double temp[]; + int Idx = threadIdx.x; + int blockSize = 128; + int gridSize = blockSize * 2 * gridDim.x; + + int i = blockIdx.x * ( blockSize * 2 ) + Idx; + temp[Idx] = MAGMA_D_ZERO; + while (i < Gs ) { + temp[ Idx ] += vtmp[ i ]; + temp[ Idx ] += + ( i + (blockSize) < Gs ) ? vtmp[ i + (blockSize) ] + : MAGMA_D_ZERO; + i += gridSize; + } + __syncthreads(); + if ( Idx < 64 ){ + temp[ Idx ] += temp[ Idx + 64 ]; + } + __syncthreads(); + #ifdef COMPLEX + if( Idx < 32 ){ + temp[ Idx ] += temp[ Idx + 32 ]; + __syncthreads(); + temp[ Idx ] += temp[ Idx + 16 ]; + __syncthreads(); + temp[ Idx ] += temp[ Idx + 8 ]; + __syncthreads(); + temp[ Idx ] += temp[ Idx + 4 ]; + __syncthreads(); + temp[ Idx ] += temp[ Idx + 2 ]; + __syncthreads(); + temp[ Idx ] += temp[ Idx + 1 ]; + __syncthreads(); + } + #endif + #ifdef REAL + if( Idx < 32 ){ + volatile double *temp2 = temp; + temp2[ Idx ] += temp2[ Idx + 32 ]; + temp2[ Idx ] += temp2[ Idx + 16 ]; + temp2[ Idx ] += temp2[ Idx + 8 ]; + temp2[ Idx ] += temp2[ Idx + 4 ]; + temp2[ Idx ] += temp2[ Idx + 2 ]; + temp2[ Idx ] += temp2[ Idx + 1 ]; + } + #endif + + if ( Idx == 0 ){ + vtmp2[ blockIdx.x ] = temp[ 0 ]; + } +} + +/** + Purpose + ------- + + Computes the scalar product of a set of 1 vectors such that + + skp[0] = [ ] + + Returns the vector skp. + In case there are less dot products required, an easy workaround is + given by doubling input. + + Arguments + --------- + + @param[in] + n int + length of v_i and w_i + + @param[in] + v0 magmaDouble_ptr + input vector + + @param[in] + w0 magmaDouble_ptr + input vector + + @param[in] + d1 magmaDouble_ptr + workspace + + @param[in] + d2 magmaDouble_ptr + workspace + + @param[out] + skp magmaDouble_ptr + vector[4] of scalar products [] + This vector is located on the host + + @param[in] + queue magma_queue_t + Queue to execute in. 
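+
+ For reference, skp[0] produced by this routine is the plain dot product
+ sum_{i<n} v0[i] * w0[i]. A minimal host-side check could look as follows
+ (h_v0, h_w0 and h_skp0 are illustrative host-side names, not MAGMA API):
+
+ double ref = 0.0;
+ for (magma_int_t i = 0; i < n; ++i) ref += h_v0[i] * h_w0[i];
+ // h_skp0 (= skp[0]) should agree with ref up to floating-point rounding
+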
+ + @ingroup magmasparse_cblas + ********************************************************************/ + +extern "C" magma_int_t +magma_dmdotc1( + magma_int_t n, + magmaDouble_ptr v0, + magmaDouble_ptr w0, + magmaDouble_ptr d1, + magmaDouble_ptr d2, + magmaDouble_ptr skp, + magma_queue_t queue ) +{ + int local_block_size=256; + dim3 Bs( local_block_size ); + dim3 Gs( magma_ceildiv( n, local_block_size ) ); + dim3 Gs_next; + int Ms = (local_block_size) * sizeof( double ); // 1 skp + magmaDouble_ptr aux1 = d1, aux2 = d2; + int b = 1; + + + magma_dmdotc1_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> + ( Gs.x, n, v0, w0, d1 ); + + while( Gs.x > 1 ) { + Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); + if ( Gs_next.x == 1 ) Gs_next.x = 2; + magma_dmdotc1_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> + ( Gs.x, n, aux1, aux2 ); + Gs_next.x = Gs_next.x /2; + Gs.x = Gs_next.x; + b = 1 - b; + if ( b ) { aux1 = d1; aux2 = d2; } + else { aux2 = d1; aux1 = d2; } + } + + // copy vectors to host + magma_dgetvector( 1 , aux1, 1, skp, 1, queue ); + + + return MAGMA_SUCCESS; +} + +// 2 dot products // + + +// initialize arrays with zero +__global__ void +magma_dmdotc2_gpumemzero( + double * d, + int n ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + + if( i < n ){ + for( int j=0; j<2; j++) + d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); + } +} + + +// dot product for multiple vectors +__global__ void +magma_dmdotc2_kernel_1( + int Gs, + int n, + double * v0, + double * w0, + double * v1, + double * w1, + double * vtmp) +{ + extern __shared__ double temp[]; + int Idx = threadIdx.x; + int i = blockIdx.x * blockDim.x + Idx; + int j; + + // 2 vectors v(i)/w(i) + + temp[ Idx ] = ( i < n ) ? + v0[ i ] * w0[ i ] : MAGMA_D_ZERO; + + temp[ Idx + blockDim.x ] = ( i < n ) ? 
+ v1[ i ] * w1[ i ] : MAGMA_D_ZERO; + + + __syncthreads(); + if ( Idx < 128 ){ + for( j=0; j<2; j++){ + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; + } + } + __syncthreads(); + if ( Idx < 64 ){ + for( j=0; j<2; j++){ + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; + } + } + __syncthreads(); + #ifdef COMPLEX + if( Idx < 32 ){ + for( j=0; j<2; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; + __syncthreads(); + for( j=0; j<2; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; + __syncthreads(); + for( j=0; j<2; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; + __syncthreads(); + for( j=0; j<2; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; + __syncthreads(); + for( j=0; j<2; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; + __syncthreads(); + for( j=0; j<2; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; + __syncthreads(); + } + #endif + #ifdef REAL + if( Idx < 32 ){ + volatile double *temp2 = temp; + for( j=0; j<2; j++){ + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; + } + } + #endif + + if ( Idx == 0 ){ + for( j=0; j<2; j++){ + vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; + } + } +} + + + +// block reduction for 2 vectors +__global__ void +magma_dmdotc2_kernel_2( + int Gs, + int n, + double * vtmp, + double * vtmp2 ) +{ + extern __shared__ double temp[]; + int Idx = threadIdx.x; + int blockSize = 128; + int gridSize = blockSize * 2 * gridDim.x; + int j; + + for( j=0; j<2; j++){ + int i = blockIdx.x * ( blockSize * 2 ) + Idx; + temp[Idx+j*(blockSize)] = MAGMA_D_ZERO; + while (i < Gs ) { + temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; + temp[ Idx+j*(blockSize) ] += + ( i + (blockSize) < Gs ) ? 
vtmp[ i+j*n + (blockSize) ] + : MAGMA_D_ZERO; + i += gridSize; + } + } + __syncthreads(); + if ( Idx < 64 ){ + for( j=0; j<2; j++){ + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; + } + } + __syncthreads(); + #ifdef COMPLEX + if( Idx < 32 ){ + for( j=0; j<2; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; + __syncthreads(); + for( j=0; j<2; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; + __syncthreads(); + for( j=0; j<2; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; + __syncthreads(); + for( j=0; j<2; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; + __syncthreads(); + for( j=0; j<2; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; + __syncthreads(); + for( j=0; j<2; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; + __syncthreads(); + } + #endif + #ifdef REAL + if( Idx < 32 ){ + volatile double *temp2 = temp; + for( j=0; j<2; j++){ + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; + } + } + #endif + + if ( Idx == 0 ){ + for( j=0; j<2; j++){ + vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; + } + } +} + +/** + Purpose + ------- + + Computes the scalar product of a set of 2 vectors such that + + skp[0,1,2,3] = [ , ] + + Returns the vector skp. + In case there are less dot products required, an easy workaround is + given by doubling input. + + Arguments + --------- + + @param[in] + n int + length of v_i and w_i + + @param[in] + v0 magmaDouble_ptr + input vector + + @param[in] + w0 magmaDouble_ptr + input vector + + @param[in] + v1 magmaDouble_ptr + input vector + + @param[in] + w1 magmaDouble_ptr + input vector + + @param[in] + d1 magmaDouble_ptr + workspace + + @param[in] + d2 magmaDouble_ptr + workspace + + @param[out] + skp magmaDouble_ptr + vector[3] of scalar products [] + This vector is located on the host + + @param[in] + queue magma_queue_t + Queue to execute in. 
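+
+ Layout note: partial results for the j-th vector pair are accumulated at
+ column offset j*n of the workspace, so the final magma_dgetvector call with
+ stride n places <v_j,w_j> into skp[j] for j = 0,1.
+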
+ + @ingroup magmasparse_cblas + ********************************************************************/ + +extern "C" magma_int_t +magma_dmdotc2( + magma_int_t n, + magmaDouble_ptr v0, + magmaDouble_ptr w0, + magmaDouble_ptr v1, + magmaDouble_ptr w1, + magmaDouble_ptr d1, + magmaDouble_ptr d2, + magmaDouble_ptr skp, + magma_queue_t queue ) +{ + int local_block_size=256; + dim3 Bs( local_block_size ); + dim3 Gs( magma_ceildiv( n, local_block_size ) ); + dim3 Gs_next; + int Ms = 2 * (local_block_size) * sizeof( double ); // 4 skp + magmaDouble_ptr aux1 = d1, aux2 = d2; + int b = 1; + + + magma_dmdotc2_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> + ( Gs.x, n, v0, w0, v1, w1, d1 ); + + while( Gs.x > 1 ) { + Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); + if ( Gs_next.x == 1 ) Gs_next.x = 2; + magma_dmdotc2_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> + ( Gs.x, n, aux1, aux2 ); + Gs_next.x = Gs_next.x /2; + Gs.x = Gs_next.x; + b = 1 - b; + if ( b ) { aux1 = d1; aux2 = d2; } + else { aux2 = d1; aux1 = d2; } + } + + // copy vectors to host + magma_dgetvector( 2 , aux1, n, skp, 1, queue ); + + + return MAGMA_SUCCESS; +} + + + + +// 3 dot products // + + +// initialize arrays with zero +__global__ void +magma_dmdotc3_gpumemzero( + double * d, + int n ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + + if( i < n ){ + for( int j=0; j<3; j++) + d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); + } +} + + +// dot product for multiple vectors +__global__ void +magma_dmdotc3_kernel_1( + int Gs, + int n, + double * v0, + double * w0, + double * v1, + double * w1, + double * v2, + double * w2, + double * vtmp) +{ + extern __shared__ double temp[]; + int Idx = threadIdx.x; + int i = blockIdx.x * blockDim.x + Idx; + int j; + + // 3 vectors v(i)/w(i) + + temp[ Idx ] = ( i < n ) ? + v0[ i ] * w0[ i ] : MAGMA_D_ZERO; + + temp[ Idx + blockDim.x ] = ( i < n ) ? + v1[ i ] * w1[ i ] : MAGMA_D_ZERO; + + temp[ Idx + 2*blockDim.x ] = ( i < n ) ? 
+ v2[ i ] * w2[ i ] : MAGMA_D_ZERO; + + + __syncthreads(); + if ( Idx < 128 ){ + for( j=0; j<3; j++){ + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; + } + } + __syncthreads(); + if ( Idx < 64 ){ + for( j=0; j<3; j++){ + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; + } + } + __syncthreads(); + #ifdef COMPLEX + if( Idx < 32 ){ + for( j=0; j<3; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; + __syncthreads(); + for( j=0; j<3; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; + __syncthreads(); + for( j=0; j<3; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; + __syncthreads(); + for( j=0; j<3; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; + __syncthreads(); + for( j=0; j<3; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; + __syncthreads(); + for( j=0; j<3; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; + __syncthreads(); + } + #endif + #ifdef REAL + if( Idx < 32 ){ + volatile double *temp2 = temp; + for( j=0; j<3; j++){ + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; + } + } + #endif + + if ( Idx == 0 ){ + for( j=0; j<3; j++){ + vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; + } + } +} + + + +// block reduction for 3 vectors +__global__ void +magma_dmdotc3_kernel_2( + int Gs, + int n, + double * vtmp, + double * vtmp2 ) +{ + extern __shared__ double temp[]; + int Idx = threadIdx.x; + int blockSize = 128; + int gridSize = blockSize * 2 * gridDim.x; + int j; + + for( j=0; j<3; j++){ + int i = blockIdx.x * ( blockSize * 2 ) + Idx; + temp[Idx+j*(blockSize)] = MAGMA_D_ZERO; + while (i < Gs ) { + temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; + temp[ Idx+j*(blockSize) ] += + ( i + (blockSize) < Gs ) ? 
vtmp[ i+j*n + (blockSize) ] + : MAGMA_D_ZERO; + i += gridSize; + } + } + __syncthreads(); + if ( Idx < 64 ){ + for( j=0; j<3; j++){ + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; + } + } + __syncthreads(); + #ifdef COMPLEX + if( Idx < 32 ){ + for( j=0; j<3; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; + __syncthreads(); + for( j=0; j<3; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; + __syncthreads(); + for( j=0; j<3; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; + __syncthreads(); + for( j=0; j<3; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; + __syncthreads(); + for( j=0; j<3; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; + __syncthreads(); + for( j=0; j<3; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; + __syncthreads(); + } + #endif + #ifdef REAL + if( Idx < 32 ){ + volatile double *temp2 = temp; + for( j=0; j<3; j++){ + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; + } + } + #endif + + if ( Idx == 0 ){ + for( j=0; j<3; j++){ + vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; + } + } +} + +/** + Purpose + ------- + + Computes the scalar product of a set of 4 vectors such that + + skp[0,1,2,3] = [ , , , ] + + Returns the vector skp. + In case there are less dot products required, an easy workaround is + given by doubling input. + + Arguments + --------- + + @param[in] + n int + length of v_i and w_i + + @param[in] + v0 magmaDouble_ptr + input vector + + @param[in] + w0 magmaDouble_ptr + input vector + + @param[in] + v1 magmaDouble_ptr + input vector + + @param[in] + w1 magmaDouble_ptr + input vector + + @param[in] + v2 magmaDouble_ptr + input vector + + @param[in] + w2 magmaDouble_ptr + input vector + + @param[in] + d1 magmaDouble_ptr + workspace + + @param[in] + d2 magmaDouble_ptr + workspace + + @param[out] + skp magmaDouble_ptr + vector[3] of scalar products [] + This vector is located on the host + + @param[in] + queue magma_queue_t + Queue to execute in. 
+ + @ingroup magmasparse_cblas + ********************************************************************/ + +extern "C" magma_int_t +magma_dmdotc3( + magma_int_t n, + magmaDouble_ptr v0, + magmaDouble_ptr w0, + magmaDouble_ptr v1, + magmaDouble_ptr w1, + magmaDouble_ptr v2, + magmaDouble_ptr w2, + magmaDouble_ptr d1, + magmaDouble_ptr d2, + magmaDouble_ptr skp, + magma_queue_t queue ) +{ + int local_block_size=256; + dim3 Bs( local_block_size ); + dim3 Gs( magma_ceildiv( n, local_block_size ) ); + dim3 Gs_next; + int Ms = 3 * (local_block_size) * sizeof( double ); // 4 skp + magmaDouble_ptr aux1 = d1, aux2 = d2; + int b = 1; + // magma_dmdotc3_gpumemzero<<< Gs, Bs, 0, queue->cuda_stream() >>>( d1, n ); + + magma_dmdotc3_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> + ( Gs.x, n, v0, w0, v1, w1, v2, w2, d1 ); + + while( Gs.x > 1 ) { + Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); + if ( Gs_next.x == 1 ) Gs_next.x = 2; + magma_dmdotc3_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> + ( Gs.x, n, aux1, aux2 ); + Gs_next.x = Gs_next.x /2; + Gs.x = Gs_next.x; + b = 1 - b; + if ( b ) { aux1 = d1; aux2 = d2; } + else { aux2 = d1; aux1 = d2; } + } + + // copy vectors to host + magma_dgetvector( 3 , aux1, n, skp, 1, queue ); + + return MAGMA_SUCCESS; +} + + + +// 4 dot products // + + +// initialize arrays with zero +__global__ void +magma_dmdotc4_gpumemzero( + double * d, + int n ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + + if( i < n ){ + for( int j=0; j<4; j++) + d[ i+j*n ] = MAGMA_D_MAKE( 0.0, 0.0 ); + } +} + + +// dot product for multiple vectors +__global__ void +magma_dmdotc4_kernel_1( + int Gs, + int n, + double * v0, + double * w0, + double * v1, + double * w1, + double * v2, + double * w2, + double * v3, + double * w3, + double * vtmp) +{ + extern __shared__ double temp[]; + int Idx = threadIdx.x; + int i = blockIdx.x * blockDim.x + Idx; + int j; + + // 4 vectors v(i)/w(i) + + temp[ Idx ] = ( i < n ) ? + v0[ i ] * w0[ i ] : MAGMA_D_ZERO; + + temp[ Idx + blockDim.x ] = ( i < n ) ? + v1[ i ] * w1[ i ] : MAGMA_D_ZERO; + + temp[ Idx + 2*blockDim.x ] = ( i < n ) ? + v2[ i ] * w2[ i ] : MAGMA_D_ZERO; + + temp[ Idx + 3*blockDim.x ] = ( i < n ) ? 
+ v3[ i ] * w3[ i ] : MAGMA_D_ZERO; + + + __syncthreads(); + if ( Idx < 128 ){ + for( j=0; j<4; j++){ + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; + } + } + __syncthreads(); + if ( Idx < 64 ){ + for( j=0; j<4; j++){ + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; + } + } + __syncthreads(); + #ifdef COMPLEX + if( Idx < 32 ){ + for( j=0; j<4; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; + __syncthreads(); + for( j=0; j<4; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; + __syncthreads(); + for( j=0; j<4; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; + __syncthreads(); + for( j=0; j<4; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; + __syncthreads(); + for( j=0; j<4; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; + __syncthreads(); + for( j=0; j<4; j++) + temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; + __syncthreads(); + } + #endif + #ifdef REAL + if( Idx < 32 ){ + volatile double *temp2 = temp; + for( j=0; j<4; j++){ + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; + temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; + } + } + #endif + + if ( Idx == 0 ){ + for( j=0; j<4; j++){ + vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; + } + } +} + + + +// block reduction for 4 vectors +__global__ void +magma_dmdotc4_kernel_2( + int Gs, + int n, + double * vtmp, + double * vtmp2 ) +{ + extern __shared__ double temp[]; + int Idx = threadIdx.x; + int blockSize = 128; + int gridSize = blockSize * 2 * gridDim.x; + int j; + + for( j=0; j<4; j++){ + int i = blockIdx.x * ( blockSize * 2 ) + Idx; + temp[Idx+j*(blockSize)] = MAGMA_D_ZERO; + while (i < Gs ) { + temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; + temp[ Idx+j*(blockSize) ] += + ( i + (blockSize) < Gs ) ? 
vtmp[ i+j*n + (blockSize) ] + : MAGMA_D_ZERO; + i += gridSize; + } + } + __syncthreads(); + if ( Idx < 64 ){ + for( j=0; j<4; j++){ + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; + } + } + __syncthreads(); + #ifdef COMPLEX + if( Idx < 32 ){ + for( j=0; j<4; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; + __syncthreads(); + for( j=0; j<4; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; + __syncthreads(); + for( j=0; j<4; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; + __syncthreads(); + for( j=0; j<4; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; + __syncthreads(); + for( j=0; j<4; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; + __syncthreads(); + for( j=0; j<4; j++) + temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; + __syncthreads(); + } + #endif + #ifdef REAL + if( Idx < 32 ){ + volatile double *temp2 = temp; + for( j=0; j<4; j++){ + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; + temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; + } + } + #endif + + if ( Idx == 0 ){ + for( j=0; j<4; j++){ + vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; + } + } +} + +/** + Purpose + ------- + + Computes the scalar product of a set of 4 vectors such that + + skp[0,1,2,3] = [ , , , ] + + Returns the vector skp. + In case there are less dot products required, an easy workaround is + given by doubling input. + + Arguments + --------- + + @param[in] + n int + length of v_i and w_i + + @param[in] + v0 magmaDouble_ptr + input vector + + @param[in] + w0 magmaDouble_ptr + input vector + + @param[in] + v1 magmaDouble_ptr + input vector + + @param[in] + w1 magmaDouble_ptr + input vector + + @param[in] + v2 magmaDouble_ptr + input vector + + @param[in] + w2 magmaDouble_ptr + input vector + + @param[in] + v3 magmaDouble_ptr + input vector + + @param[in] + w3 magmaDouble_ptr + input vector + + @param[in] + d1 magmaDouble_ptr + workspace + + @param[in] + d2 magmaDouble_ptr + workspace + + @param[out] + skp magmaDouble_ptr + vector[4] of scalar products [] + This vector is located on the host + + @param[in] + queue magma_queue_t + Queue to execute in. 
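+
+ Shared-memory note: the wrapper below launches the first reduction with
+ Ms = 4 * block_size * sizeof(double) bytes of dynamic shared memory, i.e.
+ one contiguous block_size-element reduction buffer per vector pair.
+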
+ + @ingroup magmasparse_dblas + ********************************************************************/ + +extern "C" magma_int_t +magma_dmdotc4( + magma_int_t n, + magmaDouble_ptr v0, + magmaDouble_ptr w0, + magmaDouble_ptr v1, + magmaDouble_ptr w1, + magmaDouble_ptr v2, + magmaDouble_ptr w2, + magmaDouble_ptr v3, + magmaDouble_ptr w3, + magmaDouble_ptr d1, + magmaDouble_ptr d2, + magmaDouble_ptr skp, + magma_queue_t queue ) +{ + int local_block_size=256; + dim3 Bs( local_block_size ); + dim3 Gs( magma_ceildiv( n, local_block_size ) ); + dim3 Gs_next; + int Ms = 4 * (local_block_size) * sizeof( double ); // 4 skp + magmaDouble_ptr aux1 = d1, aux2 = d2; + int b = 1; + + + magma_dmdotc4_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> + ( Gs.x, n, v0, w0, v1, w1, v2, w2, v3, w3, d1 ); + + while( Gs.x > 1 ) { + Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); + if ( Gs_next.x == 1 ) Gs_next.x = 2; + magma_dmdotc4_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> + ( Gs.x, n, aux1, aux2 ); + Gs_next.x = Gs_next.x /2; + Gs.x = Gs_next.x; + b = 1 - b; + if ( b ) { aux1 = d1; aux2 = d2; } + else { aux2 = d1; aux1 = d2; } + } + + // copy vectors to host + magma_dgetvector( 4 , aux1, n, skp, 1, queue ); + + return MAGMA_SUCCESS; +} diff --git a/cuda_code/dmergebicgstab_1.cu b/cuda_code/dmergebicgstab_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..333e462cf719ecf440d447d499e7ca28f6d354ed --- /dev/null +++ b/cuda_code/dmergebicgstab_1.cu @@ -0,0 +1,386 @@ +/* + -- MAGMA (version 2.5.4) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date October 2020 + + @generated from sparse/blas/zmergebicgstab.cu, normal z -> d, Thu Oct 8 23:05:47 2020 + @author Hartwig Anzt + +*/ +#include "magmasparse_internal.h" + +#define BLOCK_SIZE 512 + +#define PRECISION_d + + +// These routines merge multiple kernels from bicgstab into one. 
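+// The merged kernels below update the BiCGSTAB iteration vectors in a single
+// pass each. As a rough sketch of the intended element-wise updates (inferred
+// from the kernel argument lists, not from the original documentation):
+//
+//     kernel 1:  p = r + beta * ( p - omega * v )
+//     kernel 2:  s = r - alpha * v
+//     kernel 3:  x = x + alpha * p + omega * s ,   r = s - omega * t
+//     kernel 4:  x = x + alpha * y + omega * z ,   r = s - omega * t
+//
+// Each kernel appears to be wrapped by an extern "C" routine that launches it
+// on queue->cuda_stream(), presumably with BLOCK_SIZE threads per block, and
+// returns MAGMA_SUCCESS.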
+ +/* -------------------------------------------------------------------------- */ + +__global__ void +magma_dbicgstab_1_kernel( + int num_rows, + int num_cols, + double beta, + double omega, + double *r, + double *v, + double *p ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + if ( icuda_stream() >>>( num_rows, num_cols, beta, omega, + r, v, p ); + + return MAGMA_SUCCESS; +} + + +__global__ void +magma_dbicgstab_2_kernel( + int num_rows, + int num_cols, + double alpha, + magmaDouble_ptr r, + magmaDouble_ptr v, + magmaDouble_ptr s ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + if ( icuda_stream() >>>( num_rows, num_cols, alpha, r, v, s ); + + return MAGMA_SUCCESS; +} + + +__global__ void +magma_dbicgstab_3_kernel( + int num_rows, + int num_cols, + double alpha, + double omega, + double *p, + double *s, + double *t, + double *x, + double *r ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + if ( icuda_stream() >>>( num_rows, num_cols, alpha, omega, p, s, t, x, r ); + + return MAGMA_SUCCESS; +} + + +__global__ void +magma_dbicgstab_4_kernel( + int num_rows, + int num_cols, + double alpha, + double omega, + double *y, + double *z, + double *s, + double *t, + double *x, + double *r ) +{ + int i = blockIdx.x * blockDim.x + threadIdx.x; + if ( icuda_stream() >>>( num_rows, num_cols, alpha, omega, y, z, s, t, x, r ); + + return MAGMA_SUCCESS; +} diff --git a/cuda_code/dqBlender.cu b/cuda_code/dqBlender.cu new file mode 100644 index 0000000000000000000000000000000000000000..c94cde88ab477f0636b87aab7e72815e67ac7aea --- /dev/null +++ b/cuda_code/dqBlender.cu @@ -0,0 +1,389 @@ +/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% by: Alireza Ahmadi % +% University of Bonn- MSc Robotics & Geodetic Engineering% +% Alireza.Ahmadi@uni-bonn.de % +% AlirezaAhmadi.xyz % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ +#include "dqBlender.h" +#define effectConst 0.5f +#define knn 4 + +namespace DynaMap{ + +namespace blender{ + + dqBlender::dqBlender(void){} + dqBlender::~dqBlender(void){ + Free(); + } + void dqBlender::init(geometry::MeshSTD *srcMesh){ + sourceMesh = srcMesh; + cudaMallocManaged(&nWeights, sizeof(float) * srcMesh->verticesNum * srcMesh->verticesNum); + cudaMallocManaged(&nDistances, sizeof(float) * srcMesh->verticesNum * srcMesh->verticesNum); + cudaMallocManaged(&nIds, sizeof(int) * srcMesh->verticesNum * srcMesh->verticesNum); + cudaDeviceSynchronize(); + } + void dqBlender::init(geometry::defGraph& graph){ + sourceMesh = NULL; + } + void dqBlender::Free(void){ + if(sourceMesh != NULL){ + cudaFree(nDistances); + cudaFree(nWeights); + cudaFree(nIds); + } + } + //************************** blend mesh based on defGraph ****************************** + __device__ + float3 dqBlender::blendVertexPose(math::dualQuat* subWarpField, + float* subWarpFiledWeights, + int* subWarpFiledIDs, + float3 vertexPose, + int vertexID){ + math::dualQuat dqblend; + dqblend = math::dualQuat::identity(); + + for(int n = 0; n < KNN; n++){ + dqblend += subWarpField[n] * subWarpFiledWeights[n]; + } + dqblend.normalize(); + + for(int n = 0; n < KNN; n++){ + if(subWarpFiledIDs[n] == vertexID){ + dqblend = subWarpField[n]; + } + } + + float3 blendedPose; + if(dqblend != math::dualQuat::identity()){ + // get blended vertex position + blendedPose = dqblend.transformPosition(vertexPose); + }else{ + blendedPose = vertexPose; + } + return blendedPose; + } + __device__ + float3 dqBlender::blendVertexPose(const geometry::defGraph& graph, + float3 vertexPose, + unsigned int 
vertexID){ + math::dualQuat dqblend; + dqblend = math::dualQuat::identity(); + for(int n = 0; n < graph.nNum; n++){ + int nIdx = 0; + if(graph.KDTREE){ + nIdx = vertexID * graph.nNum + n; + }else{ + nIdx = vertexID * graph.nodeNum + n; + } + dqblend += graph.nodes[graph.visibleNodeIds[nIdx]].dq * graph.visibleNWeights[nIdx]; + } + dqblend.normalize(); + + for(int n = 0; n < graph.nodeNum; n++){ + if(graph.nodes[n].id == vertexID){ + dqblend = graph.nodes[n].dq; + } + } + + float3 blendedPose; + if(dqblend != math::dualQuat::identity()){ + // get blended vertex position + blendedPose = dqblend.transformPosition(vertexPose); + }else{ + blendedPose = vertexPose; + } + return blendedPose; + } + __device__ + void dqBlender::blendVertexPose(geometry::Vertex srcVertex, + geometry::Vertex dstVertex, + geometry::defGraph& graph, + unsigned int vertexID){ + math::dualQuat dqblend; + dqblend = math::dualQuat::identity(); + for(int n = 0; n < graph.nNum; n++){ + int nIdx = 0; + if(graph.KDTREE){ + nIdx = vertexID * graph.nNum + n; + }else{ + nIdx = vertexID * graph.nodeNum + n; + } + dqblend += graph.nodes[nIds[nIdx]].dq * graph.nodes[nIds[nIdx]].nWeights[nIdx]; + } + if(graph.nodes[vertexID].dq != math::dualQuat::identity()){ + dqblend += graph.nodes[vertexID].dq; + } + dqblend.normalize(); + + for(int n = 0; n < graph.nodeNum; n++){ + if(graph.nodes[n].id == vertexID){ + dqblend = graph.nodes[n].dq; + } + } + + if(dqblend != math::dualQuat::identity()){ + // get blended vertex position + dstVertex.position = dqblend.transformPosition(srcVertex.position); + }else{ + dstVertex.position = srcVertex.position; + } + } + __device__ + void dqBlender::blendVertexNormal(geometry::Vertex srctNromal, + geometry::Vertex dstNromal, + geometry::defGraph& graph, + unsigned int vertexID){ + math::dualQuat dqblend; + dqblend = math::dualQuat::identity(); + for(int n = 0; n < graph.nNum; n++){ + int nIdx = vertexID * graph.nodeNum + n; + dqblend += graph.nodes[nIds[nIdx]].dq * graph.nodes[nIds[nIdx]].nWeights[nIdx]; + } + if(graph.nodes[vertexID].dq != math::dualQuat::identity()){ + dqblend += graph.nodes[vertexID].dq; + } + dqblend.normalize(); + + for(int n = 0; n < graph.nodeNum; n++){ + if(graph.nodes[n].id == vertexID){ + dqblend = graph.nodes[n].dq; + } + } + + if(dqblend != math::dualQuat::identity()){ + // get blended vertex position + dstNromal.normal = dqblend.transformNormal(srctNromal.normal); + }else{ + dstNromal.normal = srctNromal.normal; + } + } + __device__ + void dqBlender::blendVertex(geometry::MeshSTD& dstMesh, + geometry::defGraph& graph, + unsigned int vertexID){ + + math::dualQuat dqblend; + dqblend.setIdentity(); + for(int n = 0; n < graph.nNum; n++){ + int nIdx = 0; + if(graph.KDTREE){ + nIdx = vertexID * graph.nNum + n; + }else{ + nIdx = vertexID * graph.nodeNum + n; + } + dqblend += graph.nodes[graph.visibleNodeIds[nIdx]].dq * graph.visibleNWeights[nIdx]; + dqblend.normalize(); + } + for(int n = 0; n < graph.nodeNum; n++){ + if(graph.nodes[n].id == vertexID){ + dqblend = graph.nodes[n].dq; + } + } + + dstMesh.vertices[vertexID].position = dqblend.transformPosition(graph.defGraphMesh.vertices[vertexID].position); + dstMesh.vertices[vertexID].normal = dqblend.transformNormal(graph.defGraphMesh.vertices[vertexID].normal); + } + __global__ + void blendMeshbyDefGraphKernel(dqBlender& blender, + geometry::defGraph& graph, + geometry::MeshSTD& dstMesh){ + + int index = blockIdx.x * blockDim.x + threadIdx.x; + int stride = blockDim.x * gridDim.x; + int size = graph.defGraphMesh.verticesNum; + for 
(int idx = index; idx < size; idx += stride){ + // blend mesh vertices based on nearby nodes + blender.blendVertex(dstMesh, graph, idx); + } + } + void dqBlender::blendMesh(geometry::MeshSTD& dstMesh, + geometry::defGraph& graph){ + + int threads_per_block = 512; + int thread_blocks =(graph.defGraphMesh.verticesNum + threads_per_block - 1) / threads_per_block; + // std::cout << "<<< blendMeshbyDefGraph >>> threadBlocks: "<< thread_blocks << + // ", threadPerBlock: " << threads_per_block << + // ", VertexNum: " << graph.defGraphMesh.verticesNum << + // ", nodeNum: " << graph.nodeNum << + // std::endl; + blendMeshbyDefGraphKernel<<>>(*this, + graph, + dstMesh); + cudaDeviceSynchronize(); + + // for(int cnt=0; cntverticesNum + n; + dqblend += DualQuat[nIds[nIdx]] * nWeights[nIdx]; + } + if(DualQuat[vertexID] != math::dualQuat::identity()){ + dqblend += DualQuat[vertexID]; + } + dqblend.normalize(); + // get blended vertex position + dstMesh.vertices[vertexID].position = dqblend.transformPosition(sourceMesh->vertices[vertexID].position); + dstMesh.vertices[vertexID].normal = dqblend.transformNormal(sourceMesh->vertices[vertexID].normal); + } + __global__ + void blendMeshKernel(dqBlender& blender, + geometry::MeshSTD& dstMesh, + math::dualQuat *DualQuat){ + + int index = blockIdx.x * blockDim.x + threadIdx.x; + int stride = blockDim.x * gridDim.x; + int size = blender.sourceMesh->verticesNum; + + for (int idx = index; idx < size; idx += stride){ + blender.blendVertex(dstMesh, + DualQuat, + idx); + } + } + __global__ + void updateNodeWeightsKernel(dqBlender& blender){ + int index = blockIdx.x * blockDim.x + threadIdx.x; + int stride = blockDim.x * gridDim.x; + int size = blender.sourceMesh->verticesNum; + for (int idx = index; idx < size; idx += stride){ + // In case of using radius reach for the neighborhood, this parameter will show number of close nodes + for(int w = 0; w < blender.sourceMesh->verticesNum; w++){ + int w_index = idx * blender.sourceMesh->verticesNum + w; + // float ref = effectConst * blender.nDistances[idx * size]; + float ref = effectConst ; + if(expWeight){ + // supposed distance[0] contains leasts distance after sorting + blender.nWeights[w_index] = exp(-pow(blender.nDistances[idx * size + w],2) / pow(ref,2)); + }else{ + blender.nWeights[w_index] = blender.nDistances[idx * size] * effectConst / blender.nDistances[idx * size + w]; + } + } + // if(idx == 10) + // for(int cnt=0; cnt< graph.nodeNum; cnt++){ + // printf("dist: %f, ids: %d, W: %f\n", graph.nodes[idx].nDistances[cnt], graph.nodes[idx].nIds[cnt],graph.nodes[idx].nWeights[cnt]); + // } + } + } + __global__ + void updateNodeDistnacesKernel(dqBlender& blender){ + int index = blockIdx.x * blockDim.x + threadIdx.x; + int stride = blockDim.x * gridDim.x; + int size = blender.sourceMesh->verticesNum; + + for (int idx = index; idx < size; idx += stride){ + // invoking target node vertex position from degGraph + geometry::Vertex vi = blender.sourceMesh->vertices[idx]; + int nIdx = idx * size; + for(int n = 0; n < size; n++){ + // shouldn't add node itself as a neighbour in neighbour list + if(n == idx) continue; + // invoking neighbour node j vertex position from degGraph + geometry::Vertex vj = blender.sourceMesh->vertices[n]; + // computing distance between target node vi and i-th neighbour vertex position + float tmp_dist = distance(vi.position, vj.position); + // excluding absolute 0.0 to avoid nan and inf products + if(tmp_dist < 10e-5) tmp_dist = 10e-5; + // storing distance and id of the neighbour in target node 
struct
+            blender.nDistances[nIdx] = tmp_dist;
+            blender.nIds[nIdx] = n;
+            nIdx++;
+        }
+    }
+    }
+    __global__
+    void sortNeighbourNodesKernel(dqBlender& blender){
+        int index = blockIdx.x * blockDim.x + threadIdx.x;
+        int stride = blockDim.x * gridDim.x;
+        int size = blender.sourceMesh->verticesNum;
+        for (int idx = index; idx < size; idx += stride){
+            // Go through all neighbour points
+            for (int i = idx * size; i < idx * size + size - 1; i++) {
+                // Store current distance and associated index
+                float currDist = blender.nDistances[i];
+                int currIndex = blender.nIds[i];
+                // Shift values (and indexes) higher than the current distance to the right
+                int j = i;
+                float tmp_dist = 0;
+                int tmp_index = 0;
+                while (j > idx * size && blender.nDistances[j-1] > currDist) {
+
+                    tmp_dist = blender.nDistances[j-1];
+                    tmp_index = blender.nIds[j-1];
+
+                    blender.nDistances[j-1] = currDist;
+                    blender.nIds[j-1] = currIndex;
+
+                    blender.nDistances[j] = tmp_dist;
+                    blender.nIds[j] = tmp_index;
+
+                    --j;
+                }
+            }
+        }
+    }
+    void dqBlender::blendMesh(geometry::MeshSTD& dstMesh,
+                              math::dualQuat *DualQuat){
+
+        // build KDtree for input mesh "*defGraphMesh"
+
+        // find KNN for each vertex in mesh
+        // update Euclidean distances between vertices and nodes
+        int threads_per_block = 1024;
+        int thread_blocks =(sourceMesh->verticesNum + threads_per_block - 1) / threads_per_block;
+        // std::cout << "<<< updateNodeDistnacesKernel >>> threadBlocks: "<< thread_blocks <<
+        //              ", threadPerBlock: " << threads_per_block <<
+        //              ", sourceMesh->verticesNum: " << sourceMesh->verticesNum <<
+        //              std::endl;
+        updateNodeDistnacesKernel<<<thread_blocks, threads_per_block>>>(*this);
+        cudaDeviceSynchronize();
+
+        // for(int cnt=0; cnt< 8; cnt++){
+        //     int idx = sourceMesh->verticesNum * 201 + cnt;
+        //     printf("cnt: %d, dist: %f, ID: %d \n", cnt, nDistances[idx], nIds[idx]);
+        // }
+
+        // Sort vertices based on their distances
+        // std::cout << "<<< sortNeighbourNodesKernel >>> threadBlocks: "<< thread_blocks <<
+        //              ", threadPerBlock: " << threads_per_block <<
+        //              std::endl;
+        sortNeighbourNodesKernel<<<thread_blocks, threads_per_block>>>(*this);
+        cudaDeviceSynchronize();
+
+        // for(int cnt=0; cnt<8; cnt++){
+        //     int idx = sourceMesh->verticesNum * 201 + cnt;
+        //     printf("cnt: %d, dist: %f, ID: %d \n", cnt, nDistances[idx], nIds[idx]);
+        // }
+
+        // std::cout << "<<< updateNodeWeightsKernel >>> threadBlocks: "<< thread_blocks <<
+        //              ", threadPerBlock: " << threads_per_block <<
+        //              std::endl;
+        updateNodeWeightsKernel<<<thread_blocks, threads_per_block>>>(*this);
+        cudaDeviceSynchronize();
+
+        // for(int cnt=0; cnt< 8; cnt++){
+        //     int idx = sourceMesh->verticesNum * 201 + cnt;
+        //     printf("cnt: %d, dist: %f, weight: %f, ID: %d \n", cnt, nDistances[idx], nWeights[idx], nIds[idx]);
+        // }
+
+
+        // Blend Mesh
+        // std::cout << "<<< blendMesh >>> threadBlocks: " << thread_blocks <<
+        //              ", threadPerBlock: " << threads_per_block <<
+        //              ", VertexNum: " << sourceMesh->verticesNum <<
+        //              std::endl;
+        blendMeshKernel<<<thread_blocks, threads_per_block>>>(*this, dstMesh, DualQuat);
+        cudaDeviceSynchronize();
+    }
+
+} // namespace blender
+
+} // namespace DynaMap
diff --git a/cuda_code/driver-draft.cu b/cuda_code/driver-draft.cu
new file mode 100644
index 0000000000000000000000000000000000000000..5b02b63377b735b101ac5b4d2bc662ab3972769b
--- /dev/null
+++ b/cuda_code/driver-draft.cu
@@ -0,0 +1,1848 @@
+/* This file is generated by code_gen.py */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../include/common.h"
+#include "../include/hashJoin.h"
+#include "../include/schema.h"
+#include
"../include/cpuCudaLib.h" +#include "../include/gpuCudaLib.h" +extern struct tableNode* tableScan(struct scanNode *,struct statistic *); + +//Ugly...we will put everything together later into an object +extern struct tableNode* tableScanNest(struct scanNode *, struct tableNode *, struct statistic *, + char ** , + int *, + int *, + int *, + int *, + int *, + dim3, + dim3 +); + + +extern void createIndex (struct tableNode *, int, int, struct statistic *); +extern struct tableNode* indexScan (struct tableNode *, int, int, int, struct statistic *); +extern struct tableNode* hashJoin(struct joinNode *, struct statistic *); +extern struct tableNode* groupBy(struct groupByNode *,struct statistic *); +extern struct tableNode* orderBy(struct orderByNode *, struct statistic *); +extern char* materializeCol(struct materializeNode * mn, struct statistic *); + +#define CHECK_POINTER(p) do {\ + if(p == NULL){ \ + perror("Failed to allocate host memory"); \ + exit(-1); \ + }} while(0) + +int main(int argc, char ** argv){ + + /* For initializing CUDA device */ + int * cudaTmp; + cudaMalloc((void**)&cudaTmp,sizeof(int)); + cudaFree(cudaTmp); + + int table; + int long_index; + char path[PATH_MAX]; + int setPath = 0; + struct option long_options[] = { + {"datadir",required_argument,0,'0'} + }; + + while((table=getopt_long(argc,argv,"",long_options,&long_index))!=-1){ + switch(table){ + case '0': + setPath = 1; + strcpy(path,optarg); + break; + } + } + + if(setPath == 1) + chdir(path); + + struct timespec start, end; + struct timespec diskStart, diskEnd; + double diskTotal = 0; + clock_gettime(CLOCK_REALTIME,&start); + struct statistic pp; + pp.total = pp.kernel = pp.pcie = 0; + + pp.buildIndexTotal = 0; + pp.tableScanTotal = 0; + pp.tableScanCount = 0; + pp.whereMemCopy_s1 = 0; + pp.dataMemCopy_s2 = 0; + pp.scanTotal_s3 = 0; + pp.preScanTotal_s4 = 0; + pp.preScanCount_s4 = 0; + pp.preScanResultMemCopy_s5 = 0; + pp.dataMemCopyOther_s6 = 0; + pp.materializeResult_s7 = 0; + pp.finalResultMemCopy_s8 = 0; + pp.create_tableNode_S01 = 0; + pp.mallocRes_S02 = 0; + pp.deallocateBuffs_S03 = 0; + // Load columns from the table SUPPLIER + struct tableNode *supplierTable; + supplierTable = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(supplierTable); + initTable(supplierTable); + { + struct tableNode *_supplier_table; + int outFd; + long outSize; + char *outTable; + long offset, tupleOffset; + int blockTotal; + struct columnHeader header; + + // Retrieve the block number from SUPPLIER5 + outFd = open("SUPPLIER5", O_RDONLY); + read(outFd, &header, sizeof(struct columnHeader)); + blockTotal = header.blockTotal; + close(outFd); + offset = 0; + tupleOffset = 0; + for(int i = 0; i < blockTotal; i++){ + + // Table initialization + _supplier_table = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(_supplier_table); + _supplier_table->totalAttr = 7; + _supplier_table->attrType = (int *)malloc(sizeof(int) * 7); + CHECK_POINTER(_supplier_table->attrType); + _supplier_table->attrSize = (int *)malloc(sizeof(int) * 7); + CHECK_POINTER(_supplier_table->attrSize); + _supplier_table->attrIndex = (int *)malloc(sizeof(int) * 7); + CHECK_POINTER(_supplier_table->attrIndex); + _supplier_table->attrTotalSize = (int *)malloc(sizeof(int) * 7); + CHECK_POINTER(_supplier_table->attrTotalSize); + _supplier_table->dataPos = (int *)malloc(sizeof(int) * 7); + CHECK_POINTER(_supplier_table->dataPos); + _supplier_table->dataFormat = (int *) malloc(sizeof(int) * 7); + 
CHECK_POINTER(_supplier_table->dataFormat); + _supplier_table->content = (char **)malloc(sizeof(char *) * 7); + CHECK_POINTER(_supplier_table->content); + + // Load column 5, type: DECIMAL + _supplier_table->attrSize[0] = sizeof(float); + _supplier_table->attrIndex[0] = 5; + _supplier_table->attrType[0] = FLOAT; + _supplier_table->dataPos[0] = MEM; + outFd = open("SUPPLIER5", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * sizeof(float); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _supplier_table->dataFormat[0] = header.format; + outSize = header.tupleNum * sizeof(float); + _supplier_table->attrTotalSize[0] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _supplier_table->content[0] = (char *)memalign(256, outSize); + memcpy(_supplier_table->content[0], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 1, type: TEXT + _supplier_table->attrSize[1] = 18; + _supplier_table->attrIndex[1] = 1; + _supplier_table->attrType[1] = STRING; + _supplier_table->dataPos[1] = MEM; + outFd = open("SUPPLIER1", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * 18; + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _supplier_table->dataFormat[1] = header.format; + outSize = header.tupleNum * 18; + _supplier_table->attrTotalSize[1] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _supplier_table->content[1] = (char *)memalign(256, outSize); + memcpy(_supplier_table->content[1], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 2, type: TEXT + _supplier_table->attrSize[2] = 22; + _supplier_table->attrIndex[2] = 2; + _supplier_table->attrType[2] = STRING; + _supplier_table->dataPos[2] = MEM; + outFd = open("SUPPLIER2", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * 22; + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _supplier_table->dataFormat[2] = header.format; + outSize = header.tupleNum * 22; + _supplier_table->attrTotalSize[2] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _supplier_table->content[2] = (char *)memalign(256, outSize); + memcpy(_supplier_table->content[2], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 4, type: TEXT + _supplier_table->attrSize[3] = 15; + _supplier_table->attrIndex[3] = 4; + _supplier_table->attrType[3] = STRING; + _supplier_table->dataPos[3] = MEM; + outFd = open("SUPPLIER4", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * 15; + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + 
_supplier_table->dataFormat[3] = header.format; + outSize = header.tupleNum * 15; + _supplier_table->attrTotalSize[3] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _supplier_table->content[3] = (char *)memalign(256, outSize); + memcpy(_supplier_table->content[3], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 6, type: TEXT + _supplier_table->attrSize[4] = 22; + _supplier_table->attrIndex[4] = 6; + _supplier_table->attrType[4] = STRING; + _supplier_table->dataPos[4] = MEM; + outFd = open("SUPPLIER6", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * 22; + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _supplier_table->dataFormat[4] = header.format; + outSize = header.tupleNum * 22; + _supplier_table->attrTotalSize[4] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _supplier_table->content[4] = (char *)memalign(256, outSize); + memcpy(_supplier_table->content[4], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 3, type: INTEGER + _supplier_table->attrSize[5] = sizeof(int); + _supplier_table->attrIndex[5] = 3; + _supplier_table->attrType[5] = INT; + _supplier_table->dataPos[5] = MEM; + outFd = open("SUPPLIER3", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * sizeof(int); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _supplier_table->dataFormat[5] = header.format; + outSize = header.tupleNum * sizeof(int); + _supplier_table->attrTotalSize[5] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _supplier_table->content[5] = (char *)memalign(256, outSize); + memcpy(_supplier_table->content[5], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 0, type: INTEGER + _supplier_table->attrSize[6] = sizeof(int); + _supplier_table->attrIndex[6] = 0; + _supplier_table->attrType[6] = INT; + _supplier_table->dataPos[6] = MEM; + outFd = open("SUPPLIER0", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * sizeof(int); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _supplier_table->dataFormat[6] = header.format; + outSize = header.tupleNum * sizeof(int); + _supplier_table->attrTotalSize[6] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _supplier_table->content[6] = (char *)memalign(256, outSize); + memcpy(_supplier_table->content[6], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + 
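+            // Note on the column loads above (and the analogous loads for the other
+            // tables below): each "<TABLE><col>" file appears to store one struct
+            // columnHeader per block followed by header.tupleNum values of that
+            // column's attrSize, with the first header also carrying blockTotal.
+            // Every block is mmap'ed read-only, copied into 256-byte-aligned host
+            // memory, and the mmap/memcpy time is accumulated in diskTotal.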
_supplier_table->tupleSize = 0 + sizeof(float) + 18 + 22 + 15 + 22 + sizeof(int) + sizeof(int); + _supplier_table->tupleNum = header.tupleNum; + + if(blockTotal != 1){ + mergeIntoTable(supplierTable,_supplier_table, &pp); + clock_gettime(CLOCK_REALTIME, &diskStart); + freeTable(_supplier_table); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + }else{ + free(supplierTable); + supplierTable = _supplier_table; + } + tupleOffset += header.tupleNum; + } + supplierTable->colIdxNum = 0; + _supplier_table->keepInGpuIdx = 1; + } + + // Load columns from the table REGION + struct tableNode *regionTable; + regionTable = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(regionTable); + initTable(regionTable); + { + struct tableNode *_region_table; + int outFd; + long outSize; + char *outTable; + long offset, tupleOffset; + int blockTotal; + struct columnHeader header; + + // Retrieve the block number from REGION0 + outFd = open("REGION0", O_RDONLY); + read(outFd, &header, sizeof(struct columnHeader)); + blockTotal = header.blockTotal; + close(outFd); + offset = 0; + tupleOffset = 0; + for(int i = 0; i < blockTotal; i++){ + + // Table initialization + _region_table = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(_region_table); + _region_table->totalAttr = 2; + _region_table->attrType = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(_region_table->attrType); + _region_table->attrSize = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(_region_table->attrSize); + _region_table->attrIndex = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(_region_table->attrIndex); + _region_table->attrTotalSize = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(_region_table->attrTotalSize); + _region_table->dataPos = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(_region_table->dataPos); + _region_table->dataFormat = (int *) malloc(sizeof(int) * 2); + CHECK_POINTER(_region_table->dataFormat); + _region_table->content = (char **)malloc(sizeof(char *) * 2); + CHECK_POINTER(_region_table->content); + + // Load column 0, type: INTEGER + _region_table->attrSize[0] = sizeof(int); + _region_table->attrIndex[0] = 0; + _region_table->attrType[0] = INT; + _region_table->dataPos[0] = MEM; + outFd = open("REGION0", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * sizeof(int); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _region_table->dataFormat[0] = header.format; + outSize = header.tupleNum * sizeof(int); + _region_table->attrTotalSize[0] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _region_table->content[0] = (char *)memalign(256, outSize); + memcpy(_region_table->content[0], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 1, type: TEXT + _region_table->attrSize[1] = 25; + _region_table->attrIndex[1] = 1; + _region_table->attrType[1] = STRING; + _region_table->dataPos[1] = MEM; + outFd = open("REGION1", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * 25; + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + 
_region_table->dataFormat[1] = header.format; + outSize = header.tupleNum * 25; + _region_table->attrTotalSize[1] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _region_table->content[1] = (char *)memalign(256, outSize); + memcpy(_region_table->content[1], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + _region_table->tupleSize = 0 + sizeof(int) + 25; + _region_table->tupleNum = header.tupleNum; + + if(blockTotal != 1){ + mergeIntoTable(regionTable,_region_table, &pp); + clock_gettime(CLOCK_REALTIME, &diskStart); + freeTable(_region_table); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + }else{ + free(regionTable); + regionTable = _region_table; + } + tupleOffset += header.tupleNum; + } + regionTable->colIdxNum = 0; + _region_table->keepInGpuIdx = 1; + } + + // Load columns from the table PART + struct tableNode *partTable; + partTable = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(partTable); + initTable(partTable); + { + struct tableNode *_part_table; + int outFd; + long outSize; + char *outTable; + long offset, tupleOffset; + int blockTotal; + struct columnHeader header; + + // Retrieve the block number from PART0 + outFd = open("PART0", O_RDONLY); + read(outFd, &header, sizeof(struct columnHeader)); + blockTotal = header.blockTotal; + close(outFd); + offset = 0; + tupleOffset = 0; + for(int i = 0; i < blockTotal; i++){ + + // Table initialization + _part_table = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(_part_table); + _part_table->totalAttr = 4; + _part_table->attrType = (int *)malloc(sizeof(int) * 4); + CHECK_POINTER(_part_table->attrType); + _part_table->attrSize = (int *)malloc(sizeof(int) * 4); + CHECK_POINTER(_part_table->attrSize); + _part_table->attrIndex = (int *)malloc(sizeof(int) * 4); + CHECK_POINTER(_part_table->attrIndex); + _part_table->attrTotalSize = (int *)malloc(sizeof(int) * 4); + CHECK_POINTER(_part_table->attrTotalSize); + _part_table->dataPos = (int *)malloc(sizeof(int) * 4); + CHECK_POINTER(_part_table->dataPos); + _part_table->dataFormat = (int *) malloc(sizeof(int) * 4); + CHECK_POINTER(_part_table->dataFormat); + _part_table->content = (char **)malloc(sizeof(char *) * 4); + CHECK_POINTER(_part_table->content); + + // Load column 0, type: INTEGER + _part_table->attrSize[0] = sizeof(int); + _part_table->attrIndex[0] = 0; + _part_table->attrType[0] = INT; + _part_table->dataPos[0] = MEM; + outFd = open("PART0", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * sizeof(int); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _part_table->dataFormat[0] = header.format; + outSize = header.tupleNum * sizeof(int); + _part_table->attrTotalSize[0] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _part_table->content[0] = (char *)memalign(256, outSize); + memcpy(_part_table->content[0], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // 
Load column 2, type: TEXT + _part_table->attrSize[1] = 22; + _part_table->attrIndex[1] = 2; + _part_table->attrType[1] = STRING; + _part_table->dataPos[1] = MEM; + outFd = open("PART2", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * 22; + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _part_table->dataFormat[1] = header.format; + outSize = header.tupleNum * 22; + _part_table->attrTotalSize[1] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _part_table->content[1] = (char *)memalign(256, outSize); + memcpy(_part_table->content[1], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 5, type: INTEGER + _part_table->attrSize[2] = sizeof(int); + _part_table->attrIndex[2] = 5; + _part_table->attrType[2] = INT; + _part_table->dataPos[2] = MEM; + outFd = open("PART5", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * sizeof(int); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _part_table->dataFormat[2] = header.format; + outSize = header.tupleNum * sizeof(int); + _part_table->attrTotalSize[2] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _part_table->content[2] = (char *)memalign(256, outSize); + memcpy(_part_table->content[2], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 4, type: TEXT + _part_table->attrSize[3] = 25; + _part_table->attrIndex[3] = 4; + _part_table->attrType[3] = STRING; + _part_table->dataPos[3] = MEM; + outFd = open("PART4", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * 25; + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _part_table->dataFormat[3] = header.format; + outSize = header.tupleNum * 25; + _part_table->attrTotalSize[3] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _part_table->content[3] = (char *)memalign(256, outSize); + memcpy(_part_table->content[3], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + _part_table->tupleSize = 0 + sizeof(int) + 22 + sizeof(int) + 25; + _part_table->tupleNum = header.tupleNum; + + if(blockTotal != 1){ + mergeIntoTable(partTable,_part_table, &pp); + clock_gettime(CLOCK_REALTIME, &diskStart); + freeTable(_part_table); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + }else{ + free(partTable); + partTable = _part_table; + } + tupleOffset += header.tupleNum; + } + partTable->colIdxNum = 0; + _part_table->keepInGpuIdx = 1; + } + + // Load columns from the table NATION + struct tableNode *nationTable; + nationTable = (struct tableNode *)malloc(sizeof(struct tableNode)); + 
CHECK_POINTER(nationTable); + initTable(nationTable); + { + struct tableNode *_nation_table; + int outFd; + long outSize; + char *outTable; + long offset, tupleOffset; + int blockTotal; + struct columnHeader header; + + // Retrieve the block number from NATION1 + outFd = open("NATION1", O_RDONLY); + read(outFd, &header, sizeof(struct columnHeader)); + blockTotal = header.blockTotal; + close(outFd); + offset = 0; + tupleOffset = 0; + for(int i = 0; i < blockTotal; i++){ + + // Table initialization + _nation_table = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(_nation_table); + _nation_table->totalAttr = 3; + _nation_table->attrType = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(_nation_table->attrType); + _nation_table->attrSize = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(_nation_table->attrSize); + _nation_table->attrIndex = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(_nation_table->attrIndex); + _nation_table->attrTotalSize = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(_nation_table->attrTotalSize); + _nation_table->dataPos = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(_nation_table->dataPos); + _nation_table->dataFormat = (int *) malloc(sizeof(int) * 3); + CHECK_POINTER(_nation_table->dataFormat); + _nation_table->content = (char **)malloc(sizeof(char *) * 3); + CHECK_POINTER(_nation_table->content); + + // Load column 1, type: TEXT + _nation_table->attrSize[0] = 25; + _nation_table->attrIndex[0] = 1; + _nation_table->attrType[0] = STRING; + _nation_table->dataPos[0] = MEM; + outFd = open("NATION1", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * 25; + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _nation_table->dataFormat[0] = header.format; + outSize = header.tupleNum * 25; + _nation_table->attrTotalSize[0] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _nation_table->content[0] = (char *)memalign(256, outSize); + memcpy(_nation_table->content[0], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 2, type: INTEGER + _nation_table->attrSize[1] = sizeof(int); + _nation_table->attrIndex[1] = 2; + _nation_table->attrType[1] = INT; + _nation_table->dataPos[1] = MEM; + outFd = open("NATION2", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * sizeof(int); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _nation_table->dataFormat[1] = header.format; + outSize = header.tupleNum * sizeof(int); + _nation_table->attrTotalSize[1] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _nation_table->content[1] = (char *)memalign(256, outSize); + memcpy(_nation_table->content[1], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 0, type: INTEGER + _nation_table->attrSize[2] = sizeof(int); + _nation_table->attrIndex[2] = 0; + _nation_table->attrType[2] = INT; + _nation_table->dataPos[2] = MEM; + outFd = open("NATION0", O_RDONLY); + offset = i * 
sizeof(struct columnHeader) + tupleOffset * sizeof(int); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _nation_table->dataFormat[2] = header.format; + outSize = header.tupleNum * sizeof(int); + _nation_table->attrTotalSize[2] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _nation_table->content[2] = (char *)memalign(256, outSize); + memcpy(_nation_table->content[2], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + _nation_table->tupleSize = 0 + 25 + sizeof(int) + sizeof(int); + _nation_table->tupleNum = header.tupleNum; + + if(blockTotal != 1){ + mergeIntoTable(nationTable,_nation_table, &pp); + clock_gettime(CLOCK_REALTIME, &diskStart); + freeTable(_nation_table); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + }else{ + free(nationTable); + nationTable = _nation_table; + } + tupleOffset += header.tupleNum; + } + nationTable->colIdxNum = 0; + _nation_table->keepInGpuIdx = 1; + } + + // Load columns from the table PARTSUPP + struct tableNode *partsuppTable; + partsuppTable = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(partsuppTable); + initTable(partsuppTable); + { + struct tableNode *_partsupp_table; + int outFd; + long outSize; + char *outTable; + long offset, tupleOffset; + int blockTotal; + struct columnHeader header; + + // Retrieve the block number from PARTSUPP1 + outFd = open("PARTSUPP1", O_RDONLY); + read(outFd, &header, sizeof(struct columnHeader)); + blockTotal = header.blockTotal; + close(outFd); + offset = 0; + tupleOffset = 0; + for(int i = 0; i < blockTotal; i++){ + + // Table initialization + _partsupp_table = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(_partsupp_table); + _partsupp_table->totalAttr = 3; + _partsupp_table->attrType = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(_partsupp_table->attrType); + _partsupp_table->attrSize = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(_partsupp_table->attrSize); + _partsupp_table->attrIndex = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(_partsupp_table->attrIndex); + _partsupp_table->attrTotalSize = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(_partsupp_table->attrTotalSize); + _partsupp_table->dataPos = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(_partsupp_table->dataPos); + _partsupp_table->dataFormat = (int *) malloc(sizeof(int) * 3); + CHECK_POINTER(_partsupp_table->dataFormat); + _partsupp_table->content = (char **)malloc(sizeof(char *) * 3); + CHECK_POINTER(_partsupp_table->content); + + // Load column 1, type: INTEGER + _partsupp_table->attrSize[0] = sizeof(int); + _partsupp_table->attrIndex[0] = 1; + _partsupp_table->attrType[0] = INT; + _partsupp_table->dataPos[0] = MEM; + outFd = open("PARTSUPP1", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * sizeof(int); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _partsupp_table->dataFormat[0] = header.format; + outSize = header.tupleNum * sizeof(int); + _partsupp_table->attrTotalSize[0] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, 
PROT_READ, MAP_SHARED, outFd, offset); + _partsupp_table->content[0] = (char *)memalign(256, outSize); + memcpy(_partsupp_table->content[0], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 0, type: INTEGER + _partsupp_table->attrSize[1] = sizeof(int); + _partsupp_table->attrIndex[1] = 0; + _partsupp_table->attrType[1] = INT; + _partsupp_table->dataPos[1] = MEM; + outFd = open("PARTSUPP0", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * sizeof(int); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _partsupp_table->dataFormat[1] = header.format; + outSize = header.tupleNum * sizeof(int); + _partsupp_table->attrTotalSize[1] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _partsupp_table->content[1] = (char *)memalign(256, outSize); + memcpy(_partsupp_table->content[1], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + // Load column 3, type: DECIMAL + _partsupp_table->attrSize[2] = sizeof(float); + _partsupp_table->attrIndex[2] = 3; + _partsupp_table->attrType[2] = FLOAT; + _partsupp_table->dataPos[2] = MEM; + outFd = open("PARTSUPP3", O_RDONLY); + offset = i * sizeof(struct columnHeader) + tupleOffset * sizeof(float); + lseek(outFd, offset, SEEK_SET); + read(outFd, &header, sizeof(struct columnHeader)); + offset += sizeof(struct columnHeader); + _partsupp_table->dataFormat[2] = header.format; + outSize = header.tupleNum * sizeof(float); + _partsupp_table->attrTotalSize[2] = outSize; + + clock_gettime(CLOCK_REALTIME,&diskStart); + outTable =(char *)mmap(0, outSize, PROT_READ, MAP_SHARED, outFd, offset); + _partsupp_table->content[2] = (char *)memalign(256, outSize); + memcpy(_partsupp_table->content[2], outTable, outSize); + munmap(outTable, outSize); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + close(outFd); + + _partsupp_table->tupleSize = 0 + sizeof(int) + sizeof(int) + sizeof(float); + _partsupp_table->tupleNum = header.tupleNum; + + if(blockTotal != 1){ + mergeIntoTable(partsuppTable,_partsupp_table, &pp); + clock_gettime(CLOCK_REALTIME, &diskStart); + freeTable(_partsupp_table); + clock_gettime(CLOCK_REALTIME, &diskEnd); + diskTotal += (diskEnd.tv_sec - diskStart.tv_sec)* BILLION + diskEnd.tv_nsec - diskStart.tv_nsec; + }else{ + free(partsuppTable); + partsuppTable = _partsupp_table; + } + tupleOffset += header.tupleNum; + } + partsuppTable->colIdxNum = 0; + _partsupp_table->keepInGpuIdx = 1; + } + + struct tableNode *result; + char * subqRes0; + + // Process the TableNode for PART + struct tableNode *pa0; + pa0 = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(pa0); + initTable(pa0); + { + struct tableNode *partTablePartial; + partTablePartial = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(partTablePartial); + partTablePartial->totalAttr = 4; + partTablePartial->attrType = (int *)malloc(sizeof(int) * 4); + CHECK_POINTER(partTablePartial->attrType); + partTablePartial->attrSize = (int *)malloc(sizeof(int) * 4); + 
CHECK_POINTER(partTablePartial->attrSize); + partTablePartial->attrIndex = (int *)malloc(sizeof(int) * 4); + CHECK_POINTER(partTablePartial->attrIndex); + partTablePartial->attrTotalSize = (int *)malloc(sizeof(int) * 4); + CHECK_POINTER(partTablePartial->attrTotalSize); + partTablePartial->dataPos = (int *)malloc(sizeof(int) * 4); + CHECK_POINTER(partTablePartial->dataPos); + partTablePartial->dataFormat = (int *) malloc(sizeof(int) * 4); + CHECK_POINTER(partTablePartial->dataFormat); + partTablePartial->content = (char **)malloc(sizeof(char *) * 4); + CHECK_POINTER(partTablePartial->content); + + int tuple_size = 0; + partTablePartial->attrSize[0] = partTable->attrSize[0]; + partTablePartial->attrIndex[0] = partTable->attrIndex[0]; + partTablePartial->attrType[0] = partTable->attrType[0]; + partTablePartial->dataPos[0] = partTable->dataPos[0]; + partTablePartial->dataFormat[0] = partTable->dataFormat[0]; + partTablePartial->attrTotalSize[0] = partTable->attrTotalSize[0]; + partTablePartial->content[0] = partTable->content[0]; + tuple_size += partTablePartial->attrSize[0]; + + partTablePartial->attrSize[1] = partTable->attrSize[1]; + partTablePartial->attrIndex[1] = partTable->attrIndex[1]; + partTablePartial->attrType[1] = partTable->attrType[1]; + partTablePartial->dataPos[1] = partTable->dataPos[1]; + partTablePartial->dataFormat[1] = partTable->dataFormat[1]; + partTablePartial->attrTotalSize[1] = partTable->attrTotalSize[1]; + partTablePartial->content[1] = partTable->content[1]; + tuple_size += partTablePartial->attrSize[1]; + + partTablePartial->attrSize[2] = partTable->attrSize[2]; + partTablePartial->attrIndex[2] = partTable->attrIndex[2]; + partTablePartial->attrType[2] = partTable->attrType[2]; + partTablePartial->dataPos[2] = partTable->dataPos[2]; + partTablePartial->dataFormat[2] = partTable->dataFormat[2]; + partTablePartial->attrTotalSize[2] = partTable->attrTotalSize[2]; + partTablePartial->content[2] = partTable->content[2]; + tuple_size += partTablePartial->attrSize[2]; + + partTablePartial->attrSize[3] = partTable->attrSize[3]; + partTablePartial->attrIndex[3] = partTable->attrIndex[3]; + partTablePartial->attrType[3] = partTable->attrType[3]; + partTablePartial->dataPos[3] = partTable->dataPos[3]; + partTablePartial->dataFormat[3] = partTable->dataFormat[3]; + partTablePartial->attrTotalSize[3] = partTable->attrTotalSize[3]; + partTablePartial->content[3] = partTable->content[3]; + tuple_size += partTablePartial->attrSize[3]; + + partTablePartial->tupleSize = tuple_size; + partTablePartial->tupleNum = partTable->tupleNum; + + partTablePartial->colIdxNum = 0; + partTablePartial->keepInGpuIdx = 1; + // Where conditions: AND(EQ(PART.5,20),LIKE(PART.4,LIST("MEDIUM",""))) + struct scanNode partRel; + partRel.tn = partTablePartial; + partRel.hasWhere = 1; + partRel.whereAttrNum = 2; + partRel.whereIndex = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(partRel.whereIndex); + partRel.outputNum = 2; + partRel.outputIndex = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(partRel.outputIndex); + partRel.outputIndex[0] = 0; + partRel.outputIndex[1] = 1; + partRel.whereIndex[0] = 2; + partRel.whereIndex[1] = 3; + partRel.keepInGpu = 1; + partRel.filter = (struct whereCondition *)malloc(sizeof(struct whereCondition)); + CHECK_POINTER(partRel.filter); + (partRel.filter)->nested = 0; + (partRel.filter)->expNum = 2; + (partRel.filter)->exp = (struct whereExp*)malloc(sizeof(struct whereExp) *2); + CHECK_POINTER((partRel.filter)->exp); + (partRel.filter)->andOr = AND; + 
(partRel.filter)->exp[0].index = 0; + (partRel.filter)->exp[0].relation = EQ; + (partRel.filter)->exp[0].dataPos = MEM; + { + int tmp = 20; + memcpy((partRel.filter)->exp[0].content, &tmp, sizeof(int)); + } + (partRel.filter)->exp[1].index = 1; + (partRel.filter)->exp[1].relation = LIKE; + (partRel.filter)->exp[1].dataPos = MEM; + (partRel.filter)->exp[1].vlen = 2; + { + char *vec = (char *)malloc(25 * 2); + memset(vec, 0, 25 * 2); + memcpy(vec + 25 * 0, "MEDIUM", 25); + memcpy(vec + 25 * 1, "", 25); + memcpy((partRel.filter)->exp[1].content, &vec, sizeof(char **)); + } + pa0 = tableScan(&partRel, &pp); + clock_gettime(CLOCK_REALTIME, &diskStart); + partTablePartial->content[0] = NULL; + partTablePartial->content[1] = NULL; + partTablePartial->content[2] = NULL; + partTablePartial->content[3] = NULL; + freeScan(&partRel); + + clock_gettime(CLOCK_REALTIME, &diskEnd); + pa0->colIdxNum = 0; + } + + // Process the TableNode for PARTSUPP + struct tableNode *ps0; + ps0 = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(ps0); + initTable(ps0); + { + struct tableNode *partsuppTablePartial; + partsuppTablePartial = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(partsuppTablePartial); + partsuppTablePartial->totalAttr = 3; + partsuppTablePartial->attrType = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(partsuppTablePartial->attrType); + partsuppTablePartial->attrSize = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(partsuppTablePartial->attrSize); + partsuppTablePartial->attrIndex = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(partsuppTablePartial->attrIndex); + partsuppTablePartial->attrTotalSize = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(partsuppTablePartial->attrTotalSize); + partsuppTablePartial->dataPos = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(partsuppTablePartial->dataPos); + partsuppTablePartial->dataFormat = (int *) malloc(sizeof(int) * 3); + CHECK_POINTER(partsuppTablePartial->dataFormat); + partsuppTablePartial->content = (char **)malloc(sizeof(char *) * 3); + CHECK_POINTER(partsuppTablePartial->content); + + int tuple_size = 0; + partsuppTablePartial->attrSize[0] = partsuppTable->attrSize[0]; + partsuppTablePartial->attrIndex[0] = partsuppTable->attrIndex[0]; + partsuppTablePartial->attrType[0] = partsuppTable->attrType[0]; + partsuppTablePartial->dataPos[0] = partsuppTable->dataPos[0]; + partsuppTablePartial->dataFormat[0] = partsuppTable->dataFormat[0]; + partsuppTablePartial->attrTotalSize[0] = partsuppTable->attrTotalSize[0]; + partsuppTablePartial->content[0] = partsuppTable->content[0]; + tuple_size += partsuppTablePartial->attrSize[0]; + + partsuppTablePartial->attrSize[1] = partsuppTable->attrSize[1]; + partsuppTablePartial->attrIndex[1] = partsuppTable->attrIndex[1]; + partsuppTablePartial->attrType[1] = partsuppTable->attrType[1]; + partsuppTablePartial->dataPos[1] = partsuppTable->dataPos[1]; + partsuppTablePartial->dataFormat[1] = partsuppTable->dataFormat[1]; + partsuppTablePartial->attrTotalSize[1] = partsuppTable->attrTotalSize[1]; + partsuppTablePartial->content[1] = partsuppTable->content[1]; + tuple_size += partsuppTablePartial->attrSize[1]; + + partsuppTablePartial->attrSize[2] = partsuppTable->attrSize[2]; + partsuppTablePartial->attrIndex[2] = partsuppTable->attrIndex[2]; + partsuppTablePartial->attrType[2] = partsuppTable->attrType[2]; + partsuppTablePartial->dataPos[2] = partsuppTable->dataPos[2]; + partsuppTablePartial->dataFormat[2] = partsuppTable->dataFormat[2]; + 
partsuppTablePartial->attrTotalSize[2] = partsuppTable->attrTotalSize[2]; + partsuppTablePartial->content[2] = partsuppTable->content[2]; + tuple_size += partsuppTablePartial->attrSize[2]; + + partsuppTablePartial->tupleSize = tuple_size; + partsuppTablePartial->tupleNum = partsuppTable->tupleNum; + + partsuppTablePartial->colIdxNum = 0; + partsuppTablePartial->keepInGpuIdx = 1; + ps0 = partsuppTablePartial; + ps0->colIdxNum = 0; + } + + // Join two tables: pa0, ps0 + struct tableNode *pa0_ps0; + + { + + struct joinNode jNode; + jNode.leftTable = pa0; + jNode.rightTable = ps0; + jNode.totalAttr = 4; + jNode.keepInGpu = (int *) malloc(sizeof(int) * 4); + CHECK_POINTER(jNode.keepInGpu); + for(int k=0; k<4; k++) + jNode.keepInGpu[k] = 1; + jNode.leftOutputAttrNum = 2; + jNode.rightOutputAttrNum = 2; + jNode.leftOutputAttrType = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.leftOutputAttrType); + jNode.leftOutputIndex = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.leftOutputIndex); + jNode.leftPos = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.leftPos); + jNode.tupleSize = 0; + jNode.leftOutputIndex[0] = 0; + jNode.leftOutputAttrType[0] = INT; + jNode.leftPos[0] = 0; + jNode.tupleSize += pa0->attrSize[0]; + jNode.leftOutputIndex[1] = 1; + jNode.leftOutputAttrType[1] = STRING; + jNode.leftPos[1] = 1; + jNode.tupleSize += pa0->attrSize[1]; + jNode.rightOutputAttrType = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.rightOutputAttrType); + jNode.rightOutputIndex = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.rightOutputIndex); + jNode.rightPos = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.rightPos); + jNode.rightOutputIndex[0] = 0; + jNode.rightOutputAttrType[0] = INT; + jNode.rightPos[0] = 2; + jNode.tupleSize += ps0->attrSize[0]; + jNode.rightOutputIndex[1] = 2; + jNode.rightOutputAttrType[1] = FLOAT; + jNode.rightPos[1] = 3; + jNode.tupleSize += ps0->attrSize[2]; + jNode.leftKeyIndex = 0; + jNode.rightKeyIndex = 1; + struct tableNode *joinRes; + joinRes = hashJoin(&jNode,&pp); + + // Where conditions: AND(EQ(RIGHT.2,SUBQ(0,LEFT.0))) + struct scanNode joinRel; + joinRel.tn = joinRes; + joinRel.hasWhere = 1; + joinRel.whereAttrNum = 1; + joinRel.whereIndex = (int *)malloc(sizeof(int) * 1); + CHECK_POINTER(joinRel.whereIndex); + joinRel.outputNum = 3; + joinRel.outputIndex = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(joinRel.outputIndex); + joinRel.outputIndex[0] = 0; + joinRel.outputIndex[1] = 1; + joinRel.outputIndex[2] = 2; + joinRel.whereIndex[0] = 3; + joinRel.keepInGpu = 1; + joinRel.filter = (struct whereCondition *)malloc(sizeof(struct whereCondition)); + CHECK_POINTER(joinRel.filter); + (joinRel.filter)->nested = 0; + (joinRel.filter)->expNum = 1; + (joinRel.filter)->exp = (struct whereExp*)malloc(sizeof(struct whereExp) *1); + CHECK_POINTER((joinRel.filter)->exp); + (joinRel.filter)->andOr = AND; + (joinRel.filter)->exp[0].index = 0; + (joinRel.filter)->exp[0].relation = EQ_VEC; + (joinRel.filter)->exp[0].dataPos = MEM; + + // Process the subquery + char *_PART_0 = (char *)malloc(sizeof(int)); + CHECK_POINTER(_PART_0); + subqRes0 = (char *)malloc(sizeof(float) * joinRes->tupleNum); + CHECK_POINTER(subqRes0); + + + //Manual optimizations (Avoid creating objects etc) + //=========================================================== + + struct tableNode *result; + char * subqRes1; + + // Process the TableNode for PARTSUPP + struct tableNode *ps1; + ps1 = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(ps1); + + //Scan table 
(Once) + initTable(ps1); + + struct tableNode *partsuppTablePartial; + partsuppTablePartial = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(partsuppTablePartial); + + partsuppTablePartial->totalAttr = 2; + partsuppTablePartial->attrType = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(partsuppTablePartial->attrType); + partsuppTablePartial->attrSize = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(partsuppTablePartial->attrSize); + partsuppTablePartial->attrIndex = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(partsuppTablePartial->attrIndex); + partsuppTablePartial->attrTotalSize = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(partsuppTablePartial->attrTotalSize); + partsuppTablePartial->dataPos = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(partsuppTablePartial->dataPos); + partsuppTablePartial->dataFormat = (int *) malloc(sizeof(int) * 2); + CHECK_POINTER(partsuppTablePartial->dataFormat); + partsuppTablePartial->content = (char **)malloc(sizeof(char *) * 2); + CHECK_POINTER(partsuppTablePartial->content); + int tuple_size = 0; + + partsuppTablePartial->attrSize[0] = partsuppTable->attrSize[2]; + partsuppTablePartial->attrIndex[0] = partsuppTable->attrIndex[2]; + partsuppTablePartial->attrType[0] = partsuppTable->attrType[2]; + partsuppTablePartial->dataPos[0] = partsuppTable->dataPos[2]; + partsuppTablePartial->dataFormat[0] = partsuppTable->dataFormat[2]; + partsuppTablePartial->attrTotalSize[0] = partsuppTable->attrTotalSize[2]; + tuple_size += partsuppTablePartial->attrSize[0]; + + partsuppTablePartial->attrSize[1] = partsuppTable->attrSize[1]; + partsuppTablePartial->attrIndex[1] = partsuppTable->attrIndex[1]; + partsuppTablePartial->attrType[1] = partsuppTable->attrType[1]; + partsuppTablePartial->dataPos[1] = partsuppTable->dataPos[1]; + partsuppTablePartial->dataFormat[1] = partsuppTable->dataFormat[1]; + partsuppTablePartial->attrTotalSize[1] = partsuppTable->attrTotalSize[1]; + + tuple_size += partsuppTablePartial->attrSize[1]; + partsuppTablePartial->tupleSize = tuple_size; + partsuppTablePartial->tupleNum = partsuppTable->tupleNum; + partsuppTablePartial->colIdxNum = 0; + partsuppTablePartial->keepInGpuIdx = 1; + + // Where conditions: EQ(Cons(ref. 
to PART.0),PARTSUPP.0) + struct scanNode partsuppRel; + partsuppRel.tn = partsuppTablePartial; + partsuppRel.hasWhere = 1; + partsuppRel.whereAttrNum = 1; + partsuppRel.whereIndex = (int *)malloc(sizeof(int) * 1); + CHECK_POINTER(partsuppRel.whereIndex); + partsuppRel.outputNum = 1; + partsuppRel.outputIndex = (int *)malloc(sizeof(int) * 1); + CHECK_POINTER(partsuppRel.outputIndex); + partsuppRel.outputIndex[0] = 0; + partsuppRel.whereIndex[0] = 1; + partsuppRel.keepInGpu = 1; + partsuppRel.filter = (struct whereCondition *)malloc(sizeof(struct whereCondition)); + CHECK_POINTER(partsuppRel.filter); + (partsuppRel.filter)->nested = 0; + (partsuppRel.filter)->expNum = 1; + (partsuppRel.filter)->exp = (struct whereExp*)malloc(sizeof(struct whereExp) *1); + CHECK_POINTER((partsuppRel.filter)->exp); + (partsuppRel.filter)->andOr = EXP; + (partsuppRel.filter)->exp[0].index = 0; + (partsuppRel.filter)->exp[0].relation = EQ; + (partsuppRel.filter)->exp[0].dataPos = MEM; + + //Get data reference only once + partsuppTablePartial->content[0] = partsuppTable->content[2]; + partsuppTablePartial->content[1] = partsuppTable->content[1]; + + + //Group by object + struct tableNode * ps1_gb; + struct groupByNode * gbNode = (struct groupByNode *) malloc(sizeof(struct groupByNode)); + CHECK_POINTER(gbNode); + gbNode->groupByColNum = 1; + gbNode->groupByIndex = (int *)malloc(sizeof(int) * 1); + CHECK_POINTER(gbNode->groupByIndex); + gbNode->groupByType = (int *)malloc(sizeof(int) * 1); + CHECK_POINTER(gbNode->groupByType); + gbNode->groupBySize = (int *)malloc(sizeof(int) * 1); + CHECK_POINTER(gbNode->groupBySize); + gbNode->groupByIndex[0] = -1; + gbNode->groupByType[0] = INT; + gbNode->groupBySize[0] = sizeof(int); + gbNode->outputAttrNum = 1; + gbNode->attrType = (int *) malloc(sizeof(int) *1); + CHECK_POINTER(gbNode->attrType); + gbNode->attrSize = (int *) malloc(sizeof(int) *1); + CHECK_POINTER(gbNode->attrSize); + gbNode->tupleSize = 0; + gbNode->gbExp = (struct groupByExp *) malloc(sizeof(struct groupByExp) * 1); + gbNode->tupleSize += sizeof(float); + gbNode->attrType[0] = FLOAT; + gbNode->attrSize[0] = sizeof(float); + gbNode->gbExp[0].func = MIN; + gbNode->gbExp[0].exp.op = NOOP; + gbNode->gbExp[0].exp.opNum = 1; + gbNode->gbExp[0].exp.exp = 0; + gbNode->gbExp[0].exp.opType = COLUMN_DECIMAL; + gbNode->gbExp[0].exp.opValue = 0; + + //Materialization + struct materializeNode mn; + + //-----TableScanNest Objects----- + + struct tableNode *res = (struct tableNode *) malloc(sizeof(struct tableNode)); + CHECK_POINTER(res); + res->totalAttr = partsuppRel.outputNum; + res->attrType = (int *) malloc(sizeof(int) * res->totalAttr); + CHECK_POINTER(res->attrType); + res->attrSize = (int *) malloc(sizeof(int) * res->totalAttr); + CHECK_POINTER(res->attrSize); + res->attrTotalSize = (int *) malloc(sizeof(int) * res->totalAttr); + CHECK_POINTER(res->attrTotalSize); + res->attrIndex = (int *) malloc(sizeof(int) * res->totalAttr); + CHECK_POINTER(res->attrIndex); + res->dataPos = (int *) malloc(sizeof(int) * res->totalAttr); + CHECK_POINTER(res->dataPos); + res->dataFormat = (int *) malloc(sizeof(int) * res->totalAttr); + CHECK_POINTER(res->dataFormat); + res->content = (char **) malloc(sizeof(char *) * res->totalAttr); + CHECK_POINTER(res->content); + res->colIdxNum = 0; + for(int i=0;i<res->totalAttr;i++){ + int index = partsuppRel.outputIndex[i]; + res->attrType[i] = partsuppRel.tn->attrType[index]; + res->attrSize[i] = partsuppRel.tn->attrSize[index]; + } + + //Column and Where objs + int attrNum = 
partsuppRel.whereAttrNum; + char ** column = (char **) malloc(attrNum * sizeof(char *)); + CHECK_POINTER(column); + int * whereFree = (int *)malloc(attrNum * sizeof(int)); + CHECK_POINTER(whereFree); + int * colWherePos = (int *)malloc(partsuppRel.outputNum * sizeof(int)); + CHECK_POINTER(colWherePos); + for(int i=0;i<partsuppRel.outputNum;i++) + colWherePos[i] = -1; + + //Gpu objs and launch configuration + int *gpuCount = NULL, *gpuFilter = NULL, *gpuPsum = NULL; + dim3 grid(2048); + dim3 block(256); + long totalTupleNum = partsuppRel.tn->tupleNum; + int blockNum = totalTupleNum / block.x + 1; + if(blockNum<2048){ + grid = blockNum; + } + int threadNum = grid.x * block.x; + CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuFilter,sizeof(int) * totalTupleNum)); + CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuPsum,sizeof(int)*threadNum)); + CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuCount,sizeof(int)*threadNum)); + //------------------------------- + + //=========================================================== + + //Loop through all the tuples + for(int tupleid = 0; tupleid < joinRes->tupleNum; tupleid++){ + + //Copy the current tuple's key value back to the host + CUDA_SAFE_CALL_NO_SYNC( cudaMemcpy(_PART_0, (char *)(joinRes->content[0]) + tupleid * sizeof(int), sizeof(int), cudaMemcpyDeviceToHost) ); + int tmp = *(int *)(_PART_0); + memcpy((partsuppRel.filter)->exp[0].content, &tmp, sizeof(int)); + + //Table scan to filter linking predicate + ps1 = tableScanNest(&partsuppRel, res, &pp, + + //Other objs + column, + whereFree, + colWherePos, + + //Gpu objs, + gpuCount, + gpuFilter, + gpuPsum, + grid, + block + ); + ps1->colIdxNum = 0; + + //Group by + gbNode->table = ps1; + ps1_gb = groupBy(gbNode, &pp); + + //Materialize (we need to move this outside the loop) + mn.table = ps1_gb; + char *final = materializeCol(&mn, &pp); + + //Copy the subquery result for this tuple into subqRes0 + memcpy(subqRes0 + tupleid * sizeof(float), final, sizeof(float)); + } + + //Manual optimizations (delete objects at the end etc) + //=========================================================== + freeScan(&partsuppRel); + free(_PART_0); + + //--Table scan de-allocation + CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuPsum)); + CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuFilter)); + CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuCount)); + free(column); + //=========================================================== + + memcpy((joinRel.filter)->exp[0].content, &subqRes0, sizeof(void *)); + pa0_ps0 = tableScan(&joinRel, &pp); + freeScan(&joinRel); + + } + + // Process the TableNode for SUPPLIER + struct tableNode *su0; + su0 = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(su0); + initTable(su0); + { + struct tableNode *supplierTablePartial; + supplierTablePartial = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(supplierTablePartial); + supplierTablePartial->totalAttr = 7; + supplierTablePartial->attrType = (int *)malloc(sizeof(int) * 7); + CHECK_POINTER(supplierTablePartial->attrType); + supplierTablePartial->attrSize = (int *)malloc(sizeof(int) * 7); + CHECK_POINTER(supplierTablePartial->attrSize); + supplierTablePartial->attrIndex = (int *)malloc(sizeof(int) * 7); + CHECK_POINTER(supplierTablePartial->attrIndex); + supplierTablePartial->attrTotalSize = (int *)malloc(sizeof(int) * 7); + CHECK_POINTER(supplierTablePartial->attrTotalSize); + supplierTablePartial->dataPos = (int *)malloc(sizeof(int) * 7); + CHECK_POINTER(supplierTablePartial->dataPos); + supplierTablePartial->dataFormat = (int *) malloc(sizeof(int) * 7); + CHECK_POINTER(supplierTablePartial->dataFormat); + supplierTablePartial->content = (char **)malloc(sizeof(char *) * 7); + CHECK_POINTER(supplierTablePartial->content); + + int tuple_size = 0; + supplierTablePartial->attrSize[0] = supplierTable->attrSize[0]; + 
supplierTablePartial->attrIndex[0] = supplierTable->attrIndex[0]; + supplierTablePartial->attrType[0] = supplierTable->attrType[0]; + supplierTablePartial->dataPos[0] = supplierTable->dataPos[0]; + supplierTablePartial->dataFormat[0] = supplierTable->dataFormat[0]; + supplierTablePartial->attrTotalSize[0] = supplierTable->attrTotalSize[0]; + supplierTablePartial->content[0] = supplierTable->content[0]; + tuple_size += supplierTablePartial->attrSize[0]; + + supplierTablePartial->attrSize[1] = supplierTable->attrSize[1]; + supplierTablePartial->attrIndex[1] = supplierTable->attrIndex[1]; + supplierTablePartial->attrType[1] = supplierTable->attrType[1]; + supplierTablePartial->dataPos[1] = supplierTable->dataPos[1]; + supplierTablePartial->dataFormat[1] = supplierTable->dataFormat[1]; + supplierTablePartial->attrTotalSize[1] = supplierTable->attrTotalSize[1]; + supplierTablePartial->content[1] = supplierTable->content[1]; + tuple_size += supplierTablePartial->attrSize[1]; + + supplierTablePartial->attrSize[2] = supplierTable->attrSize[2]; + supplierTablePartial->attrIndex[2] = supplierTable->attrIndex[2]; + supplierTablePartial->attrType[2] = supplierTable->attrType[2]; + supplierTablePartial->dataPos[2] = supplierTable->dataPos[2]; + supplierTablePartial->dataFormat[2] = supplierTable->dataFormat[2]; + supplierTablePartial->attrTotalSize[2] = supplierTable->attrTotalSize[2]; + supplierTablePartial->content[2] = supplierTable->content[2]; + tuple_size += supplierTablePartial->attrSize[2]; + + supplierTablePartial->attrSize[3] = supplierTable->attrSize[3]; + supplierTablePartial->attrIndex[3] = supplierTable->attrIndex[3]; + supplierTablePartial->attrType[3] = supplierTable->attrType[3]; + supplierTablePartial->dataPos[3] = supplierTable->dataPos[3]; + supplierTablePartial->dataFormat[3] = supplierTable->dataFormat[3]; + supplierTablePartial->attrTotalSize[3] = supplierTable->attrTotalSize[3]; + supplierTablePartial->content[3] = supplierTable->content[3]; + tuple_size += supplierTablePartial->attrSize[3]; + + supplierTablePartial->attrSize[4] = supplierTable->attrSize[4]; + supplierTablePartial->attrIndex[4] = supplierTable->attrIndex[4]; + supplierTablePartial->attrType[4] = supplierTable->attrType[4]; + supplierTablePartial->dataPos[4] = supplierTable->dataPos[4]; + supplierTablePartial->dataFormat[4] = supplierTable->dataFormat[4]; + supplierTablePartial->attrTotalSize[4] = supplierTable->attrTotalSize[4]; + supplierTablePartial->content[4] = supplierTable->content[4]; + tuple_size += supplierTablePartial->attrSize[4]; + + supplierTablePartial->attrSize[5] = supplierTable->attrSize[5]; + supplierTablePartial->attrIndex[5] = supplierTable->attrIndex[5]; + supplierTablePartial->attrType[5] = supplierTable->attrType[5]; + supplierTablePartial->dataPos[5] = supplierTable->dataPos[5]; + supplierTablePartial->dataFormat[5] = supplierTable->dataFormat[5]; + supplierTablePartial->attrTotalSize[5] = supplierTable->attrTotalSize[5]; + supplierTablePartial->content[5] = supplierTable->content[5]; + tuple_size += supplierTablePartial->attrSize[5]; + + supplierTablePartial->attrSize[6] = supplierTable->attrSize[6]; + supplierTablePartial->attrIndex[6] = supplierTable->attrIndex[6]; + supplierTablePartial->attrType[6] = supplierTable->attrType[6]; + supplierTablePartial->dataPos[6] = supplierTable->dataPos[6]; + supplierTablePartial->dataFormat[6] = supplierTable->dataFormat[6]; + supplierTablePartial->attrTotalSize[6] = supplierTable->attrTotalSize[6]; + supplierTablePartial->content[6] = 
supplierTable->content[6]; + tuple_size += supplierTablePartial->attrSize[6]; + + supplierTablePartial->tupleSize = tuple_size; + supplierTablePartial->tupleNum = supplierTable->tupleNum; + + supplierTablePartial->colIdxNum = 0; + supplierTablePartial->keepInGpuIdx = 1; + su0 = supplierTablePartial; + su0->colIdxNum = 0; + } + + // Join two tables: pa0_ps0, su0 + struct tableNode *pa0_ps0_su0; + + { + + struct joinNode jNode; + jNode.leftTable = pa0_ps0; + jNode.rightTable = su0; + jNode.totalAttr = 8; + jNode.keepInGpu = (int *) malloc(sizeof(int) * 8); + CHECK_POINTER(jNode.keepInGpu); + for(int k=0; k<8; k++) + jNode.keepInGpu[k] = 1; + jNode.leftOutputAttrNum = 2; + jNode.rightOutputAttrNum = 6; + jNode.leftOutputAttrType = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.leftOutputAttrType); + jNode.leftOutputIndex = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.leftOutputIndex); + jNode.leftPos = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.leftPos); + jNode.tupleSize = 0; + jNode.leftOutputIndex[0] = 0; + jNode.leftOutputAttrType[0] = INT; + jNode.leftPos[0] = 2; + jNode.tupleSize += pa0_ps0->attrSize[0]; + jNode.leftOutputIndex[1] = 1; + jNode.leftOutputAttrType[1] = STRING; + jNode.leftPos[1] = 3; + jNode.tupleSize += pa0_ps0->attrSize[1]; + jNode.rightOutputAttrType = (int *)malloc(sizeof(int)*6); + CHECK_POINTER(jNode.rightOutputAttrType); + jNode.rightOutputIndex = (int *)malloc(sizeof(int)*6); + CHECK_POINTER(jNode.rightOutputIndex); + jNode.rightPos = (int *)malloc(sizeof(int)*6); + CHECK_POINTER(jNode.rightPos); + jNode.rightOutputIndex[0] = 0; + jNode.rightOutputAttrType[0] = FLOAT; + jNode.rightPos[0] = 0; + jNode.tupleSize += su0->attrSize[0]; + jNode.rightOutputIndex[1] = 1; + jNode.rightOutputAttrType[1] = STRING; + jNode.rightPos[1] = 1; + jNode.tupleSize += su0->attrSize[1]; + jNode.rightOutputIndex[2] = 2; + jNode.rightOutputAttrType[2] = STRING; + jNode.rightPos[2] = 4; + jNode.tupleSize += su0->attrSize[2]; + jNode.rightOutputIndex[3] = 3; + jNode.rightOutputAttrType[3] = STRING; + jNode.rightPos[3] = 5; + jNode.tupleSize += su0->attrSize[3]; + jNode.rightOutputIndex[4] = 4; + jNode.rightOutputAttrType[4] = STRING; + jNode.rightPos[4] = 6; + jNode.tupleSize += su0->attrSize[4]; + jNode.rightOutputIndex[5] = 5; + jNode.rightOutputAttrType[5] = INT; + jNode.rightPos[5] = 7; + jNode.tupleSize += su0->attrSize[5]; + jNode.leftKeyIndex = 2; + jNode.rightKeyIndex = 6; + struct tableNode *joinRes; + joinRes = hashJoin(&jNode,&pp); + + pa0_ps0_su0 = joinRes; + } + + // Process the TableNode for NATION + struct tableNode *na0; + na0 = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(na0); + initTable(na0); + { + struct tableNode *nationTablePartial; + nationTablePartial = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(nationTablePartial); + nationTablePartial->totalAttr = 3; + nationTablePartial->attrType = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(nationTablePartial->attrType); + nationTablePartial->attrSize = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(nationTablePartial->attrSize); + nationTablePartial->attrIndex = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(nationTablePartial->attrIndex); + nationTablePartial->attrTotalSize = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(nationTablePartial->attrTotalSize); + nationTablePartial->dataPos = (int *)malloc(sizeof(int) * 3); + CHECK_POINTER(nationTablePartial->dataPos); + nationTablePartial->dataFormat = (int *) malloc(sizeof(int) * 3); + 
CHECK_POINTER(nationTablePartial->dataFormat); + nationTablePartial->content = (char **)malloc(sizeof(char *) * 3); + CHECK_POINTER(nationTablePartial->content); + + int tuple_size = 0; + nationTablePartial->attrSize[0] = nationTable->attrSize[0]; + nationTablePartial->attrIndex[0] = nationTable->attrIndex[0]; + nationTablePartial->attrType[0] = nationTable->attrType[0]; + nationTablePartial->dataPos[0] = nationTable->dataPos[0]; + nationTablePartial->dataFormat[0] = nationTable->dataFormat[0]; + nationTablePartial->attrTotalSize[0] = nationTable->attrTotalSize[0]; + nationTablePartial->content[0] = nationTable->content[0]; + tuple_size += nationTablePartial->attrSize[0]; + + nationTablePartial->attrSize[1] = nationTable->attrSize[1]; + nationTablePartial->attrIndex[1] = nationTable->attrIndex[1]; + nationTablePartial->attrType[1] = nationTable->attrType[1]; + nationTablePartial->dataPos[1] = nationTable->dataPos[1]; + nationTablePartial->dataFormat[1] = nationTable->dataFormat[1]; + nationTablePartial->attrTotalSize[1] = nationTable->attrTotalSize[1]; + nationTablePartial->content[1] = nationTable->content[1]; + tuple_size += nationTablePartial->attrSize[1]; + + nationTablePartial->attrSize[2] = nationTable->attrSize[2]; + nationTablePartial->attrIndex[2] = nationTable->attrIndex[2]; + nationTablePartial->attrType[2] = nationTable->attrType[2]; + nationTablePartial->dataPos[2] = nationTable->dataPos[2]; + nationTablePartial->dataFormat[2] = nationTable->dataFormat[2]; + nationTablePartial->attrTotalSize[2] = nationTable->attrTotalSize[2]; + nationTablePartial->content[2] = nationTable->content[2]; + tuple_size += nationTablePartial->attrSize[2]; + + nationTablePartial->tupleSize = tuple_size; + nationTablePartial->tupleNum = nationTable->tupleNum; + + nationTablePartial->colIdxNum = 0; + nationTablePartial->keepInGpuIdx = 1; + na0 = nationTablePartial; + na0->colIdxNum = 0; + } + + // Join two tables: pa0_ps0_su0, na0 + struct tableNode *pa0_ps0_su0_na0; + + { + + struct joinNode jNode; + jNode.leftTable = pa0_ps0_su0; + jNode.rightTable = na0; + jNode.totalAttr = 9; + jNode.keepInGpu = (int *) malloc(sizeof(int) * 9); + CHECK_POINTER(jNode.keepInGpu); + for(int k=0; k<9; k++) + jNode.keepInGpu[k] = 1; + jNode.leftOutputAttrNum = 7; + jNode.rightOutputAttrNum = 2; + jNode.leftOutputAttrType = (int *)malloc(sizeof(int)*7); + CHECK_POINTER(jNode.leftOutputAttrType); + jNode.leftOutputIndex = (int *)malloc(sizeof(int)*7); + CHECK_POINTER(jNode.leftOutputIndex); + jNode.leftPos = (int *)malloc(sizeof(int)*7); + CHECK_POINTER(jNode.leftPos); + jNode.tupleSize = 0; + jNode.leftOutputIndex[0] = 0; + jNode.leftOutputAttrType[0] = FLOAT; + jNode.leftPos[0] = 0; + jNode.tupleSize += pa0_ps0_su0->attrSize[0]; + jNode.leftOutputIndex[1] = 1; + jNode.leftOutputAttrType[1] = STRING; + jNode.leftPos[1] = 1; + jNode.tupleSize += pa0_ps0_su0->attrSize[1]; + jNode.leftOutputIndex[2] = 2; + jNode.leftOutputAttrType[2] = INT; + jNode.leftPos[2] = 3; + jNode.tupleSize += pa0_ps0_su0->attrSize[2]; + jNode.leftOutputIndex[3] = 3; + jNode.leftOutputAttrType[3] = STRING; + jNode.leftPos[3] = 4; + jNode.tupleSize += pa0_ps0_su0->attrSize[3]; + jNode.leftOutputIndex[4] = 4; + jNode.leftOutputAttrType[4] = STRING; + jNode.leftPos[4] = 5; + jNode.tupleSize += pa0_ps0_su0->attrSize[4]; + jNode.leftOutputIndex[5] = 5; + jNode.leftOutputAttrType[5] = STRING; + jNode.leftPos[5] = 6; + jNode.tupleSize += pa0_ps0_su0->attrSize[5]; + jNode.leftOutputIndex[6] = 6; + jNode.leftOutputAttrType[6] = STRING; + jNode.leftPos[6] = 
7; + jNode.tupleSize += pa0_ps0_su0->attrSize[6]; + jNode.rightOutputAttrType = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.rightOutputAttrType); + jNode.rightOutputIndex = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.rightOutputIndex); + jNode.rightPos = (int *)malloc(sizeof(int)*2); + CHECK_POINTER(jNode.rightPos); + jNode.rightOutputIndex[0] = 0; + jNode.rightOutputAttrType[0] = STRING; + jNode.rightPos[0] = 2; + jNode.tupleSize += na0->attrSize[0]; + jNode.rightOutputIndex[1] = 1; + jNode.rightOutputAttrType[1] = INT; + jNode.rightPos[1] = 8; + jNode.tupleSize += na0->attrSize[1]; + jNode.leftKeyIndex = 7; + jNode.rightKeyIndex = 2; + struct tableNode *joinRes; + joinRes = hashJoin(&jNode,&pp); + + pa0_ps0_su0_na0 = joinRes; + } + + // Process the TableNode for REGION + struct tableNode *re0; + re0 = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(re0); + initTable(re0); + { + struct tableNode *regionTablePartial; + regionTablePartial = (struct tableNode *)malloc(sizeof(struct tableNode)); + CHECK_POINTER(regionTablePartial); + regionTablePartial->totalAttr = 2; + regionTablePartial->attrType = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(regionTablePartial->attrType); + regionTablePartial->attrSize = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(regionTablePartial->attrSize); + regionTablePartial->attrIndex = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(regionTablePartial->attrIndex); + regionTablePartial->attrTotalSize = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(regionTablePartial->attrTotalSize); + regionTablePartial->dataPos = (int *)malloc(sizeof(int) * 2); + CHECK_POINTER(regionTablePartial->dataPos); + regionTablePartial->dataFormat = (int *) malloc(sizeof(int) * 2); + CHECK_POINTER(regionTablePartial->dataFormat); + regionTablePartial->content = (char **)malloc(sizeof(char *) * 2); + CHECK_POINTER(regionTablePartial->content); + + int tuple_size = 0; + regionTablePartial->attrSize[0] = regionTable->attrSize[0]; + regionTablePartial->attrIndex[0] = regionTable->attrIndex[0]; + regionTablePartial->attrType[0] = regionTable->attrType[0]; + regionTablePartial->dataPos[0] = regionTable->dataPos[0]; + regionTablePartial->dataFormat[0] = regionTable->dataFormat[0]; + regionTablePartial->attrTotalSize[0] = regionTable->attrTotalSize[0]; + regionTablePartial->content[0] = regionTable->content[0]; + tuple_size += regionTablePartial->attrSize[0]; + + regionTablePartial->attrSize[1] = regionTable->attrSize[1]; + regionTablePartial->attrIndex[1] = regionTable->attrIndex[1]; + regionTablePartial->attrType[1] = regionTable->attrType[1]; + regionTablePartial->dataPos[1] = regionTable->dataPos[1]; + regionTablePartial->dataFormat[1] = regionTable->dataFormat[1]; + regionTablePartial->attrTotalSize[1] = regionTable->attrTotalSize[1]; + regionTablePartial->content[1] = regionTable->content[1]; + tuple_size += regionTablePartial->attrSize[1]; + + regionTablePartial->tupleSize = tuple_size; + regionTablePartial->tupleNum = regionTable->tupleNum; + + regionTablePartial->colIdxNum = 0; + regionTablePartial->keepInGpuIdx = 1; + // Where conditions: EQ(REGION.1,"ASIA") + struct scanNode regionRel; + regionRel.tn = regionTablePartial; + regionRel.hasWhere = 1; + regionRel.whereAttrNum = 1; + regionRel.whereIndex = (int *)malloc(sizeof(int) * 1); + CHECK_POINTER(regionRel.whereIndex); + regionRel.outputNum = 1; + regionRel.outputIndex = (int *)malloc(sizeof(int) * 1); + CHECK_POINTER(regionRel.outputIndex); + regionRel.outputIndex[0] = 0; + 
regionRel.whereIndex[0] = 1; + regionRel.keepInGpu = 1; + regionRel.filter = (struct whereCondition *)malloc(sizeof(struct whereCondition)); + CHECK_POINTER(regionRel.filter); + (regionRel.filter)->nested = 0; + (regionRel.filter)->expNum = 1; + (regionRel.filter)->exp = (struct whereExp*)malloc(sizeof(struct whereExp) *1); + CHECK_POINTER((regionRel.filter)->exp); + (regionRel.filter)->andOr = EXP; + (regionRel.filter)->exp[0].index = 0; + (regionRel.filter)->exp[0].relation = EQ; + (regionRel.filter)->exp[0].dataPos = MEM; + strcpy((regionRel.filter)->exp[0].content, "ASIA"); + + re0 = tableScan(&regionRel, &pp); + clock_gettime(CLOCK_REALTIME, &diskStart); + regionTablePartial->content[0] = NULL; + regionTablePartial->content[1] = NULL; + freeScan(&regionRel); + + clock_gettime(CLOCK_REALTIME, &diskEnd); + re0->colIdxNum = 0; + } + + // Join two tables: pa0_ps0_su0_na0, re0 + struct tableNode *pa0_ps0_su0_na0_re0; + + { + + struct joinNode jNode; + jNode.leftTable = pa0_ps0_su0_na0; + jNode.rightTable = re0; + jNode.totalAttr = 8; + jNode.keepInGpu = (int *) malloc(sizeof(int) * 8); + CHECK_POINTER(jNode.keepInGpu); + for(int k=0; k<8; k++) + jNode.keepInGpu[k] = 1; + jNode.leftOutputAttrNum = 8; + jNode.rightOutputAttrNum = 0; + jNode.leftOutputAttrType = (int *)malloc(sizeof(int)*8); + CHECK_POINTER(jNode.leftOutputAttrType); + jNode.leftOutputIndex = (int *)malloc(sizeof(int)*8); + CHECK_POINTER(jNode.leftOutputIndex); + jNode.leftPos = (int *)malloc(sizeof(int)*8); + CHECK_POINTER(jNode.leftPos); + jNode.tupleSize = 0; + jNode.leftOutputIndex[0] = 0; + jNode.leftOutputAttrType[0] = FLOAT; + jNode.leftPos[0] = 0; + jNode.tupleSize += pa0_ps0_su0_na0->attrSize[0]; + jNode.leftOutputIndex[1] = 1; + jNode.leftOutputAttrType[1] = STRING; + jNode.leftPos[1] = 1; + jNode.tupleSize += pa0_ps0_su0_na0->attrSize[1]; + jNode.leftOutputIndex[2] = 2; + jNode.leftOutputAttrType[2] = STRING; + jNode.leftPos[2] = 2; + jNode.tupleSize += pa0_ps0_su0_na0->attrSize[2]; + jNode.leftOutputIndex[3] = 3; + jNode.leftOutputAttrType[3] = INT; + jNode.leftPos[3] = 3; + jNode.tupleSize += pa0_ps0_su0_na0->attrSize[3]; + jNode.leftOutputIndex[4] = 4; + jNode.leftOutputAttrType[4] = STRING; + jNode.leftPos[4] = 4; + jNode.tupleSize += pa0_ps0_su0_na0->attrSize[4]; + jNode.leftOutputIndex[5] = 5; + jNode.leftOutputAttrType[5] = STRING; + jNode.leftPos[5] = 5; + jNode.tupleSize += pa0_ps0_su0_na0->attrSize[5]; + jNode.leftOutputIndex[6] = 6; + jNode.leftOutputAttrType[6] = STRING; + jNode.leftPos[6] = 6; + jNode.tupleSize += pa0_ps0_su0_na0->attrSize[6]; + jNode.leftOutputIndex[7] = 7; + jNode.leftOutputAttrType[7] = STRING; + jNode.leftPos[7] = 7; + jNode.tupleSize += pa0_ps0_su0_na0->attrSize[7]; + jNode.rightOutputAttrType = (int *)malloc(sizeof(int)*0); + CHECK_POINTER(jNode.rightOutputAttrType); + jNode.rightOutputIndex = (int *)malloc(sizeof(int)*0); + CHECK_POINTER(jNode.rightOutputIndex); + jNode.rightPos = (int *)malloc(sizeof(int)*0); + CHECK_POINTER(jNode.rightPos); + jNode.leftKeyIndex = 8; + jNode.rightKeyIndex = 0; + struct tableNode *joinRes; + joinRes = hashJoin(&jNode,&pp); + + pa0_ps0_su0_na0_re0 = joinRes; + } + + struct tableNode * pa0_ps0_su0_na0_re0_ob; + { + + struct orderByNode * odNode = (struct orderByNode *) malloc(sizeof(struct orderByNode)); + CHECK_POINTER(odNode); + odNode->table = pa0_ps0_su0_na0_re0; + odNode->orderByNum = 4; + odNode->orderBySeq = (int *) malloc(sizeof(int) * odNode->orderByNum); + CHECK_POINTER(odNode->orderBySeq); + odNode->orderByIndex = (int *) malloc(sizeof(int) * 
odNode->orderByNum); + CHECK_POINTER(odNode->orderByIndex); + odNode->orderBySeq[0] = ASC; + odNode->orderByIndex[0] = 0; + odNode->orderBySeq[1] = ASC; + odNode->orderByIndex[1] = 2; + odNode->orderBySeq[2] = ASC; + odNode->orderByIndex[2] = 1; + odNode->orderBySeq[3] = ASC; + odNode->orderByIndex[3] = 3; + pa0_ps0_su0_na0_re0_ob = orderBy(odNode, &pp); + freeOrderByNode(odNode); + + } + + result = pa0_ps0_su0_na0_re0_ob; + struct materializeNode mn; + mn.table = result; + char *final = materializeCol(&mn, &pp); + printMaterializedTable(mn, final); + clock_gettime(CLOCK_REALTIME, &end); + double timeE = (end.tv_sec - start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec; + printf("<--Disk Load Time--> : %lf\n", diskTotal/(1000*1000)); + printf("\n"); + printf("<--Build index time--> : %lf\n", pp.buildIndexTotal/(1000*1000)); + + printf("\n"); + printf("<---TableScan()--->\n"); + printf("Total time : %lf\n", pp.tableScanTotal/(1000*1000)); + printf("Calls : %d\n", pp.tableScanCount); + + printf("Step 1 - memCopy where clause : %lf\n", pp.whereMemCopy_s1/(1000*1000)); + printf("Step 2 - memCopy predicate col : %lf\n", pp.dataMemCopy_s2/(1000*1000)); + printf("Step 3 - Scan : %lf\n", pp.scanTotal_s3/(1000*1000)); + printf("Step 4 - CountRes(PreScan) : %lf\n", pp.preScanTotal_s4/(1000*1000)); + printf("Step 5 - memReturn countRes : %lf\n", pp.preScanResultMemCopy_s5/(1000*1000)); + printf("Step 6 - Copy rest of columns : %lf\n", pp.dataMemCopyOther_s6/(1000*1000)); + printf("Step 7 - Materialize result : %lf\n", pp.materializeResult_s7/(1000*1000)); + printf("Step 8 - Copy final result : %lf\n", pp.finalResultMemCopy_s8/(1000*1000)); + printf("Other 1 - Create tableNode : %lf\n", pp.create_tableNode_S01/(1000*1000)); + printf("Other 2 - Malloc res : %lf\n", pp.mallocRes_S02/(1000*1000)); + printf("Other 3 - Deallocate buffers : %lf\n", pp.deallocateBuffs_S03/(1000*1000)); + printf("<----------------->"); + printf("\n"); + printf("Total Time: %lf\n", timeE/(1000*1000)); +} + diff --git a/cuda_code/driver_functions_1.cu b/cuda_code/driver_functions_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..f074310e8d65b83b2c5bed730a28ace19b918521 --- /dev/null +++ b/cuda_code/driver_functions_1.cu @@ -0,0 +1,1067 @@ +// RUN: %run_test hipify "%s" "%t" %hipify_args 2 --skip-excluded-preprocessor-conditional-blocks --experimental %clang_args + +// CHECK: #include <hip/hip_runtime.h> +#include <cuda.h> +#include <string> +#include <stdio.h> + +int main() { + printf("09. 
CUDA Driver API Functions synthetic test\n"); + + unsigned int flags = 0; + size_t bytes = 0; + size_t bytes_2 = 0; + void* image = nullptr; + std::string name = "str"; + uint32_t u_value = 0; + float ms = 0; + int* value = 0; + unsigned long long ull =0; + // CHECK: hipDevice_t device; + // CHECK-NEXT: hipCtx_t context; + // CHECK-NEXT: hipFuncCache_t func_cache; + // CHECK-NEXT: hipLimit_t limit; + // CHECK-NEXT: hipSharedMemConfig pconfig; + // CHECK-NEXT: hipFunction_t function; + // CHECK-NEXT: hipFunction_attribute function_attribute; + // CHECK-NEXT: hipModule_t module_; + // CHECK-NEXT: hipDeviceptr_t deviceptr; + // CHECK-NEXT: hipDeviceptr_t deviceptr_2; + // CHECK-NEXT: hipTexRef texref; + // CHECK-NEXT: hipJitOption jit_option; + // CHECK-NEXT: hipArray_t array_; + // CHECK-NEXT: HIP_ARRAY3D_DESCRIPTOR ARRAY3D_DESCRIPTOR; + // CHECK-NEXT: HIP_ARRAY_DESCRIPTOR ARRAY_DESCRIPTOR; + // CHECK-NEXT: hipIpcEventHandle_t ipcEventHandle; + // CHECK-NEXT: hipEvent_t event_; + // CHECK-NEXT: hipEvent_t event_start; + // CHECK-NEXT: hipEvent_t event_end; + // CHECK-NEXT: hipIpcMemHandle_t ipcMemHandle; + // CHECK-NEXT: hip_Memcpy2D MEMCPY2D; + // CHECK-NEXT: HIP_MEMCPY3D MEMCPY3D; + // CHECK-NEXT: hipStream_t stream; + // CHECK-NEXT: hipMipmappedArray_t mipmappedArray; + // CHECK-NEXT: hipStreamCallback_t streamCallback; + // CHECK-NEXT: hipPointer_attribute pointer_attribute; + CUdevice device; + CUcontext context; + CUfunc_cache func_cache; + CUlimit limit; + CUsharedconfig pconfig; + CUfunction function; + CUfunction_attribute function_attribute; + CUmodule module_; + CUdeviceptr deviceptr; + CUdeviceptr deviceptr_2; + CUtexref texref; + CUjit_option jit_option; + CUarray array_; + CUDA_ARRAY3D_DESCRIPTOR ARRAY3D_DESCRIPTOR; + CUDA_ARRAY_DESCRIPTOR ARRAY_DESCRIPTOR; + CUipcEventHandle ipcEventHandle; + CUevent event_; + CUevent event_start; + CUevent event_end; + CUipcMemHandle ipcMemHandle; + CUDA_MEMCPY2D MEMCPY2D; + CUDA_MEMCPY3D MEMCPY3D; + CUstream stream; + CUmipmappedArray mipmappedArray; + CUstreamCallback streamCallback; + CUpointer_attribute pointer_attribute; + +#if CUDA_VERSION > 7050 + // CHECK: hipMemRangeAttribute MemoryRangeAttribute; + // CHECK-NEXT: hipMemoryAdvise MemoryAdvise; + CUmem_range_attribute MemoryRangeAttribute; + CUmem_advise MemoryAdvise; +#endif + +#if CUDA_VERSION > 9020 + // CHECK: hipGraph_t graph, graph2; + // CHECK-NEXT: hipGraphNode_t graphNode, graphNode2; + // CHECK-NEXT: const hipGraphNode_t *pGraphNode = nullptr; + // CHECK-NEXT: hipKernelNodeParams KERNEL_NODE_PARAMS; + // CHECK-NEXT: hipMemsetParams MEMSET_NODE_PARAMS; + // CHECK-NEXT: hipGraphExec_t graphExec; + // CHECK-NEXT: hipExternalMemory_t externalMemory; + // CHECK-NEXT: hipExternalSemaphore_t externalSemaphore; + // CHECK-NEXT: hipExternalMemoryBufferDesc EXTERNAL_MEMORY_BUFFER_DESC; + // CHECK-NEXT: hipExternalMemoryHandleDesc EXTERNAL_MEMORY_HANDLE_DESC; + // CHECK-NEXT: hipExternalSemaphoreHandleDesc EXTERNAL_SEMAPHORE_HANDLE_DESC; + // CHECK-NEXT: hipExternalSemaphoreSignalParams EXTERNAL_SEMAPHORE_SIGNAL_PARAMS; + // CHECK-NEXT: hipExternalSemaphoreWaitParams EXTERNAL_SEMAPHORE_WAIT_PARAMS; + // CHECK-NEXT: hipStreamCaptureStatus streamCaptureStatus; + // CHECK-NEXT: hipGraphNodeType graphNodeType; + // CHECK-NEXT: hipHostNodeParams host_node_params; + CUgraph graph, graph2; + CUgraphNode graphNode, graphNode2; + const CUgraphNode *pGraphNode = nullptr; + CUDA_KERNEL_NODE_PARAMS KERNEL_NODE_PARAMS; + CUDA_MEMSET_NODE_PARAMS MEMSET_NODE_PARAMS; + CUgraphExec graphExec; + 
CUexternalMemory externalMemory; + CUexternalSemaphore externalSemaphore; + CUDA_EXTERNAL_MEMORY_BUFFER_DESC EXTERNAL_MEMORY_BUFFER_DESC; + CUDA_EXTERNAL_MEMORY_HANDLE_DESC EXTERNAL_MEMORY_HANDLE_DESC; + CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC EXTERNAL_SEMAPHORE_HANDLE_DESC; + CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS EXTERNAL_SEMAPHORE_SIGNAL_PARAMS; + CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS EXTERNAL_SEMAPHORE_WAIT_PARAMS; + CUstreamCaptureStatus streamCaptureStatus; + CUgraphNodeType graphNodeType; + CUDA_HOST_NODE_PARAMS host_node_params; +#endif + +#if CUDA_VERSION > 10000 + // CHECK: hipStreamCaptureMode streamCaptureMode; + CUstreamCaptureMode streamCaptureMode; +#endif + +#if CUDA_VERSION > 10010 + // CHECK: hipGraphExecUpdateResult graphExecUpdateResult; + CUgraphExecUpdateResult graphExecUpdateResult; +#endif + + // CUDA: CUresult CUDAAPI cuInit(unsigned int Flags); + // HIP: hipError_t hipInit(unsigned int flags); + // CHECK: hipError_t result = hipInit(flags); + CUresult result = cuInit(flags); + + int driverVersion = 0; + // CUDA: CUresult CUDAAPI cuDriverGetVersion(int *driverVersion); + // HIP: hipError_t hipDriverGetVersion(int* driverVersion); + // CHECK: result = hipDriverGetVersion(&driverVersion); + result = cuDriverGetVersion(&driverVersion); + + int ordinal = 0; + // CUDA: CUresult CUDAAPI cuDeviceGet(CUdevice *device, int ordinal); + // HIP: hipError_t hipDeviceGet(hipDevice_t* device, int ordinal); + // CHECK: result = hipDeviceGet(&device, ordinal); + result = cuDeviceGet(&device, ordinal); + + int pi = 0; + // CHECK: hipDeviceAttribute_t device_attribute = hipDeviceAttributePciBusId; + CUdevice_attribute device_attribute = CU_DEVICE_ATTRIBUTE_PCI_BUS_ID; + // CUDA: CUresult CUDAAPI cuDeviceGetAttribute(int *pi, CUdevice_attribute attrib, CUdevice dev); + // HIP: hipError_t hipDeviceGetAttribute(int* pi, hipDeviceAttribute_t attr, int deviceId); + // CHECK: result = hipDeviceGetAttribute(&pi, device_attribute, device); + result = cuDeviceGetAttribute(&pi, device_attribute, device); + + int count = 0; + // CUDA: CUresult CUDAAPI cuDeviceGetCount(int *count); + // HIP: hipError_t hipGetDeviceCount(int* count); + // CHECK: result = hipGetDeviceCount(&count); + result = cuDeviceGetCount(&count); + + // CUDA: CUresult CUDAAPI cuDeviceTotalMem(size_t *bytes, CUdevice dev); + // HIP: hipError_t hipDeviceTotalMem(size_t* bytes, hipDevice_t device); + // CHECK: result = hipDeviceTotalMem(&bytes, device); + // CHECK-NEXT: result = hipDeviceTotalMem(&bytes, device); + result = cuDeviceTotalMem(&bytes, device); + result = cuDeviceTotalMem_v2(&bytes, device); + + int major = 0, minor = 0; + // CUDA: __CUDA_DEPRECATED CUresult CUDAAPI cuDeviceComputeCapability(int *major, int *minor, CUdevice dev); + // HIP: hipError_t hipDeviceComputeCapability(int* major, int* minor, hipDevice_t device); + // CHECK: result = hipDeviceComputeCapability(&major, &minor, device); + result = cuDeviceComputeCapability(&major, &minor, device); + + int active = 0; + // CUDA: CUresult CUDAAPI cuDevicePrimaryCtxGetState(CUdevice dev, unsigned int *flags, int *active); + // HIP: hipError_t hipDevicePrimaryCtxGetState(hipDevice_t dev, unsigned int* flags, int* active); + // CHECK: result = hipDevicePrimaryCtxGetState(device, &flags, &active); + result = cuDevicePrimaryCtxGetState(device, &flags, &active); + + // CUDA: CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev); + // HIP: hipError_t hipDevicePrimaryCtxRelease(hipDevice_t dev); + // CHECK: result = hipDevicePrimaryCtxRelease(device); + result = 
cuDevicePrimaryCtxRelease(device); +#if CUDA_VERSION > 10020 + // CHECK: result = hipDevicePrimaryCtxRelease(device); + result = cuDevicePrimaryCtxRelease_v2(device); +#endif + + // CUDA: CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev); + // HIP: hipError_t hipDevicePrimaryCtxReset(hipDevice_t dev); + // CHECK: result = hipDevicePrimaryCtxReset(device); + result = cuDevicePrimaryCtxReset(device); +#if CUDA_VERSION > 10020 + // CHECK: result = hipDevicePrimaryCtxReset(device); + result = cuDevicePrimaryCtxReset_v2(device); +#endif + + // CUDA: CUresult CUDAAPI cuDevicePrimaryCtxRetain(CUcontext *pctx, CUdevice dev); + // HIP: hipError_t hipDevicePrimaryCtxRetain(hipCtx_t* pctx, hipDevice_t dev); + // CHECK: result = hipDevicePrimaryCtxRetain(&context, device); + result = cuDevicePrimaryCtxRetain(&context, device); + + // CUDA: CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags); + // HIP: hipError_t hipDevicePrimaryCtxSetFlags(hipDevice_t dev, unsigned int flags); + // CHECK: result = hipDevicePrimaryCtxSetFlags(device, flags); + result = cuDevicePrimaryCtxSetFlags(device, flags); +#if CUDA_VERSION > 10020 + // CHECK: result = hipDevicePrimaryCtxSetFlags(device, flags); + result = cuDevicePrimaryCtxSetFlags_v2(device, flags); +#endif + + // CUDA: CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxCreate(hipCtx_t *ctx, unsigned int flags, hipDevice_t device); + // CHECK: result = hipCtxCreate(&context, flags, device); + // CHECK-NEXT: result = hipCtxCreate(&context, flags, device); + result = cuCtxCreate(&context, flags, device); + result = cuCtxCreate_v2(&context, flags, device); + + // CUDA: CUresult CUDAAPI cuCtxDestroy(CUcontext ctx); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxDestroy(hipCtx_t ctx); + // CHECK: result = hipCtxDestroy(context); + // CHECK-NEXT: result = hipCtxDestroy(context); + result = cuCtxDestroy(context); + result = cuCtxDestroy_v2(context); + + unsigned int version = 0; + // CUDA: CUresult CUDAAPI cuCtxGetApiVersion(CUcontext ctx, unsigned int *version); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetApiVersion(hipCtx_t ctx, int* apiVersion); + // CHECK: result = hipCtxGetApiVersion(context, &version); + result = cuCtxGetApiVersion(context, &version); + + // CUDA: CUresult CUDAAPI cuCtxGetCacheConfig(CUfunc_cache *pconfig); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetCacheConfig(hipFuncCache_t* cacheConfig); + // CHECK: result = hipCtxGetCacheConfig(&func_cache); + result = cuCtxGetCacheConfig(&func_cache); + + // CUDA: CUresult CUDAAPI cuCtxGetCurrent(CUcontext *pctx); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetCurrent(hipCtx_t* ctx); + // CHECK: result = hipCtxGetCurrent(&context); + result = cuCtxGetCurrent(&context); + + // CUDA: CUresult CUDAAPI cuCtxGetDevice(CUdevice *device); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetDevice(hipDevice_t* device); + // CHECK: result = hipCtxGetDevice(&device); + result = cuCtxGetDevice(&device); + + // CUDA: CUresult CUDAAPI cuCtxGetFlags(unsigned int *flags); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetFlags(unsigned int* flags); + // CHECK: result = hipCtxGetFlags(&flags); + result = cuCtxGetFlags(&flags); + + size_t pvalue = 0; + // CUDA: CUresult CUDAAPI cuCtxGetLimit(size_t *pvalue, CUlimit limit); + // HIP: hipError_t hipDeviceGetLimit(size_t* pValue, enum hipLimit_t limit); + // CHECK: result = hipDeviceGetLimit(&pvalue, limit); + 
result = cuCtxGetLimit(&pvalue, limit); + + // CUDA: CUresult CUDAAPI cuCtxGetSharedMemConfig(CUsharedconfig *pConfig); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxGetSharedMemConfig(hipSharedMemConfig* pConfig); + // CHECK: result = hipCtxGetSharedMemConfig(&pconfig); + result = cuCtxGetSharedMemConfig(&pconfig); + + int leastPriority = 0, greatestPriority = 0; + // CUDA: CUresult CUDAAPI cuCtxGetStreamPriorityRange(int *leastPriority, int *greatestPriority); + // HIP: hipError_t hipDeviceGetStreamPriorityRange(int* leastPriority, int* greatestPriority); + // CHECK: result = hipDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority); + result = cuCtxGetStreamPriorityRange(&leastPriority, &greatestPriority); + + // CUDA: CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxPopCurrent(hipCtx_t* ctx); + // CHECK: result = hipCtxPopCurrent(&context); + // CHECK-NEXT: result = hipCtxPopCurrent(&context); + result = cuCtxPopCurrent(&context); + result = cuCtxPopCurrent_v2(&context); + + // CUDA: CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxPushCurrent(hipCtx_t ctx); + // CHECK: result = hipCtxPushCurrent(context); + // CHECK-NEXT: result = hipCtxPushCurrent(context); + result = cuCtxPushCurrent(context); + result = cuCtxPushCurrent_v2(context); + + // CUDA: CUresult CUDAAPI cuCtxSetCacheConfig(CUfunc_cache config); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetCacheConfig(hipFuncCache_t cacheConfig); + // CHECK: result = hipCtxSetCacheConfig(func_cache); + result = cuCtxSetCacheConfig(func_cache); + + // CUDA: CUresult CUDAAPI cuCtxSetCurrent(CUcontext ctx); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetCurrent(hipCtx_t ctx); + // CHECK: result = hipCtxSetCurrent(context); + result = cuCtxSetCurrent(context); + + // CUDA: CUresult CUDAAPI cuCtxSetSharedMemConfig(CUsharedconfig config); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSetSharedMemConfig(hipSharedMemConfig config); + // CHECK: result = hipCtxSetSharedMemConfig(pconfig); + result = cuCtxSetSharedMemConfig(pconfig); + + // CUDA: CUresult CUDAAPI cuCtxSynchronize(void); + // HIP: DEPRECATED(DEPRECATED_MSG) hipError_t hipCtxSynchronize(void); + // CHECK: result = hipCtxSynchronize(); + result = cuCtxSynchronize(); + + // CUDA: CUresult CUDAAPI cuModuleGetFunction(CUfunction *hfunc, CUmodule hmod, const char *name); + // HIP: hipError_t hipModuleGetFunction(hipFunction_t* function, hipModule_t module, const char* kname); + // CHECK: result = hipModuleGetFunction(&function, module_, name.c_str()); + result = cuModuleGetFunction(&function, module_, name.c_str()); + + // CUDA: CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr *dptr, size_t *bytes, CUmodule hmod, const char *name); + // HIP: hipError_t hipModuleGetGlobal(hipDeviceptr_t* dptr, size_t* bytes, hipModule_t hmod, const char* name); + // CHECK: result = hipModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str()); + // CHECK-NEXT: result = hipModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str()); + result = cuModuleGetGlobal(&deviceptr, &bytes, module_, name.c_str()); + result = cuModuleGetGlobal_v2(&deviceptr, &bytes, module_, name.c_str()); + + // CUDA: CUresult CUDAAPI cuModuleGetTexRef(CUtexref *pTexRef, CUmodule hmod, const char *name); + // HIP: hipError_t hipModuleGetTexRef(textureReference** texRef, hipModule_t hmod, const char* name); + // CHECK: result = hipModuleGetTexRef(&texref, module_, name.c_str()); + 
result = cuModuleGetTexRef(&texref, module_, name.c_str()); + + // CUDA: CUresult CUDAAPI cuModuleLoad(CUmodule *module, const char *fname); + // HIP: hipError_t hipModuleLoad(hipModule_t* module, const char* fname); + // CHECK: result = hipModuleLoad(&module_, name.c_str()); + result = cuModuleLoad(&module_, name.c_str()); + + // CUDA: CUresult CUDAAPI cuModuleLoadData(CUmodule *module, const void *image); + // HIP: hipError_t hipModuleLoadData(hipModule_t* module, const void* image); + // CHECK: result = hipModuleLoadData(&module_, image); + result = cuModuleLoadData(&module_, image); + + unsigned int numOptions = 0; + void* optionValues = nullptr; + // CUDA: CUresult CUDAAPI cuModuleLoadDataEx(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues); + // HIP: hipError_t hipModuleLoadDataEx(hipModule_t* module, const void* image, unsigned int numOptions, hipJitOption* options, void** optionValues); + // CHECK: result = hipModuleLoadDataEx(&module_, image, numOptions, &jit_option, &optionValues); + result = cuModuleLoadDataEx(&module_, image, numOptions, &jit_option, &optionValues); + + // CUDA: CUresult CUDAAPI cuModuleUnload(CUmodule hmod); + // HIP: hipError_t hipModuleUnload(hipModule_t module); + // CHECK: result = hipModuleUnload(module_); + result = cuModuleUnload(module_); + + // CUDA: CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pAllocateArray); + // HIP: hipError_t hipArray3DCreate(hipArray** array, const HIP_ARRAY3D_DESCRIPTOR* pAllocateArray); + // CHECK: result = hipArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR); + // CHECK-NEXT: result = hipArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR); + result = cuArray3DCreate(&array_, &ARRAY3D_DESCRIPTOR); + result = cuArray3DCreate_v2(&array_, &ARRAY3D_DESCRIPTOR); + + // CUDA: CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR *pAllocateArray); + // HIP: hipError_t hipArrayCreate(hipArray** pHandle, const HIP_ARRAY_DESCRIPTOR* pAllocateArray); + // CHECK: result = hipArrayCreate(&array_, &ARRAY_DESCRIPTOR); + // CHECK: result = hipArrayCreate(&array_, &ARRAY_DESCRIPTOR); + result = cuArrayCreate(&array_, &ARRAY_DESCRIPTOR); + result = cuArrayCreate_v2(&array_, &ARRAY_DESCRIPTOR); + + // CUDA: CUresult CUDAAPI cuArrayDestroy(CUarray hArray); + // HIP: hipError_t hipArrayDestroy(hipArray* array); + // CHECK: result = hipArrayDestroy(array_); + result = cuArrayDestroy(array_); + + std::string pciBusId; + // CUDA: CUresult CUDAAPI cuDeviceGetByPCIBusId(CUdevice *dev, const char *pciBusId); + // HIP: hipError_t hipDeviceGetByPCIBusId(int* device, const char* pciBusId); + // CHECK: result = hipDeviceGetByPCIBusId(&device, pciBusId.c_str()); + result = cuDeviceGetByPCIBusId(&device, pciBusId.c_str()); + + int len = 0; + char* pciBusId_ = const_cast<char*>(pciBusId.c_str()); + // CUDA: CUresult CUDAAPI cuDeviceGetPCIBusId(char *pciBusId, int len, CUdevice dev); + // HIP: hipError_t hipDeviceGetPCIBusId(char* pciBusId, int len, int device); + // CHECK: result = hipDeviceGetPCIBusId(pciBusId_, len, device); + result = cuDeviceGetPCIBusId(pciBusId_, len, device); + + // CUDA: CUresult CUDAAPI cuIpcCloseMemHandle(CUdeviceptr dptr); + // HIP: hipError_t hipIpcCloseMemHandle(void* devPtr); + // CHECK: result = hipIpcCloseMemHandle(deviceptr); + result = cuIpcCloseMemHandle(deviceptr); + + // CUDA: CUresult CUDAAPI cuIpcGetEventHandle(CUipcEventHandle *pHandle, CUevent event); + // HIP: hipError_t hipIpcGetEventHandle(hipIpcEventHandle_t* handle, 
hipEvent_t event); + // CHECK: result = hipIpcGetEventHandle(&ipcEventHandle, event_); + result = cuIpcGetEventHandle(&ipcEventHandle, event_); + + // CUDA: CUresult CUDAAPI cuIpcGetMemHandle(CUipcMemHandle *pHandle, CUdeviceptr dptr); + // HIP: hipError_t hipIpcGetMemHandle(hipIpcMemHandle_t* handle, void* devPtr); + // CHECK: result = hipIpcGetMemHandle(&ipcMemHandle, deviceptr); + result = cuIpcGetMemHandle(&ipcMemHandle, deviceptr); + + // CUDA: CUresult CUDAAPI cuIpcOpenEventHandle(CUevent *phEvent, CUipcEventHandle handle); + // HIP: hipError_t hipIpcOpenEventHandle(hipEvent_t* event, hipIpcEventHandle_t handle); + // CHECK: result = hipIpcOpenEventHandle(&event_, ipcEventHandle); + result = cuIpcOpenEventHandle(&event_, ipcEventHandle); + + // CUDA: CUresult CUDAAPI cuIpcOpenMemHandle(CUdeviceptr *pdptr, CUipcMemHandle handle, unsigned int Flags); + // HIP: hipError_t hipIpcOpenMemHandle(void** devPtr, hipIpcMemHandle_t handle, unsigned int flags); + // CHECK: result = hipIpcOpenMemHandle(&deviceptr, ipcMemHandle, flags); + result = cuIpcOpenMemHandle(&deviceptr, ipcMemHandle, flags); + + // CUDA: CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize); + // HIP: hipError_t hipMalloc(void** ptr, size_t size); + // CHECK: result = hipMalloc(&deviceptr, bytes); + // CHECK-NEXT: result = hipMalloc(&deviceptr, bytes); + result = cuMemAlloc(&deviceptr, bytes); + result = cuMemAlloc_v2(&deviceptr, bytes); + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + ///////////// TODO: Get rid of additional attribute 'unsigned int flags' used by HIP without a default value /////// + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + + // CUDA: CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize); + // HIP: DEPRECATED("use hipHostMalloc instead") hipError_t hipHostAlloc(void** ptr, size_t size, unsigned int flags); + // TODO: should be hipHostAlloc(&image, bytes, 0); + // CHECK: result = hipHostAlloc(&image, bytes); + // CHECK-NEXT: result = hipHostAlloc(&image, bytes); + result = cuMemAllocHost(&image, bytes); + result = cuMemAllocHost_v2(&image, bytes); + + // CUDA: CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize, unsigned int flags); + // HIP: hipError_t hipMallocManaged(void** dev_ptr, size_t size, unsigned int flags __dparm(hipMemAttachGlobal)); + // CHECK: result = hipMallocManaged(&deviceptr, bytes, flags); + result = cuMemAllocManaged(&deviceptr, bytes, flags); + + size_t pitch = 0, width = 0, height = 0; + // CUDA: CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes); + // HIP: hipError_t hipMemAllocPitch(hipDeviceptr_t* dptr, size_t* pitch, size_t widthInBytes, size_t height, unsigned int elementSizeBytes); + // CHECK: result = hipMemAllocPitch(&deviceptr, &pitch, width, height, bytes); + // CHECK-NEXT: result = hipMemAllocPitch(&deviceptr, &pitch, width, height, bytes); + result = cuMemAllocPitch(&deviceptr, &pitch, width, height, bytes); + result = cuMemAllocPitch_v2(&deviceptr, &pitch, width, height, bytes); + + // CUDA: CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D *pCopy); + // HIP: hipError_t hipMemcpyParam2D(const hip_Memcpy2D* pCopy); + // CHECK: result = hipMemcpyParam2D(&MEMCPY2D); + // CHECK-NEXT: result = hipMemcpyParam2D(&MEMCPY2D); + result = cuMemcpy2D(&MEMCPY2D); + result = cuMemcpy2D_v2(&MEMCPY2D); + + // CUDA: 
CUresult CUDAAPI cuMemcpy2DAsync(const CUDA_MEMCPY2D *pCopy, CUstream hStream); + // HIP: hipError_t hipMemcpyParam2DAsync(const hip_Memcpy2D* pCopy, hipStream_t stream __dparm(0)); + // CHECK: result = hipMemcpyParam2DAsync(&MEMCPY2D, stream); + // CHECK-NEXT: result = hipMemcpyParam2DAsync(&MEMCPY2D, stream); + result = cuMemcpy2DAsync(&MEMCPY2D, stream); + result = cuMemcpy2DAsync_v2(&MEMCPY2D, stream); + + // CUDA: CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D *pCopy); + // HIP: hipError_t hipDrvMemcpy2DUnaligned(const hip_Memcpy2D* pCopy); + // CHECK: result = hipDrvMemcpy2DUnaligned(&MEMCPY2D); + // CHECK-NEXT: result = hipDrvMemcpy2DUnaligned(&MEMCPY2D); + result = cuMemcpy2DUnaligned(&MEMCPY2D); + result = cuMemcpy2DUnaligned_v2(&MEMCPY2D); + + // CUDA: CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D *pCopy); + // HIP: hipError_t hipDrvMemcpy3D(const HIP_MEMCPY3D* pCopy); + // CHECK: result = hipDrvMemcpy3D(&MEMCPY3D); + // CHECK-NEXT: result = hipDrvMemcpy3D(&MEMCPY3D); + result = cuMemcpy3D(&MEMCPY3D); + result = cuMemcpy3D_v2(&MEMCPY3D); + + // CUDA: CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D *pCopy, CUstream hStream); + // HIP: hipError_t hipDrvMemcpy3DAsync(const HIP_MEMCPY3D* pCopy, hipStream_t stream); + // CHECK: result = hipDrvMemcpy3DAsync(&MEMCPY3D, stream); + // CHECK-NEXT: result = hipDrvMemcpy3DAsync(&MEMCPY3D, stream); + result = cuMemcpy3DAsync(&MEMCPY3D, stream); + result = cuMemcpy3DAsync_v2(&MEMCPY3D, stream); + + void* dsthost = nullptr; + size_t offset = 0; + // CUDA: CUresult CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount); + // HIP: hipError_t hipMemcpyAtoH(void* dst, hipArray* srcArray, size_t srcOffset, size_t count); + // CHECK: result = hipMemcpyAtoH(dsthost, array_, offset, bytes); + // CHECK-NEXT: result = hipMemcpyAtoH(dsthost, array_, offset, bytes); + result = cuMemcpyAtoH(dsthost, array_, offset, bytes); + result = cuMemcpyAtoH_v2(dsthost, array_, offset, bytes); + + // CUDA: CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount); + // HIP: hipError_t hipMemcpyDtoD(hipDeviceptr_t dst, hipDeviceptr_t src, size_t sizeBytes); + // CHECK: result = hipMemcpyDtoD(deviceptr, deviceptr, bytes); + // CHECK-NEXT: result = hipMemcpyDtoD(deviceptr, deviceptr, bytes); + result = cuMemcpyDtoD(deviceptr, deviceptr, bytes); + result = cuMemcpyDtoD_v2(deviceptr, deviceptr, bytes); + + // CUDA: CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); + // HIP: hipError_t hipMemcpyDtoDAsync(hipDeviceptr_t dst, hipDeviceptr_t src, size_t sizeBytes, hipStream_t stream); + // CHECK: result = hipMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream); + // CHECK-NEXT: result = hipMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream); + result = cuMemcpyDtoDAsync(deviceptr, deviceptr, bytes, stream); + result = cuMemcpyDtoDAsync_v2(deviceptr, deviceptr, bytes, stream); + + // CUDA: CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount); + // HIP: hipError_t hipMemcpyDtoH(void* dst, hipDeviceptr_t src, size_t sizeBytes); + // CHECK: result = hipMemcpyDtoH(dsthost, deviceptr, bytes); + // CHECK-NEXT: result = hipMemcpyDtoH(dsthost, deviceptr, bytes); + result = cuMemcpyDtoH(dsthost, deviceptr, bytes); + result = cuMemcpyDtoH_v2(dsthost, deviceptr, bytes); + + // CUDA: CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); + // HIP: 
hipError_t hipMemcpyDtoHAsync(void* dst, hipDeviceptr_t src, size_t sizeBytes, hipStream_t stream); + // CHECK: result = hipMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream); + // CHECK-NEXT: result = hipMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream); + result = cuMemcpyDtoHAsync(dsthost, deviceptr, bytes, stream); + result = cuMemcpyDtoHAsync_v2(dsthost, deviceptr, bytes, stream); + + // CUDA: CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount); + // HIP: hipError_t hipMemcpyHtoA(hipArray* dstArray, size_t dstOffset, const void* srcHost, size_t count); + // CHECK: result = hipMemcpyHtoA(array_, offset, dsthost, bytes); + // CHECK-NEXT: result = hipMemcpyHtoA(array_, offset, dsthost, bytes); + result = cuMemcpyHtoA(array_, offset, dsthost, bytes); + result = cuMemcpyHtoA_v2(array_, offset, dsthost, bytes); + + // CUDA: CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount); + // HIP: hipError_t hipMemcpyHtoD(hipDeviceptr_t dst, void* src, size_t sizeBytes); + // CHECK: result = hipMemcpyHtoD(deviceptr, dsthost, bytes); + // CHECK-NEXT: result = hipMemcpyHtoD(deviceptr, dsthost, bytes); + result = cuMemcpyHtoD(deviceptr, dsthost, bytes); + result = cuMemcpyHtoD_v2(deviceptr, dsthost, bytes); + + // CUDA: CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); + // HIP: hipError_t hipMemcpyHtoDAsync(hipDeviceptr_t dst, void* src, size_t sizeBytes, hipStream_t stream); + // CHECK: result = hipMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream); + // CHECK-NEXT: result = hipMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream); + result = cuMemcpyHtoDAsync(deviceptr, dsthost, bytes, stream); + result = cuMemcpyHtoDAsync_v2(deviceptr, dsthost, bytes, stream); + + // CUDA: CUresult CUDAAPI cuMemFree(CUdeviceptr dptr); + // HIP: hipError_t hipFree(void* ptr); + // CHECK: result = hipFree(deviceptr); + // CHECK-NEXT: result = hipFree(deviceptr); + result = cuMemFree(deviceptr); + result = cuMemFree_v2(deviceptr); + + // CUDA: CUresult CUDAAPI cuMemFreeHost(void *p); + // HIP: hipError_t hipHostFree(void* ptr); + // CHECK: result = hipHostFree(image); + result = cuMemFreeHost(image); + + // CUDA: CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr *pbase, size_t *psize, CUdeviceptr dptr); + // HIP: hipError_t hipMemGetAddressRange(hipDeviceptr_t* pbase, size_t* psize, hipDeviceptr_t dptr); + // CHECK: result = hipMemGetAddressRange(&deviceptr, &bytes, deviceptr_2); + // CHECK-NEXT: result = hipMemGetAddressRange(&deviceptr, &bytes, deviceptr_2); + result = cuMemGetAddressRange(&deviceptr, &bytes, deviceptr_2); + result = cuMemGetAddressRange_v2(&deviceptr, &bytes, deviceptr_2); + + // CUDA: CUresult CUDAAPI cuMemGetInfo(size_t *free, size_t *total); + // HIP: hipError_t hipMemGetInfo(size_t* free, size_t* total); + // CHECK: result = hipMemGetInfo(&bytes, &bytes_2); + // CHECK-NEXT: result = hipMemGetInfo(&bytes, &bytes_2); + result = cuMemGetInfo(&bytes, &bytes_2); + result = cuMemGetInfo_v2(&bytes, &bytes_2); + + // CUDA: CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize, unsigned int Flags); + // HIP: DEPRECATED("use hipHostMalloc instead") hipError_t hipHostAlloc(void** ptr, size_t size, unsigned int flags); + // CHECK: result = hipHostAlloc(&image, bytes, flags); + result = cuMemHostAlloc(&image, bytes, flags); + + // CUDA: CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr *pdptr, void *p, unsigned int Flags); + // HIP: hipError_t 
hipHostGetDevicePointer(void** devPtr, void* hstPtr, unsigned int flags); + // CHECK: result = hipHostGetDevicePointer(&deviceptr, image, flags); + // CHECK-NEXT: result = hipHostGetDevicePointer(&deviceptr, image, flags); + result = cuMemHostGetDevicePointer(&deviceptr, image, flags); + result = cuMemHostGetDevicePointer_v2(&deviceptr, image, flags); + + // CUDA: CUresult CUDAAPI cuMemHostGetFlags(unsigned int *pFlags, void *p); + // HIP: hipError_t hipHostGetFlags(&flags, image); + // CHECK: result = hipHostGetFlags(&flags, image); + result = cuMemHostGetFlags(&flags, image); + + // CUDA: CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags); + // HIP: hipError_t hipHostRegister(void* hostPtr, size_t sizeBytes, unsigned int flags); + // CHECK: result = hipHostRegister(image, bytes, flags); + // CHECK-NEXT: result = hipHostRegister(image, bytes, flags); + result = cuMemHostRegister(image, bytes, flags); + result = cuMemHostRegister_v2(image, bytes, flags); + + // CUDA: CUresult CUDAAPI cuMemHostUnregister(void *p); + // HIP: hipError_t hipHostUnregister(void* hostPtr); + // CHECK: result = hipHostUnregister(image); + result = cuMemHostUnregister(image); + + unsigned short us = 0; + // CUDA: CUresult CUDAAPI cuMemsetD16(CUdeviceptr dstDevice, unsigned short us, size_t N); + // HIP: hipError_t hipMemsetD16(hipDeviceptr_t dest, unsigned short value, size_t count); + // CHECK: result = hipMemsetD16(deviceptr, us, bytes); + // CHECK-NEXT: result = hipMemsetD16(deviceptr, us, bytes); + result = cuMemsetD16(deviceptr, us, bytes); + result = cuMemsetD16_v2(deviceptr, us, bytes); + + // CUDA: CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream); + // HIP: hipError_t hipMemsetD16Async(hipDeviceptr_t dest, unsigned short value, size_t count, hipStream_t stream __dparm(0)); + // CHECK: result = hipMemsetD16Async(deviceptr, us, bytes, stream); + result = cuMemsetD16Async(deviceptr, us, bytes, stream); + + // CUDA: CUresult CUDAAPI cuMemsetD32(CUdeviceptr dstDevice, unsigned int ui, size_t N) + // HIP: hipError_t hipMemsetD32(hipDeviceptr_t dest, int value, size_t count); + // CHECK: result = hipMemsetD32(deviceptr, flags, bytes); + // CHECK-NEXT: result = hipMemsetD32(deviceptr, flags, bytes); + result = cuMemsetD32(deviceptr, flags, bytes); + result = cuMemsetD32_v2(deviceptr, flags, bytes); + + // CUDA: CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream); + // HIP: hipError_t hipMemsetD32Async(hipDeviceptr_t dst, int value, size_t count, hipStream_t stream __dparm(0)); + // CHECK: result = hipMemsetD32Async(deviceptr, flags, bytes, stream); + result = cuMemsetD32Async(deviceptr, flags, bytes, stream); + + unsigned char uc = 0; + // CUDA: CUresult CUDAAPI cuMemsetD8(CUdeviceptr dstDevice, unsigned char uc, size_t N); + // HIP: hipError_t hipMemsetD8(hipDeviceptr_t dest, unsigned char value, size_t count); + // CHECK: result = hipMemsetD8(deviceptr, uc, bytes); + // CHECK-NEXT: result = hipMemsetD8(deviceptr, uc, bytes); + result = cuMemsetD8(deviceptr, uc, bytes); + result = cuMemsetD8_v2(deviceptr, uc, bytes); + + // CUDA: CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream); + // HIP: hipError_t hipMemsetD8Async(hipDeviceptr_t dest, unsigned char value, size_t count, hipStream_t stream __dparm(0)); + // CHECK: result = hipMemsetD8Async(deviceptr, uc, bytes, stream); + result = cuMemsetD8Async(deviceptr, uc, bytes, stream); + 
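A minimal sketch of how the memset, copy and free entries in this block compose in ordinary driver-API code (illustrative only, not part of the test; `scratch` and `hostBuf` are made-up names, the existing `result`, `stream` and `uc` variables are reused, and the `// hipify:` comments name the HIP call each line converts to):

    {
      CUdeviceptr scratch = 0;
      unsigned char hostBuf[64] = { 0 };
      result = cuMemAlloc(&scratch, sizeof(hostBuf));                 // hipify: hipMalloc
      result = cuMemsetD8Async(scratch, uc, sizeof(hostBuf), stream); // hipify: hipMemsetD8Async
      result = cuStreamSynchronize(stream);                           // hipify: hipStreamSynchronize
      result = cuMemcpyDtoH(hostBuf, scratch, sizeof(hostBuf));       // hipify: hipMemcpyDtoH
      result = cuMemFree(scratch);                                    // hipify: hipFree
    }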
+ // CUDA: CUresult CUDAAPI cuMipmappedArrayCreate(CUmipmappedArray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc, unsigned int numMipmapLevels); + // HIP: hipError_t hipMipmappedArrayCreate(hipMipmappedArray_t* pHandle, HIP_ARRAY3D_DESCRIPTOR* pMipmappedArrayDesc, unsigned int numMipmapLevels); + // CHECK: result = hipMipmappedArrayCreate(&mipmappedArray, &ARRAY3D_DESCRIPTOR, flags); + result = cuMipmappedArrayCreate(&mipmappedArray, &ARRAY3D_DESCRIPTOR, flags); + + // CUDA: CUresult CUDAAPI cuMipmappedArrayDestroy(CUmipmappedArray hMipmappedArray); + // HIP: hipError_t hipMipmappedArrayDestroy(hipMipmappedArray_t hMipmappedArray); + // CHECK: result = hipMipmappedArrayDestroy(mipmappedArray); + result = cuMipmappedArrayDestroy(mipmappedArray); + + // CUDA: CUresult CUDAAPI cuMipmappedArrayGetLevel(CUarray *pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level); + // HIP: hipError_t hipMipmappedArrayGetLevel(hipArray_t* pLevelArray, hipMipmappedArray_t hMipMappedArray, unsigned int level); + // CHECK: result = hipMipmappedArrayGetLevel(&array_, mipmappedArray, flags); + result = cuMipmappedArrayGetLevel(&array_, mipmappedArray, flags); + +#if CUDA_VERSION > 7050 + // CUDA: CUresult CUDAAPI cuMemAdvise(CUdeviceptr devPtr, size_t count, CUmem_advise advice, CUdevice device); + // HIP: hipError_t hipMemAdvise(const void* dev_ptr, size_t count, hipMemoryAdvise advice, int device); + // CHECK: result = hipMemAdvise(deviceptr, bytes, MemoryAdvise, device); + result = cuMemAdvise(deviceptr, bytes, MemoryAdvise, device); + + // CUDA: CUresult CUDAAPI cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream); + // HIP: hipError_t hipMemPrefetchAsync(const void* dev_ptr, size_t count, int device, hipStream_t stream __dparm(0)); + // CHECK: result = hipMemPrefetchAsync(deviceptr, bytes, device, stream); + result = cuMemPrefetchAsync(deviceptr, bytes, device, stream); + + // CUDA: CUresult CUDAAPI cuMemRangeGetAttribute(void *data, size_t dataSize, CUmem_range_attribute attribute, CUdeviceptr devPtr, size_t count); + // HIP: hipError_t hipMemRangeGetAttribute(void* data, size_t data_size, hipMemRangeAttribute attribute, const void* dev_ptr, size_t count); + // CHECK: result = hipMemRangeGetAttribute(image, bytes, MemoryRangeAttribute, deviceptr, bytes); + result = cuMemRangeGetAttribute(image, bytes, MemoryRangeAttribute, deviceptr, bytes); + + // CUDA: CUresult CUDAAPI cuMemRangeGetAttributes(void **data, size_t *dataSizes, CUmem_range_attribute *attributes, size_t numAttributes, CUdeviceptr devPtr, size_t count); + // HIP: hipError_t hipMemRangeGetAttributes(void** data, size_t* data_sizes, hipMemRangeAttribute* attributes, size_t num_attributes, const void* dev_ptr, size_t count); + // CHECK: result = hipMemRangeGetAttributes(&image, &bytes, &MemoryRangeAttribute, bytes, deviceptr, bytes); + result = cuMemRangeGetAttributes(&image, &bytes, &MemoryRangeAttribute, bytes, deviceptr, bytes); +#endif + + // CUDA: CUresult CUDAAPI cuPointerGetAttribute(void *data, CUpointer_attribute attribute, CUdeviceptr ptr); + // HIP: hipError_t hipPointerGetAttribute(void* data, hipPointer_attribute attribute, hipDeviceptr_t ptr); + // CHECK: result = hipPointerGetAttribute(image, pointer_attribute, deviceptr); + result = cuPointerGetAttribute(image, pointer_attribute, deviceptr); + + // CUDA: CUresult CUDAAPI cuPointerGetAttributes(unsigned int numAttributes, CUpointer_attribute *attributes, void **data, CUdeviceptr ptr); + // HIP: hipError_t 
hipDrvPointerGetAttributes(unsigned int numAttributes, hipPointer_attribute* attributes, void** data, hipDeviceptr_t ptr); + // CHECK: result = hipDrvPointerGetAttributes(flags, &pointer_attribute, &image, deviceptr); + result = cuPointerGetAttributes(flags, &pointer_attribute, &image, deviceptr); + + // CUDA: CUresult CUDAAPI cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); + // HIP: hipError_t hipStreamAddCallback(hipStream_t stream, hipStreamCallback_t callback, void* userData, unsigned int flags); + // CHECK: result = hipStreamAddCallback(stream, streamCallback, image, flags); + result = cuStreamAddCallback(stream, streamCallback, image, flags); + + // CUDA: CUresult CUDAAPI cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags); + // HIP: hipError_t hipStreamAttachMemAsync(hipStream_t stream, void* dev_ptr, size_t length __dparm(0), unsigned int flags __dparm(hipMemAttachSingle)); + // CHECK: result = hipStreamAttachMemAsync(stream, deviceptr, bytes, flags); + result = cuStreamAttachMemAsync(stream, deviceptr, bytes, flags); + +#if CUDA_VERSION > 10000 + // CUDA: CUresult CUDAAPI cuStreamBeginCapture(CUstream hStream, CUstreamCaptureMode mode); + // HIP: hipError_t hipStreamBeginCapture(hipStream_t stream, hipStreamCaptureMode mode); + // CHECK: result = hipStreamBeginCapture(stream, streamCaptureMode); + // CHECK-NEXT: result = hipStreamBeginCapture(stream, streamCaptureMode); + result = cuStreamBeginCapture(stream, streamCaptureMode); + result = cuStreamBeginCapture_v2(stream, streamCaptureMode); +#endif + + // CUDA: CUresult CUDAAPI cuStreamCreate(CUstream *phStream, unsigned int Flags); + // HIP: hipError_t hipStreamCreateWithFlags(hipStream_t* stream, unsigned int flags); + // CHECK: result = hipStreamCreateWithFlags(&stream, flags); + result = cuStreamCreate(&stream, flags); + + // CUDA: CUresult CUDAAPI cuStreamCreateWithPriority(CUstream *phStream, unsigned int flags, int priority); + // HIP: hipError_t hipStreamCreateWithPriority(hipStream_t* stream, unsigned int flags, int priority); + // CHECK: result = hipStreamCreateWithPriority(&stream, flags, leastPriority); + result = cuStreamCreateWithPriority(&stream, flags, leastPriority); + + // CUDA: CUresult CUDAAPI cuStreamDestroy(CUstream hStream); + // HIP: hipError_t hipStreamDestroy(hipStream_t stream); + // CHECK: result = hipStreamDestroy(stream); + // CHECK-NEXT: result = hipStreamDestroy(stream); + result = cuStreamDestroy(stream); + result = cuStreamDestroy_v2(stream); + +#if CUDA_VERSION > 9020 + // CUDA: CUresult CUDAAPI cuStreamEndCapture(CUstream hStream, CUgraph *phGraph); + // HIP: hipError_t hipStreamEndCapture(hipStream_t stream, hipGraph_t* pGraph); + // CHECK: result = hipStreamEndCapture(stream, &graph); + result = cuStreamEndCapture(stream, &graph); +#endif + + // CUDA: CUresult CUDAAPI cuStreamGetFlags(CUstream hStream, unsigned int *flags); + // HIP: hipError_t hipStreamGetFlags(hipStream_t stream, unsigned int* flags); + // CHECK: result = hipStreamGetFlags(stream, &flags); + result = cuStreamGetFlags(stream, &flags); + + // CUDA: CUresult CUDAAPI cuStreamGetPriority(CUstream hStream, int *priority); + // HIP: hipError_t hipStreamGetPriority(hipStream_t stream, int* priority); + // CHECK: result = hipStreamGetPriority(stream, &leastPriority); + result = cuStreamGetPriority(stream, &leastPriority); + + // CUDA: CUresult CUDAAPI cuStreamQuery(CUstream hStream); + // HIP: hipError_t hipStreamQuery(hipStream_t stream); 
+ // CHECK: result = hipStreamQuery(stream); + result = cuStreamQuery(stream); + + // CUDA: CUresult CUDAAPI cuStreamSynchronize(CUstream hStream); + // HIP: hipError_t hipStreamSynchronize(hipStream_t stream); + // CHECK: result = hipStreamSynchronize(stream); + result = cuStreamSynchronize(stream); + + // CUDA: CUresult CUDAAPI cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags); + // HIP: hipError_t hipStreamWaitEvent(hipStream_t stream, hipEvent_t event, unsigned int flags); + // CHECK: result = hipStreamWaitEvent(stream, event_, flags); + result = cuStreamWaitEvent(stream, event_, flags); + + // CUDA: CUresult CUDAAPI cuEventCreate(CUevent *phEvent, unsigned int Flags); + // HIP: hipError_t hipEventCreateWithFlags(hipEvent_t* event, unsigned flags); + // CHECK: result = hipEventCreateWithFlags(&event_, flags); + result = cuEventCreate(&event_, flags); + + // CUDA: CUresult CUDAAPI cuEventDestroy(CUevent hEvent); + // HIP: hipError_t hipEventDestroy(hipEvent_t event); + // CHECK: result = hipEventDestroy(event_); + // CHECK-NEXT: result = hipEventDestroy(event_); + result = cuEventDestroy(event_); + result = cuEventDestroy_v2(event_); + + // CUDA: CUresult CUDAAPI cuEventElapsedTime(float *pMilliseconds, CUevent hStart, CUevent hEnd); + // HIP: hipError_t hipEventElapsedTime(float* ms, hipEvent_t start, hipEvent_t stop); + // CHECK: result = hipEventElapsedTime(&ms, event_start, event_end); + result = cuEventElapsedTime(&ms, event_start, event_end); + + // CUDA: CUresult CUDAAPI cuEventRecord(CUevent hEvent, CUstream hStream); + // HIP: hipError_t hipEventRecord(hipEvent_t event, hipStream_t stream); + // CHECK: result = hipEventRecord(event_, stream); + result = cuEventRecord(event_, stream); + + // CUDA: CUresult CUDAAPI cuEventSynchronize(CUevent hEvent); + // HIP: hipError_t hipEventSynchronize(hipEvent_t event); + // CHECK: result = hipEventSynchronize(event_); + result = cuEventSynchronize(event_); + +#if CUDA_VERSION > 9020 + // CUDA: CUresult CUDAAPI cuDestroyExternalMemory(CUexternalMemory extMem); + // HIP: hipError_t hipDestroyExternalMemory(hipExternalMemory_t extMem); + // CHECK: result = hipDestroyExternalMemory(externalMemory); + result = cuDestroyExternalMemory(externalMemory); + + // CUDA: CUresult CUDAAPI cuDestroyExternalSemaphore(CUexternalSemaphore extSem); + // HIP: hipError_t hipDestroyExternalSemaphore(hipExternalSemaphore_t extSem); + // CHECK: result = hipDestroyExternalSemaphore(externalSemaphore); + result = cuDestroyExternalSemaphore(externalSemaphore); + + // CUDA: CUresult CUDAAPI cuExternalMemoryGetMappedBuffer(CUdeviceptr *devPtr, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_BUFFER_DESC *bufferDesc); + // HIP: hipError_t hipExternalMemoryGetMappedBuffer(void **devPtr, hipExternalMemory_t extMem, const hipExternalMemoryBufferDesc *bufferDesc); + // CHECK: result = hipExternalMemoryGetMappedBuffer(&deviceptr, externalMemory, &EXTERNAL_MEMORY_BUFFER_DESC); + result = cuExternalMemoryGetMappedBuffer(&deviceptr, externalMemory, &EXTERNAL_MEMORY_BUFFER_DESC); + + // CUDA: CUresult CUDAAPI cuImportExternalMemory(CUexternalMemory *extMem_out, const CUDA_EXTERNAL_MEMORY_HANDLE_DESC *memHandleDesc); + // HIP: hipError_t hipImportExternalMemory(hipExternalMemory_t* extMem_out, const hipExternalMemoryHandleDesc* memHandleDesc); + // CHECK: result = hipImportExternalMemory(&externalMemory, &EXTERNAL_MEMORY_HANDLE_DESC); + result = cuImportExternalMemory(&externalMemory, &EXTERNAL_MEMORY_HANDLE_DESC); + + // CUDA: CUresult CUDAAPI 
cuImportExternalSemaphore(CUexternalSemaphore *extSem_out, const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC *semHandleDesc); + // HIP: hipError_t hipImportExternalSemaphore(hipExternalSemaphore_t* extSem_out, const hipExternalSemaphoreHandleDesc* semHandleDesc); + // CHECK: result = hipImportExternalSemaphore(&externalSemaphore, &EXTERNAL_SEMAPHORE_HANDLE_DESC); + result = cuImportExternalSemaphore(&externalSemaphore, &EXTERNAL_SEMAPHORE_HANDLE_DESC); + + // CUDA: CUresult CUDAAPI cuSignalExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); + // HIP: hipError_t hipSignalExternalSemaphoresAsync(const hipExternalSemaphore_t* extSemArray, const hipExternalSemaphoreSignalParams* paramsArray, unsigned int numExtSems, hipStream_t stream); + // CHECK: result = hipSignalExternalSemaphoresAsync(&externalSemaphore, &EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, flags, stream); + result = cuSignalExternalSemaphoresAsync(&externalSemaphore, &EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, flags, stream); + + // CUDA: CUresult CUDAAPI cuWaitExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); + // HIP: hipError_t hipWaitExternalSemaphoresAsync(const hipExternalSemaphore_t* extSemArray, const hipExternalSemaphoreWaitParams* paramsArray, unsigned int numExtSems, hipStream_t stream); + // CHECK: result = hipWaitExternalSemaphoresAsync(&externalSemaphore, &EXTERNAL_SEMAPHORE_WAIT_PARAMS, flags, stream); + result = cuWaitExternalSemaphoresAsync(&externalSemaphore, &EXTERNAL_SEMAPHORE_WAIT_PARAMS, flags, stream); +#endif + +#if CUDA_VERSION > 7050 + // CUDA: CUresult CUDAAPI cuStreamWaitValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + // HIP: hipError_t hipStreamWaitValue32(hipStream_t stream, void* ptr, uint32_t value, unsigned int flags, uint32_t mask __dparm(0xFFFFFFFF)); + // CHECK: result = hipStreamWaitValue32(stream, deviceptr, u_value, flags); + result = cuStreamWaitValue32(stream, deviceptr, u_value, flags); + + // CUDA: CUresult CUDAAPI cuStreamWriteValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); + // HIP: hipError_t hipStreamWriteValue32(hipStream_t stream, void* ptr, uint32_t value, unsigned int flags, uint32_t mask __dparm(0xFFFFFFFF)); + // CHECK: result = hipStreamWriteValue32(stream, deviceptr, u_value, flags); + result = cuStreamWriteValue32(stream, deviceptr, u_value, flags); +#endif + +#if CUDA_VERSION > 8000 + // CUDA: CUresult CUDAAPI cuStreamWaitValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + // HIP: hipError_t hipStreamWaitValue64(hipStream_t stream, void* ptr, uint64_t value, unsigned int flags, uint64_t mask __dparm(0xFFFFFFFFFFFFFFFF)); + // CHECK: result = hipStreamWaitValue64(stream, deviceptr, u_value, flags); + result = cuStreamWaitValue64(stream, deviceptr, u_value, flags); + + // CUDA: CUresult CUDAAPI cuStreamWriteValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); + // HIP: hipError_t hipStreamWriteValue64(hipStream_t stream, void* ptr, uint64_t value, unsigned int flags, uint64_t mask __dparm(0xFFFFFFFFFFFFFFFF)); + // CHECK: result = hipStreamWriteValue64(stream, deviceptr, u_value, flags); + result = cuStreamWriteValue64(stream, deviceptr, u_value, flags); +#endif + + // CUDA: CUresult CUDAAPI cuFuncGetAttribute(int *pi, CUfunction_attribute attrib, 
CUfunction hfunc); + // HIP: hipError_t hipFuncGetAttribute(int* value, hipFunction_attribute attrib, hipFunction_t hfunc); + // CHECK: result = hipFuncGetAttribute(value, function_attribute, function); + result = cuFuncGetAttribute(value, function_attribute, function); + + unsigned int gridDimX = 0, gridDimY = 0, gridDimZ = 0, blockDimX = 0, blockDimY = 0, blockDimZ = 0, sharedMemBytes = 0; + void* kernelParams = nullptr, *extra = nullptr; + // CUDA: CUresult CUDAAPI cuLaunchKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra); + // HIP: hipError_t hipModuleLaunchKernel(hipFunction_t f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, hipStream_t stream, void** kernelParams, void** extra); + // CHECK: result = hipModuleLaunchKernel(function, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, stream, &kernelParams, &extra); + result = cuLaunchKernel(function, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, stream, &kernelParams, &extra); + +#if CUDA_VERSION > 9020 + // CUDA: CUresult CUDAAPI cuGraphAddDependencies(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); + // HIP: hipError_t hipGraphAddDependencies(hipGraph_t graph, const hipGraphNode_t* from, const hipGraphNode_t* to, size_t numDependencies); + // CHECK: result = hipGraphAddDependencies(graph, &graphNode, &graphNode2, bytes); + result = cuGraphAddDependencies(graph, &graphNode, &graphNode2, bytes); + + // CUDA: CUresult CUDAAPI cuGraphAddEmptyNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies); + // HIP: hipError_t hipGraphAddEmptyNode(hipGraphNode_t* pGraphNode, hipGraph_t graph, const hipGraphNode_t* pDependencies, size_t numDependencies); + // CHECK: result = hipGraphAddEmptyNode(&graphNode, graph, &graphNode2, bytes); + result = cuGraphAddEmptyNode(&graphNode, graph, &graphNode2, bytes); + + // CUDA: CUresult CUDAAPI cuGraphAddKernelNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS *nodeParams); + // HIP: hipError_t hipGraphAddKernelNode(hipGraphNode_t* pGraphNode, hipGraph_t graph, const hipGraphNode_t* pDependencies, size_t numDependencies, const hipKernelNodeParams* pNodeParams); + // CHECK: result = hipGraphAddKernelNode(&graphNode, graph, &graphNode2, bytes, &KERNEL_NODE_PARAMS); + result = cuGraphAddKernelNode(&graphNode, graph, &graphNode2, bytes, &KERNEL_NODE_PARAMS); + + // CUDA: CUresult CUDAAPI cuGraphCreate(CUgraph *phGraph, unsigned int flags); + // HIP: hipError_t hipGraphCreate(hipGraph_t* pGraph, unsigned int flags); + // CHECK: result = hipGraphCreate(&graph, flags); + result = cuGraphCreate(&graph, flags); + + // CUDA: CUresult CUDAAPI cuGraphDestroy(CUgraph hGraph); + // HIP: hipError_t hipGraphDestroy(hipGraph_t graph); + // CHECK: result = hipGraphDestroy(graph); + result = cuGraphDestroy(graph); + + // CUDA: CUresult CUDAAPI cuGraphExecDestroy(CUgraphExec hGraphExec); + // HIP: hipError_t hipGraphExecDestroy(hipGraphExec_t pGraphExec); + // CHECK: result = hipGraphExecDestroy(graphExec); + result = cuGraphExecDestroy(graphExec); + + // CUDA: CUresult CUDAAPI 
cuGraphGetNodes(CUgraph hGraph, CUgraphNode *nodes, size_t *numNodes); + // HIP: hipError_t hipGraphGetNodes(hipGraph_t graph, hipGraphNode_t* nodes, size_t* numNodes); + // CHECK: result = hipGraphGetNodes(graph, &graphNode, &bytes); + result = cuGraphGetNodes(graph, &graphNode, &bytes); + + // CUDA: CUresult CUDAAPI cuGraphGetRootNodes(CUgraph hGraph, CUgraphNode *rootNodes, size_t *numRootNodes); + // HIP: hipError_t hipGraphGetRootNodes(hipGraph_t graph, hipGraphNode_t* pRootNodes, size_t* pNumRootNodes); + // CHECK: result = hipGraphGetRootNodes(graph, &graphNode, &bytes); + result = cuGraphGetRootNodes(graph, &graphNode, &bytes); + + // CUDA: CUresult CUDAAPI cuGraphInstantiate(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); + // HIP: hipError_t hipGraphInstantiate(hipGraphExec_t* pGraphExec, hipGraph_t graph, hipGraphNode_t* pErrorNode, char* pLogBuffer, size_t bufferSize); + // CHECK: result = hipGraphInstantiate(&graphExec, graph, &graphNode, nullptr, bytes); + // CHECK-NEXT: result = hipGraphInstantiate(&graphExec, graph, &graphNode, nullptr, bytes); + result = cuGraphInstantiate(&graphExec, graph, &graphNode, nullptr, bytes); + result = cuGraphInstantiate_v2(&graphExec, graph, &graphNode, nullptr, bytes); + + // CUDA: CUresult CUDAAPI cuGraphKernelNodeGetParams(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS *nodeParams); + // HIP: hipError_t hipGraphKernelNodeGetParams(hipGraphNode_t node, hipKernelNodeParams* pNodeParams); + // CHECK: result = hipGraphKernelNodeGetParams(graphNode, &KERNEL_NODE_PARAMS); + result = cuGraphKernelNodeGetParams(graphNode, &KERNEL_NODE_PARAMS); + + // CUDA: CUresult CUDAAPI cuGraphKernelNodeSetParams(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS *nodeParams); + // HIP: hipError_t hipGraphKernelNodeSetParams(hipGraphNode_t node, const hipKernelNodeParams* pNodeParams); + // CHECK: result = hipGraphKernelNodeSetParams(graphNode, &KERNEL_NODE_PARAMS); + result = cuGraphKernelNodeSetParams(graphNode, &KERNEL_NODE_PARAMS); + + // CUDA: CUresult CUDAAPI cuGraphLaunch(CUgraphExec hGraphExec, CUstream hStream); + // HIP: hipError_t hipGraphLaunch(hipGraphExec_t graphExec, hipStream_t stream); + // CHECK: result = hipGraphLaunch(graphExec, stream); + result = cuGraphLaunch(graphExec, stream); + + // CUDA: CUresult CUDAAPI cuGraphMemcpyNodeGetParams(CUgraphNode hNode, CUDA_MEMCPY3D *nodeParams); + // HIP: hipError_t hipGraphMemcpyNodeGetParams(hipGraphNode_t node, hipMemcpy3DParms* pNodeParams); + // CHECK: result = hipGraphMemcpyNodeGetParams(graphNode, &MEMCPY3D); + result = cuGraphMemcpyNodeGetParams(graphNode, &MEMCPY3D); + + // CUDA: CUresult CUDAAPI cuGraphMemcpyNodeSetParams(CUgraphNode hNode, const CUDA_MEMCPY3D *nodeParams); + // HIP: hipError_t hipGraphMemcpyNodeSetParams(hipGraphNode_t node, const hipMemcpy3DParms* pNodeParams); + // CHECK: result = hipGraphMemcpyNodeSetParams(graphNode, &MEMCPY3D); + result = cuGraphMemcpyNodeSetParams(graphNode, &MEMCPY3D); + + // CUDA: CUresult CUDAAPI cuGraphMemsetNodeGetParams(CUgraphNode hNode, CUDA_MEMSET_NODE_PARAMS *nodeParams); + // HIP: hipError_t hipGraphMemsetNodeGetParams(hipGraphNode_t node, hipMemsetParams* pNodeParams); + // CHECK: result = hipGraphMemsetNodeGetParams(graphNode, &MEMSET_NODE_PARAMS); + result = cuGraphMemsetNodeGetParams(graphNode, &MEMSET_NODE_PARAMS); + + // CUDA: CUresult CUDAAPI cuGraphMemsetNodeSetParams(CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS *nodeParams); + // HIP: hipError_t 
hipGraphMemsetNodeSetParams(hipGraphNode_t node, const hipMemsetParams* pNodeParams); + // CHECK: result = hipGraphMemsetNodeSetParams(graphNode, &MEMSET_NODE_PARAMS); + result = cuGraphMemsetNodeSetParams(graphNode, &MEMSET_NODE_PARAMS); + + // CUDA: CUresult CUDAAPI cuGraphGetEdges(CUgraph hGraph, CUgraphNode *from, CUgraphNode *to, size_t *numEdges); + // HIP: hipError_t hipGraphGetEdges(hipGraph_t graph, hipGraphNode_t* from, hipGraphNode_t* to, size_t* numEdges); + // CHECK: result = hipGraphGetEdges(graph, &graphNode, &graphNode2, &bytes); + result = cuGraphGetEdges(graph, &graphNode, &graphNode2, &bytes); + + // CUDA: CUresult CUDAAPI cuGraphNodeGetDependencies(CUgraphNode hNode, CUgraphNode *dependencies, size_t *numDependencies); + // HIP: hipError_t hipGraphNodeGetDependencies(hipGraphNode_t node, hipGraphNode_t* pDependencies, size_t* pNumDependencies); + // CHECK: result = hipGraphNodeGetDependencies(graphNode, &graphNode2, &bytes); + result = cuGraphNodeGetDependencies(graphNode, &graphNode2, &bytes); + + // CUDA: CUresult CUDAAPI cuGraphRemoveDependencies(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); + // HIP: hipError_t hipGraphRemoveDependencies(hipGraph_t graph, const hipGraphNode_t* from, const hipGraphNode_t* to, size_t numDependencies); + // CHECK: result = hipGraphRemoveDependencies(graph, &graphNode, &graphNode2, bytes); + result = cuGraphRemoveDependencies(graph, &graphNode, &graphNode2, bytes); + + // CUDA: CUresult CUDAAPI cuGraphNodeGetDependentNodes(CUgraphNode hNode, CUgraphNode *dependentNodes, size_t *numDependentNodes); + // HIP: hipError_t hipGraphNodeGetDependentNodes(hipGraphNode_t node, hipGraphNode_t* pDependentNodes, size_t* pNumDependentNodes); + // CHECK: result = hipGraphNodeGetDependentNodes(graphNode, &graphNode2, &bytes); + result = cuGraphNodeGetDependentNodes(graphNode, &graphNode2, &bytes); + + // CUDA: CUresult CUDAAPI cuGraphNodeGetType(CUgraphNode hNode, CUgraphNodeType *type); + // HIP: hipError_t hipGraphNodeGetType(hipGraphNode_t node, hipGraphNodeType* pType); + // CHECK: result = hipGraphNodeGetType(graphNode, &graphNodeType); + result = cuGraphNodeGetType(graphNode, &graphNodeType); + + // CUDA: CUresult CUDAAPI cuGraphDestroyNode(CUgraphNode hNode); + // HIP: hipError_t hipGraphDestroyNode(hipGraphNode_t node); + // CHECK: result = hipGraphDestroyNode(graphNode); + result = cuGraphDestroyNode(graphNode); + + // CUDA: CUresult CUDAAPI cuGraphAddHostNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_HOST_NODE_PARAMS *nodeParams); + // HIP: hipError_t hipGraphAddHostNode(hipGraphNode_t* pGraphNode, hipGraph_t graph, const hipGraphNode_t* pDependencies, size_t numDependencies, const hipHostNodeParams* pNodeParams); + // CHECK: result = hipGraphAddHostNode(&graphNode, graph, &graphNode2, bytes, &host_node_params); + result = cuGraphAddHostNode(&graphNode, graph, &graphNode2, bytes, &host_node_params); + + // CUDA: CUresult CUDAAPI cuGraphNodeFindInClone(CUgraphNode *phNode, CUgraphNode hOriginalNode, CUgraph hClonedGraph); + // HIP: hipError_t hipGraphNodeFindInClone(hipGraphNode_t* pNode, hipGraphNode_t originalNode, hipGraph_t clonedGraph); + // CHECK: result = hipGraphNodeFindInClone(&graphNode2, graphNode, graph); + result = cuGraphNodeFindInClone(&graphNode2, graphNode, graph); + + // CUDA: CUresult CUDAAPI cuStreamIsCapturing(CUstream hStream, CUstreamCaptureStatus *captureStatus); + // HIP: hipError_t 
hipStreamIsCapturing(hipStream_t stream, hipStreamCaptureStatus* pCaptureStatus); + // CHECK: result = hipStreamIsCapturing(stream, &streamCaptureStatus); + result = cuStreamIsCapturing(stream, &streamCaptureStatus); + + // CUDA: CUresult CUDAAPI cuGraphHostNodeGetParams(CUgraphNode hNode, CUDA_HOST_NODE_PARAMS *nodeParams); + // HIP: hipError_t hipGraphHostNodeGetParams(hipGraphNode_t node, hipHostNodeParams* pNodeParams); + // CHECK: result = hipGraphHostNodeGetParams(graphNode, &host_node_params); + result = cuGraphHostNodeGetParams(graphNode, &host_node_params); + + // CUDA: CUresult CUDAAPI cuGraphHostNodeSetParams(CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS *nodeParams); + // HIP: hipError_t hipGraphHostNodeSetParams(hipGraphNode_t node, const hipHostNodeParams* pNodeParams); + // CHECK: result = hipGraphHostNodeSetParams(graphNode, &host_node_params); + result = cuGraphHostNodeSetParams(graphNode, &host_node_params); + + // CUDA: CUresult CUDAAPI cuGraphAddChildGraphNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUgraph childGraph); + // HIP: hipError_t hipGraphAddChildGraphNode(hipGraphNode_t* pGraphNode, hipGraph_t graph, const hipGraphNode_t* pDependencies, size_t numDependencies, hipGraph_t childGraph); + // CHECK: result = hipGraphAddChildGraphNode(&graphNode, graph, &graphNode2, bytes, graph2); + result = cuGraphAddChildGraphNode(&graphNode, graph, &graphNode2, bytes, graph2); + + // CUDA: CUresult CUDAAPI cuGraphChildGraphNodeGetGraph(CUgraphNode hNode, CUgraph *phGraph); + // HIP: hipError_t hipGraphChildGraphNodeGetGraph(hipGraphNode_t node, hipGraph_t* pGraph); + // CHECK: result = hipGraphChildGraphNodeGetGraph(graphNode, &graph); + result = cuGraphChildGraphNodeGetGraph(graphNode, &graph); +#endif + +#if CUDA_VERSION > 10000 + // CUDA: CUresult CUDAAPI cuStreamGetCaptureInfo(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); + // HIP: hipError_t hipStreamGetCaptureInfo(hipStream_t stream, hipStreamCaptureStatus* pCaptureStatus, unsigned long long* pId); + // CHECK: result = hipStreamGetCaptureInfo(stream, &streamCaptureStatus, &ull); + result = cuStreamGetCaptureInfo(stream, &streamCaptureStatus, &ull); + + // CUDA: CUresult CUDAAPI cuGraphExecKernelNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS *nodeParams); + // HIP: hipError_t hipGraphExecKernelNodeSetParams(hipGraphExec_t hGraphExec, hipGraphNode_t node, const hipKernelNodeParams* pNodeParams); + // CHECK: result = hipGraphExecKernelNodeSetParams(graphExec, graphNode, &KERNEL_NODE_PARAMS); + result = cuGraphExecKernelNodeSetParams(graphExec, graphNode, &KERNEL_NODE_PARAMS); +#endif + +#if CUDA_VERSION > 10010 + // CUDA: CUresult CUDAAPI cuGraphExecUpdate(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphNode *hErrorNode_out, CUgraphExecUpdateResult *updateResult_out); + // HIP: hipError_t hipGraphExecUpdate(hipGraphExec_t hGraphExec, hipGraph_t hGraph, hipGraphNode_t* hErrorNode_out, hipGraphExecUpdateResult* updateResult_out); + // CHECK: result = hipGraphExecUpdate(graphExec, graph, &graphNode, &graphExecUpdateResult); + result = cuGraphExecUpdate(graphExec, graph, &graphNode, &graphExecUpdateResult); + + // CUDA: CUresult CUDAAPI cuGraphExecHostNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS *nodeParams); + // HIP: hipError_t hipError_t hipGraphExecHostNodeSetParams(hipGraphExec_t hGraphExec, hipGraphNode_t node, const hipHostNodeParams* pNodeParams); + 
// CHECK: result = hipGraphExecHostNodeSetParams(graphExec, graphNode, &host_node_params); + result = cuGraphExecHostNodeSetParams(graphExec, graphNode, &host_node_params); +#endif + +#if CUDA_VERSION > 11000 + // CUDA: CUresult CUDAAPI cuGraphExecChildGraphNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, CUgraph childGraph); + // HIP: hipError_t hipGraphExecChildGraphNodeSetParams(hipGraphExec_t hGraphExec, hipGraphNode_t node, hipGraph_t childGraph); + // CHECK: result = hipGraphExecChildGraphNodeSetParams(graphExec, graphNode, graph); + result = cuGraphExecChildGraphNodeSetParams(graphExec, graphNode, graph); + + // CUDA: CUresult CUDAAPI cuGraphAddEventRecordNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); + // HIP: hipError_t hipGraphAddEventRecordNode(hipGraphNode_t* pGraphNode, hipGraph_t graph, const hipGraphNode_t* pDependencies, size_t numDependencies, hipEvent_t event); + // CHECK: result = hipGraphAddEventRecordNode(&graphNode, graph, &graphNode2, bytes, event_); + result = cuGraphAddEventRecordNode(&graphNode, graph, &graphNode2, bytes, event_); + + // CUDA: CUresult CUDAAPI cuGraphEventRecordNodeGetEvent(CUgraphNode hNode, CUevent *event_out); + // HIP: hipError_t hipGraphEventRecordNodeGetEvent(hipGraphNode_t node, hipEvent_t* event_out); + // CHECK: result = hipGraphEventRecordNodeGetEvent(graphNode, &event_); + result = cuGraphEventRecordNodeGetEvent(graphNode, &event_); + + // CUDA: CUresult CUDAAPI cuGraphEventRecordNodeSetEvent(CUgraphNode hNode, CUevent event); + // HIP: hipError_t hipGraphEventRecordNodeSetEvent(hipGraphNode_t node, hipEvent_t event); + // CHECK: result = hipGraphEventRecordNodeSetEvent(graphNode, event_); + result = cuGraphEventRecordNodeSetEvent(graphNode, event_); + + // CUDA: CUresult CUDAAPI cuGraphExecEventRecordNodeSetEvent(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); + // HIP: hipError_t hipGraphExecEventRecordNodeSetEvent(hipGraphExec_t hGraphExec, hipGraphNode_t hNode, hipEvent_t event); + // CHECK: result = hipGraphExecEventRecordNodeSetEvent(graphExec, graphNode, event_); + result = cuGraphExecEventRecordNodeSetEvent(graphExec, graphNode, event_); + + // CUDA: CUresult CUDAAPI cuGraphAddEventWaitNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); + // HIP: hipError_t hipGraphAddEventWaitNode(hipGraphNode_t* pGraphNode, hipGraph_t graph, const hipGraphNode_t* pDependencies, size_t numDependencies, hipEvent_t event); + // CHECK: result = hipGraphAddEventWaitNode(&graphNode, graph, &graphNode2, bytes, event_); + result = cuGraphAddEventWaitNode(&graphNode, graph, &graphNode2, bytes, event_); + + // CUDA: CUresult CUDAAPI cuGraphEventWaitNodeGetEvent(CUgraphNode hNode, CUevent *event_out); + // HIP: hipError_t hipGraphEventWaitNodeGetEvent(hipGraphNode_t node, hipEvent_t* event_out); + // CHECK: result = hipGraphEventWaitNodeGetEvent(graphNode, &event_); + result = cuGraphEventWaitNodeGetEvent(graphNode, &event_); + + // CUDA: CUresult CUDAAPI cuGraphEventWaitNodeSetEvent(CUgraphNode hNode, CUevent event); + // HIP: hipError_t hipGraphEventWaitNodeSetEvent(hipGraphNode_t node, hipEvent_t event); + // CHECK: result = hipGraphEventWaitNodeSetEvent(graphNode, event_); + result = cuGraphEventWaitNodeSetEvent(graphNode, event_); + + // CUDA: CUresult CUDAAPI cuGraphExecEventWaitNodeSetEvent(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); + // HIP: hipError_t 
hipGraphExecEventWaitNodeSetEvent(hipGraphExec_t hGraphExec, hipGraphNode_t hNode, hipEvent_t event); + // CHECK: result = hipGraphExecEventWaitNodeSetEvent(graphExec, graphNode, event_); + result = cuGraphExecEventWaitNodeSetEvent(graphExec, graphNode, event_); +#endif + +#if CUDA_VERSION > 11020 + // CUDA: CUresult CUDAAPI cuStreamGetCaptureInfo_v2(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); + // HIP: hipError_t hipStreamGetCaptureInfo_v2(hipStream_t stream, hipStreamCaptureStatus* captureStatus_out, unsigned long long* id_out __dparm(0), hipGraph_t* graph_out __dparm(0), const hipGraphNode_t** dependencies_out __dparm(0), size_t* numDependencies_out __dparm(0)); + // CHECK: result = hipStreamGetCaptureInfo_v2(stream, &streamCaptureStatus, &ull, &graph, &pGraphNode, &bytes); + result = cuStreamGetCaptureInfo_v2(stream, &streamCaptureStatus, &ull, &graph, &pGraphNode, &bytes); + + // CUDA: CUresult CUDAAPI cuStreamUpdateCaptureDependencies(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); + // HIP: hipError_t hipStreamUpdateCaptureDependencies(hipStream_t stream, hipGraphNode_t* dependencies, size_t numDependencies, unsigned int flags __dparm(0)); + // CHECK: result = hipStreamUpdateCaptureDependencies(stream, &graphNode, bytes, flags); + result = cuStreamUpdateCaptureDependencies(stream, &graphNode, bytes, flags); +#endif + +#if CUDA_VERSION > 11030 + // CUDA: CUresult CUDAAPI cuGraphInstantiateWithFlags(CUgraphExec *phGraphExec, CUgraph hGraph, unsigned long long flags); + // HIP: hipError_t hipGraphInstantiateWithFlags(hipGraphExec_t* pGraphExec, hipGraph_t graph, unsigned long long flags); + // CHECK: result = hipGraphInstantiateWithFlags(&graphExec, graph, ull); + result = cuGraphInstantiateWithFlags(&graphExec, graph, ull); +#endif + + return 0; +} diff --git a/cuda_code/dropout_layer_35.cu b/cuda_code/dropout_layer_35.cu new file mode 100644 index 0000000000000000000000000000000000000000..a706335872d43ae12dd3093f390fccdeb51952a2 --- /dev/null +++ b/cuda_code/dropout_layer_35.cu @@ -0,0 +1,99 @@ +/* +All modification made by Cambricon Corporation: © 2018 Cambricon Corporation +All rights reserved. +All other contributions: +Copyright (c) 2014--2018, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <vector>
+
+#include "caffe/layers/dropout_layer.hpp"
+#include "caffe/util/math_functions.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+__global__ void DropoutForward(const int n, const Dtype* in,
+    const unsigned int* mask, const unsigned int threshold, const float scale,
+    Dtype* out) {
+  CUDA_KERNEL_LOOP(index, n) {
+    out[index] = in[index] * (mask[index] > threshold) * scale;
+  }
+}
+
+template <typename Dtype>
+void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+    const vector<Blob<Dtype>*>& top) {
+  const Dtype* bottom_data = bottom[0]->gpu_data();
+  Dtype* top_data = top[0]->mutable_gpu_data();
+  const int count = bottom[0]->count();
+  if (this->phase_ == TRAIN) {
+    unsigned int* mask =
+        static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
+    caffe_gpu_rng_uniform(count, mask);
+    // set thresholds
+    // NOLINT_NEXT_LINE(whitespace/operators)
+    DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+        count, bottom_data, mask, uint_thres_, scale_, top_data);
+    CUDA_POST_KERNEL_CHECK;
+  } else {
+    caffe_copy(count, bottom_data, top_data);
+  }
+}
+
+template <typename Dtype>
+__global__ void DropoutBackward(const int n, const Dtype* in_diff,
+    const unsigned int* mask, const unsigned int threshold, const float scale,
+    Dtype* out_diff) {
+  CUDA_KERNEL_LOOP(index, n) {
+    out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
+  }
+}
+
+template <typename Dtype>
+void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
+    const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
+  if (propagate_down[0]) {
+    const Dtype* top_diff = top[0]->gpu_diff();
+    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+    if (this->phase_ == TRAIN) {
+      const unsigned int* mask =
+          static_cast<const unsigned int*>(rand_vec_.gpu_data());
+      const int count = bottom[0]->count();
+      // NOLINT_NEXT_LINE(whitespace/operators)
+      DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
+          count, top_diff, mask, uint_thres_, scale_, bottom_diff);
+      CUDA_POST_KERNEL_CHECK;
+    } else {
+      caffe_copy(top[0]->count(), top_diff, bottom_diff);
+    }
+  }
+}
+
+INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
+
+} // namespace caffe
diff --git a/cuda_code/dropout_layer_53.cu b/cuda_code/dropout_layer_53.cu
new file mode 100644
index 0000000000000000000000000000000000000000..dfb74b3835fb83eddd54ca5409587cdd2bb17d8e
--- /dev/null
+++ b/cuda_code/dropout_layer_53.cu
@@ -0,0 +1,99 @@
+#include <vector>
+
+#include "caffe/layers/dropout_layer.hpp"
+#include "caffe/util/math_functions.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+__global__ void ClipData(const int n, const Dtype lower, const Dtype higher,
+    Dtype* data){
+  CUDA_KERNEL_LOOP(index, n){
+    Dtype value = data[index];
+    data[index] = value > higher ? higher : (value < lower ? lower : value);
+  }
+}
+
+template <typename Dtype>
+void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+    const vector<Blob<Dtype>*>& top) {
+  const Dtype* bottom_data = bottom[0]->gpu_data();
+  Dtype* top_data = top[0]->mutable_gpu_data();
+  const int count = bottom[0]->count(0, num_axes_);
+  Dtype* mask = rand_vec_->mutable_gpu_data();
+  if (this->phase_ == TRAIN) {
+    switch (drop_type_){
+    case DropoutParameter_DropType_BERNOULLI:
+    {
+      // Create random numbers
+      caffe_gpu_rng_bernoulli(count, Dtype(1. - threshold_), mask);
+      break;
+    }
+    case DropoutParameter_DropType_GAUSSIAN:
+    {
+      caffe_gpu_rng_gaussian(count, Dtype(mu_), Dtype(sigma_), mask);
+      // clip to be in [0,1]
+      ClipData<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
+          <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>
+          (count, Dtype(0), Dtype(1), mask);
+      CUDA_POST_KERNEL_CHECK;
+      break;
+    }
+    case DropoutParameter_DropType_UNIFORM:
+    {
+      caffe_gpu_rng_uniform(count, Dtype(a_), Dtype(b_), mask);
+      break;
+    }
+    }
+    if (drop_batch_){
+      Dtype drop = rand_vec_->cpu_data()[0];
+      caffe_copy(top[0]->count(), bottom_data, top_data);
+      caffe_gpu_scal(top[0]->count(), Dtype(scale_ * drop), top_data);
+    }
+    else{
+      vector<Blob<Dtype>*> scale_bottom(2, NULL);
+      scale_bottom[0] = bottom[0];
+      scale_bottom[1] = rand_vec_;
+      const vector<Blob<Dtype>*> scale_top(1, top[0]);
+      scale_layer_->Forward(scale_bottom, scale_top);
+      caffe_gpu_scal(top[0]->count(), scale_, top_data);
+    }
+  } else {
+    caffe_copy(count, bottom_data, top_data);
+  }
+}
+
+template <typename Dtype>
+void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
+    const vector<bool>& propagate_down,
+    const vector<Blob<Dtype>*>& bottom) {
+  if (propagate_down[0]) {
+    Dtype* top_diff = top[0]->mutable_gpu_diff();
+    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
+    if (this->phase_ == TRAIN) {
+      if (drop_batch_){
+        Dtype drop = rand_vec_->cpu_data()[0];
+        caffe_gpu_scal(top[0]->count(), Dtype(scale_ * drop), top_diff);
+        caffe_copy(top[0]->count(), top_diff, bottom_diff);
+      }
+      else{
+        // scale
+        caffe_gpu_scal(top[0]->count(), scale_, top_diff);
+        // multiply mask
+        vector<Blob<Dtype>*> scale_bottom(2, NULL);
+        scale_bottom[0] = bottom[0];
+        scale_bottom[1] = rand_vec_;
+        const vector<Blob<Dtype>*> scale_top(1, top[0]);
+        vector<bool> prop_down(2, true);
+        prop_down[1] = false;
+        scale_layer_->Backward(scale_top, prop_down, scale_bottom);
+      }
+    } else {
+      caffe_copy(top[0]->count(), top_diff, bottom_diff);
+    }
+  }
+}
+
+INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);
+
+} // namespace caffe
diff --git a/cuda_code/dropout_layer_kernels_11.cu b/cuda_code/dropout_layer_kernels_11.cu
new file mode 100644
index 0000000000000000000000000000000000000000..b7aeb5c7a23ac07d8988debfb1571c99401b11fb
--- /dev/null
+++ b/cuda_code/dropout_layer_kernels_11.cu
@@ -0,0 +1,40 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
+extern "C" {
+#include "dropout_layer.h"
+#include "cuda.h"
+#include "utils.h"
+}
+
+__global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale) {
+    int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
+    if (id < size) input[id] = (rand[id] < prob) ? 0 : input[id] * scale;
+}
+
+void forward_dropout_layer_gpu(dropout_layer layer, network net) {
+    if (!net.train) return;
+    int size = layer.inputs * layer.batch;
+    cuda_random(layer.rand_gpu, size);
+    /*
+    int i;
+    for(i = 0; i < size; ++i){
+        layer.rand[i] = rand_uniform();
+    }
+    cuda_push_array(layer.rand_gpu, layer.rand, size);
+    */
+
+    yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>
+        (net.input_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
+    check_error(cudaPeekAtLastError());
+}
+
+void backward_dropout_layer_gpu(dropout_layer layer, network net) {
+    if (!net.delta_gpu) return;
+    int size = layer.inputs * layer.batch;
+
+    yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>
+        (net.delta_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
+    check_error(cudaPeekAtLastError());
+}
diff --git a/cuda_code/dropout_op_14.cu b/cuda_code/dropout_op_14.cu
new file mode 100644
index 0000000000000000000000000000000000000000..ebb3f275cbfa0737e777f4eb1f09b25838118a28
--- /dev/null
+++ b/cuda_code/dropout_op_14.cu
@@ -0,0 +1,100 @@
+#include "caffe2/core/context_gpu.h"
+#include "caffe2/operators/dropout_op.h"
+
+namespace caffe2 {
+
+namespace {
+__global__ void DropoutKernel(
+    const int N,
+    const float ratio,
+    const float* Xdata,
+    float* Ydata,
+    bool* maskdata) {
+  const float scale = 1. / (1. - ratio);
+  CUDA_1D_KERNEL_LOOP(i, N) {
+    maskdata[i] = (Ydata[i] > ratio);
+    Ydata[i] = Xdata[i] * scale * maskdata[i];
+  }
+}
+} // namespace
+
+template <>
+bool DropoutOp<float, CUDAContext>::RunOnDevice() {
+  auto& X = Input(0);
+  auto* Y = Output(0);
+  Y->Resize(X.dims());
+  if (is_test_) {
+    if (Y != &X) {
+      context_.CopySameDevice<float>(
+          X.size(), X.data<float>(), Y->template mutable_data<float>());
+    }
+    return true;
+  } else {
+    // We do a simple trick here: since curand cannot generate random
+    // boolean numbers, we will generate into dY and write the result to
+    // mask.
+    float* Ydata = Y->template mutable_data<float>();
+    auto* mask = Output(1);
+    mask->Resize(X.dims());
+    CAFFE_ENFORCE(X.data<float>() != Ydata, "In-place GPU dropout is broken");
+    CURAND_ENFORCE(
+        curandGenerateUniform(context_.curand_generator(), Ydata, X.size()));
+    DropoutKernel<<<
+        CAFFE_GET_BLOCKS(X.size()),
+        CAFFE_CUDA_NUM_THREADS,
+        0,
+        context_.cuda_stream()>>>(
+        X.size(),
+        ratio_,
+        X.data<float>(),
+        Ydata,
+        mask->template mutable_data<bool>());
+    return true;
+  }
+}
+
+namespace {
+__global__ void DropoutGradientKernel(
+    const int N,
+    const float* dYdata,
+    const bool* maskdata,
+    const float scale,
+    float* dXdata) {
+  CUDA_1D_KERNEL_LOOP(i, N) {
+    dXdata[i] = dYdata[i] * maskdata[i] * scale;
+  }
+}
+} // namespace
+
+template <>
+bool DropoutGradientOp<float, CUDAContext>::RunOnDevice() {
+  auto& dY = Input(0);
+  auto* dX = Output(0);
+  dX->Resize(dY.dims());
+  if (is_test_) {
+    if (dX != &dY) {
+      context_.CopySameDevice<float>(
+          dY.size(), dY.data<float>(), dX->template mutable_data<float>());
+    }
+    return true;
+  } else {
+    auto& mask = Input(1);
+    CAFFE_ENFORCE_EQ(dY.size(), mask.size());
+    const float scale = 1. / (1. 
- ratio_); + DropoutGradientKernel<<< + CAFFE_GET_BLOCKS(dY.size()), + CAFFE_CUDA_NUM_THREADS, + 0, + context_.cuda_stream()>>>( + dY.size(), + dY.data(), + mask.data(), + scale, + dX->template mutable_data()); + return true; + } +} + +REGISTER_CUDA_OPERATOR(Dropout, DropoutOp); +REGISTER_CUDA_OPERATOR(DropoutGrad, DropoutGradientOp); +} // namespace caffe2 diff --git a/cuda_code/dstorm_push.cu b/cuda_code/dstorm_push.cu new file mode 100644 index 0000000000000000000000000000000000000000..2a599a7b22a212c9a75ea734beb50ea4e572116b --- /dev/null +++ b/cuda_code/dstorm_push.cu @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2017 NEC Laboratories America, Inc. ("NECLA"). All rights reserved. + * + * This source code is licensed under the license found in the LICENSE file in + * the root directory of this source tree. An additional grant of patent rights + * can be found in the PATENTS file in the same directory. + */ +#include "dstorm.hpp" // cpu/gpu now agree on a common Dstorm API +#if !WITH_GPU +#error "WITH_GPU compile flag must be set, so Dstorm:: gpu functions get declared" +#endif +#include "dstorm_any.hh" // temporary inline header code common to both cuda and host-side gpu +#include "dstorm_any2.hh" // need dstorm_any + segImpl.hh +#include "segImpl.hh" // SegImpl + SegInfo + segVecGpu inlines and template fns + +#include "demangle.hpp" // just for debug messages (is this even ok for cuda ??) + +namespace dStorm { + + ssize_t Dstorm::push_impl_gpu( SegNum const s, uint32_t const which ) + { + int const verbose=0; + if(verbose>1) ORM_COUT(this->orm, " Entering Dstorm::push_impl_gpu( segnum="<ipc == nullptr ); + if( transport != GPU ){ + throw std::runtime_error("push_impl_gpu called without transport == GPU!"); + } + if( !( segsync==SEGSYNC_NONE || segsync==SEGSYNC_NOTIFY )) + // remaps NOTIFY to full barrier, NOTIFY_ACK not supported + throw std::domain_error("GPU push supports only SEGSYNC_NONE or SEGSYNC_NOTIFY ???"); + if (verbose>1) { + if( this->iProc == 0U ){ + this->print_seg_info(s); + } + } + } + + uint_least32_t result; + uint_least8_t pushes; + auto obufHdr = mem_as*>( sInfo.ptrObuf() ); + { + uint_least32_t *d_result; + uint_least8_t *d_pushes; + CudaCheckError(); + checkCudaErrors(cudaMalloc((void**)&d_result, sizeof(uint_least32_t))); + checkCudaErrors(cudaMalloc((void**)&d_pushes, sizeof(uint_least8_t))); + + //ORM_COUT(this->orm, "About to call push_init " << obufHdr << s << d_result << d_pushes); + //ORM_COUT(this->orm, "About to call push_init "); + //push_init<<<1,1>>>(obufHdr, s, d_result, d_pushes); + push_init<<<16, 16>>>(obufHdr, s, d_result, d_pushes); + + CudaCheckError(); + checkCudaErrors(cudaMemcpy(&result, d_result, sizeof(uint_least32_t), cudaMemcpyDeviceToHost)); + checkCudaErrors(cudaMemcpy(&pushes, d_pushes, sizeof(uint_least8_t), cudaMemcpyDeviceToHost)); + // OHOH: d_result ... were never freed -- they should really be allocated JUST ONCE XXX + cudaFree(d_result); + cudaFree(d_pushes); + } + if( pushes > 0U ){ + ORM_COUT(this->orm, " duplicate Dstorm::push"); + return 0; + } + if(verbose>2) ORM_COUT(this->orm, "trace 000"); + + uint32_t nWrite = 0U; + std::vector send_list = iographs.send(sInfo.ionet); //send_lists[ sInfo.ionet ]; + std::vector send_bufnum = send_bufnums[ sInfo.ionet ]; + uint32_t nErr = 0U; + std::ostringstream oss; + assert( which==-1U || which < send_list.size() ); + uint32_t const sndBeg = (which==-1U? (uint32_t)0U : which); + uint32_t const sndEnd = (which==-1U? 
static_cast(send_list.size()): which+1U); + //ORM_COUT(this->orm, "send list begin: "<win_start(orm, sInfo.seg_id); + for (size_t snd = sndBeg; snd < sndEnd; ++nWrite, ++snd) { + if(verbose>1) ORM_COUT( this->orm, "trace 003 send_list["<net->live(send_list[snd]) ) { // skip dead recipients + continue; + } + uint32_t remotebuf = sInfo.rbufBeg() // in SegBase, common to all nodes + /* */ + send_bufnum[snd]; // OUR index in DEST recv_list + assert( remotebuf >= sInfo.rbufBeg() ); + if(verbose>1) ORM_COUT( this->orm, "trace 003 with remotebuf "< remotebuf " + <wait(orm, GQUEUE_write,ORM_BLOCK); + } + // 2. write (or write_notify) our oBuf --> remote rBuf + if( WITH_LIBORM || segsync == SEGSYNC_NONE ){ + try{ + if(verbose>1) ORM_COUT(this->orm, (snd>0?"\n\t\t":" ")<<"write seg "<<(unsigned)sInfo.seg_id<<" r"< remote r"<write(orm, __VA_ARGS__ ) + OrmWrite( sInfo.seg_id, sInfo.obufnum() * sInfo.bufBytes, + send_list[snd], + sInfo.seg_id, remotebuf * sInfo.bufBytes, + result,//obufHdr->hdr.a.bytes, + GQUEUE_write, ORM_BLOCK ); +#undef OrmWrite + }catch(orm_error &e){ + ORM_COUT( this->orm, " **WARNING** Dstorm::push write failure\n" ); + ++nErr; // failed write is non-fatal, trigger netRecover after write loop + } + } + } // loop INITIATING the sending of oBuf to remote ranks + orm->win_complete(orm, sInfo.seg_id); + // + // At this point it is possible to reduce the receipt of mixed-version + // buffers at out-vertices by delaying the sender before he begins to + // modify his obuf. + if(0){ + // If settings above QPAUSE_NONE are necessary, you probably have a bug + // somewhere else. + // + // Note: even with full barrier, you still get mixed-version vectors (rarely) + // To reduce them a lot, you can try SEGSYNC_NOTIFY, or better, SEGSYNC_NOTIFY_ACK. + // TBD: SEGSYNC_NOTIFY_ACK2 to ensure 100% squash of mixed-version + if( WITH_LIBORM || segsync == SEGSYNC_NONE ){ + // SEGSYNC_NONE should always be a no-op here (hogwild-ish) + this->wait( QPAUSE_NONE, GQUEUE_write, ORM_BLOCK ); + } + + } + + if(verbose>0) ORM_COUT(this->orm, oss.str()); + if(verbose>2) ORM_COUT(this->orm, "trace 004"); + // receiver: DO NOT WORRY if "pushes" field does not + // match value in MsgTrailer, + + push_con<<<1,1>>>(obufHdr,nWrite); + + //obufHdr->hdr.a.pushes += (1U + (nWrite>0U?1U:0U) ); // a bool would have been ok + if( nErr ){ + ORM_COUT(this->orm, " push: mpi_write FAILED, nErr = "<1) ORM_COUT(this->orm, " push: mpi_write OK"); + } + + if(nErr) this->netRecover(); + // Hmmm. perhaps keep this. Might be useful for stats: + // (Note; at this point, do NOT know whether the data + // has been transfered or not. We never get to know that. + // Even if queue size is zero, NIC may still be xfering! + if(verbose>2) ORM_COUT( this->orm, "trace 007"); + + return nErr + ? -(ssize_t)nErr + : result * nWrite ; + //: (ssize_t)obufHdr->hdr.a.bytes * (nWrite /*-nErr*/ ); + } //push + +}//dStorm:: diff --git a/cuda_code/dswap_2.cu b/cuda_code/dswap_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..320c751d237d55cf6d1cd6e94093f6ceb748fecd --- /dev/null +++ b/cuda_code/dswap_2.cu @@ -0,0 +1,76 @@ +/* + -- MAGMA (version 2.0) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. of Colorado, Denver + @date + + @author Mark Gates + + @generated from magmablas/zswap.cu, normal z -> d, Sat Mar 27 20:31:26 2021 + +*/ +#include "magma_internal.h" + +#define NB 64 + + +/* Vector is divided into ceil(n/nb) blocks. + Each thread swaps one element, x[tid] <---> y[tid]. 
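   Launched below by magmablas_dswap with magma_ceildiv(n, NB) thread blocks of
   NB threads each: every thread forms its global index ind and, when ind < n,
   swaps x[ind*incx] with y[ind*incy], so a single kernel call covers the whole
   vector.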
+*/ +__global__ void dswap_kernel( + int n, + double *x, int incx, + double *y, int incy ) +{ + double tmp; + int ind = threadIdx.x + blockDim.x*blockIdx.x; + if ( ind < n ) { + x += ind*incx; + y += ind*incy; + tmp = *x; + *x = *y; + *y = tmp; + } +} + + +/***************************************************************************//** + Purpose: + ============= + Swap vector x and y; \f$ x <-> y \f$. + + @param[in] + n Number of elements in vector x and y. n >= 0. + + @param[in,out] + dx DOUBLE PRECISION array on GPU device. + The n element vector x of dimension (1 + (n-1)*incx). + + @param[in] + incx Stride between consecutive elements of dx. incx != 0. + + @param[in,out] + dy DOUBLE PRECISION array on GPU device. + The n element vector y of dimension (1 + (n-1)*incy). + + @param[in] + incy Stride between consecutive elements of dy. incy != 0. + + @param[in] + queue magma_queue_t + Queue to execute in. + + @ingroup magma_swap +*******************************************************************************/ +extern "C" void +magmablas_dswap( + magma_int_t n, + magmaDouble_ptr dx, magma_int_t incx, + magmaDouble_ptr dy, magma_int_t incy, + magma_queue_t queue ) +{ + dim3 threads( NB ); + dim3 grid( magma_ceildiv( n, NB ) ); + dswap_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dx, incx, dy, incy ); +} diff --git a/cuda_code/dtrsm_5.cu b/cuda_code/dtrsm_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..809ac0693fb889922d8e106c2f86152fa806ff99 --- /dev/null +++ b/cuda_code/dtrsm_5.cu @@ -0,0 +1,1152 @@ +#include "blas.h" + +#if __CUDA_ARCH__ < 200 && (!defined(__BANK_CONFLICTS__) || __BANK_CONFLICTS__ <= 1) + +// y(1:4) -= alpha * x(1:4) +__device__ void daxpy(double alpha, const int * x_hi, const int * x_lo, double * y) { + y[0] -= alpha * __hiloint2double(x_hi[0], x_lo[0]); + y[1] -= alpha * __hiloint2double(x_hi[1], x_lo[1]); + y[2] -= alpha * __hiloint2double(x_hi[2], x_lo[2]); + y[3] -= alpha * __hiloint2double(x_hi[3], x_lo[3]); +} + +// y(1:n) -= alpha * x(1:n) +__device__ void daxpy(int n, double alpha, const int * x_hi, const int * x_lo, double * y) { + y[0] -= alpha * __hiloint2double(x_hi[0], x_lo[0]); if (1 >= n) return; + y[1] -= alpha * __hiloint2double(x_hi[1], x_lo[1]); if (2 >= n) return; + y[2] -= alpha * __hiloint2double(x_hi[2], x_lo[2]); if (3 >= n) return; + y[3] -= alpha * __hiloint2double(x_hi[3], x_lo[3]); +} + +/** + * DTRSM: + * B := alpha * inv( A ) * B for side == CBlasLeft, transA == CBlasNoTrans + * B := alpha * inv( A') * B for side == CBlasLeft, transA == CBlasTrans + * B := alpha * B * inv( A ) for side == CBlasRight, transA == CBlasNoTrans + * B := alpha * B * inv( A') for side == CBlasRight, transA == CBlasTrans + * + * Only the upper or lower triangle of A is used. + * + * @param side whether A multiplies B from the left or right. + * @param uplo uplo for A. + * @param transA transpose for A. + * @param diag whether A is unit or nonunit diagonal. + * @param mb the number of rows in the block of B. + * @param nb the number of columns in the block of B. + * @param bx blockDim.x. + * @param by blockDim.y. + */ +template +__global__ void dtrsm(const double * __restrict__ A, double * __restrict__ B, + double alpha, + int lda, int ldb, + int m, int n) { + + if (side == CBlasLeft) { + // For CBlasLeft each thread updates a column. This means that B needs to be + // read and written via shared memory to transpose it after reading it + // efficiently from global memory. +// typedef char _x[(mb == 4) ? 
1 : -1]; +// typedef char _y[(nb == bx * by) ? 1 : -1]; +// typedef char _z[(bx == mb) ? 1 : -1]; + + // Blocks of A and B is shared memory and X in registers + __shared__ int a_hi[mb][(transA == CBlasNoTrans) ? mb : mb + 1]; + __shared__ int a_lo[mb][(transA == CBlasNoTrans) ? mb : mb + 1]; + __shared__ int b_hi[mb][nb + 1]; + __shared__ int b_lo[mb][nb + 1]; + double x[4]; + + const int ti = threadIdx.y * bx + threadIdx.x; + + // Work out the column for the current thread in A and B + A += threadIdx.y * lda + threadIdx.x; + B += (blockIdx.y * nb + threadIdx.y) * ldb + threadIdx.x; + n -= blockIdx.y * nb + threadIdx.y; + + // There are 2 common cases for each of CBlasLeft and CBlasRight + if ((uplo == CBlasUpper && transA == CBlasNoTrans) || + (uplo == CBlasLower && transA != CBlasNoTrans)) { + // For this case start at the bottom of B and work upwards + const int mm = m & (mb - 1); + int i = m - mm; + + // Since we need to read B to update it we need two pointers into it: one + // into the block we are updating (X), and one into the block we are + // currently reading to update it (_B, defined later in terms of X). + A += i * lda + i; + double * X = B + i; + + // Handle the trailing elements first, if any. This only requires reading + // the block of B that we are also updating (X == _B). + if (mm > 0) { + // Read the block of B we are updating and transpose into shared + // memory using b. + #pragma unroll + for (int j = 0; j < nb; j += by) { + b_hi[threadIdx.x][j + threadIdx.y] = __double2hiint(X[j * ldb]); + b_lo[threadIdx.x][j + threadIdx.y] = __double2loint(X[j * ldb]); + } + + __syncthreads(); + + // Place it into X as alpha * B + x[0] = alpha * __hiloint2double(b_hi[0][ti], b_lo[0][ti]); + x[1] = alpha * __hiloint2double(b_hi[1][ti], b_lo[1][ti]); + x[2] = alpha * __hiloint2double(b_hi[2][ti], b_lo[2][ti]); + x[3] = alpha * __hiloint2double(b_hi[3][ti], b_lo[3][ti]); + + // Read the current block of A + if (transA == CBlasNoTrans) { + a_hi[threadIdx.y][threadIdx.x] = __double2hiint(A[0]); + a_lo[threadIdx.y][threadIdx.x] = __double2loint(A[0]); + } + else { + a_hi[threadIdx.x][threadIdx.y] = __double2hiint(A[0]); + a_lo[threadIdx.x][threadIdx.y] = __double2loint(A[0]); + } + + __syncthreads(); + + // Update X from top to bottom + switch (mm - 1) { + case 3: if (diag == CBlasNonUnit) x[3] /= __hiloint2double(a_hi[3][3], a_lo[3][3]); daxpy(3, x[3], a_hi[3], a_lo[3], x); + case 2: if (diag == CBlasNonUnit) x[2] /= __hiloint2double(a_hi[2][2], a_lo[2][2]); daxpy(2, x[2], a_hi[2], a_lo[2], x); + case 1: if (diag == CBlasNonUnit) x[1] /= __hiloint2double(a_hi[1][1], a_lo[1][1]); daxpy(1, x[1], a_hi[1], a_lo[1], x); + case 0: if (diag == CBlasNonUnit) x[0] /= __hiloint2double(a_hi[0][0], a_lo[0][0]); + } + + __syncthreads(); + + // Write X out transposing it back via shared memory using b. 
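+      // On this __CUDA_ARCH__ < 200 path each double lives in shared memory as
+      // separate hi/lo 32-bit words (__double2hiint/__double2loint on the way
+      // in, __hiloint2double on the way out), which keeps shared-memory
+      // accesses within the 32-bit banks of pre-Fermi devices.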
+ b_hi[0][ti] = __double2hiint(x[0]); b_lo[0][ti] = __double2loint(x[0]); + b_hi[1][ti] = __double2hiint(x[1]); b_lo[1][ti] = __double2loint(x[1]); + b_hi[2][ti] = __double2hiint(x[2]); b_lo[2][ti] = __double2loint(x[2]); + b_hi[3][ti] = __double2hiint(x[3]); b_lo[3][ti] = __double2loint(x[3]); + + __syncthreads(); + + if (threadIdx.x < mm) { + if (0 * by < n) { X[0 * by * ldb] = __hiloint2double(b_hi[threadIdx.x][0 * by + threadIdx.y], b_lo[threadIdx.x][0 * by + threadIdx.y]); + if (1 * by < n) { X[1 * by * ldb] = __hiloint2double(b_hi[threadIdx.x][1 * by + threadIdx.y], b_lo[threadIdx.x][1 * by + threadIdx.y]); + if (2 * by < n) { X[2 * by * ldb] = __hiloint2double(b_hi[threadIdx.x][2 * by + threadIdx.y], b_lo[threadIdx.x][2 * by + threadIdx.y]); + if (3 * by < n) { X[3 * by * ldb] = __hiloint2double(b_hi[threadIdx.x][3 * by + threadIdx.y], b_lo[threadIdx.x][3 * by + threadIdx.y]); }}}} + } + } + + // Move up to the next block + A -= mb * lda + mb; + X -= mb; + i -= mb; + + while (i >= 0) { + + __syncthreads(); + + // Read the current block of X + #pragma unroll + for (int j = 0; j < nb; j += by) { + b_hi[threadIdx.x][j + threadIdx.y] = __double2hiint(X[j * ldb]); + b_lo[threadIdx.x][j + threadIdx.y] = __double2loint(X[j * ldb]); + } + + __syncthreads(); + + x[0] = alpha * __hiloint2double(b_hi[0][ti], b_lo[0][ti]); + x[1] = alpha * __hiloint2double(b_hi[1][ti], b_lo[1][ti]); + x[2] = alpha * __hiloint2double(b_hi[2][ti], b_lo[2][ti]); + x[3] = alpha * __hiloint2double(b_hi[3][ti], b_lo[3][ti]); + + __syncthreads(); + + // Start at the block one beyond X and move to the bottom + const double * _A = A + ((transA == CBlasNoTrans) ? mb * lda : mb); + const double * _B = X + mb; + int k = m - i - mb; + while (k > 0) { + + // Read A and B into shared memory + if (transA == CBlasNoTrans) { + a_hi[threadIdx.y][threadIdx.x] = __double2hiint(_A[0]); + a_lo[threadIdx.y][threadIdx.x] = __double2loint(_A[0]); + } + else { + a_hi[threadIdx.x][threadIdx.y] = __double2hiint(_A[0]); + a_lo[threadIdx.x][threadIdx.y] = __double2loint(_A[0]); + } + + #pragma unroll + for (int j = 0; j < nb; j += by) { + b_hi[threadIdx.x][j + threadIdx.y] = __double2hiint(_B[j * ldb]); + b_lo[threadIdx.x][j + threadIdx.y] = __double2loint(_B[j * ldb]); + } + + __syncthreads(); + + if (k < mb) break; + + // Update X in registers + #pragma unroll + for (int l = 0; l < mb; l++) + daxpy(__hiloint2double(b_hi[l][ti], b_lo[l][ti]), a_hi[l], a_lo[l], x); + + __syncthreads(); + + // Move to the next blocks of A and B + _A += (transA == CBlasNoTrans) ? 
mb * lda : mb; + _B += mb; + k -= mb; + } + + // Process odd elements of A and B + for (int l = 0; l < k; l++) + daxpy(__hiloint2double(b_hi[l][ti], b_lo[l][ti]), a_hi[l], a_lo[l], x); + + __syncthreads(); + + // Read the block of A that matches the block of B which is in registers + if (transA == CBlasNoTrans) { + a_hi[threadIdx.y][threadIdx.x] = __double2hiint(A[0]); + a_lo[threadIdx.y][threadIdx.x] = __double2loint(A[0]); + } + else { + a_hi[threadIdx.x][threadIdx.y] = __double2hiint(A[0]); + a_lo[threadIdx.x][threadIdx.y] = __double2loint(A[0]); + } + + __syncthreads(); + + // Update X unrolled (reverse loop) + if (diag == CBlasNonUnit) x[3] /= __hiloint2double(a_hi[3][3], a_lo[3][3]); daxpy(3, x[3], a_hi[3], a_lo[3], x); + if (diag == CBlasNonUnit) x[2] /= __hiloint2double(a_hi[2][2], a_lo[2][2]); daxpy(2, x[2], a_hi[2], a_lo[2], x); + if (diag == CBlasNonUnit) x[1] /= __hiloint2double(a_hi[1][1], a_lo[1][1]); daxpy(1, x[1], a_hi[1], a_lo[1], x); + if (diag == CBlasNonUnit) x[0] /= __hiloint2double(a_hi[0][0], a_lo[0][0]); + + // Write X out transposing it back via shared memory using b + b_hi[0][ti] = __double2hiint(x[0]); b_lo[0][ti] = __double2loint(x[0]); + b_hi[1][ti] = __double2hiint(x[1]); b_lo[1][ti] = __double2loint(x[1]); + b_hi[2][ti] = __double2hiint(x[2]); b_lo[2][ti] = __double2loint(x[2]); + b_hi[3][ti] = __double2hiint(x[3]); b_lo[3][ti] = __double2loint(x[3]); + + __syncthreads(); + + #pragma unroll + for (int j = 0; j < nb; j += by) + X[j * ldb] = __hiloint2double(b_hi[threadIdx.x][j + threadIdx.y], b_lo[threadIdx.x][j + threadIdx.y]); + + // Move up to the next blocks of A and B (through X) + A -= mb * lda + mb; + X -= mb; + i -= mb; + } + } + else { /* (uplo == CBlasLower && transA == CBlasNoTrans) || (uplo == CBlasUpper && transA != CBlasNoTrans) */ + // For this case we start at the top of B and work downwards + double * X = B; + int i = 0; + + while (m > 0) { + // Read the current block of X + #pragma unroll + for (int j = 0; j < nb; j += by) { + b_hi[threadIdx.x][j + threadIdx.y] = __double2hiint(X[j * ldb]); + b_lo[threadIdx.x][j + threadIdx.y] = __double2loint(X[j * ldb]); + } + + __syncthreads(); + + // Place it into X as alpha * B + x[0] = alpha * __hiloint2double(b_hi[0][ti], b_lo[0][ti]); + x[1] = alpha * __hiloint2double(b_hi[1][ti], b_lo[1][ti]); + x[2] = alpha * __hiloint2double(b_hi[2][ti], b_lo[2][ti]); + x[3] = alpha * __hiloint2double(b_hi[3][ti], b_lo[3][ti]); + + __syncthreads(); + + // Start at the top of B and move down to X + const double * _A = A; + const double * _B = B; + int k = i; + while (k > 0) { + + // Read A and B into shared memory + if (transA == CBlasNoTrans) { + a_hi[threadIdx.y][threadIdx.x] = __double2hiint(_A[0]); + a_lo[threadIdx.y][threadIdx.x] = __double2loint(_A[0]); + } + else { + a_hi[threadIdx.x][threadIdx.y] = __double2hiint(_A[0]); + a_lo[threadIdx.x][threadIdx.y] = __double2loint(_A[0]); + } + + #pragma unroll + for (int j = 0; j < nb; j += by) { + b_hi[threadIdx.x][j + threadIdx.y] = __double2hiint(_B[j * ldb]); + b_lo[threadIdx.x][j + threadIdx.y] = __double2loint(_B[j * ldb]); + } + + __syncthreads(); + + // Update X in registers + #pragma unroll + for (int l = 0; l < mb; l++) + daxpy(__hiloint2double(b_hi[l][ti], b_lo[l][ti]), a_hi[l], a_lo[l], x); + + __syncthreads(); + + // Move to the next blocks of A and B + _A += (transA == CBlasNoTrans) ? 
mb * lda : mb; + _B += mb; + k -= mb; + } + + // Process odd elements of A and B + for (int l = 0; l < k; l++) + daxpy(__hiloint2double(b_hi[l][ti], b_lo[l][ti]), a_hi[l], a_lo[l], x); + + __syncthreads(); + + // Read the block of A that matches the block of B which is in registers + if (transA == CBlasNoTrans) { + a_hi[threadIdx.y][threadIdx.x] = __double2hiint(_A[0]); + a_lo[threadIdx.y][threadIdx.x] = __double2loint(_A[0]); + } + else { + a_hi[threadIdx.x][threadIdx.y] = __double2hiint(_A[0]); + a_lo[threadIdx.x][threadIdx.y] = __double2loint(_A[0]); + } + + __syncthreads(); + + if (m < mb) break; + + // Update X unrolled (forward loop) + if (diag == CBlasNonUnit) x[0] /= __hiloint2double(a_hi[0][0], a_lo[0][0]); daxpy(3, x[0], &a_hi[0][1], &a_lo[0][1], &x[1]); + if (diag == CBlasNonUnit) x[1] /= __hiloint2double(a_hi[1][1], a_lo[1][1]); daxpy(2, x[1], &a_hi[1][2], &a_lo[1][2], &x[2]); + if (diag == CBlasNonUnit) x[2] /= __hiloint2double(a_hi[2][2], a_lo[2][2]); daxpy(1, x[2], &a_hi[2][3], &a_lo[2][3], &x[3]); + if (diag == CBlasNonUnit) x[3] /= __hiloint2double(a_hi[3][3], a_lo[3][3]); + + // Write X out transposing it back via shared memory using b + b_hi[0][ti] = __double2hiint(x[0]); b_lo[0][ti] = __double2loint(x[0]); + b_hi[1][ti] = __double2hiint(x[1]); b_lo[1][ti] = __double2loint(x[1]); + b_hi[2][ti] = __double2hiint(x[2]); b_lo[2][ti] = __double2loint(x[2]); + b_hi[3][ti] = __double2hiint(x[3]); b_lo[3][ti] = __double2loint(x[3]); + + __syncthreads(); + + #pragma unroll + for (int j = 0; j < nb; j += by) + X[j * ldb] = __hiloint2double(b_hi[threadIdx.x][j + threadIdx.y], b_lo[threadIdx.x][j + threadIdx.y]); + + __syncthreads(); + + // Move up to the next blocks of A and B (through X) + A += (transA == CBlasNoTrans) ? mb : mb * lda; + X += mb; + m -= mb; + i += mb; + } + + // Handle the trailing elements last, if any. + if (m > 0) { if (diag == CBlasNonUnit) x[0] /= __hiloint2double(a_hi[0][0], a_lo[0][0]); + if (m > 1) { daxpy(m - 1, x[0], &a_hi[0][1], &a_lo[0][1], &x[1]); if (diag == CBlasNonUnit) x[1] /= __hiloint2double(a_hi[1][1], a_lo[1][1]); + if (m > 2) { daxpy(m - 2, x[1], &a_hi[1][2], &a_lo[1][2], &x[2]); if (diag == CBlasNonUnit) x[2] /= __hiloint2double(a_hi[2][2], a_lo[2][2]); + if (m > 3) { daxpy(m - 3, x[2], &a_hi[2][3], &a_lo[2][3], &x[3]); if (diag == CBlasNonUnit) x[3] /= __hiloint2double(a_hi[3][3], a_lo[3][3]); }}}} + + __syncthreads(); + + // Write X out transposing it back via shared memory using b + b_hi[0][ti] = __double2hiint(x[0]); b_lo[0][ti] = __double2loint(x[0]); + b_hi[1][ti] = __double2hiint(x[1]); b_lo[1][ti] = __double2loint(x[1]); + b_hi[2][ti] = __double2hiint(x[2]); b_lo[2][ti] = __double2loint(x[2]); + b_hi[3][ti] = __double2hiint(x[3]); b_lo[3][ti] = __double2loint(x[3]); + + __syncthreads(); + + if (threadIdx.x < m) { + X[0] = __hiloint2double(b_hi[threadIdx.x][by * 0 + threadIdx.y], b_lo[threadIdx.x][by * 0 + threadIdx.y]); if (by * 1 >= n) return; X += by * ldb; + X[0] = __hiloint2double(b_hi[threadIdx.x][by * 1 + threadIdx.y], b_lo[threadIdx.x][by * 1 + threadIdx.y]); if (by * 2 >= n) return; X += by * ldb; + X[0] = __hiloint2double(b_hi[threadIdx.x][by * 2 + threadIdx.y], b_lo[threadIdx.x][by * 2 + threadIdx.y]); if (by * 3 >= n) return; X += by * ldb; + X[0] = __hiloint2double(b_hi[threadIdx.x][by * 3 + threadIdx.y], b_lo[threadIdx.x][by * 3 + threadIdx.y]); + } + } + } + else { + // For CBlasRight each thread updates a row. This means that B can be read + // efficiently straight from global memory. +// typedef char _x[(nb == 4) ? 
1 : -1]; +// typedef char _y[(mb == bx * by) ? 1 : -1]; +// typedef char _z[(by == nb) ? 1 : -1]; + + // Each thread computes a row of B 4 elements at a time + __shared__ int a_hi[nb][(transA == CBlasNoTrans) ? nb + 1 : nb]; + __shared__ int a_lo[nb][(transA == CBlasNoTrans) ? nb + 1 : nb]; + double x[4]; + + const int ti = threadIdx.y * bx + threadIdx.x; + + // Compute the starting points in A and B for each thread + A += threadIdx.y * lda + threadIdx.x; + B += blockIdx.x * mb + ti; + m -= blockIdx.x * mb; + + // There are 2 common cases for each of CBlasLeft and CBlasRight + if ((uplo == CBlasUpper && transA == CBlasNoTrans) || + (uplo == CBlasLower && transA != CBlasNoTrans)) { + // For this case start on the left and work right + double * X = B; + int j = 0; + + while (n > 0) { + // Read the current block of X + x[0] = alpha * X[0 * ldb]; x[1] = alpha * X[1 * ldb]; x[2] = alpha * X[2 * ldb]; x[3] = alpha * X[3 * ldb]; + + // Start at the left of B and move right to X + const double * _A = A; + const double * _B = B; + int k = j; + while (k > 0) { + + // Read A into shared memory + if (transA == CBlasNoTrans) { + a_hi[threadIdx.x][threadIdx.y] = __double2hiint(_A[0]); + a_lo[threadIdx.x][threadIdx.y] = __double2loint(_A[0]); + } + else { + a_hi[threadIdx.y][threadIdx.x] = __double2hiint(_A[0]); + a_lo[threadIdx.y][threadIdx.x] = __double2loint(_A[0]); + } + + __syncthreads(); + + // Update X reading B straight from global memory + #pragma unroll + for (int l = 0; l < nb; l++) + daxpy(_B[l * ldb], a_hi[l], a_lo[l], x); + + __syncthreads(); + + // Move to the next blocks of A and B + _A += (transA == CBlasNoTrans) ? nb : nb * lda; + _B += nb * ldb; + k -= nb; + } + + // Read the block of A that matches the block of B which is in registers + if (transA == CBlasNoTrans) { + a_hi[threadIdx.x][threadIdx.y] = __double2hiint(_A[0]); + a_lo[threadIdx.x][threadIdx.y] = __double2loint(_A[0]); + } + else { + a_hi[threadIdx.y][threadIdx.x] = __double2hiint(_A[0]); + a_lo[threadIdx.y][threadIdx.x] = __double2loint(_A[0]); + } + + __syncthreads(); + + if (n < nb) break; + + // Update X unrolled (forward loop) + if (diag == CBlasNonUnit) x[0] /= __hiloint2double(a_hi[0][0], a_lo[0][0]); + daxpy(3, x[0], &a_hi[0][1], &a_lo[0][1], &x[1]); if (diag == CBlasNonUnit) x[1] /= __hiloint2double(a_hi[1][1], a_lo[1][1]); + daxpy(2, x[1], &a_hi[1][2], &a_lo[1][2], &x[2]); if (diag == CBlasNonUnit) x[2] /= __hiloint2double(a_hi[2][2], a_lo[2][2]); + daxpy(1, x[2], &a_hi[2][3], &a_lo[2][3], &x[3]); if (diag == CBlasNonUnit) x[3] /= __hiloint2double(a_hi[3][3], a_lo[3][3]); + + // Write X + if (ti < m) { + X[0 * ldb] = x[0]; X[1 * ldb] = x[1]; X[2 * ldb] = x[2]; X[3 * ldb] = x[3]; + } + + __syncthreads(); + + // Move right to the next blocks of A and B (through X) + A += (transA == CBlasNoTrans) ? 
nb * lda : nb; + X += nb * ldb; + n -= nb; + j += nb; + } + + // Update X unrolled (forward loop) + if (n > 0) { + if (diag == CBlasNonUnit) x[0] /= __hiloint2double(a_hi[0][0], a_lo[0][0]); + if (n > 1) { daxpy(n - 1, x[0], &a_hi[0][1], &a_lo[0][1], &x[1]); if (diag == CBlasNonUnit) x[1] /= __hiloint2double(a_hi[1][1], a_lo[1][1]); + if (n > 2) { daxpy(n - 2, x[1], &a_hi[1][2], &a_lo[1][2], &x[2]); if (diag == CBlasNonUnit) x[2] /= __hiloint2double(a_hi[2][2], a_lo[2][2]); + if (n > 3) { daxpy(n - 3, x[2], &a_hi[2][3], &a_lo[2][3], &x[3]); if (diag == CBlasNonUnit) x[3] /= __hiloint2double(a_hi[3][3], a_lo[3][3]); }}} + + // Write X + if (ti < m) { + X[0] = x[0]; if (1 >= n) return; X += ldb; X[0] = x[1]; if (2 >= n) return; X += ldb; + X[0] = x[2]; if (3 >= n) return; X += ldb; X[0] = x[3]; + } + } + } + else { /* (uplo == CBlasLower && transA == CBlasNoTrans) || (uplo == CBlasUpper && transA != CBlasNoTrans) */ + // For this case start on the right and work left + const int nn = n & (nb - 1); + int j = n - nn; + + A += j * lda + j; + double * X = B + j * ldb; + + // Handle the trailing elements first, if any. This only requires reading + // the block of B that we are also updating (X == _B). + if (nn > 0) { + // Read the block of B we are updating + x[0] = alpha * X[0 * ldb]; x[1] = alpha * X[1 * ldb]; x[2] = alpha * X[2 * ldb]; x[3] = alpha * X[3 * ldb]; + + // Read the current block of A + if (transA == CBlasNoTrans) { + a_hi[threadIdx.x][threadIdx.y] = __double2hiint(A[0]); + a_lo[threadIdx.x][threadIdx.y] = __double2loint(A[0]); + } + else { + a_hi[threadIdx.y][threadIdx.x] = __double2hiint(A[0]); + a_lo[threadIdx.y][threadIdx.x] = __double2loint(A[0]); + } + + __syncthreads(); + + // Update X from right to left + switch (nn - 1) { + case 3: if (diag == CBlasNonUnit) x[3] /= __hiloint2double(a_hi[3][3], a_lo[3][3]); daxpy(3, x[3], a_hi[3], a_lo[3], x); + case 2: if (diag == CBlasNonUnit) x[2] /= __hiloint2double(a_hi[2][2], a_lo[2][2]); daxpy(2, x[2], a_hi[2], a_lo[2], x); + case 1: if (diag == CBlasNonUnit) x[1] /= __hiloint2double(a_hi[1][1], a_lo[1][1]); daxpy(1, x[1], a_hi[1], a_lo[1], x); + case 0: if (diag == CBlasNonUnit) x[0] /= __hiloint2double(a_hi[0][0], a_lo[0][0]); + } + + // Write X + if (ti < m) { + X[0 * ldb] = x[0]; if (1 < nn) { + X[1 * ldb] = x[1]; if (2 < nn) { + X[2 * ldb] = x[2]; if (3 < nn) { + X[3 * ldb] = x[3]; }}} + } + } + + // Move left to the next block + A -= nb * lda + nb; + X -= nb * ldb; + j -= nb; + + while (j >= 0) { + // Read the current block of X and multiply by alpha + x[0] = alpha * X[0 * ldb]; x[1] = alpha * X[1 * ldb]; x[2] = alpha * X[2 * ldb]; x[3] = alpha * X[3 * ldb]; + + __syncthreads(); + + // Start one block beyond X and move to the right + const double * _A = A + ((transA == CBlasNoTrans) ? nb : nb * lda); + const double * _B = X + nb * ldb; + int k = n - j - nb; + while (k > 0) { + + // Read the current block of A + if (transA == CBlasNoTrans) { + a_hi[threadIdx.x][threadIdx.y] = __double2hiint(_A[0]); + a_lo[threadIdx.x][threadIdx.y] = __double2loint(_A[0]); + } + else { + a_hi[threadIdx.y][threadIdx.x] = __double2hiint(_A[0]); + a_lo[threadIdx.y][threadIdx.x] = __double2loint(_A[0]); + } + + __syncthreads(); + + if (k < nb) break; + + // Update X in registers + #pragma unroll + for (int l = 0; l < nb; l++) + daxpy(_B[l * ldb], a_hi[l], a_lo[l], x); + + __syncthreads(); + + // Move to the next blocks of A and B + _A += (transA == CBlasNoTrans) ? 
nb : nb * lda; + _B += nb * ldb; + k -= nb; + } + + // Process odd elements of A and B + for (int l = 0; l < k; l++) + daxpy(_B[l * ldb], a_hi[l], a_lo[l], x); + + __syncthreads(); + + // Read the block of A that matches the block of B which is in registers + if (transA == CBlasNoTrans) { + a_hi[threadIdx.x][threadIdx.y] = __double2hiint(A[0]); + a_lo[threadIdx.x][threadIdx.y] = __double2loint(A[0]); + } + else { + a_hi[threadIdx.y][threadIdx.x] = __double2hiint(A[0]); + a_lo[threadIdx.y][threadIdx.x] = __double2loint(A[0]); + } + + __syncthreads(); + + // Update X from right to left + if (diag == CBlasNonUnit) x[3] /= __hiloint2double(a_hi[3][3], a_lo[3][3]); daxpy(3, x[3], a_hi[3], a_lo[3], x); + if (diag == CBlasNonUnit) x[2] /= __hiloint2double(a_hi[2][2], a_lo[2][2]); daxpy(2, x[2], a_hi[2], a_lo[2], x); + if (diag == CBlasNonUnit) x[1] /= __hiloint2double(a_hi[1][1], a_lo[1][1]); daxpy(1, x[1], a_hi[1], a_lo[1], x); + if (diag == CBlasNonUnit) x[0] /= __hiloint2double(a_hi[0][0], a_lo[0][0]); + + // Write X + if (ti < m) { + X[0 * ldb] = x[0]; X[1 * ldb] = x[1]; X[2 * ldb] = x[2]; X[3 * ldb] = x[3]; + } + + // Move left to the next blocks of A and B (through X) + A -= nb * lda + nb; + X -= nb * ldb; + j -= nb; + } + } + } +} + +#else + +// y(1:4) -= alpha * x(1:4) +__device__ void daxpy(double alpha, const double * x, double * y) { + y[0] -= alpha * x[0]; y[1] -= alpha * x[1]; + y[2] -= alpha * x[2]; y[3] -= alpha * x[3]; +} + +// y(1:n) -= alpha * x(1:n) +__device__ void daxpy(int n, double alpha, const double * x, double * y) { + y[0] -= alpha * x[0]; if (1 >= n) return; y[1] -= alpha * x[1]; if (2 >= n) return; + y[2] -= alpha * x[2]; if (3 >= n) return; y[3] -= alpha * x[3]; +} + +/** + * DTRSM: + * B := alpha * inv( A ) * B for side == CBlasLeft, transA == CBlasNoTrans + * B := alpha * inv( A') * B for side == CBlasLeft, transA == CBlasTrans + * B := alpha * B * inv( A ) for side == CBlasRight, transA == CBlasNoTrans + * B := alpha * B * inv( A') for side == CBlasRight, transA == CBlasTrans + * + * Only the upper or lower triangle of A is used. + * + * @param side whether A multiplies B from the left or right. + * @param uplo uplo for A. + * @param transA transpose for A. + * @param diag whether A is unit or nonunit diagonal. + * @param mb the number of rows in the block of B. + * @param nb the number of columns in the block of B. + * @param bx blockDim.x. + * @param by blockDim.y. + */ +template +__global__ void dtrsm(const double * __restrict__ A, double * __restrict__ B, + double alpha, + int lda, int ldb, + int m, int n) { + + if (side == CBlasLeft) { + // For CBlasLeft each thread updates a column. This means that B needs to be + // read and written via shared memory to transpose it after reading it + // efficiently from global memory. +// typedef char _x[(mb == 4) ? 1 : -1]; +// typedef char _y[(nb == bx * by) ? 1 : -1]; +// typedef char _z[(bx == mb) ? 1 : -1]; + + // Blocks of A and B is shared memory and X in registers + __shared__ double a[mb][(transA == CBlasNoTrans) ? 
mb : mb + 1]; + __shared__ double b[mb][nb + 1]; + double x[8]; + + const int ti = threadIdx.y * bx + threadIdx.x; + + // Work out the column for the current thread in A and B + A += threadIdx.y * lda + threadIdx.x; + B += (blockIdx.y * nb + threadIdx.y) * ldb + threadIdx.x; + n -= blockIdx.y * nb + threadIdx.y; + + // There are 2 common cases for each of CBlasLeft and CBlasRight + if ((uplo == CBlasUpper && transA == CBlasNoTrans) || + (uplo == CBlasLower && transA != CBlasNoTrans)) { + // For this case start at the bottom of B and work upwards + const int mm = m & (mb - 1); + int i = m - mm; + + // Since we need to read B to update it we need two pointers into it: one + // into the block we are updating (X), and one into the block we are + // currently reading to update it (_B, defined later in terms of X). + A += i * lda + i; + double * X = B + i; + + // Handle the trailing elements first, if any. This only requires reading + // the block of B that we are also updating (X == _B). + if (mm > 0) { + // Read the block of B we are updating and transpose into shared + // memory using b. + #pragma unroll + for (int j = 0; j < nb; j += by) + b[threadIdx.x][j + threadIdx.y] = X[j * ldb]; + + __syncthreads(); + + // Place it into X as alpha * B + x[0] = alpha * b[0][ti]; x[1] = alpha * b[1][ti]; x[2] = alpha * b[2][ti]; x[3] = alpha * b[3][ti]; + + // Read the current block of A + if (transA == CBlasNoTrans) + a[threadIdx.y][threadIdx.x] = A[0]; + else + a[threadIdx.x][threadIdx.y] = A[0]; + + __syncthreads(); + + // Update X from top to bottom + switch (mm - 1) { + case 3: if (diag == CBlasNonUnit) x[3] /= a[3][3]; daxpy(3, x[3], a[3], x); + case 2: if (diag == CBlasNonUnit) x[2] /= a[2][2]; daxpy(2, x[2], a[2], x); + case 1: if (diag == CBlasNonUnit) x[1] /= a[1][1]; daxpy(1, x[1], a[1], x); + case 0: if (diag == CBlasNonUnit) x[0] /= a[0][0]; + } + + __syncthreads(); + + // Write X out transposing it back via shared memory using b. + b[0][ti] = x[0]; b[1][ti] = x[1]; b[2][ti] = x[2]; b[3][ti] = x[3]; + + __syncthreads(); + + if (threadIdx.x < mm) { + if (0 * by < n) { X[0 * by * ldb] = b[threadIdx.x][0 * by + threadIdx.y]; + if (1 * by < n) { X[1 * by * ldb] = b[threadIdx.x][1 * by + threadIdx.y]; + if (2 * by < n) { X[2 * by * ldb] = b[threadIdx.x][2 * by + threadIdx.y]; + if (3 * by < n) { X[3 * by * ldb] = b[threadIdx.x][3 * by + threadIdx.y]; }}}} + } + } + + // Move up to the next block + A -= mb * lda + mb; + X -= mb; + i -= mb; + + while (i >= 0) { + + __syncthreads(); + + // Read the current block of X + #pragma unroll + for (int j = 0; j < nb; j += by) + b[threadIdx.x][j + threadIdx.y] = X[j * ldb]; + + __syncthreads(); + + x[0] = alpha * b[0][ti]; x[1] = alpha * b[1][ti]; x[2] = alpha * b[2][ti]; x[3] = alpha * b[3][ti]; + + __syncthreads(); + + // Start at the block one beyond X and move to the bottom + const double * _A = A + ((transA == CBlasNoTrans) ? mb * lda : mb); + const double * _B = X + mb; + int k = m - i - mb; + while (k > 0) { + + // Read A and B into shared memory + if (transA == CBlasNoTrans) + a[threadIdx.y][threadIdx.x] = _A[0]; + else + a[threadIdx.x][threadIdx.y] = _A[0]; + + #pragma unroll + for (int j = 0; j < nb; j += by) + b[threadIdx.x][j + threadIdx.y] = _B[j * ldb]; + + __syncthreads(); + + if (k < mb) break; + + // Update X in registers + #pragma unroll + for (int l = 0; l < mb; l++) + daxpy(b[l][ti], a[l], x); + + __syncthreads(); + + // Move to the next blocks of A and B + _A += (transA == CBlasNoTrans) ? 
mb * lda : mb; + _B += mb; + k -= mb; + } + + // Process odd elements of A and B + for (int l = 0; l < k; l++) + daxpy(b[l][ti], a[l], x); + + __syncthreads(); + + // Read the block of A that matches the block of B which is in registers + if (transA == CBlasNoTrans) + a[threadIdx.y][threadIdx.x] = A[0]; + else + a[threadIdx.x][threadIdx.y] = A[0]; + + __syncthreads(); + + // Update X unrolled (reverse loop) + if (diag == CBlasNonUnit) x[3] /= a[3][3]; daxpy(3, x[3], a[3], x); + if (diag == CBlasNonUnit) x[2] /= a[2][2]; daxpy(2, x[2], a[2], x); + if (diag == CBlasNonUnit) x[1] /= a[1][1]; daxpy(1, x[1], a[1], x); + if (diag == CBlasNonUnit) x[0] /= a[0][0]; + + // Write X out transposing it back via shared memory using b + b[0][ti] = x[0]; b[1][ti] = x[1]; b[2][ti] = x[2]; b[3][ti] = x[3]; + + __syncthreads(); + + #pragma unroll + for (int j = 0; j < nb; j += by) + X[j * ldb] = b[threadIdx.x][j + threadIdx.y]; + + // Move up to the next blocks of A and B (through X) + A -= mb * lda + mb; + X -= mb; + i -= mb; + } + } + else { /* (uplo == CBlasLower && transA == CBlasNoTrans) || (uplo == CBlasUpper && transA != CBlasNoTrans) */ + // For this case we start at the top of B and work downwards + double * X = B; + int i = 0; + + while (m > 0) { + // Read the current block of X + #pragma unroll + for (int j = 0; j < nb; j += by) + b[threadIdx.x][j + threadIdx.y] = X[j * ldb]; + + __syncthreads(); + + x[0] = alpha * b[0][ti]; x[1] = alpha * b[1][ti]; x[2] = alpha * b[2][ti]; x[3] = alpha * b[3][ti]; + + __syncthreads(); + + // Start at the top of B and move down to X + const double * _A = A; + const double * _B = B; + int k = i; + while (k > 0) { + + // Read A and B into shared memory + if (transA == CBlasNoTrans) + a[threadIdx.y][threadIdx.x] = _A[0]; + else + a[threadIdx.x][threadIdx.y] = _A[0]; + + #pragma unroll + for (int j = 0; j < nb; j += by) + b[threadIdx.x][j + threadIdx.y] = _B[j * ldb]; + + __syncthreads(); + + // Update X in registers + #pragma unroll + for (int l = 0; l < mb; l++) + daxpy(b[l][ti], a[l], x); + + __syncthreads(); + + // Move to the next blocks of A and B + _A += (transA == CBlasNoTrans) ? mb * lda : mb; + _B += mb; + k -= mb; + } + + // Process odd elements of A and B + for (int l = 0; l < k; l++) + daxpy(b[l][ti], a[l], x); + + __syncthreads(); + + // Read the block of A that matches the block of B which is in registers + if (transA == CBlasNoTrans) + a[threadIdx.y][threadIdx.x] = _A[0]; + else + a[threadIdx.x][threadIdx.y] = _A[0]; + + __syncthreads(); + + if (m < mb) break; + + // Update X unrolled (forward loop) + if (diag == CBlasNonUnit) x[0] /= a[0][0]; daxpy(3, x[0], &a[0][1], &x[1]); + if (diag == CBlasNonUnit) x[1] /= a[1][1]; daxpy(2, x[1], &a[1][2], &x[2]); + if (diag == CBlasNonUnit) x[2] /= a[2][2]; daxpy(1, x[2], &a[2][3], &x[3]); + if (diag == CBlasNonUnit) x[3] /= a[3][3]; + + // Write X out transposing it back via shared memory using b + b[0][ti] = x[0]; b[1][ti] = x[1]; b[2][ti] = x[2]; b[3][ti] = x[3]; + + __syncthreads(); + + #pragma unroll + for (int j = 0; j < nb; j += by) + X[j * ldb] = b[threadIdx.x][j + threadIdx.y]; + + __syncthreads(); + + // Move up to the next blocks of A and B (through X) + A += (transA == CBlasNoTrans) ? mb : mb * lda; + X += mb; + m -= mb; + i += mb; + } + + // Handle the trailing elements last, if any. 
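+      // Here m is the left-over row count (0 <= m < mb): the nested ifs below
+      // forward-solve only those m rows, and the guarded writes afterwards
+      // store just the first m rows of X back to global memory.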
+ if (m > 0) { if (diag == CBlasNonUnit) x[0] /= a[0][0]; + if (m > 1) { daxpy(m - 1, x[0], &a[0][1], &x[1]); if (diag == CBlasNonUnit) x[1] /= a[1][1]; + if (m > 2) { daxpy(m - 2, x[1], &a[1][2], &x[2]); if (diag == CBlasNonUnit) x[2] /= a[2][2]; + if (m > 3) { daxpy(m - 3, x[2], &a[2][3], &x[3]); if (diag == CBlasNonUnit) x[3] /= a[3][3]; }}}} + + __syncthreads(); + + // Write X out transposing it back via shared memory using b + b[0][ti] = x[0]; b[1][ti] = x[1]; b[2][ti] = x[2]; b[3][ti] = x[3]; + + __syncthreads(); + + if (threadIdx.x < m) { + X[0] = b[threadIdx.x][by * 0 + threadIdx.y]; if (by * 1 >= n) return; X += by * ldb; + X[0] = b[threadIdx.x][by * 1 + threadIdx.y]; if (by * 2 >= n) return; X += by * ldb; + X[0] = b[threadIdx.x][by * 2 + threadIdx.y]; if (by * 3 >= n) return; X += by * ldb; + X[0] = b[threadIdx.x][by * 3 + threadIdx.y]; + } + } + } + else { + // For CBlasRight each thread updates a row. This means that B can be read + // efficiently straight from global memory. +// typedef char _x[(nb == 4) ? 1 : -1]; +// typedef char _y[(mb == bx * by) ? 1 : -1]; +// typedef char _z[(by == nb) ? 1 : -1]; + + // Each thread computes a row of B 4 elements at a time + __shared__ double a[nb][(transA == CBlasNoTrans) ? nb + 1 : nb]; + double x[4]; + + const int ti = threadIdx.y * bx + threadIdx.x; + + // Compute the starting points in A and B for each thread + A += threadIdx.y * lda + threadIdx.x; + B += blockIdx.x * mb + ti; + m -= blockIdx.x * mb; + + // There are 2 common cases for each of CBlasLeft and CBlasRight + if ((uplo == CBlasUpper && transA == CBlasNoTrans) || + (uplo == CBlasLower && transA != CBlasNoTrans)) { + // For this case start on the left and work right + double * X = B; + int j = 0; + + while (n > 0) { + // Read the current block of X + x[0] = alpha * X[0 * ldb]; x[1] = alpha * X[1 * ldb]; x[2] = alpha * X[2 * ldb]; x[3] = alpha * X[3 * ldb]; + + // Start at the left of B and move right to X + const double * _A = A; + const double * _B = B; + int k = j; + while (k > 0) { + + // Read A into shared memory + if (transA == CBlasNoTrans) + a[threadIdx.x][threadIdx.y] = _A[0]; + else + a[threadIdx.y][threadIdx.x] = _A[0]; + + __syncthreads(); + + // Update X reading B straight from global memory + #pragma unroll + for (int l = 0; l < nb; l++) + daxpy(_B[l * ldb], a[l], x); + + __syncthreads(); + + // Move to the next blocks of A and B + _A += (transA == CBlasNoTrans) ? nb : nb * lda; + _B += nb * ldb; + k -= nb; + } + + // Read the block of A that matches the block of B which is in registers + if (transA == CBlasNoTrans) + a[threadIdx.x][threadIdx.y] = _A[0]; + else + a[threadIdx.y][threadIdx.x] = _A[0]; + + __syncthreads(); + + if (n < nb) break; + + // Update X unrolled (forward loop) + if (diag == CBlasNonUnit) x[0] /= a[0][0]; + daxpy(3, x[0], &a[0][1], &x[1]); if (diag == CBlasNonUnit) x[1] /= a[1][1]; + daxpy(2, x[1], &a[1][2], &x[2]); if (diag == CBlasNonUnit) x[2] /= a[2][2]; + daxpy(1, x[2], &a[2][3], &x[3]); if (diag == CBlasNonUnit) x[3] /= a[3][3]; + + // Write X + if (ti < m) { + X[0 * ldb] = x[0]; X[1 * ldb] = x[1]; X[2 * ldb] = x[2]; X[3 * ldb] = x[3]; + } + + __syncthreads(); + + // Move right to the next blocks of A and B (through X) + A += (transA == CBlasNoTrans) ? 
nb * lda : nb; + X += nb * ldb; + n -= nb; + j += nb; + } + + // Update X unrolled (forward loop) + if (n > 0) { + if (diag == CBlasNonUnit) x[0] /= a[0][0]; + if (n > 1) { daxpy(n - 1, x[0], &a[0][1], &x[1]); if (diag == CBlasNonUnit) x[1] /= a[1][1]; + if (n > 2) { daxpy(n - 2, x[1], &a[1][2], &x[2]); if (diag == CBlasNonUnit) x[2] /= a[2][2]; + if (n > 3) { daxpy(n - 3, x[2], &a[2][3], &x[3]); if (diag == CBlasNonUnit) x[3] /= a[3][3]; }}} + + // Write X + if (ti < m) { + X[0] = x[0]; if (1 >= n) return; X += ldb; X[0] = x[1]; if (2 >= n) return; X += ldb; + X[0] = x[2]; if (3 >= n) return; X += ldb; X[0] = x[3]; + } + } + } + else { /* (uplo == CBlasLower && transA == CBlasNoTrans) || (uplo == CBlasUpper && transA != CBlasNoTrans) */ + // For this case start on the right and work left + const int nn = n & (nb - 1); + int j = n - nn; + + A += j * lda + j; + double * X = B + j * ldb; + + // Handle the trailing elements first, if any. This only requires reading + // the block of B that we are also updating (X == _B). + if (nn > 0) { + // Read the block of B we are updating + x[0] = alpha * X[0 * ldb]; x[1] = alpha * X[1 * ldb]; x[2] = alpha * X[2 * ldb]; x[3] = alpha * X[3 * ldb]; + + // Read the current block of A + if (transA == CBlasNoTrans) + a[threadIdx.x][threadIdx.y] = A[0]; + else + a[threadIdx.y][threadIdx.x] = A[0]; + + __syncthreads(); + + // Update X from right to left + switch (nn - 1) { + case 3: if (diag == CBlasNonUnit) x[3] /= a[3][3]; daxpy(3, x[3], a[3], x); + case 2: if (diag == CBlasNonUnit) x[2] /= a[2][2]; daxpy(2, x[2], a[2], x); + case 1: if (diag == CBlasNonUnit) x[1] /= a[1][1]; daxpy(1, x[1], a[1], x); + case 0: if (diag == CBlasNonUnit) x[0] /= a[0][0]; + } + + // Write X + if (ti < m) { + X[0 * ldb] = x[0]; if (1 < nn) { + X[1 * ldb] = x[1]; if (2 < nn) { + X[2 * ldb] = x[2]; if (3 < nn) { + X[3 * ldb] = x[3]; }}} + } + } + + // Move left to the next block + A -= nb * lda + nb; + X -= nb * ldb; + j -= nb; + + while (j >= 0) { + // Read the current block of X and multiply by alpha + x[0] = alpha * X[0 * ldb]; x[1] = alpha * X[1 * ldb]; x[2] = alpha * X[2 * ldb]; x[3] = alpha * X[3 * ldb]; + + __syncthreads(); + + // Start one block beyond X and move to the right + const double * _A = A + ((transA == CBlasNoTrans) ? nb : nb * lda); + const double * _B = X + nb * ldb; + int k = n - j - nb; + while (k > 0) { + + // Read the current block of A + if (transA == CBlasNoTrans) + a[threadIdx.x][threadIdx.y] = _A[0]; + else + a[threadIdx.y][threadIdx.x] = _A[0]; + + __syncthreads(); + + if (k < nb) break; + + // Update X in registers + #pragma unroll + for (int l = 0; l < nb; l++) + daxpy(_B[l * ldb], a[l], x); + + __syncthreads(); + + // Move to the next blocks of A and B + _A += (transA == CBlasNoTrans) ? 
nb : nb * lda; + _B += nb * ldb; + k -= nb; + } + + // Process odd elements of A and B + for (int l = 0; l < k; l++) + daxpy(_B[l * ldb], a[l], x); + + __syncthreads(); + + // Read the block of A that matches the block of B which is in registers + if (transA == CBlasNoTrans) + a[threadIdx.x][threadIdx.y] = A[0]; + else + a[threadIdx.y][threadIdx.x] = A[0]; + + __syncthreads(); + + // Update X from right to left + if (diag == CBlasNonUnit) x[3] /= a[3][3]; daxpy(3, x[3], a[3], x); + if (diag == CBlasNonUnit) x[2] /= a[2][2]; daxpy(2, x[2], a[2], x); + if (diag == CBlasNonUnit) x[1] /= a[1][1]; daxpy(1, x[1], a[1], x); + if (diag == CBlasNonUnit) x[0] /= a[0][0]; + + // Write X + if (ti < m) { + X[0 * ldb] = x[0]; X[1 * ldb] = x[1]; X[2 * ldb] = x[2]; X[3 * ldb] = x[3]; + } + + // Move left to the next blocks of A and B (through X) + A -= nb * lda + nb; + X -= nb * ldb; + j -= nb; + } + } + } +} +#endif + +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); +template __global__ void dtrsm(const double * __restrict__, double * __restrict__, double, int, int, int, int); diff --git a/cuda_code/dynamics_stream_managed_test.cu b/cuda_code/dynamics_stream_managed_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..baabfe79c424c8b2cae1779df68b100cadbc781c --- /dev/null +++ b/cuda_code/dynamics_stream_managed_test.cu @@ -0,0 +1,81 @@ +#include +#include +#include + +namespace mock { + const int S_DIM = 4; + const int C_DIM = 2; +} +// Create a mock parameter object +struct MockParams { + int x = 5; + int y = 10; + MockParams() = default; + ~MockParams() = default; +}; + +// Create a mock object that inherits from dynamics +class MockDynamics : public GATE_internal::Dynamics { +public: + MockDynamics(cudaStream_t stream = nullptr) : + GATE_internal::Dynamics(stream) { + this->params_ = MockParams(); + } + + MockDynamics(std::array ctrl_ranges, cudaStream_t stream = 
nullptr) : + GATE_internal::Dynamics(ctrl_ranges, stream) { + this->params_ = MockParams(); + } + + ~MockDynamics() = default; +}; + + +TEST(DynamicsStreamManaged, Construction) { + auto A = MockDynamics(); +} + +TEST(DynamicsStreamManaged, GetControlRanges_Max) { + auto A = MockDynamics(); + auto ctrl_range = A.getControlRanges(); + for (int i = 0; i < mock::C_DIM; ++i) { + ASSERT_EQ(-FLT_MAX, ctrl_range[i].x); + ASSERT_EQ(FLT_MAX, ctrl_range[i].y); + } +} + +TEST(DynamicsStreamManaged, GetControlRanges_Fixed) { + std::array input_ranges; + input_ranges[0].x = -3; + input_ranges[0].y = 2; + input_ranges[1].x = 1; + input_ranges[1].y = 21; + auto A = MockDynamics(input_ranges); + auto ctrl_range = A.getControlRanges(); + for (int i = 0; i < mock::C_DIM; ++i) { + ASSERT_EQ(input_ranges[i].x, ctrl_range[i].x); + ASSERT_EQ(input_ranges[i].y, ctrl_range[i].y); + } +} + +TEST(DynamicsStreamManaged, GPUSetupFlags) { + auto* A = new MockDynamics(); + ASSERT_FALSE(A->getGPUMemStatus()); + A->GPUSetup(); + ASSERT_TRUE(A->getGPUMemStatus()); +} + +TEST(DynamicsStreamManaged, CPUParams) { + auto A = MockDynamics(); + auto default_params = A.getParams(); + ASSERT_EQ(default_params.x, 5); +} + +TEST(DynamicsStreamManaged, SetParams) { + auto A = MockDynamics(); + auto new_params = MockParams(); + new_params.x = 7; + A.setParams(new_params); + auto set_params = A.getParams(); + ASSERT_EQ(new_params.x, set_params.x); +} \ No newline at end of file diff --git a/cuda_code/dznrm2.cu b/cuda_code/dznrm2.cu new file mode 100644 index 0000000000000000000000000000000000000000..d090db692d163682b9d42e8958432a0c31ba7c4d --- /dev/null +++ b/cuda_code/dznrm2.cu @@ -0,0 +1,279 @@ +/* + -- MAGMA (version 2.2.0) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. 
of Colorado, Denver + @date November 2016 + + @precisions normal z -> s d c + +*/ +#include "magma_internal.h" +#include "commonblas_z.h" +#include "magma_templates.h" + +// 512 is maximum number of threads for CUDA capability 1.x +#define BLOCK_SIZE 512 +#define BLOCK_SIZEx 32 +#define BLOCK_SIZEy 16 + +#define COMPLEX + + +/******************************************************************************/ +__global__ void +magmablas_dznrm2_kernel( + int m, + magmaDoubleComplex *dA, int ldda, + double *dxnorm ) +{ + const int tx = threadIdx.x; + magmaDoubleComplex *dx = dA + blockIdx.x * ldda; + + __shared__ double sum[ BLOCK_SIZE ]; + + // get norm of dx + double lsum = 0; + for( int j = tx; j < m; j += BLOCK_SIZE ) { + #ifdef REAL + double re = dx[j]; + lsum += re*re; + #else + double re = MAGMA_Z_REAL( dx[j] ); + double im = MAGMA_Z_IMAG( dx[j] ); + lsum += re*re + im*im; + #endif + } + sum[tx] = lsum; + magma_sum_reduce< BLOCK_SIZE >( tx, sum ); + + if (tx == 0) + dxnorm[blockIdx.x] = sqrt(sum[0]); +} + + +/******************************************************************************/ +__global__ void +magmablas_dznrm2_check_kernel( + int m, + magmaDoubleComplex *dA, int ldda, + double *dxnorm, + double *lsticc ) +{ + const int tx = threadIdx.x; + magmaDoubleComplex *dx = dA + blockIdx.x * ldda; + + __shared__ double sum[ BLOCK_SIZE ]; + + // get norm of dx only if lsticc[blockIdx+1] != 0 + if ( lsticc[blockIdx.x + 1] == 0 ) + return; + + double lsum = 0; + for( int j = tx; j < m; j += BLOCK_SIZE ) { + #ifdef REAL + double re = dx[j]; + lsum += re*re; + #else + double re = MAGMA_Z_REAL( dx[j] ); + double im = MAGMA_Z_IMAG( dx[j] ); + lsum += re*re + im*im; + #endif + } + sum[tx] = lsum; + magma_sum_reduce< BLOCK_SIZE >( tx, sum ); + + if (tx == 0) + dxnorm[blockIdx.x] = sqrt(sum[0]); +} + + +/******************************************************************************/ +extern "C" void +magmablas_dznrm2_check( + magma_int_t m, magma_int_t n, + magmaDoubleComplex_ptr dA, magma_int_t ldda, + magmaDouble_ptr dxnorm, + magmaDouble_ptr dlsticc, + magma_queue_t queue ) +{ + dim3 threads( BLOCK_SIZE ); + dim3 blocks( n ); + magmablas_dznrm2_check_kernel + <<< blocks, threads, 0, queue->cuda_stream() >>> + ( m, dA, ldda, dxnorm, dlsticc ); +} + + +/******************************************************************************/ +__global__ void +magmablas_dznrm2_smkernel( + int m, int n, + magmaDoubleComplex *dA, int ldda, + double *dxnorm ) +{ + const int tx = threadIdx.x; + const int ty = threadIdx.y; + __shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1]; + + for( int k = ty; k < n; k += BLOCK_SIZEy ) { + magmaDoubleComplex *dx = dA + k * ldda; + + // get norm of dx + double lsum = 0; + for( int j = tx; j < m; j += BLOCK_SIZEx ) { + #ifdef REAL + double re = dx[j]; + lsum += re*re; + #else + double re = MAGMA_Z_REAL( dx[j] ); + double im = MAGMA_Z_IMAG( dx[j] ); + lsum += re*re + im*im; + #endif + } + sum[tx][ty] = lsum; + magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( tx, ty, sum ); + + if (tx == 0) + dxnorm[k] = sqrt(sum[0][ty]); + __syncthreads(); + } +} + + +/******************************************************************************/ +/* + Compute the dznrm2 of each column of m-by-n matrix dA. + The resulting norms are written in the dxnorm array. + This routine uses only one SM (block). 
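+    A single BLOCK_SIZEx x BLOCK_SIZEy (32 x 16) thread block is used:
+    threadIdx.y strides over the columns while threadIdx.x performs the
+    per-column reduction.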
+*/ +extern "C" void +magmablas_dznrm2_sm( + magma_int_t m, magma_int_t n, + magmaDoubleComplex_ptr dA, magma_int_t ldda, + magmaDouble_ptr dxnorm, + magma_queue_t queue ) +{ + dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy ); + dim3 blocks( 1, 1 ); + magmablas_dznrm2_smkernel + <<< blocks, threads, 0, queue->cuda_stream() >>> + ( m, n, dA, ldda, dxnorm ); +} + + +/******************************************************************************/ +__global__ void +magma_dznrm2_adjust_kernel(double *xnorm, magmaDoubleComplex *c) +{ + const int tx = threadIdx.x; + + __shared__ double sum[ BLOCK_SIZE ]; + double temp; + + temp = MAGMA_Z_ABS( c[tx] ) / xnorm[0]; + sum[tx] = -temp * temp; + magma_sum_reduce_n( blockDim.x, tx, sum ); + + __syncthreads(); + if (tx == 0) + xnorm[0] = xnorm[0] * sqrt(1+sum[0]); +} + + +/******************************************************************************/ +/* + Adjust the norm of c to give the norm of c[k+1:], assuming that + c was changed with orthogonal transformations. +*/ +extern "C" void +magmablas_dznrm2_adjust( + magma_int_t k, + magmaDouble_ptr dxnorm, + magmaDoubleComplex_ptr dc, + magma_queue_t queue ) +{ + dim3 threads( k ); + dim3 blocks( 1 ); + magma_dznrm2_adjust_kernel + <<< blocks, threads, 0, queue->cuda_stream() >>> + (dxnorm, dc); +} + + +/******************************************************************************/ + +#define BS 256 + +__global__ void +magma_dznrm2_row_check_adjust_kernel( + int n, double tol, double *xnorm, double *xnorm2, + magmaDoubleComplex *C, int ldc, double *lsticc) +{ + const int tx = threadIdx.x + blockIdx.x*BS; + lsticc[tx+1] = 0; + + if (tx < n) { + double temp = MAGMA_Z_ABS( C[tx*ldc] ) / xnorm[tx]; + temp = max( 0.0, ((1.0 + temp) * (1.0 - temp)) ); + + double temp2 = xnorm[tx] / xnorm2[tx]; + temp2 = temp * (temp2 * temp2); + + if (temp2 <= tol) { + lsticc[tx+1] = 1; + } else { + xnorm[tx] *= sqrt(temp); + } + } + if (tx == 0) + lsticc[0] = 0; + magma_sum_reduce_n( blockDim.x, tx, lsticc ); +} + + +/******************************************************************************/ +/* + Adjust the norm of C[,1:k] to give the norm of C[k+1:,1:k], assuming that + C was changed with orthogonal transformations. + It also do checks for QP3 +*/ +extern "C" void +magmablas_dznrm2_row_check_adjust( + magma_int_t k, double tol, + magmaDouble_ptr dxnorm, + magmaDouble_ptr dxnorm2, + magmaDoubleComplex_ptr dC, magma_int_t lddc, + magmaDouble_ptr dlsticc, + magma_queue_t queue ) +{ + dim3 threads( BS ); + dim3 blocks( magma_ceildiv( k, BS ) ); + magma_dznrm2_row_check_adjust_kernel + <<< blocks, threads, 0, queue->cuda_stream() >>> + (k, tol, dxnorm, dxnorm2, dC, lddc, dlsticc); +} + + +/******************************************************************************/ +/* + Compute the dznrm2 of each column of m-by-n matrix dA. + The resulting norms are written in the dxnorm array. + The computation can be done using n blocks (default) or on one SM (commented). 
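+    The default path launches one BLOCK_SIZE-thread block per column
+    (dim3 blocks(n)), writing dxnorm[j] = ||dA(:,j)||_2 for j = 0..n-1.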
+*/ +extern "C" void +magmablas_dznrm2_cols( + magma_int_t m, magma_int_t n, + magmaDoubleComplex_ptr dA, magma_int_t ldda, + magmaDouble_ptr dxnorm, + magma_queue_t queue ) +{ + dim3 threads( BLOCK_SIZE ); + dim3 blocks( n ); + magmablas_dznrm2_kernel + <<< blocks, threads, 0, queue->cuda_stream() >>> + ( m, dA, ldda, dxnorm ); + + // The following would do the computation on one SM + // magmablas_dznrm2_sm( m, n, dA, ldda, dxnorm, queue ); +} diff --git a/cuda_code/elementwise_maximum_minimum_kernel_2.cu b/cuda_code/elementwise_maximum_minimum_kernel_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..3f2d404226cbf358ef1b6b265ebb7e2775d03b6b --- /dev/null +++ b/cuda_code/elementwise_maximum_minimum_kernel_2.cu @@ -0,0 +1,56 @@ +/* +Copyright 2020 The OneFlow Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +#ifdef WITH_CUDA +#include "oneflow/core/cuda/elementwise.cuh" +#include "oneflow/user/kernels/elementwise_maximum_minimum_kernel.h" +#include "oneflow/core/ep/cuda/cuda_stream.h" + +namespace oneflow { + +namespace { +template class Opt, typename T> +__global__ void ElementwiseXimumGradGpuKernel(int64_t elem_cnt, const T* dz, const T* x, const T* y, + T* dx, T* dy) { + XPU_1D_KERNEL_LOOP(idx, elem_cnt) { + Opt()(dz[idx], x[idx], y[idx], dx ? &dx[idx] : nullptr, dy ? &dy[idx] : nullptr); + } +} + +template class Opt, typename T> +struct ElemwiseXimumGradFunctor final { + void operator()(ep::Stream* stream, int64_t elem_cnt, const T* dz, const T* x, const T* y, T* dx, + T* dy) { + ElementwiseXimumGradGpuKernel + <<As()->cuda_stream()>>>(elem_cnt, dz, x, y, dx, dy); + } +}; + +template class Opt, typename T> +struct ElemwiseXimumFunctor final { + void operator()(ep::Stream* stream, int64_t elem_cnt, T* z, const T* x, const T* y) { + OF_CUDA_CHECK(cuda::elementwise::Binary(Opt(), elem_cnt, z, x, y, + stream->As()->cuda_stream())); + } +}; +} // namespace + +OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_MAXIMUM_KERNELS, (DeviceType::kGPU), + ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ) +OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_MINIMUM_KERNELS, (DeviceType::kGPU), + ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ) +} // namespace oneflow +#endif // WITH_CUDA diff --git a/cuda_code/elementwise_metric_21.cu b/cuda_code/elementwise_metric_21.cu new file mode 100644 index 0000000000000000000000000000000000000000..29130c89e4f0cdb71439f8be5e2afc1d083fb89e --- /dev/null +++ b/cuda_code/elementwise_metric_21.cu @@ -0,0 +1,435 @@ +/*! + * Copyright 2015-2019 by Contributors + * \file elementwise_metric.cc + * \brief evaluation metrics for elementwise binary or regression. + * \author Kailong Chen, Tianqi Chen + * + * The expressions like wsum == 0 ? esum : esum / wsum is used to handle empty dataset. 
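+ * Each EvalRow policy returns the per-example loss; Reduce() accumulates
+ * weight * EvalRow(label, pred) into esum and the weights into wsum (on CPU
+ * threads, or via thrust::transform_reduce on the GPU), and
+ * GetFinal(esum, wsum) maps those two sums to the reported metric value.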
+ */ +#include +#include +#include +#include + +#include "metric_common.h" +#include "../common/math.h" +#include "../common/common.h" +#include "../common/threading_utils.h" + +#if defined(XGBOOST_USE_CUDA) +#include // thrust::cuda::par +#include // thrust::plus<> +#include +#include + +#include "../common/device_helpers.cuh" +#endif // XGBOOST_USE_CUDA + +namespace xgboost { +namespace metric { +// tag the this file, used by force static link later. +DMLC_REGISTRY_FILE_TAG(elementwise_metric); + +template +class ElementWiseMetricsReduction { + public: + explicit ElementWiseMetricsReduction(EvalRow policy) : policy_(std::move(policy)) {} + + PackedReduceResult + CpuReduceMetrics(const HostDeviceVector &weights, + const HostDeviceVector &labels, + const HostDeviceVector &preds, + int32_t n_threads) const { + size_t ndata = labels.Size(); + + const auto& h_labels = labels.HostVector(); + const auto& h_weights = weights.HostVector(); + const auto& h_preds = preds.HostVector(); + + std::vector score_tloc(n_threads, 0.0); + std::vector weight_tloc(n_threads, 0.0); + + common::ParallelFor(ndata, n_threads, [&](size_t i) { + float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f; + auto t_idx = omp_get_thread_num(); + score_tloc[t_idx] += policy_.EvalRow(h_labels[i], h_preds[i]) * wt; + weight_tloc[t_idx] += wt; + }); + double residue_sum = std::accumulate(score_tloc.cbegin(), score_tloc.cend(), 0.0); + double weights_sum = std::accumulate(weight_tloc.cbegin(), weight_tloc.cend(), 0.0); + + PackedReduceResult res { residue_sum, weights_sum }; + return res; + } + +#if defined(XGBOOST_USE_CUDA) + + PackedReduceResult DeviceReduceMetrics( + const HostDeviceVector& weights, + const HostDeviceVector& labels, + const HostDeviceVector& preds) { + size_t n_data = preds.Size(); + + thrust::counting_iterator begin(0); + thrust::counting_iterator end = begin + n_data; + + auto s_label = labels.DeviceSpan(); + auto s_preds = preds.DeviceSpan(); + auto s_weights = weights.DeviceSpan(); + + bool const is_null_weight = weights.Size() == 0; + + auto d_policy = policy_; + + dh::XGBCachingDeviceAllocator alloc; + PackedReduceResult result = thrust::transform_reduce( + thrust::cuda::par(alloc), + begin, end, + [=] XGBOOST_DEVICE(size_t idx) { + bst_float weight = is_null_weight ? 
1.0f : s_weights[idx]; + + bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]); + residue *= weight; + return PackedReduceResult{ residue, weight }; + }, + PackedReduceResult(), + thrust::plus()); + + return result; + } + +#endif // XGBOOST_USE_CUDA + + PackedReduceResult Reduce( + const GenericParameter &ctx, + const HostDeviceVector& weights, + const HostDeviceVector& labels, + const HostDeviceVector& preds) { + PackedReduceResult result; + + if (ctx.gpu_id < 0) { + auto n_threads = ctx.Threads(); + result = CpuReduceMetrics(weights, labels, preds, n_threads); + } +#if defined(XGBOOST_USE_CUDA) + else { // NOLINT + device_ = ctx.gpu_id; + preds.SetDevice(device_); + labels.SetDevice(device_); + weights.SetDevice(device_); + + dh::safe_cuda(cudaSetDevice(device_)); + result = DeviceReduceMetrics(weights, labels, preds); + } +#endif // defined(XGBOOST_USE_CUDA) + return result; + } + + private: + EvalRow policy_; +#if defined(XGBOOST_USE_CUDA) + int device_{-1}; +#endif // defined(XGBOOST_USE_CUDA) +}; + +struct EvalRowRMSE { + char const *Name() const { + return "rmse"; + } + + XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { + bst_float diff = label - pred; + return diff * diff; + } + static bst_float GetFinal(bst_float esum, bst_float wsum) { + return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum); + } +}; + +struct EvalRowRMSLE { + char const* Name() const { + return "rmsle"; + } + + XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { + bst_float diff = std::log1p(label) - std::log1p(pred); + return diff * diff; + } + static bst_float GetFinal(bst_float esum, bst_float wsum) { + return wsum == 0 ? std::sqrt(esum) : std::sqrt(esum / wsum); + } +}; + +struct EvalRowMAE { + const char *Name() const { + return "mae"; + } + + XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { + return std::abs(label - pred); + } + static bst_float GetFinal(bst_float esum, bst_float wsum) { + return wsum == 0 ? esum : esum / wsum; + } +}; + +struct EvalRowMAPE { + const char *Name() const { + return "mape"; + } + XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { + return std::abs((label - pred) / label); + } + static bst_float GetFinal(bst_float esum, bst_float wsum) { + return wsum == 0 ? esum : esum / wsum; + } +}; + +struct EvalRowLogLoss { + const char *Name() const { + return "logloss"; + } + + XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { + const bst_float eps = 1e-16f; + const bst_float pneg = 1.0f - py; + if (py < eps) { + return -y * std::log(eps) - (1.0f - y) * std::log(1.0f - eps); + } else if (pneg < eps) { + return -y * std::log(1.0f - eps) - (1.0f - y) * std::log(eps); + } else { + return -y * std::log(py) - (1.0f - y) * std::log(pneg); + } + } + + static bst_float GetFinal(bst_float esum, bst_float wsum) { + return wsum == 0 ? esum : esum / wsum; + } +}; + +struct EvalRowMPHE { + char const *Name() const { + return "mphe"; + } + XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const { + bst_float diff = label - pred; + return std::sqrt( 1 + diff * diff) - 1; + } + static bst_float GetFinal(bst_float esum, bst_float wsum) { + return wsum == 0 ? 
esum : esum / wsum; + } +}; + +struct EvalError { + explicit EvalError(const char* param) { + if (param != nullptr) { + CHECK_EQ(sscanf(param, "%f", &threshold_), 1) + << "unable to parse the threshold value for the error metric"; + has_param_ = true; + } else { + threshold_ = 0.5f; + has_param_ = false; + } + } + const char *Name() const { + static std::string name; + if (has_param_) { + std::ostringstream os; + os << "error"; + if (threshold_ != 0.5f) os << '@' << threshold_; + name = os.str(); + return name.c_str(); + } else { + return "error"; + } + } + + XGBOOST_DEVICE bst_float EvalRow( + bst_float label, bst_float pred) const { + // assume label is in [0,1] + return pred > threshold_ ? 1.0f - label : label; + } + + static bst_float GetFinal(bst_float esum, bst_float wsum) { + return wsum == 0 ? esum : esum / wsum; + } + + private: + bst_float threshold_; + bool has_param_; +}; + +struct EvalPoissonNegLogLik { + const char *Name() const { + return "poisson-nloglik"; + } + + XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { + const bst_float eps = 1e-16f; + if (py < eps) py = eps; + return common::LogGamma(y + 1.0f) + py - std::log(py) * y; + } + + static bst_float GetFinal(bst_float esum, bst_float wsum) { + return wsum == 0 ? esum : esum / wsum; + } +}; + +/** + * Gamma deviance + * + * Expected input: + * label >= 0 + * predt >= 0 + */ +struct EvalGammaDeviance { + const char *Name() const { return "gamma-deviance"; } + + XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float predt) const { + predt += kRtEps; + label += kRtEps; + return std::log(predt / label) + label / predt - 1; + } + + static bst_float GetFinal(bst_float esum, bst_float wsum) { + if (wsum <= 0) { + wsum = kRtEps; + } + return 2 * esum / wsum; + } +}; + +struct EvalGammaNLogLik { + static const char *Name() { + return "gamma-nloglik"; + } + + XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const { + py = std::max(py, 1e-6f); + // hardcoded dispersion. + float constexpr kPsi = 1.0; + bst_float theta = -1. / py; + bst_float a = kPsi; + float b = -std::log(-theta); + // c = 1. / kPsi^2 * std::log(y/kPsi) - std::log(y) - common::LogGamma(1. / kPsi); + // = 1.0f * std::log(y) - std::log(y) - 0 = 0 + float c = 0; + // general form for exponential family. + return -((y * theta - b) / a + c); + } + static bst_float GetFinal(bst_float esum, bst_float wsum) { + return wsum == 0 ? esum : esum / wsum; + } +}; + +struct EvalTweedieNLogLik { + explicit EvalTweedieNLogLik(const char* param) { + CHECK(param != nullptr) + << "tweedie-nloglik must be in format tweedie-nloglik@rho"; + rho_ = atof(param); + CHECK(rho_ < 2 && rho_ >= 1) + << "tweedie variance power must be in interval [1, 2)"; + } + const char *Name() const { + static std::string name; + std::ostringstream os; + os << "tweedie-nloglik@" << rho_; + name = os.str(); + return name.c_str(); + } + + XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const { + bst_float a = y * std::exp((1 - rho_) * std::log(p)) / (1 - rho_); + bst_float b = std::exp((2 - rho_) * std::log(p)) / (2 - rho_); + return -a + b; + } + static bst_float GetFinal(bst_float esum, bst_float wsum) { + return wsum == 0 ? esum : esum / wsum; + } + + protected: + bst_float rho_; +}; +/*! 
+ * \brief base class of element-wise evaluation + * \tparam Derived the name of subclass + */ +template +struct EvalEWiseBase : public Metric { + EvalEWiseBase() = default; + explicit EvalEWiseBase(char const* policy_param) : + policy_{policy_param}, reducer_{policy_} {} + + bst_float Eval(const HostDeviceVector& preds, + const MetaInfo& info, + bool distributed) override { + CHECK_EQ(preds.Size(), info.labels_.Size()) + << "label and prediction size not match, " + << "hint: use merror or mlogloss for multi-class classification"; + auto result = reducer_.Reduce(*tparam_, info.weights_, info.labels_, preds); + + double dat[2] { result.Residue(), result.Weights() }; + + if (distributed) { + rabit::Allreduce(dat, 2); + } + return Policy::GetFinal(dat[0], dat[1]); + } + + const char* Name() const override { + return policy_.Name(); + } + + private: + Policy policy_; + ElementWiseMetricsReduction reducer_{policy_}; +}; + +XGBOOST_REGISTER_METRIC(RMSE, "rmse") +.describe("Rooted mean square error.") +.set_body([](const char* param) { return new EvalEWiseBase(); }); + +XGBOOST_REGISTER_METRIC(RMSLE, "rmsle") +.describe("Rooted mean square log error.") +.set_body([](const char* param) { return new EvalEWiseBase(); }); + +XGBOOST_REGISTER_METRIC(MAE, "mae") +.describe("Mean absolute error.") +.set_body([](const char* param) { return new EvalEWiseBase(); }); + +XGBOOST_REGISTER_METRIC(MAPE, "mape") + .describe("Mean absolute percentage error.") + .set_body([](const char* param) { return new EvalEWiseBase(); }); + +XGBOOST_REGISTER_METRIC(MPHE, "mphe") +.describe("Mean Pseudo Huber error.") +.set_body([](const char* param) { return new EvalEWiseBase(); }); + +XGBOOST_REGISTER_METRIC(LogLoss, "logloss") +.describe("Negative loglikelihood for logistic regression.") +.set_body([](const char* param) { return new EvalEWiseBase(); }); + +XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik") +.describe("Negative loglikelihood for poisson regression.") +.set_body([](const char* param) { return new EvalEWiseBase(); }); + +XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance") +.describe("Residual deviance for gamma regression.") +.set_body([](const char* param) { return new EvalEWiseBase(); }); + +XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik") +.describe("Negative log-likelihood for gamma regression.") +.set_body([](const char* param) { return new EvalEWiseBase(); }); + +XGBOOST_REGISTER_METRIC(Error, "error") +.describe("Binary classification error.") +.set_body([](const char* param) { return new EvalEWiseBase(param); }); + +XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik") +.describe("tweedie-nloglik@rho for tweedie regression.") +.set_body([](const char* param) { + return new EvalEWiseBase(param); +}); + +} // namespace metric +} // namespace xgboost diff --git a/cuda_code/ellpack_page_source_6.cu b/cuda_code/ellpack_page_source_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..d23042472f9e7be48d95671d07e7df624914d538 --- /dev/null +++ b/cuda_code/ellpack_page_source_6.cu @@ -0,0 +1,88 @@ +/*! + * Copyright 2019 XGBoost contributors + */ +#include +#include + +#include "../common/hist_util.h" + +#include "ellpack_page.cuh" +#include "ellpack_page_source.h" +#include "sparse_page_source.h" + +namespace xgboost { +namespace data { + +// Build the quantile sketch across the whole input data, then use the histogram cuts to compress +// each CSR page, and write the accumulated ELLPACK pages to disk. 
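+// The flow implemented below: DeviceSketch builds the histogram cuts on the GPU selected by
+// param.gpu_id; WriteEllpackPages then accumulates CSR batches into a temporary SparsePage until
+// the estimated ELLPACK size reaches page_size_, compresses the accumulated rows into an
+// EllpackPageImpl and writes that page to the cache shards; finally an ExternalMemoryPrefetcher
+// is attached to the written cache.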
+EllpackPageSource::EllpackPageSource(DMatrix* dmat, + const std::string& cache_info, + const BatchParam& param) noexcept(false) { + cache_info_ = ParseCacheInfo(cache_info, kPageType_); + for (auto file : cache_info_.name_shards) { + CheckCacheFileExists(file); + } + if (param.gpu_page_size > 0) { + page_size_ = param.gpu_page_size; + } + + monitor_.Init("ellpack_page_source"); + dh::safe_cuda(cudaSetDevice(param.gpu_id)); + + monitor_.Start("Quantiles"); + size_t row_stride = GetRowStride(dmat); + auto cuts = common::DeviceSketch(param.gpu_id, dmat, param.max_bin); + monitor_.Stop("Quantiles"); + + monitor_.Start("WriteEllpackPages"); + WriteEllpackPages(param.gpu_id, dmat, cuts, cache_info, row_stride); + monitor_.Stop("WriteEllpackPages"); + + external_prefetcher_.reset( + new ExternalMemoryPrefetcher(cache_info_)); +} + +// Compress each CSR page to ELLPACK, and write the accumulated pages to disk. +void EllpackPageSource::WriteEllpackPages(int device, DMatrix* dmat, + const common::HistogramCuts& cuts, + const std::string& cache_info, + size_t row_stride) const { + auto cinfo = ParseCacheInfo(cache_info, kPageType_); + const size_t extra_buffer_capacity = 6; + SparsePageWriter writer(cinfo.name_shards, cinfo.format_shards, + extra_buffer_capacity); + std::shared_ptr page; + SparsePage temp_host_page; + writer.Alloc(&page); + auto* impl = page->Impl(); + + size_t bytes_write = 0; + double tstart = dmlc::GetTime(); + for (const auto& batch : dmat->GetBatches()) { + temp_host_page.Push(batch); + + size_t mem_cost_bytes = + EllpackPageImpl::MemCostBytes(temp_host_page.Size(), row_stride, cuts); + if (mem_cost_bytes >= page_size_) { + bytes_write += mem_cost_bytes; + *impl = EllpackPageImpl(device, cuts, temp_host_page, dmat->IsDense(), + row_stride); + writer.PushWrite(std::move(page)); + writer.Alloc(&page); + impl = page->Impl(); + temp_host_page.Clear(); + double tdiff = dmlc::GetTime() - tstart; + LOG(INFO) << "Writing " << kPageType_ << " to " << cache_info << " in " + << ((bytes_write >> 20UL) / tdiff) << " MB/s, " + << (bytes_write >> 20UL) << " written"; + } + } + if (temp_host_page.Size() != 0) { + *impl = EllpackPageImpl(device, cuts, temp_host_page, dmat->IsDense(), + row_stride); + writer.PushWrite(std::move(page)); + } +} + +} // namespace data +} // namespace xgboost diff --git a/cuda_code/embKernels.cc_1.cu b/cuda_code/embKernels.cc_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..6306ab174cc4eb93c5d7ef0ac8b440bdf85ae502 --- /dev/null +++ b/cuda_code/embKernels.cc_1.cu @@ -0,0 +1,633 @@ +#include "common.h" +#include "embKernels.h" + +/** +@file +Implemented the cuda kernel function and its launcher +that required by embedding layer in transformer model. +Currently, fp16 and fp32 versions are provided +*/ +namespace lightseq { +namespace cuda { +/** +@brief: ker_split_multilg_request +the format of request in multilingual: + e.g. 
<.> + request shape: [batch_size, src_seq_len + 2] + request = numpy.concatenate((src_lang_id, trg_lang_id, src_token_id), axis=1) + +@thread +gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS +blockDim.x = MAX_THREADS + +@param +req: [batch_size, src_seq_len + 2, hidden_dim] +src_lang_id: [batch_size] +trg_lang_id: [batch_size] +src_token_id: [batch_size, src_seq_len, hidden_dim] +req_len: src_seq_len + 2 +*/ +__global__ void ker_split_multilg_request(const int *req, int *src_lang_id, + int *trg_lang_id, int *src_token_id, + int batch_size, int req_len) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx < batch_size * req_len) { + int value = req[idx]; + int seq_id = idx / req_len; + int token_id = idx % req_len; + + if (token_id == 0) { + src_lang_id[seq_id] = value; + } else if (token_id == 1) { + trg_lang_id[seq_id] = value; + } else { + int new_idx = flat_2dim(seq_id, token_id - 2, req_len - 2); + src_token_id[new_idx] = value; + } + } +} + +void launch_split_multilg_request(const int *req, int *src_lang_id, + int *trg_lang_id, int *src_token_id, + int batch_size, int req_len, + cudaStream_t &stream) { + if (req_len < 3) { + throw std::runtime_error("req_len should be greater than 2"); + } + int nele = batch_size * req_len; + int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS; + ker_split_multilg_request<<>>( + req, src_lang_id, trg_lang_id, src_token_id, batch_size, req_len); +} + +/** +@brief: ker_enc_emb +for encoder, look up token embedding, add position embedding + +@thread +gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS +blockDim.x = MAX_THREADS; + +@param +token_emb: [vocab_size, hidden_dim] +pos_emb: [max_step, hidden_dim] +tokens: input token id, [batch_size, seq_len] +output: result, [batch_size, seq_len, hidden_dim] +pad_mask: record the padding token, [batch_size, seq_len] +pad_id, the padding token id +*/ +template +__global__ void ker_enc_emb(const T *token_emb, const T *pos_emb, + const int *tokens, T *output, int *pad_mask, + int pad_id, int batch_size, int seq_len, + int hidden_dim) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= batch_size * seq_len * hidden_dim) { + return; + } + int batch_idx, seq_idx, dim_idx; + decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx); + int tokens_idx = batch_idx * seq_len + seq_idx; + int token = tokens[tokens_idx]; + float4 value; + + if (token == pad_id) { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 1; + } + value.x = 0.f; + value.y = 0.f; + value.z = 0.f; + value.w = 0.f; + } else { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 0; + } + value = ((float4 *)token_emb)[token * hidden_dim + dim_idx]; + float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx]; + value.x += pemb.x; + value.y += pemb.y; + value.z += pemb.z; + value.w += pemb.w; + } + ((float4 *)output)[idx] = value; +} + +template <> +__global__ void ker_enc_emb<__half>(const __half *token_emb, + const __half *pos_emb, const int *tokens, + __half *output, int *pad_mask, int pad_id, + int batch_size, int seq_len, + int hidden_dim) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= batch_size * seq_len * hidden_dim) { + return; + } + int batch_idx, seq_idx, dim_idx; + decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx); + int tokens_idx = batch_idx * seq_len + seq_idx; + int token = tokens[tokens_idx]; + float4 value; + + if (token == pad_id) { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 1; + } + value.x = 0.f; + value.y = 0.f; + value.z = 0.f; + value.w = 0.f; + } 
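+    // non-pad token: the fp16 path below loads 8 half values at once as a float4
+    // (4 x __half2) and adds the position embedding in float precision before storing.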
else { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 0; + } + value = ((float4 *)token_emb)[token * hidden_dim + dim_idx]; + float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx]; + __half2 *value_h2 = (__half2 *)(&value); + __half2 *pemb_h2 = (__half2 *)(&pemb); +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 value_f2 = __half22float2(value_h2[i]); + float2 pemb_f2 = __half22float2(pemb_h2[i]); + value_f2.x += pemb_f2.x; + value_f2.y += pemb_f2.y; + value_h2[i] = __float22half2_rn(value_f2); + } + } + ((float4 *)output)[idx] = value; +} + +/** +@brief: ker_enc_emb_multilg_token +for encoder, look up token embedding, add position embedding + +@thread +gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS +blockDim.x = MAX_THREADS; + +@param +token_emb: [vocab_size, hidden_dim] +pos_emb: [max_step, hidden_dim] +tokens: input token id, [batch_size, seq_len] +lang_emb: language embedding, [num_lang, hidden_dim] +lang_id: language index, [batch_size] +output: result, [batch_size, seq_len, hidden_dim] +pad_mask: record the padding token, [batch_size, seq_len] +pad_id, the padding token id +*/ +template +__global__ void ker_enc_emb_multilg_token(const T *token_emb, const T *pos_emb, + const int *tokens, const T *lang_emb, + const int *lang_id, T *output, + int *pad_mask, int pad_id, + int batch_size, int seq_len, + int hidden_dim) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= batch_size * seq_len * hidden_dim) { + return; + } + int batch_idx, seq_idx, dim_idx; + decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx); + int tokens_idx = batch_idx * seq_len + seq_idx; + int token = tokens[tokens_idx]; + float4 value; + + if (token == pad_id) { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 1; + } + value.x = 0.f; + value.y = 0.f; + value.z = 0.f; + value.w = 0.f; + } else { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 0; + } + value = ((float4 *)token_emb)[token * hidden_dim + dim_idx]; + + // add pos emb + float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx]; + value.x += pemb.x; + value.y += pemb.y; + value.z += pemb.z; + value.w += pemb.w; + // add lang emb + pemb = ((float4 *)lang_emb)[lang_id[batch_idx] * hidden_dim + dim_idx]; + value.x += pemb.x; + value.y += pemb.y; + value.z += pemb.z; + value.w += pemb.w; + } + ((float4 *)output)[idx] = value; +} + +template <> +__global__ void ker_enc_emb_multilg_token<__half>( + const __half *token_emb, const __half *pos_emb, const int *tokens, + const __half *lang_emb, const int *lang_id, __half *output, int *pad_mask, + int pad_id, int batch_size, int seq_len, int hidden_dim) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= batch_size * seq_len * hidden_dim) { + return; + } + int batch_idx, seq_idx, dim_idx; + decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx); + int tokens_idx = batch_idx * seq_len + seq_idx; + int token = tokens[tokens_idx]; + float4 value; + + if (token == pad_id) { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 1; + } + value.x = 0.f; + value.y = 0.f; + value.z = 0.f; + value.w = 0.f; + } else { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 0; + } + value = ((float4 *)token_emb)[token * hidden_dim + dim_idx]; + __half2 *value_h2 = (__half2 *)(&value); + + float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx]; + __half2 *pemb_h2 = (__half2 *)(&pemb); + float4 lemb = + ((float4 *)lang_emb)[lang_id[batch_idx] * hidden_dim + dim_idx]; + __half2 *lemb_h2 = (__half2 *)(&lemb); +#pragma unroll + for (int i = 0; i < 
4; i++) { + float2 value_f2 = __half22float2(value_h2[i]); + float2 pemb_f2 = __half22float2(pemb_h2[i]); + float2 lemb_f2 = __half22float2(lemb_h2[i]); + value_f2.x += pemb_f2.x + lemb_f2.x; + value_f2.y += pemb_f2.y + lemb_f2.y; + value_h2[i] = __float22half2_rn(value_f2); + } + } + ((float4 *)output)[idx] = value; +} + +/** +@brief: ker_enc_emb_multilg_sentence +for encoder, look up token embedding, add position embedding + +@thread +gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS +blockDim.x = MAX_THREADS; + +@param +token_emb: [vocab_size, hidden_dim] +pos_emb: [max_step, hidden_dim] +tokens: input token id, [batch_size, seq_len] +lang_emb: language embedding, [num_lang, hidden_dim] +lang_id: language index, [batch_size] +output: result, [batch_size, seq_len, hidden_dim] +pad_mask: record the padding token, [batch_size, seq_len] +pad_id, the padding token id +*/ +template +__global__ void ker_enc_emb_multilg_sentence( + const T *token_emb, const T *pos_emb, const int *tokens, const T *lang_emb, + const int *lang_id, T *output, int *pad_mask, int pad_id, int batch_size, + int seq_len, int hidden_dim) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= batch_size * seq_len * hidden_dim) { + return; + } + int batch_idx, seq_idx, dim_idx; + decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx); + + bool is_pad; + int token_emb_idx; + if (seq_idx == 0) { + is_pad = false; + token_emb = lang_emb; + token_emb_idx = lang_id[batch_idx]; + } else { + token_emb_idx = tokens[batch_idx * (seq_len - 1) + seq_idx - 1]; + is_pad = (token_emb_idx == pad_id); + } + + float4 value; + int tokens_idx = batch_idx * seq_len + seq_idx; + if (is_pad) { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 1; + } + value.x = 0.f; + value.y = 0.f; + value.z = 0.f; + value.w = 0.f; + } else { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 0; + } + value = ((float4 *)token_emb)[token_emb_idx * hidden_dim + dim_idx]; + float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx]; + value.x += pemb.x; + value.y += pemb.y; + value.z += pemb.z; + value.w += pemb.w; + } + ((float4 *)output)[idx] = value; +} + +template <> +__global__ void ker_enc_emb_multilg_sentence<__half>( + const __half *token_emb, const __half *pos_emb, const int *tokens, + const __half *lang_emb, const int *lang_id, __half *output, int *pad_mask, + int pad_id, int batch_size, int seq_len, int hidden_dim) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= batch_size * seq_len * hidden_dim) { + return; + } + int batch_idx, seq_idx, dim_idx; + decompose_3dim(idx, seq_len, hidden_dim, &batch_idx, &seq_idx, &dim_idx); + + bool is_pad; + int token_emb_idx; + if (seq_idx == 0) { + is_pad = false; + token_emb = lang_emb; + token_emb_idx = lang_id[batch_idx]; + } else { + token_emb_idx = tokens[batch_idx * (seq_len - 1) + seq_idx - 1]; + is_pad = (token_emb_idx == pad_id); + } + + float4 value; + int tokens_idx = batch_idx * seq_len + seq_idx; + if (is_pad) { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 1; + } + value.x = 0.f; + value.y = 0.f; + value.z = 0.f; + value.w = 0.f; + } else { + if (dim_idx == 0) { + pad_mask[tokens_idx] = 0; + } + value = ((float4 *)token_emb)[token_emb_idx * hidden_dim + dim_idx]; + float4 pemb = ((float4 *)pos_emb)[seq_idx * hidden_dim + dim_idx]; + __half2 *value_h2 = (__half2 *)(&value); + __half2 *pemb_h2 = (__half2 *)(&pemb); +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 value_f2 = __half22float2(value_h2[i]); + float2 pemb_f2 = __half22float2(pemb_h2[i]); + 
value_f2.x += pemb_f2.x; + value_f2.y += pemb_f2.y; + value_h2[i] = __float22half2_rn(value_f2); + } + } + ((float4 *)output)[idx] = value; +} + +template +void launch_enc_emb(const T *token_emb, const T *pos_emb, const int *tokens, + T *output, int *pad_mask, int pad_id, int batch_size, + int seq_len, int hidden_dim, cudaStream_t stream, + const T *lang_emb, const int *lang_id, int multilg_type) { + if (hidden_dim % 4 != 0) { + throw std::runtime_error("violate hidden_dim % 4 = 0"); + } + hidden_dim >>= 2; + int nele = batch_size * seq_len * hidden_dim; + int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS; + if (multilg_type == 0) { + ker_enc_emb<<>>( + token_emb, pos_emb, tokens, output, pad_mask, pad_id, batch_size, + seq_len, hidden_dim); + } else if (multilg_type == 1) { + ker_enc_emb_multilg_token<<>>( + token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask, pad_id, + batch_size, seq_len, hidden_dim); + } else { + ker_enc_emb_multilg_sentence<<>>( + token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask, pad_id, + batch_size, seq_len, hidden_dim); + } +} + +template <> +void launch_enc_emb<__half>(const __half *token_emb, const __half *pos_emb, + const int *tokens, __half *output, int *pad_mask, + int pad_id, int batch_size, int seq_len, + int hidden_dim, cudaStream_t stream, + const __half *lang_emb, const int *lang_id, + int multilg_type) { + if (hidden_dim % 8 != 0) { + throw std::runtime_error("violate hidden_dim % 8 = 0"); + } + hidden_dim >>= 3; + int nele = batch_size * seq_len * hidden_dim; + int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS; + + if (multilg_type == 0) { + ker_enc_emb<__half><<>>( + token_emb, pos_emb, tokens, output, pad_mask, pad_id, batch_size, + seq_len, hidden_dim); + } else if (multilg_type == 1) { + ker_enc_emb_multilg_token<__half><<>>( + token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask, pad_id, + batch_size, seq_len, hidden_dim); + } else { + ker_enc_emb_multilg_sentence<__half><<>>( + token_emb, pos_emb, tokens, lang_emb, lang_id, output, pad_mask, pad_id, + batch_size, seq_len, hidden_dim); + } +} + +template void launch_enc_emb(const float *token_emb, + const float *pos_emb, const int *tokens, + float *output, int *pad_mask, int pad_id, + int batch_size, int seq_len, int hidden_dim, + cudaStream_t stream, const float *lang_emb, + const int *lang_id, int multilg_type); + +template void launch_enc_emb<__half>(const __half *token_emb, + const __half *pos_emb, const int *tokens, + __half *output, int *pad_mask, int pad_id, + int batch_size, int seq_len, + int hidden_dim, cudaStream_t stream, + const __half *lang_emb, const int *lang_id, + int multilg_type); + +/** +@brief: ker_dec_embedding +for decoder, look up token embedding, add position embedding + +@thread +gridDim.x = (nele + MAX_THREADS - 1) / MAX_THREADS; +blockDim.x = MAX_THREADS + +@param +token_emb: [hidden_dim, vocab_size], note, it is different with encoder +pos_emb: [max_step, hidden_dim] +tokens: input token id, [batch_size, beam_size, max_step] +lang_emb: language embedding, [num_lang, hidden_dim] +lang_id: language index, [batch_size] +output: result, [batch_size, beam_size, hidden_dim] +step: current decoder step +max_step: max decoder steps +multilg_type: 0 for no multilg, 1 for token level multilg, + 2 for sentence level multilg +*/ +template +__global__ void ker_dec_emb(const T *token_emb, const T *pos_emb, int *tokens, + const T *lang_emb, const int *lang_id, T *output, + int batch_size, int beam_size, int hidden_dim, + int vocab_size, int 
step, int max_step, + int multilg_type) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= batch_size * beam_size * hidden_dim) { + return; + } + int batch_idx, beam_idx, dim_idx; + decompose_3dim(idx, beam_size, hidden_dim, &batch_idx, &beam_idx, &dim_idx); + + T emb; + if ((multilg_type == 2 || multilg_type == 3) && step == 0) { + // the bos of sentense level multilg is target lang id + int lid = lang_id[batch_idx]; + emb = lang_emb[flat_2dim(lid, dim_idx, hidden_dim)]; + tokens[flat_3dim(batch_idx, beam_idx, 0, beam_size, max_step)] = lid; + } else { + int token = + tokens[flat_3dim(batch_idx, beam_idx, step, beam_size, max_step)]; + emb = token_emb[flat_2dim(dim_idx, token, vocab_size)]; + } + float value = + float(emb) + float(pos_emb[flat_2dim(step, dim_idx, hidden_dim)]); + if (multilg_type == 1) { + // token level multilg, add lang_emb + value += + float(lang_emb[flat_2dim(lang_id[batch_idx], dim_idx, hidden_dim)]); + } + output[idx] = T(value); +} + +template +void launch_dec_emb(const T *token_emb, const T *pos_emb, int *tokens, + const T *lang_emb, const int *lang_id, T *output, + int batch_size, int beam_size, int hidden_dim, + int vocab_size, int step, int max_step, int multilg_type, + cudaStream_t stream) { + if (step >= max_step) { + throw std::runtime_error("violate step < max_step"); + } + int nele = batch_size * beam_size * hidden_dim; + int nblock = (nele + MAX_THREADS - 1) / MAX_THREADS; + ker_dec_emb<<>>( + token_emb, pos_emb, tokens, lang_emb, lang_id, output, batch_size, + beam_size, hidden_dim, vocab_size, step, max_step, multilg_type); +} + +template void launch_dec_emb(const float *token_emb, + const float *pos_emb, int *tokens, + const float *lang_emb, const int *lang_id, + float *output, int batch_size, + int beam_size, int hidden_dim, + int vocab_size, int step, int max_step, + int multilg_type, cudaStream_t stream); + +template void launch_dec_emb<__half>(const __half *token_emb, + const __half *pos_emb, int *tokens, + const __half *lang_emb, const int *lang_id, + __half *output, int batch_size, + int beam_size, int hidden_dim, + int vocab_size, int step, int max_step, + int multilg_type, cudaStream_t stream); + +/** +@brief: ker_patch_emb +patch embedding by conv2d, concat cls embedding, add position embedding + +@thread +gridDim.x = batch_size +gridDim.y = max_step +gridDim.z = hidden_dim +blockDim.x = MAX_THREADS + +@param +conv_weight: [hidden_dim, channel_input, patch_size, patch_size] +conv_bias: [hidden_dim] +pos_emb: [max_step, hidden_dim] +cls_emb: [hidden_dim] +input: [batch_size, channel_input, image_size, image_size] +output: result, [batch_size, max_step, hidden_dim] +*/ +template +__global__ void ker_patch_emb(const T *conv_weight, const T *conv_bias, + const T *pos_emb, const T *cls_emb, + const float *input, T *output, int patch_size, + int image_size, int channel_input) { + if (blockIdx.y == 0) { + if (threadIdx.x == 0) { + output[flat_3dim(blockIdx.x, 0, blockIdx.z, gridDim.y, gridDim.z)] = + __ldg(&cls_emb[blockIdx.z]) + __ldg(&pos_emb[blockIdx.z]); + } + return; + } + + int val_num_per_block = channel_input * patch_size * patch_size; + int patch_row_id, patch_col_id, value_row_id, value_col_id, channel_id; + decompose_2dim(blockIdx.y - 1, image_size / patch_size, &patch_row_id, + &patch_col_id); + + float val = 0.f; + for (int idx = threadIdx.x; idx < val_num_per_block; idx += blockDim.x) { + decompose_3dim(idx, patch_size, patch_size, &channel_id, &value_row_id, + &value_col_id); + int conv_weight_offset = 
flat_2dim(blockIdx.z, idx, val_num_per_block); + int in_offset = flat_4dim(blockIdx.x, channel_id, + patch_row_id * patch_size + value_row_id, + patch_col_id * patch_size + value_col_id, + channel_input, image_size, image_size); + val += __ldg(&input[in_offset]) * + (float)__ldg(&conv_weight[conv_weight_offset]); + } + + float rsum = blockReduceSum(val); + if (threadIdx.x == 0) { + float out_float; + int out_offset = + flat_3dim(blockIdx.x, blockIdx.y, blockIdx.z, gridDim.y, gridDim.z); + out_float = + rsum + (float)__ldg(&conv_bias[blockIdx.z]) + + (float)__ldg(&pos_emb[flat_2dim(blockIdx.y, blockIdx.z, gridDim.z)]); + output[out_offset] = (T)out_float; + } +} + +template +void launch_patch_emb(const T *conv_weight, const T *conv_bias, + const T *pos_emb, const T *cls_emb, const float *input, + T *output, int patch_size, int image_size, int batch_size, + int max_step, int hidden_dim, int channel_input, + cudaStream_t stream) { + ker_patch_emb + <<>>( + conv_weight, conv_bias, pos_emb, cls_emb, input, output, patch_size, + image_size, channel_input); +} + +template void launch_patch_emb( + const float *conv_weight, const float *conv_bias, const float *pos_emb, + const float *cls_emb, const float *input, float *output, int patch_size, + int image_size, int batch_size, int max_step, int hidden_dim, + int channel_input, cudaStream_t stream); + +template void launch_patch_emb<__half>( + const __half *conv_weight, const __half *conv_bias, const __half *pos_emb, + const __half *cls_emb, const float *input, __half *output, int patch_size, + int image_size, int batch_size, int max_step, int hidden_dim, + int channel_input, cudaStream_t stream); + +} // namespace cuda +} // namespace lightseq diff --git a/cuda_code/embLayerNormPlugin_6.cu b/cuda_code/embLayerNormPlugin_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..403c2b6ef59d6e81673d6218144b153ff2eea110 --- /dev/null +++ b/cuda_code/embLayerNormPlugin_6.cu @@ -0,0 +1,678 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include + +#include "NvInfer.h" +#include "embLayerNormPlugin.h" +#include "bertCommon.h" +#include "common.h" +#include "serialize.hpp" + +using namespace nvinfer1; +using bert::operator+; + +namespace bert +{ + +template +__global__ void maskIdxKernelSmall(int ld, const int* mask, int* maskIdx) +{ + + using BlockReduce = cub::BlockReduce; + __shared__ typename BlockReduce::TempStorage tmpStorage; + + // ld is S + // blockIdx.x is b + + const int offset = blockIdx.x * ld; // batch strides of S + + cub::Min min; + int threadData(ld); // if the mask admits all values + + const int idx = offset + threadIdx.x; + if (threadIdx.x < ld) + { + const int val = mask[idx]; + if (val == 0) // masked position: report thread idx + { + threadData = threadIdx.x; + } + } + + const auto minIdx = BlockReduce(tmpStorage).Reduce(threadData, min); + + if (threadIdx.x == 0) + { + maskIdx[blockIdx.x] = minIdx; + } +} + +template +__global__ void maskIdxKernel(int ld, const int* mask, int* maskIdx) +{ + + using BlockReduce = cub::BlockReduce; + __shared__ typename BlockReduce::TempStorage tmpStorage; + + // ld is S + // blockIdx.x is b + + const int offset = blockIdx.x * ld; // batch strides of S + + cub::Min min; + int threadData(ld); // if the mask admits all values + + for (int i = threadIdx.x; i < ld; i += TPB) + { + const int idx = offset + i; + const int val = mask[idx]; + if (val == 0) // masked position: report thread idx + { + threadData = min(threadData, i); + } + } + + const auto minIdx = BlockReduce(tmpStorage).Reduce(threadData, min); + + if (threadIdx.x == 0) + { + maskIdx[blockIdx.x] = minIdx; + } +} + +inline int computeMaskIdx(cudaStream_t stream, const int S, const int B, const int* mask, int* maskIdx) +{ + // Mask idx is of length B and assumes the valid region is contiguous starting + // from the beginning of the sequence + + // Assume n = BxS + if (S <= 32) + { + maskIdxKernelSmall<32><<>>(S, mask, maskIdx); + } + else if (S <= 128) + { + maskIdxKernelSmall<128><<>>(S, mask, maskIdx); + } + else if (S == 384) + { + maskIdxKernelSmall<384><<>>(S, mask, maskIdx); + } + else + { + maskIdxKernel<256><<>>(S, mask, maskIdx); + } + + CHECK(cudaPeekAtLastError()); + + return 0; +} + +template +__global__ void embLayerNormKernel(int ld, const int* inputIds, const int* tokenIds, const float* beta, + const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb, T* output) +{ + + cub::Sum pairSum; + // 1. lookup word and token of the block + // blockIdx.x = position in the sequence + // blockIdx.y = batch + // gridDim.x = S + // gridDim.y = B + __shared__ int wordId; + __shared__ int tokenId; + + const T rld = T(1.f) / T(ld); + const int seqPos = blockIdx.y + blockIdx.x * gridDim.y; + if (threadIdx.x == 0) + { + wordId = inputIds[seqPos]; + tokenId = tokenIds[seqPos]; + } + __syncthreads(); + + // 2. 
load pos/tok/word embeddings and add them toghether + // offset into embeddings is given by wordId * hidden_size + const int poffset = blockIdx.x * ld; + const int woffset = wordId * ld; + const int toffset = tokenId * ld; + // the output offset is given by b * (S*hidden_size) + s * hidden_size + const int outOffset = seqPos * ld; + + kvp threadData(0, 0); + + for (int it = threadIdx.x; it < ld; it += TPB) + { + const T w(wordEmb[woffset + it]); + const T t(tokEmb[toffset + it]); + const T p(posEmb[poffset + it]); + const T val = w + t + p; + + output[outOffset + it] = val; + const T rldval = rld * val; + threadData = pairSum(threadData, kvp(rldval, rldval * val)); + } + + // 3. layer norm on the sum + layerNorm(threadData, ld, outOffset, beta, gamma, output); +} + +template +inline int embSkipLayerNorm(cudaStream_t stream, int ld, int B, int S, const int* inputIds, const int* token_ids, + const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb, T* output) +{ + + constexpr int tpb = 256; + const dim3 grid(S, B, 1); + const dim3 block(tpb, 1, 1); + + embLayerNormKernel + <<>>(ld, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output); + CHECK(cudaPeekAtLastError()); + + return 0; +} + +// Clip plugin specific constants +namespace +{ +static const char* EMB_LAYER_NORM_VERSION{"1"}; +static const char* EMB_LAYER_NORM_NAME{"CustomEmbLayerNormPluginDynamic"}; +} // namespace + +// Static class fields initialization +PluginFieldCollection EmbLayerNormPluginDynamicCreator::mFC{}; +std::vector EmbLayerNormPluginDynamicCreator::mPluginAttributes; + +REGISTER_TENSORRT_PLUGIN(EmbLayerNormPluginDynamicCreator); + +EmbLayerNormPluginDynamic::EmbLayerNormPluginDynamic(const std::string& name, const bool outputFp16, + const Weights& beta, const Weights& gamma, const Weights& wordEmb, const Weights& posEmb, const Weights& tokEmb) + : mLayerName(name) + , mLd(beta.count) + , mGamma(gamma) + , mBeta(beta) + , mWordEmb(wordEmb) + , mPosEmb(posEmb) + , mTokEmb(tokEmb) + , mGammaDev(nullptr) + , mBetaDev(nullptr) + , mWordEmbDev(nullptr) + , mTokEmbDev(nullptr) + , mPosEmbDev(nullptr) +{ + // Assuming Weights.count is the number of elements and not bytes + assert(beta.count == gamma.count); + assert(wordEmb.count % mLd == 0); + assert(posEmb.count % mLd == 0); + assert(tokEmb.count % mLd == 0); + mWordVocabSize = wordEmb.count / mLd; + mPosVocabSize = posEmb.count / mLd; + mTokVocabSize = tokEmb.count / mLd; + // We set mS in configure + mType = outputFp16 ? 
DataType::kHALF : DataType::kFLOAT; +} + +EmbLayerNormPluginDynamic::EmbLayerNormPluginDynamic(const std::string& name, const void* data, size_t length) + : mLayerName(name) +{ + gLogVerbose << "EMB LN Deser start\n"; + // Deserialize in the same order as serialization + deserialize_value(&data, &length, &mType); + deserialize_value(&data, &length, &mLd); + deserialize_value(&data, &length, &mS); + deserialize_value(&data, &length, &mWordVocabSize); + deserialize_value(&data, &length, &mPosVocabSize); + deserialize_value(&data, &length, &mTokVocabSize); + + const char* d = static_cast(data); + mBetaDev = deserToDev(d, mLd); + mGammaDev = deserToDev(d, mLd); + + const size_t wordSize = samplesCommon::getElementSize(mType); + mWordEmbDev = deserToDev(d, mLd * mWordVocabSize * wordSize); + mPosEmbDev = deserToDev(d, mLd * mPosVocabSize * wordSize); + mTokEmbDev = deserToDev(d, mLd * mTokVocabSize * wordSize); + // this signals init not to allocate/copy + mGamma.count = -1; + mBeta.count = -1; + mWordEmb.count = -1; + mTokEmb.count = -1; + mPosEmb.count = -1; + mGamma.values = nullptr; + mBeta.values = nullptr; + mWordEmb.values = nullptr; + mTokEmb.values = nullptr; + mPosEmb.values = nullptr; + + gLogVerbose << "EMB LN Deser done\n"; +} + +// IPluginV2DynamicExt Methods +IPluginV2DynamicExt* EmbLayerNormPluginDynamic::clone() const +{ + gLogVerbose << "EMBLN clone start" << std::endl; + auto ret = new EmbLayerNormPluginDynamic( + mLayerName, mType == DataType::kHALF, mBeta, mGamma, mWordEmb, mPosEmb, mTokEmb); + ret->mS = mS; + + ret->mWordEmbDev = mWordEmbDev; + ret->mPosEmbDev = mPosEmbDev; + ret->mTokEmbDev = mTokEmbDev; + ret->mBetaDev = mBetaDev; + ret->mGammaDev = mGammaDev; + gLogVerbose << "EMBLN clone done" << std::endl; + return ret; +} + +DimsExprs EmbLayerNormPluginDynamic::getOutputDimensions( + int outputIndex, const DimsExprs* inputs, int nbInputs, IExprBuilder& exprBuilder) +{ + // Input should be input ids and token ids and the input mask + // Output should be the embeddings tensor and mask indices + assert(nbInputs == 3); + + assert(inputs[0].nbDims == 2); // BxS + assert(inputs[0].nbDims == inputs[1].nbDims); + assert(inputs[0].nbDims == inputs[2].nbDims); + + assert(outputIndex == 0 || outputIndex == 1); + + if (outputIndex == 0) + { + DimsExprs ret; + ret.nbDims = 5; + ret.d[0] = inputs[0].d[0]; + ret.d[1] = inputs[0].d[1]; + ret.d[2] = exprBuilder.constant(mLd); + ret.d[3] = exprBuilder.constant(1); + ret.d[4] = exprBuilder.constant(1); + return ret; + } + + DimsExprs ret; + ret.nbDims = 1; + ret.d[0] = inputs[0].d[BDIM]; + return ret; +} + +bool EmbLayerNormPluginDynamic::supportsFormatCombination( + int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) +{ + // 3 inputs of size BxS + assert(nbInputs == 3); + assert(nbOutputs == 2); + + const PluginTensorDesc& desc = inOut[pos]; + if (desc.format != TensorFormat::kLINEAR) + { + return false; + } + if (pos == 0) + { + return desc.type == DataType::kINT32 && desc.dims.nbDims == 2; + } + + const PluginTensorDesc& prev = inOut[pos - 1]; + if (pos == 1 || pos == 2) + { + return desc.type == DataType::kINT32 && desc.dims.nbDims == 2 && desc.dims.d[BDIM] == prev.dims.d[BDIM] + && desc.dims.d[SDIM] == prev.dims.d[SDIM]; + } + + if (pos == 3) + { // embedded sequence + + return desc.type == mType && desc.dims.nbDims == 5 && desc.dims.d[BDIM] == prev.dims.d[BDIM] + && desc.dims.d[SDIM] == prev.dims.d[SDIM] && desc.dims.d[3] == 1 && desc.dims.d[4] == 1; + } + + // pos == 4: mask + return desc.type == 
DataType::kINT32 && desc.dims.nbDims == 1 && desc.dims.d[0] == prev.dims.d[0]; +} + +void EmbLayerNormPluginDynamic::configurePlugin( + const DynamicPluginTensorDesc* inputs, int nbInputs, const DynamicPluginTensorDesc* outputs, int nbOutputs) +{ + // Validate input arguments + assert(nbOutputs == 2); + assert(nbInputs == 3); + + assert(inputs[0].desc.dims.nbDims == 2); + mS = inputs[0].desc.dims.d[SDIM]; + const int B = inputs[0].desc.dims.d[BDIM]; + TRT_UNUSED B; + assert(mS == inputs[1].desc.dims.d[SDIM]); + assert(B == inputs[1].desc.dims.d[BDIM]); + assert(mS == inputs[2].desc.dims.d[SDIM]); + assert(B == inputs[2].desc.dims.d[BDIM]); + + assert(outputs[0].desc.dims.nbDims == 5); + assert(outputs[0].desc.dims.d[SDIM] == mS); + assert(outputs[0].desc.dims.d[BDIM] == B); + assert(outputs[0].desc.dims.d[2] == mLd); + assert(outputs[0].desc.dims.d[3] == 1); + assert(outputs[0].desc.dims.d[4] == 1); + + assert(outputs[1].desc.dims.nbDims == 1); + assert(outputs[1].desc.dims.d[0] == B); + + assert(inputs[0].desc.type == DataType::kINT32); + assert(inputs[1].desc.type == DataType::kINT32); + assert(inputs[2].desc.type == DataType::kINT32); + assert(outputs[0].desc.type == DataType::kFLOAT || outputs[0].desc.type == DataType::kHALF); + assert(outputs[1].desc.type == DataType::kINT32); +} + +size_t EmbLayerNormPluginDynamic::getWorkspaceSize( + const PluginTensorDesc* inputs, int nbInputs, const PluginTensorDesc* outputs, int nbOutputs) const +{ + return 0; +} + +int EmbLayerNormPluginDynamic::enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc, + const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) +{ + const int batchSize = inputDesc->dims.d[BDIM]; + const int S = inputDesc->dims.d[SDIM]; + int status = -1; + + // Our plugin outputs only one tensor + const int* inputIds = static_cast(inputs[0]); + const int* segmentIds = static_cast(inputs[1]); + const int* inputMask = static_cast(inputs[2]); + + if (mType == DataType::kFLOAT) + { + float* output = static_cast(outputs[0]); + float* wordEmb = static_cast(mWordEmbDev); + float* tokEmb = static_cast(mTokEmbDev); + float* posEmb = static_cast(mPosEmbDev); + embSkipLayerNorm( + stream, mLd, batchSize, S, inputIds, segmentIds, mBetaDev, mGammaDev, wordEmb, posEmb, tokEmb, output); + } + else if (mType == DataType::kHALF) + { + half* output = static_cast(outputs[0]); + + half* wordEmb = static_cast(mWordEmbDev); + half* tokEmb = static_cast(mTokEmbDev); + half* posEmb = static_cast(mPosEmbDev); + embSkipLayerNorm( + stream, mLd, batchSize, S, inputIds, segmentIds, mBetaDev, mGammaDev, wordEmb, posEmb, tokEmb, output); + } + else + { + assert(false); + } + int* maskIdx = static_cast(outputs[1]); + computeMaskIdx(stream, S, batchSize, inputMask, maskIdx); + + return status; +} + +// IPluginV2Ext Methods +DataType EmbLayerNormPluginDynamic::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const +{ + + assert(index == 0 || index == 1); + if (index == 0) + { + assert(mType == DataType::kHALF || mType == DataType::kFLOAT); + return mType; + } + return DataType::kINT32; +} + +// IPluginV2 Methods +const char* EmbLayerNormPluginDynamic::getPluginType() const +{ + return EMB_LAYER_NORM_NAME; +} + +const char* EmbLayerNormPluginDynamic::getPluginVersion() const +{ + return EMB_LAYER_NORM_VERSION; +} + +int EmbLayerNormPluginDynamic::getNbOutputs() const +{ + return 2; +} + +int EmbLayerNormPluginDynamic::initialize() +{ + if (mGamma.values) + { + CHECK(cudaMalloc(&mGammaDev, 
sizeof(float) * mGamma.count)); + CHECK(cudaMemcpy(mGammaDev, mGamma.values, sizeof(float) * mGamma.count, cudaMemcpyHostToDevice)); + } + if (mBeta.values) + { + CHECK(cudaMalloc(&mBetaDev, sizeof(float) * mBeta.count)); + CHECK(cudaMemcpy(mBetaDev, mBeta.values, sizeof(float) * mBeta.count, cudaMemcpyHostToDevice)); + } + const size_t wordSize = samplesCommon::getElementSize(mType); + + if (mWordEmb.values) + { + CHECK(cudaMalloc(&mWordEmbDev, wordSize * mWordEmb.count)); + if (mType == DataType::kFLOAT) + { + convertAndCopyToDevice(mWordEmb, reinterpret_cast(mWordEmbDev)); + } + else + { + convertAndCopyToDevice(mWordEmb, reinterpret_cast(mWordEmbDev)); + } + } + if (mTokEmb.values) + { + CHECK(cudaMalloc(&mTokEmbDev, wordSize * mTokEmb.count)); + if (mType == DataType::kFLOAT) + { + convertAndCopyToDevice(mTokEmb, reinterpret_cast(mTokEmbDev)); + } + else + { + convertAndCopyToDevice(mTokEmb, reinterpret_cast(mTokEmbDev)); + } + } + + if (mPosEmb.values) + { + CHECK(cudaMalloc(&mPosEmbDev, wordSize * mPosEmb.count)); + if (mType == DataType::kFLOAT) + { + convertAndCopyToDevice(mPosEmb, reinterpret_cast(mPosEmbDev)); + } + else + { + convertAndCopyToDevice(mPosEmb, reinterpret_cast(mPosEmbDev)); + } + } + return 0; +} + +void EmbLayerNormPluginDynamic::terminate() +{ + gLogVerbose << "EMBLN terminate start" << std::endl; + CHECK(cudaFree(mGammaDev)); + CHECK(cudaFree(mBetaDev)); + CHECK(cudaFree(mWordEmbDev)); + CHECK(cudaFree(mTokEmbDev)); + CHECK(cudaFree(mPosEmbDev)); + gLogVerbose << "EMBLN terminate done" << std::endl; +} + +size_t EmbLayerNormPluginDynamic::getSerializationSize() const +{ + const size_t wordSize = samplesCommon::getElementSize(mType); + return 2 * sizeof(float) * mLd // beta + gamma + + sizeof(mType) + sizeof(mLd) * 5 //mLd, mS, m*VocabSize + + wordSize * mLd * mWordVocabSize // word emb + + wordSize * mLd * mPosVocabSize // pos emb + + wordSize * mLd * mTokVocabSize // tok emb + ; +} + +void EmbLayerNormPluginDynamic::serialize(void* buffer) const +{ + const size_t wordSize = samplesCommon::getElementSize(mType); + + serialize_value(&buffer, mType); + serialize_value(&buffer, mLd); + serialize_value(&buffer, mS); + serialize_value(&buffer, mWordVocabSize); + serialize_value(&buffer, mPosVocabSize); + serialize_value(&buffer, mTokVocabSize); + + char* d = static_cast(buffer); + serFromDev(d, mBetaDev, mLd); + serFromDev(d, mGammaDev, mLd); + serFromDev(d, static_cast(mWordEmbDev), mLd * mWordVocabSize * wordSize); + serFromDev(d, static_cast(mPosEmbDev), mLd * mPosVocabSize * wordSize); + serFromDev(d, static_cast(mTokEmbDev), mLd * mTokVocabSize * wordSize); +} + +void EmbLayerNormPluginDynamic::destroy() +{ + gLogVerbose << "EMBLN destroy start" << std::endl; + // This gets called when the network containing plugin is destroyed + delete this; + gLogVerbose << "EMBLN destroy start" << std::endl; +} + +void EmbLayerNormPluginDynamic::setPluginNamespace(const char* libNamespace) +{ + mNamespace = libNamespace; +} + +const char* EmbLayerNormPluginDynamic::getPluginNamespace() const +{ + return mNamespace.c_str(); +} + +/////////////////////// + +EmbLayerNormPluginDynamicCreator::EmbLayerNormPluginDynamicCreator() +{ + mFC.nbFields = mPluginAttributes.size(); + mFC.fields = mPluginAttributes.data(); +} + +const char* EmbLayerNormPluginDynamicCreator::getPluginName() const +{ + return EMB_LAYER_NORM_NAME; +} + +const char* EmbLayerNormPluginDynamicCreator::getPluginVersion() const +{ + return EMB_LAYER_NORM_VERSION; +} + +const PluginFieldCollection* 
EmbLayerNormPluginDynamicCreator::getFieldNames() +{ + return &mFC; +} + +IPluginV2* EmbLayerNormPluginDynamicCreator::createPlugin(const char* name, const PluginFieldCollection* fc) +{ + gLogVerbose << "Creating EmbLayerNormPluginDynamic...\n"; + + bool output_fp16 = false; + Weights beta; + Weights gamma; + Weights word_emb; + Weights pos_emb; + Weights tok_emb; + for (int i = 0; i < fc->nbFields; i++) + { + std::string field_name(fc->fields[i].name); + if (field_name.compare("bert_embeddings_layernorm_beta") == 0) + { + gLogVerbose << "Building bert_embeddings_layernorm_beta...\n"; + beta.values = fc->fields[i].data; + beta.count = fc->fields[i].length; + beta.type = fieldTypeToDataType(fc->fields[i].type); + } + + if (field_name.compare("bert_embeddings_layernorm_gamma") == 0) + { + gLogVerbose << "Building bert_embeddings_layernorm_gamma...\n"; + gamma.values = fc->fields[i].data; + gamma.count = fc->fields[i].length; + gamma.type = fieldTypeToDataType(fc->fields[i].type); + } + + if (field_name.compare("bert_embeddings_word_embeddings") == 0) + { + gLogVerbose << "Building bert_embeddings_word_embeddings...\n"; + word_emb.values = fc->fields[i].data; + word_emb.count = fc->fields[i].length; + word_emb.type = fieldTypeToDataType(fc->fields[i].type); + } + + if (field_name.compare("bert_embeddings_token_type_embeddings") == 0) + { + gLogVerbose << "Building bert_embeddings_token_type_embeddings...\n"; + tok_emb.values = fc->fields[i].data; + tok_emb.count = fc->fields[i].length; + tok_emb.type = fieldTypeToDataType(fc->fields[i].type); + } + + if (field_name.compare("bert_embeddings_position_embeddings") == 0) + { + gLogVerbose << "Building bert_embeddings_position_embeddings...\n"; + pos_emb.values = fc->fields[i].data; + pos_emb.count = fc->fields[i].length; + pos_emb.type = fieldTypeToDataType(fc->fields[i].type); + } + if (field_name.compare("output_fp16") == 0) + { + gLogVerbose << "Building output_fp16...\n"; + assert(fc->fields[i].type == PluginFieldType::kINT32); + output_fp16 = reinterpret_cast(fc->fields[i].data)[0] != 0; + } + } + + gLogVerbose << "Building the Plugin...\n"; + EmbLayerNormPluginDynamic* p + = new EmbLayerNormPluginDynamic(name, output_fp16, beta, gamma, word_emb, pos_emb, tok_emb); + return p; +} + +IPluginV2* EmbLayerNormPluginDynamicCreator::deserializePlugin( + const char* name, const void* serialData, size_t serialLength) +{ + // This object will be deleted when the network is destroyed, which will + // call EmbLayerNormPluginDynamic::destroy() + return new EmbLayerNormPluginDynamic(name, serialData, serialLength); +} + +void EmbLayerNormPluginDynamicCreator::setPluginNamespace(const char* libNamespace) +{ + mNamespace = libNamespace; +} + +const char* EmbLayerNormPluginDynamicCreator::getPluginNamespace() const +{ + return mNamespace.c_str(); +} +} diff --git a/cuda_code/embedding_test.cu b/cuda_code/embedding_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..d6d0901e7dbb3454a949c64ea7ec53bae5e5fc8a --- /dev/null +++ b/cuda_code/embedding_test.cu @@ -0,0 +1,902 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#include +#include +#include +#include "HugeCTR/include/data_parser.hpp" +#include "HugeCTR/include/data_reader.hpp" +#include "HugeCTR/include/embedding.hpp" +#include "HugeCTR/include/embeddings/sparse_embedding_hash.hpp" +#include "gtest/gtest.h" +#include "utest/embedding/sparse_embedding_hash_cpu.hpp" +#include "utest/test_utils.h" +#include "nvToolsExt.h" +#include +#include + +#define EPSILON 1e-4 + +//#define PRINT_DEBUG 1 +#ifdef PRINT_DEBUG +#define PRINTF printf +#else +#define PRINTF(...) +#endif + +using namespace HugeCTR; + +bool compare_float(float a, float b) { + // compare absolute error + if (fabs(a - b) < EPSILON) return true; + + // compare relative error + if (fabs(a) >= fabs(b)) + if (fabs((a - b) / a) < EPSILON) + return true; + else + return false; + else if (fabs((a - b) / b) < EPSILON) + return true; + else + return false; +} + +bool compare_float_array(float *a, float *b, size_t len) { + bool rtn = true; + + for (size_t i = 0; i < len; i++) { + if (compare_float(a[i], b[i]) != true) { + printf("Error in compare_float_array: i=%d, a=%.8f, n=%.8f\n", (int)i, a[i], b[i]); + rtn = false; + break; + } + } + + return rtn; +} + +bool compare_float_files(std::string file1, std::string file2) { + std::ifstream file_stream1(file1); + std::ifstream file_stream2(file2); + + if (!file_stream1.is_open() || !file_stream2.is_open()) { + ERROR_MESSAGE_("Error: file open failed"); + return false; + } + + long long start_pos = file_stream1.tellg(); + file_stream1.seekg(0, file_stream1.end); + long long end_pos = file_stream1.tellg(); + long long file_size1 = end_pos - start_pos; + + file_stream2.seekg(0, file_stream1.beg); + start_pos = file_stream2.tellg(); + file_stream2.seekg(0, file_stream2.end); + long long file_size2 = end_pos - start_pos; + + if (file_size1 != file_size2) { + ERROR_MESSAGE_("Error: files size is not same"); + file_stream1.close(); + file_stream2.close(); + return false; + } + + file_stream1.seekg(0, file_stream1.beg); + file_stream2.seekg(0, file_stream2.beg); + + bool rtn = true; + while (file_stream1.peek() != EOF) { + float val1, val2; + file_stream1.read((char *)&val1, sizeof(float)); + file_stream2.read((char *)&val2, sizeof(float)); + if (!compare_float(val1, val2)) { + rtn = false; + break; + } + } + + file_stream1.close(); + file_stream2.close(); + + return rtn; +} + +// hash table files have same keys and values, but they may be unordered +template +bool compare_hash_table_files(std::string file1, std::string file2) { + bool rtn = true; + + std::ifstream file_stream1(file1); + std::ifstream file_stream2(file2); + + if (!file_stream1.is_open() || !file_stream2.is_open()) { + ERROR_MESSAGE_("Error: file open failed"); + return false; + } + + long long start_pos = file_stream1.tellg(); + file_stream1.seekg(0, file_stream1.end); + long long end_pos = file_stream1.tellg(); + long long file_size1 = end_pos - start_pos; + + file_stream2.seekg(0, file_stream1.beg); + start_pos = file_stream2.tellg(); + file_stream2.seekg(0, file_stream2.end); + long long file_size2 = end_pos - start_pos; + + if (file_size1 != file_size2) { + 
ERROR_MESSAGE_("Error: files size is not same"); + file_stream1.close(); + file_stream2.close(); + return false; + } + + file_stream1.seekg(0, file_stream1.beg); + file_stream2.seekg(0, file_stream2.beg); + + size_t pair_size_in_B = sizeof(TypeHashKey) + sizeof(TypeHashValue); + long long pair_num = file_size1 / pair_size_in_B; + + // CAUSION: file_stream1 is ordered, while file_stream2 is unordered + // So, firstly, we read pairs from file_stream2, and insert it into a hash table. + char *buf = (char *)malloc(pair_size_in_B); + TypeHashKey *key; + TypeHashValue *value; + HashTableCpu *hash_table = + new HashTableCpu(); + while (file_stream2.peek() != EOF) { + file_stream2.read(buf, pair_size_in_B); + key = (TypeHashKey *)buf; + value = (TypeHashValue *)(buf + sizeof(TypeHashKey)); + hash_table->insert(key, value, 1); + } + file_stream2.close(); + + if (hash_table->get_size() != pair_num) { + ERROR_MESSAGE_( + "Error: The number of pair inserting into hash table is not equal to hash " + "table file size\n"); + return false; + } + + // Then, we read pairs from file_stream1, and get(key,value2) from hash table, and + // compare value1 and value2. + TypeHashValue *value1; + TypeHashValue *value2 = (TypeHashValue *)malloc(sizeof(TypeHashValue)); + size_t value_len = sizeof(TypeHashValue) / sizeof(float); + while (file_stream1.peek() != EOF) { + file_stream1.read(buf, pair_size_in_B); + key = (TypeHashKey *)buf; + value1 = (TypeHashValue *)(buf + sizeof(TypeHashKey)); + hash_table->get(key, value2, 1); + if (!compare_float_array((float *)value1, (float *)value2, value_len)) { + rtn = false; + break; + } + } + file_stream1.close(); + + free(value2); + + return rtn; +} + +bool compare_embedding_feature(int num, float *embedding_feature_from_gpu, + float *embedding_feature_from_cpu) { + bool rtn = true; + // int err = 0; + + for (int i = 0; i < num; i++) { + if (!compare_float(embedding_feature_from_gpu[i], embedding_feature_from_cpu[i])) { + rtn = false; + break; + + // err++; + // if(err > 256) { + // break; + // } + // printf("Error: i=%d, embedding_feature_from_gpu=%.8f, + // embedding_feature_from_cpu=%.8f\n", i, embedding_feature_from_gpu[i], + // embedding_feature_from_cpu[i]); + } + } + + return rtn; +} + +bool compare_wgrad(int num, float *wgrad_from_gpu, float *wgrad_from_cpu) { + bool rtn = true; + // int err = 0; + + for (int i = 0; i < num; i++) { + if (!compare_float(wgrad_from_gpu[i], wgrad_from_cpu[i])) { + rtn = false; + break; + + // err++; + // if(err > 256) { + // break; + // } + // printf("Error: i=%d, wgrad_from_gpu=%.8f, wgrad_from_cpu=%.8f\n", i, + // wgrad_from_gpu[i], wgrad_from_cpu[i]); + } + } + + return rtn; +} + +bool compare_embedding_table(long long num, float *embedding_table_from_gpu, + float *embedding_table_from_cpu) { + bool rtn = true; + int err = 0; + + for (long long i = 0; i < num; i++) { + if (!compare_float(embedding_table_from_gpu[i], embedding_table_from_cpu[i])) { + rtn = false; + // break; + + err++; + if (err > 256) { + break; + } + printf("Error: i=%lld, embedding_table_from_gpu=%.8f, embedding_table_from_cpu=%.8f\n", i, + embedding_table_from_gpu[i], embedding_table_from_cpu[i]); + } + } + + return rtn; +} + +template +bool compare_hash_table(long long capacity, TypeHashKey *hash_table_key_from_gpu, + TypeHashValue *hash_table_value_from_gpu, + TypeHashKey *hash_table_key_from_cpu, + TypeHashValue *hash_table_value_from_cpu) { + bool rtn = true; + + // // just for debug + // for(long long i = 0; i < capacity; i++) { + // printf("i=%d, 
key_from_gpu=%d, key_from_cpu=%d \n", i, hash_table_key_from_gpu[i], + //hash_table_key_from_cpu[i]); + // } + + // Since the and is not the same ordered, we need to insert into a hash_table, then compare value1=hash_table->get(key2) with value2 + HashTableCpu *hash_table = + new HashTableCpu(); + hash_table->insert(hash_table_key_from_gpu, hash_table_value_from_gpu, capacity); + + TypeHashKey *key; + TypeHashValue *value1 = (TypeHashValue *)malloc(sizeof(TypeHashValue)); + TypeHashValue *value2; + size_t value_len = sizeof(TypeHashValue) / sizeof(float); + for (long long i = 0; i < capacity; i++) { + key = hash_table_key_from_cpu + i; + value2 = hash_table_value_from_cpu + i; + + hash_table->get(key, value1, 1); + if (!compare_float_array((float *)value1, (float *)value2, value_len)) { + rtn = false; + break; + } + } + + free(value1); + + return rtn; +} + +template +class UnorderedKeyGenerator { + public: + UnorderedKeyGenerator() : gen_(rd_()) {} + UnorderedKeyGenerator(T min, T max) : gen_(rd_()), dis_(min, max) {} + + // generate unduplicated dataset + void fill_unique(T *data, size_t len) { + if (len == 0) { + return; + } + assert(dis_.max() - dis_.min() >= len - 1); + + std::unordered_set set; + size_t sz = 0; + while (sz < len) { + T x = dis_(gen_); + auto res = set.insert(x); + if (res.second) { + data[sz++] = x; + } + } + assert(sz == set.size()); + assert(sz == len); + } + + private: + std::random_device rd_; + std::mt19937 gen_; + std::uniform_int_distribution dis_; +}; + +#if 0 +// sparse_embedding_hash upload_params() and download_params() testing +TEST(sparse_embedding_hash_test, upload_and_download_params) { + test::mpi_init(); + + // influential params for this test + const long long vocabulary_size = 50010; + //const long long vocabulary_size = 20; + //const long long vocabulary_size = 1010; + const int embedding_vec_size = 128; + //const int embedding_vec_size = 1; + //std::vector device_list = {0}; + std::vector device_list = {0,1}; + int num_devices = device_list.size(); + const char * hash_table_file_name = "hash_table.bin"; + const char * hash_table_check_file_name = "hash_table_check.bin"; + + // uninfluential params + const int slot_num = 2; + const int max_feature_num = 2*slot_num; + const int batchsize = 2; + const int batch_num = 1; // can not more than 32 + const long long num_records = batchsize * batch_num; + const long long label_dim = 1; + typedef long long T; + + // In order to not allocate the total size of hash table on each GPU, the users need to set the size of max_vocabulary_size_per_gpu, + // which should be more than vocabulary_size/gpu_count, eg: (1/0.75)x of that. 
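+ // Illustrative sizing for the values used in this test: with vocabulary_size = 50010,
+ // two GPUs in device_list and load_factor = 0.75, max_vocabulary_size_per_gpu would be
+ // roughly 50010 / 2 / 0.75 = 33340 hash-table slots per GPU.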
+ float load_factor = 0.75; // CAUSION: this is a very important param for hash_table get() performance + //long long max_vocabulary_size_per_gpu = (long long)((double)(vocabulary_size) / num_devices / load_factor); + + const SparseEmbeddingHashParams embedding_params = { + batchsize, + vocabulary_size, + load_factor, + embedding_vec_size, + max_feature_num, + slot_num, + 0, //combiner: 0-sum, 1-mean + 0 //optimizer: 0-adam + }; + + // CUASION: the data will be used in this test case, but we still need to let the data file non-empty since the DataRead requiring + // generate input data + const std::string tmp_file_name("temp_dataset_embedding.data"); + const std::string file_list_name("file_list_embedding.txt"); + { + //data generation; + std::ofstream out_stream(tmp_file_name, std::ofstream::binary); + DataSetHeader header = {num_records, label_dim, slot_num, 0}; + out_stream.write(reinterpret_cast(&header), sizeof(DataSetHeader)); + for(int i=0; i idata_sim(0, max_feature_num/slot_num-1); //both inclusive + UnifiedDataSimulator ldata_sim(0,vocabulary_size-1); + for(int j=0; j(&label), sizeof(int)); + } + for(int k=0; k(&nnz), sizeof(int)); + for(int j=0; j(&value), sizeof(T)); + } + //std::cout << std::endl; // just for test 20181211 + } + } + out_stream.close(); + std::ofstream file_list_stream(file_list_name, std::ofstream::out); + file_list_stream << (std::to_string(1) + "\n"); + file_list_stream << (tmp_file_name + "\n"); + file_list_stream.close(); + } + GPUResourceGroup gpu_resource_group(device_list); + + std::vector> vvgpu; + vvgpu.push_back(device_list); + DeviceMap device_map(vvgpu, 0); + + //setup a data reader + DataReader* data_reader = new DataReader(file_list_name, batchsize, \ + label_dim, slot_num, max_feature_num, gpu_resource_group, 1, 1); + + // define object + Embedding* embedding = new SparseEmbeddingHash(data_reader->get_row_offsets_tensors(), data_reader->get_value_tensors(), embedding_params, gpu_resource_group); + + // init hash table file + std::ofstream weight_stream(hash_table_file_name); + if(!weight_stream.is_open()) { + ERROR_MESSAGE_("Error: file not open for writing"); + } + UnifiedDataSimulator ldata_sim(0, vocabulary_size-1); + UnifiedDataSimulator fdata_sim(0, vocabulary_size-1); + T * p_key = (T *)malloc(vocabulary_size * sizeof(T)); + UnorderedKeyGenerator unorderedKey; + unorderedKey.fill_unique(p_key, vocabulary_size); + for(int i = 0; i < vocabulary_size; i++) { + //T key = (T)i; + //T key = ldata_sim.get_num(); // CAUSION: can not get correct results when testing by the case with duplicated keys + //weight_stream.write((char *)&key, sizeof(T)); + weight_stream.write((char *)&p_key[i], sizeof(T)); + //float val = (float)i; + float val = fdata_sim.get_num(); + for(int j = 0; j < embedding_vec_size; j++) { + weight_stream.write((char *)&val, sizeof(float)); + } + } + weight_stream.close(); + free(p_key); + + // upload data from host to device + std::ifstream i_weight_stream(hash_table_file_name); + printf("start updaload_params_to_device()\n"); + embedding->upload_params_to_device(i_weight_stream); + i_weight_stream.close(); + + // download data from device to host + std::ofstream o_weight_stream(hash_table_check_file_name); + printf("start download_params_to_host()\n"); + embedding->download_params_to_host(o_weight_stream); + o_weight_stream.close(); + + // comapre the read file with the written file + typedef struct TypeHashValue_{ + float data[embedding_vec_size]; + } TypeHashValue; + //ASSERT_EQ(true, 
compare_hash_table_files(hash_table_file_name, hash_table_check_file_name)); + printf("start compare_hash_table_files()\n"); + bool rtn = compare_hash_table_files(hash_table_file_name, hash_table_check_file_name); + ASSERT_EQ(true, rtn); +} +#endif + +#if 1 +// sparse_embedding_hash correctness testing: forward->backward->update_params +TEST(sparse_embedding_hash_test, training_correctness) { + test::mpi_init(); + + constexpr int batch_num = 4; // can not more than 32 + // constexpr int batch_num = 1; + constexpr int batchsize = 4096; + // constexpr int batchsize = 2; + constexpr long long num_records = batchsize * batch_num; + // constexpr int slot_num = 1; + constexpr int slot_num = 2; + constexpr int max_feature_num = 10 * slot_num; // max_feature_num in a sample + // constexpr int max_feature_num = 2*slot_num; + constexpr long long vocabulary_size = 55000; + // constexpr long long vocabulary_size = 10; + constexpr int embedding_vec_size = 128; + // constexpr int embedding_vec_size = 1; + constexpr int combiner = 1; // 0-sum, 1-mean + constexpr int optimizer = 0; // 0-adam, 1-momentum_sgd, 2-nesterov + constexpr float lr = 0.01; + // std::vector device_list = {0,1}; + std::vector device_list = {0}; + int num_devices = device_list.size(); + constexpr long long label_dim = 1; + typedef long long T; + + // In order to not allocate the total size of hash table on each GPU, the users need to set the + // size of max_vocabulary_size_per_gpu, which should be more than vocabulary_size/gpu_count, + // eg: 1.25x of that. + float load_factor = 0.75; // CAUSION: this is a very important param for performance + // long long max_vocabulary_size_per_gpu = (long long)((double)(vocabulary_size) / num_devices / + // load_factor); + + const char *hash_table_file_name = "hash_table.bin"; + const char *data_file_name = "temp_dataset_embedding.data"; + + bool init_hash_table = true; // true: init hash_table and upload_to_device + // false: don't init hash_table or upload_to_device, just use an + // empty hash_table to train + + // set up params + OptHyperParams hyper_params; + hyper_params.adam.beta1 = 0.9f; + hyper_params.adam.beta2 = 0.999f; + hyper_params.adam.epsilon = 1e-8f; + hyper_params.momentum.factor = 0.9f; + hyper_params.nesterov.mu = 0.9f; + + OptParams opt_params = {optimizer, lr, hyper_params}; + + const SparseEmbeddingHashParams embedding_params = { + batchsize, vocabulary_size, load_factor, embedding_vec_size, max_feature_num, slot_num, + combiner, // combiner: 0-sum, 1-mean + opt_params}; + + // generate input data + const std::string tmp_file_name(data_file_name); + const std::string file_list_name("file_list_embedding.txt"); + { + // data generation; + std::ofstream out_stream(tmp_file_name, std::ofstream::binary); + DataSetHeader header = {num_records, label_dim, slot_num, 0}; + out_stream.write(reinterpret_cast(&header), sizeof(DataSetHeader)); + for (int i = 0; i < num_records; i++) { + UnifiedDataSimulator idata_sim(0, max_feature_num / slot_num - 1); // both inclusive + UnifiedDataSimulator ldata_sim(0, vocabulary_size - 1); + for (int j = 0; j < label_dim; j++) { + int label = idata_sim.get_num(); + out_stream.write(reinterpret_cast(&label), sizeof(int)); + } + for (int k = 0; k < slot_num; k++) { + int nnz = idata_sim.get_num(); + nnz = (int)(max_feature_num / slot_num); // just for test + out_stream.write(reinterpret_cast(&nnz), sizeof(int)); + for (int j = 0; j < nnz; j++) { + T value = ldata_sim.get_num(); + // T value = k*nnz+j; // just for test, 20190625 + 
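+          // (record layout) each record written here is: label_dim int labels, then for
+          // every slot an int nnz followed by nnz keys of type T; the write below emits
+          // one such key.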
out_stream.write(reinterpret_cast(&value), sizeof(T)); + } + // std::cout << std::endl; // just for test 20181211 + } + } + out_stream.close(); + std::ofstream file_list_stream(file_list_name, std::ofstream::out); + file_list_stream << (std::to_string(1) + "\n"); + file_list_stream << (tmp_file_name + "\n"); + file_list_stream.close(); + } + + std::vector> vvgpu; + vvgpu.push_back(device_list); + DeviceMap device_map(vvgpu, 0); + GPUResourceGroup gpu_resource_group(device_map); + + // setup a data reader + DataReader *data_reader = new DataReader(file_list_name, batchsize, label_dim, slot_num, + max_feature_num, gpu_resource_group, 1, 1); + + Embedding *embedding = new SparseEmbeddingHash(data_reader->get_row_offsets_tensors(), + data_reader->get_value_tensors(), + embedding_params, gpu_resource_group); + + if (init_hash_table) { + // init hash table file + std::ofstream weight_stream(hash_table_file_name); + if (!weight_stream.is_open()) { + ERROR_MESSAGE_("Error: file not open for writing"); + } + UnifiedDataSimulator fdata_sim(-0.1f, 0.1f); + for (long long i = 0; i < vocabulary_size; i++) { + T key = (T)i; + // T key = ldata_sim.get_num(); + // CAUSION: can not set random keys here, because we need to ensure that: + // 1) we can find keys in the data file from this hash table + // 2) there are no repeated keys + weight_stream.write((char *)&key, sizeof(T)); + // float val = (float)i; + // float val = 1.0f; + float val = fdata_sim.get_num(); + for (int j = 0; j < embedding_vec_size; j++) { + weight_stream.write((char *)&val, sizeof(float)); + } + } + weight_stream.close(); + + // upload hash table to device + std::ifstream i_weight_stream(hash_table_file_name); + embedding->upload_params_to_device(i_weight_stream); + i_weight_stream.close(); + } + + // for SparseEmbeddingCpu + std::ifstream weight_stream_cpu(hash_table_file_name); + std::ifstream csr_stream_cpu(data_file_name); + SparseEmbeddingHashCpu *embedding_cpu = new SparseEmbeddingHashCpu( + batchsize, max_feature_num, vocabulary_size, embedding_vec_size, slot_num, combiner, + optimizer, lr, weight_stream_cpu, csr_stream_cpu, label_dim); + + // for results check + float *embedding_feature_from_gpu = + (float *)malloc(batchsize * slot_num * embedding_vec_size * sizeof(float)); + float *embedding_feature_from_cpu = embedding_cpu->get_embedding_feature_ptr(); + float *wgrad_from_gpu[device_list.size()]; + for (unsigned int i = 0; i < device_list.size(); i++) { + wgrad_from_gpu[i] = (float *)malloc(batchsize * slot_num * embedding_vec_size * sizeof(float)); + } + float *wgrad_from_cpu = embedding_cpu->get_wgrad_ptr(); + T *hash_table_key_from_gpu = (T *)malloc(vocabulary_size * sizeof(T)); + float *hash_table_value_from_gpu = + (float *)malloc(vocabulary_size * (long long)embedding_vec_size * sizeof(float)); + T *hash_table_key_from_cpu = embedding_cpu->get_hash_table_key_ptr(); + float *hash_table_value_from_cpu = embedding_cpu->get_hash_table_value_ptr(); + + typedef struct TypeHashValue_ { + float data[embedding_vec_size]; + } TypeHashValue; + + for (int i = 0; i < batch_num; i++) { + printf("Round %d start:\n", i); + + // call read a batch + data_reader->read_a_batch_to_device(); + + // GPU forward + embedding->forward(); + + // CPU forward + embedding_cpu->forward(); + + // check the result of forward + embedding->get_embedding_feature_ptr(embedding_feature_from_gpu); // memcpy from GPU to CPU + ASSERT_EQ(true, + compare_embedding_feature(batchsize * slot_num * embedding_vec_size, + embedding_feature_from_gpu, 
embedding_feature_from_cpu)); + + // GPU backward + embedding->backward(); + + // CPU backward + embedding_cpu->backward(); + + // check the result of backward + embedding->get_wgrad_ptr(wgrad_from_gpu[0], 0); + // check the result on multi GPUs first + if (device_list.size() > 1) { + for (unsigned int j = 1; j < device_list.size(); j++) { + embedding->get_wgrad_ptr(wgrad_from_gpu[j], j); // memcpy from GPU to CPU + // printf("\ncompare GPU[%d] and GPU[%d]\n", 0, j); + ASSERT_EQ(true, compare_wgrad(batchsize * slot_num * embedding_vec_size, wgrad_from_gpu[0], + wgrad_from_gpu[j])); + } + } + // printf("\ncompare GPU0 and CPU\n"); + ASSERT_EQ(true, compare_wgrad(batchsize * slot_num * embedding_vec_size, wgrad_from_gpu[0], + wgrad_from_cpu)); + + // GPU update_params + embedding->update_params(); + + // CPU update_params + embedding_cpu->update_params(); + + // check the results of update params + embedding->get_hash_table_ptr(hash_table_key_from_gpu, + hash_table_value_from_gpu); // memcpy from GPU to CPU + // ASSERT_EQ(true, compare_embedding_table(vocabulary_size*embedding_vec_size, + // hash_table_value_from_gpu, hash_table_value_from_cpu)); + bool rtn = compare_hash_table( + vocabulary_size, (T *)hash_table_key_from_gpu, (TypeHashValue *)hash_table_value_from_gpu, + (T *)hash_table_key_from_cpu, (TypeHashValue *)hash_table_value_from_cpu); + ASSERT_EQ(true, rtn); + printf("Round %d end:\n", i); + } + + // release resources + free(embedding_feature_from_gpu); + for (int i = 0; i < num_devices; i++) { + free(wgrad_from_gpu[i]); + } + free(hash_table_value_from_gpu); + free(hash_table_key_from_gpu); +} +#endif + +#if 1 +// sparse_embedding_hash performance profiling: forward()/backward()/update_params() +// 1. complie this app as release version +// 2. use nvprof / nvvp to run this app +TEST(sparse_embedding_hash_test, perf_profiling) { + test::mpi_init(); + constexpr int batch_num = 10; // can not more than 32 + // constexpr int batch_num = 1; + constexpr int batchsize = 40960; + // constexpr int batchsize = 2; + constexpr long long num_records = batchsize * batch_num; + // constexpr int slot_num = 1; + constexpr int slot_num = 10; + constexpr int max_feature_num = 10 * slot_num; + // constexpr int max_feature_num = 2*slot_num; + constexpr long long vocabulary_size = 55000; + // constexpr long long vocabulary_size = 100; + constexpr int embedding_vec_size = 128; + // constexpr int embedding_vec_size = 1; + constexpr int combiner = 0; // 0-sum, 1-mean + constexpr int optimizer = 0; // 0-adam, 1-momentum_sgd, 2-nesterov + constexpr float lr = 0.01; + // std::vector device_list = {0,1}; + std::vector device_list = {0}; + int num_devices = device_list.size(); + constexpr long long label_dim = 1; + typedef long long T; + + // In order to not allocate the total size of hash table on each GPU, the users need to set the + // size of max_vocabulary_size_per_gpu, which should be more than vocabulary_size/gpu_count, eg: + // (1/0.75)x of that. 
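+  // Here load_factor is the target occupancy of each per-GPU hash table (see the
+  // commented-out sizing formula below): capacity is inflated by 1/load_factor, so a
+  // smaller value spends more memory to reduce probe collisions in get().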
+ float load_factor = + 0.75; // CAUSION: this is a very important param for hash_table get() performance + // long long max_vocabulary_size_per_gpu = (long long)((double)(vocabulary_size) / num_devices / + // load_factor); + + const char *hash_table_file_name = "hash_table.bin"; + const char *data_file_name = "temp_dataset_embedding.data"; + + bool init_hash_table = true; // true: init hash_table and upload_to_device + // false: don't init hash_table or upload_to_device, just use an + // empty hash_table to train + + // set up params + OptHyperParams hyper_params; + hyper_params.adam.beta1 = 0.9f; + hyper_params.adam.beta2 = 0.999f; + hyper_params.adam.epsilon = 1e-8f; + hyper_params.momentum.factor = 0.9f; + hyper_params.nesterov.mu = 0.9f; + + OptParams opt_params = {optimizer, lr, hyper_params}; + + const SparseEmbeddingHashParams embedding_params = { + batchsize, vocabulary_size, load_factor, embedding_vec_size, max_feature_num, slot_num, + combiner, // combiner: 0-sum, 1-mean + opt_params}; + + // generate input data + const std::string tmp_file_name(data_file_name); + const std::string file_list_name("file_list_embedding.txt"); + { + // data generation; + std::ofstream out_stream(tmp_file_name, std::ofstream::binary); + DataSetHeader header = {num_records, label_dim, slot_num, 0}; + out_stream.write(reinterpret_cast(&header), sizeof(DataSetHeader)); + for (int i = 0; i < num_records; i++) { + UnifiedDataSimulator idata_sim(0, max_feature_num / slot_num - 1); // both inclusive + UnifiedDataSimulator ldata_sim(0, vocabulary_size - 1); + for (int j = 0; j < label_dim; j++) { + int label = idata_sim.get_num(); + out_stream.write(reinterpret_cast(&label), sizeof(int)); + } + for (int k = 0; k < slot_num; k++) { + int nnz = idata_sim.get_num(); + // nnz = 10; // just for test 20181211 + nnz = (int)(max_feature_num / slot_num); // just for test 20181221 + out_stream.write(reinterpret_cast(&nnz), sizeof(int)); + for (int j = 0; j < nnz; j++) { + T value = ldata_sim.get_num(); + out_stream.write(reinterpret_cast(&value), sizeof(T)); + } + // std::cout << std::endl; // just for test 20181211 + } + } + out_stream.close(); + std::ofstream file_list_stream(file_list_name, std::ofstream::out); + file_list_stream << (std::to_string(1) + "\n"); + file_list_stream << (tmp_file_name + "\n"); + file_list_stream.close(); + } + std::vector> vvgpu; + vvgpu.push_back(device_list); + DeviceMap device_map(vvgpu, 0); + GPUResourceGroup gpu_resource_group(device_map); + + // setup a data reader + DataReader *data_reader = new DataReader(file_list_name, batchsize, label_dim, slot_num, + max_feature_num, gpu_resource_group, 1, 1); + + Embedding *embedding = new SparseEmbeddingHash(data_reader->get_row_offsets_tensors(), + data_reader->get_value_tensors(), + embedding_params, gpu_resource_group); + + if (init_hash_table) { + // init hash table file + std::ofstream weight_stream(hash_table_file_name); + if (!weight_stream.is_open()) { + ERROR_MESSAGE_("Error: file not open for writing"); + } + UnifiedDataSimulator fdata_sim(-0.1f, 0.1f); + for (long long i = 0; i < vocabulary_size; i++) { + T key = (T)i; + // T key = ldata_sim.get_num(); + // CAUSION: can not set random keys here, because we need to ensure that: + // 1) we can find keys in the data file from this hash table + // 2) there are no repeated keys + weight_stream.write((char *)&key, sizeof(T)); + // float val = (float)i; + // float val = 1.0f; + float val = fdata_sim.get_num(); + for (int j = 0; j < embedding_vec_size; j++) { + 
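+        // note: the same random value `val` is written for every one of the
+        // embedding_vec_size floats of this key's vector (one draw per key, not per element).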
weight_stream.write((char *)&val, sizeof(float)); + } + } + weight_stream.close(); + + // upload hash table to device + std::ifstream i_weight_stream(hash_table_file_name); + embedding->upload_params_to_device(i_weight_stream); + i_weight_stream.close(); + } + + struct timeval start; + struct timeval end; + unsigned long time = 0, time_forward = 0, time_backward = 0, time_update_params = 0; + unsigned long total_time = 0; + + for (int i = 0; i < batch_num; i++) { + nvtxRangePushA("read_a_batch"); + + // call read a batch + data_reader->read_a_batch_to_device(); + + nvtxRangePop(); + + nvtxRangePushA("forward"); + + total_time = 0; + gettimeofday(&start, NULL); + + // GPU forward + embedding->forward(); + + gettimeofday(&end, NULL); + + nvtxRangePop(); + + time = 1000000 * (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec); + printf("Round[%d]: forward time %lu us \n", i, time); + time_forward += time; + total_time += time; + + nvtxRangePushA("backward"); + + gettimeofday(&start, NULL); + + // GPU backward + embedding->backward(); + + gettimeofday(&end, NULL); + + nvtxRangePop(); + + time = 1000000 * (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec); + printf("Round[%d]: backward time %lu us \n", i, time); + time_backward += time; + total_time += time; + + nvtxRangePushA("update_params"); + + gettimeofday(&start, NULL); + + // GPU update_params + embedding->update_params(); + + gettimeofday(&end, NULL); + + nvtxRangePop(); + + time = 1000000 * (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec); + printf("Round[%d]: update_params time %lu us \n", i, time); + total_time += time; + time_update_params += time; + + printf("Round[%d]: total time %lu us \n", i, total_time); + } + + printf("Average time of forward: %lu us\n", (unsigned long)(time_forward / batch_num)); + printf("Average time of backward: %lu us\n", (unsigned long)(time_backward / batch_num)); + printf("Average time of update_params: %lu us\n", + (unsigned long)(time_update_params / batch_num)); + printf("Average time of total_time: %lu us\n", + (unsigned long)((time_update_params + time_forward + time_backward) / batch_num)); +} +#endif diff --git a/cuda_code/encoder_decoder_4.cu b/cuda_code/encoder_decoder_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..93b082c9e74d800bc37bdb5b10e3a44330c5a650 --- /dev/null +++ b/cuda_code/encoder_decoder_4.cu @@ -0,0 +1,130 @@ +// -*- mode: c++; tab-width: 2; indent-tabs-mode: nil -*- +#include + +#include "common/god.h" +#include "common/sentences.h" + +#include "encoder_decoder.h" +#include "gpu/mblas/matrix_functions.h" +#include "gpu/dl4mt/dl4mt.h" +#include "gpu/decoder/encoder_decoder_state.h" +#include "gpu/decoder/best_hyps.h" + +using namespace std; + +namespace amunmt { +namespace GPU { + +EncoderDecoder::EncoderDecoder( + const God &god, + const std::string& name, + const YAML::Node& config, + size_t tab, + const Weights& model) + : Scorer(god, name, config, tab), + model_(model), + encoder_(new Encoder(model_)), + decoder_(new Decoder(god, model_)), + indices_(god.Get("beam-size")), + SourceContext_(new mblas::Matrix()) +{ + BEGIN_TIMER("EncoderDecoder"); +} + +EncoderDecoder::~EncoderDecoder() +{ + PAUSE_TIMER("EncoderDecoder"); +} + +void EncoderDecoder::Decode(const State& in, State& out, const std::vector& beamSizes) { + BEGIN_TIMER("Decode"); + const EDState& edIn = in.get(); + EDState& edOut = out.get(); + + decoder_->Decode(edOut.GetStates(), + edIn.GetStates(), + edIn.GetEmbeddings(), + *SourceContext_, + sentencesMask_, + 
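+                   // beamSizes: presumably the requested beam width per sentence in this batch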
beamSizes); + PAUSE_TIMER("Decode"); +} + +State* EncoderDecoder::NewState() const { + return new EDState(); +} + +void EncoderDecoder::Encode(const Sentences& source) { + BEGIN_TIMER("Encode"); + encoder_->Encode(source, tab_, *SourceContext_, sentencesMask_); + //cerr << "GPU SourceContext_=" << SourceContext_.Debug(1) << endl; + PAUSE_TIMER("Encode"); +} + +void EncoderDecoder::BeginSentenceState(State& state, size_t batchSize) { + BEGIN_TIMER("BeginSentenceState"); + EDState& edState = state.get(); + decoder_->EmptyState(edState.GetStates(), *SourceContext_, batchSize, sentencesMask_); + + decoder_->EmptyEmbedding(edState.GetEmbeddings(), batchSize); + PAUSE_TIMER("BeginSentenceState"); +} + + +void EncoderDecoder::AssembleBeamState(const State& in, + const Beam& beam, + State& out) { + BEGIN_TIMER("AssembleBeamState"); + std::vector beamWords; + std::vector beamStateIds; + for (const HypothesisPtr &h : beam) { + beamWords.push_back(h->GetWord()); + beamStateIds.push_back(h->GetPrevStateIndex()); + } + //cerr << "beamWords=" << Debug(beamWords, 2) << endl; + //cerr << "beamStateIds=" << Debug(beamStateIds, 2) << endl; + + const EDState& edIn = in.get(); + EDState& edOut = out.get(); + indices_.resize(beamStateIds.size()); + HostVector tmp = beamStateIds; + + mblas::copy(thrust::raw_pointer_cast(tmp.data()), + beamStateIds.size(), + thrust::raw_pointer_cast(indices_.data()), + cudaMemcpyHostToDevice); + //cerr << "indices_=" << mblas::Debug(indices_, 2) << endl; + + mblas::Assemble(edOut.GetStates(), edIn.GetStates(), indices_); + //cerr << "edOut.GetStates()=" << edOut.GetStates().Debug(1) << endl; + + //cerr << "beamWords=" << Debug(beamWords, 2) << endl; + decoder_->Lookup(edOut.GetEmbeddings(), beamWords); + //cerr << "edOut.GetEmbeddings()=" << edOut.GetEmbeddings().Debug(1) << endl; + PAUSE_TIMER("AssembleBeamState"); +} + +void EncoderDecoder::GetAttention(mblas::Matrix& Attention) { + decoder_->GetAttention(Attention); +} + +BaseMatrix& EncoderDecoder::GetProbs() { + return decoder_->GetProbs(); +} + +mblas::Matrix& EncoderDecoder::GetAttention() { + return decoder_->GetAttention(); +} + +size_t EncoderDecoder::GetVocabSize() const { + return decoder_->GetVocabSize(); +} + +void EncoderDecoder::Filter(const std::vector& filterIds) { + decoder_->Filter(filterIds); +} + + +} +} + diff --git a/cuda_code/enumerator_cu.cu b/cuda_code/enumerator_cu.cu new file mode 100644 index 0000000000000000000000000000000000000000..5e3ea76fc10070b48caa0c27624bb7a05cec9dea --- /dev/null +++ b/cuda_code/enumerator_cu.cu @@ -0,0 +1,24 @@ +#include "enumerator_cu.h" +#include "device_cu.h" + +void EnumeratorCu::enum_devices(std::vector &list) +{ + int count = 0; + if (cudaGetDeviceCount(&count)) + { + return; + } + + for (int i = 0; i< count; i++) + { + cudaDeviceProp deviceProps; + if (cudaGetDeviceProperties(&deviceProps, i) == cudaSuccess) + { + DeviceCu *device = new DeviceCu(); + device->m_device = i; + device->info += "name:"; + device->info += deviceProps.name; + list.push_back(device); + } + } +} \ No newline at end of file diff --git a/cuda_code/envmap_1.cu b/cuda_code/envmap_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..e14d9d94a1c9ff5dbfbf6bb9cbba7bef97020734 --- /dev/null +++ b/cuda_code/envmap_1.cu @@ -0,0 +1,474 @@ +#include "light/envmap.h" +#include +#include + +rtDeclareVariable(int, max_depth, , ); + +rtDeclareVariable(optix::Ray, ray, rtCurrentRay, ); +rtDeclareVariable(PerRayData_radiance, prd_radiance, rtPayload, ); + +// Environmental Lighting 
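+// The environment light is stored as a cube map: six faces (+X, -X, +Y, -Y, +Z, -Z),
+// each with its own radiance texture (envmap0..5), a texture used only for rays of
+// depth 0 (envmapDirec0..5), a per-face marginal CDF over rows (envcdfV0..5), a
+// conditional CDF over columns within a row (envcdfH0..5) and a solid-angle pdf table
+// (envpdf0..5). sampleEnvironmapLight() picks a face uniformly, then inverts the two
+// CDFs with a binary search to get (row, column), i.e. (v, u).
+//
+// Minimal sketch of that inverse-CDF search on a plain array (illustrative only; the
+// code below repeats the same loop per face directly on the rtBuffers):
+static __device__ int lowerBoundCdfSketch(const float* cdf, int n, float z) {
+  int left = 0, right = n - 1;
+  while (right > left) {
+    int mid = (left + right) / 2;
+    if (cdf[mid] >= z) right = mid;  // cdf[mid] already covers z: search the left half
+    else left = mid + 1;             // otherwise the answer lies to the right
+  }
+  return left;  // first index whose CDF value reaches z
+}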
+rtDeclareVariable(int, isEnvmap, , ); +rtTextureSampler envmap0; +rtTextureSampler envmap1; +rtTextureSampler envmap2; +rtTextureSampler envmap3; +rtTextureSampler envmap4; +rtTextureSampler envmap5; + +rtTextureSampler envmapDirec0; +rtTextureSampler envmapDirec1; +rtTextureSampler envmapDirec2; +rtTextureSampler envmapDirec3; +rtTextureSampler envmapDirec4; +rtTextureSampler envmapDirec5; + +rtBuffer envcdfV0; +rtBuffer envcdfV1; +rtBuffer envcdfV2; +rtBuffer envcdfV3; +rtBuffer envcdfV4; +rtBuffer envcdfV5; + +rtBuffer envcdfH0; +rtBuffer envcdfH1; +rtBuffer envcdfH2; +rtBuffer envcdfH3; +rtBuffer envcdfH4; +rtBuffer envcdfH5; + +rtBuffer envpdf0; +rtBuffer envpdf1; +rtBuffer envpdf2; +rtBuffer envpdf3; +rtBuffer envpdf4; +rtBuffer envpdf5; + +rtDeclareVariable(float, infiniteFar, , ); + + +//RT_CALLABLE_PROGRAM float3 EnvUVToDirec(float u, float v){ +// // Turn uv coordinate into direction +// float theta = 2 * (u - 0.5) * M_PIf; +// float phi = M_PIf * (1 - v); +// return make_float3( +// sinf(phi) * sinf(theta), +// cosf(phi), +// sinf(phi) * cosf(theta) +// ); +//} +// +// +//RT_CALLABLE_PROGRAM float2 EnvDirecToUV(const float3& direc){ +// float theta = atan2f( direc.x, direc.z ); +// float phi = M_PIf - acosf(direc.y ); +// float u = theta * (0.5f * M_1_PIf) + 0.5; +// if(u > 1) +// u = u-1; +// float v = phi / M_PIf; +// return make_float2(u, v); +//} +// +//RT_CALLABLE_PROGRAM float EnvDirecToPdf(const float3& direc) { +// float2 uv = EnvDirecToUV(direc); +// size_t2 pdfSize = envpdf.size(); +// float u = uv.x, v = uv.y; +// int rowId = int(v * (pdfSize.y - 1) ); +// int colId = int(u * (pdfSize.x - 1) ); +// return envpdf[make_uint2(colId, rowId ) ]; +//} + +RT_CALLABLE_PROGRAM float3 EnvUVToDirec(float u, float v, int face_index) { + // Turn uv coordinate into direction + // convert range 0 to 1 to -1 to 1 + float uc = 2.0f * u - 1.0f; + float vc = 2.0f * v - 1.0f; + float x, y, z; + switch (face_index) { + case 0: x = 1.0f; y = vc; z = -uc; break; // POSITIVE X + case 1: x = -1.0f; y = vc; z = uc; break; // NEGATIVE X + case 2: x = uc; y = 1.0f; z = -vc; break; // POSITIVE Y + case 3: x = uc; y = -1.0f; z = vc; break; // NEGATIVE Y + case 4: x = uc; y = vc; z = 1.0f; break; // POSITIVE Z + case 5: x = -uc; y = vc; z = -1.0f; break; // NEGATIVE Z + } + return normalize(make_float3(x, y, z)); + +} + +RT_CALLABLE_PROGRAM float2 EnvDirecToUV(const float3& direc, int& f_index) { + float x = direc.x; + float y = direc.y; + float z = direc.z; + + float absX = fabs(x); + float absY = fabs(y); + float absZ = fabs(z); + + int isXPositive = x > 0 ? 1 : 0; + int isYPositive = y > 0 ? 1 : 0; + int isZPositive = z > 0 ? 
1 : 0; + + float maxAxis, uc, vc; + + // POSITIVE X + if (isXPositive && absX >= absY && absX >= absZ) { + // u (0 to 1) goes from +z to -z + // v (0 to 1) goes from -y to +y + maxAxis = absX; + uc = -z; + vc = y; + f_index = 0; + } + // NEGATIVE X + if (!isXPositive && absX >= absY && absX >= absZ) { + // u (0 to 1) goes from -z to +z + // v (0 to 1) goes from -y to +y + maxAxis = absX; + uc = z; + vc = y; + f_index = 1; + } + // POSITIVE Y + if (isYPositive && absY >= absX && absY >= absZ) { + // u (0 to 1) goes from -x to +x + // v (0 to 1) goes from +z to -z + maxAxis = absY; + uc = x; + vc = -z; + f_index = 2; + } + // NEGATIVE Y + if (!isYPositive && absY >= absX && absY >= absZ) { + // u (0 to 1) goes from -x to +x + // v (0 to 1) goes from -z to +z + maxAxis = absY; + uc = x; + vc = z; + f_index = 3; + } + // POSITIVE Z + if (isZPositive && absZ >= absX && absZ >= absY) { + // u (0 to 1) goes from -x to +x + // v (0 to 1) goes from -y to +y + maxAxis = absZ; + uc = x; + vc = y; + f_index = 4; + } + // NEGATIVE Z + if (!isZPositive && absZ >= absX && absZ >= absY) { + // u (0 to 1) goes from +x to -x + // v (0 to 1) goes from -y to +y + maxAxis = absZ; + uc = -x; + vc = y; + f_index = 5; + } + + // Convert range from -1 to 1 to 0 to 1 + auto u = 0.5f * (uc / maxAxis + 1.0f); + auto v = 0.5f * (vc / maxAxis + 1.0f); + return make_float2(u, v); +} + + +RT_CALLABLE_PROGRAM float EnvDirecToPdf(const float3& direc) { + int f_index; + float2 uv = EnvDirecToUV(direc, f_index); + float u = uv.x, v = uv.y; + size_t2 pdfSize; + switch (f_index) { + case 0: pdfSize = envpdf0.size(); break; + case 1: pdfSize = envpdf1.size(); break; + case 2: pdfSize = envpdf2.size(); break; + case 3: pdfSize = envpdf3.size(); break; + case 4: pdfSize = envpdf4.size(); break; + case 5: pdfSize = envpdf5.size(); break; + } + int rowId = int(v * (pdfSize.y - 1)); + int colId = int(u * (pdfSize.x - 1)); + switch (f_index) { + case 0: return envpdf0[make_uint2(colId, rowId ) ]; break; + case 1: return envpdf1[make_uint2(colId, rowId ) ]; break; + case 2: return envpdf2[make_uint2(colId, rowId ) ]; break; + case 3: return envpdf3[make_uint2(colId, rowId ) ]; break; + case 4: return envpdf4[make_uint2(colId, rowId ) ]; break; + case 5: return envpdf5[make_uint2(colId, rowId ) ]; break; + } +} + + +RT_CALLABLE_PROGRAM void sampleEnvironmapLight(unsigned int& seed, float3& radiance, float3& direction, float& pdfSolidEnv){ + float z1 = rnd(seed); + float z2 = rnd(seed); + + int ncols = envcdfH0.size().x; + int nrows = envcdfH0.size().y; + + int f_index = (int) (6 * rnd(seed)); + + float u = 0, v = 0; + int rowId = 0; + int colId = 0; + // Sample the row + switch (f_index) { + case 0: + { + int left = 0, right = nrows-1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfV0[ make_uint2(0, mid) ] >= z1) + right = mid; + else if(envcdfV0[ make_uint2(0, mid) ] < z1) + left = mid + 1; + } + float up = envcdfV0[make_uint2(0, left) ]; + float down = (left == 0) ? 0 : envcdfV0[make_uint2(0, left-1) ]; + v = ( (z1 - down) / fmaxf( (up - down), 1e-14) + left) / float(nrows); + rowId = left; + } + + // Sample the column + { + int left = 0; int right = ncols - 1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfH0[ make_uint2(mid, rowId) ] >= z2) + right = mid; + else if(envcdfH0[ make_uint2(mid, rowId) ] < z2) + left = mid + 1; + } + float up = envcdfH0[make_uint2(left, rowId) ]; + float down = (left == 0) ? 
0 : envcdfH0[make_uint2(left-1, rowId) ]; + u = ((z2 - down) / fmaxf((up - down), 1e-14) + left) / float(ncols); + colId = left; + } + break; + + case 1: + { + int left = 0, right = nrows-1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfV1[ make_uint2(0, mid) ] >= z1) + right = mid; + else if(envcdfV1[ make_uint2(0, mid) ] < z1) + left = mid + 1; + } + float up = envcdfV1[make_uint2(0, left) ]; + float down = (left == 0) ? 0 : envcdfV1[make_uint2(0, left-1) ]; + v = ( (z1 - down) / fmaxf( (up - down), 1e-14) + left) / float(nrows); + rowId = left; + } + + // Sample the column + { + int left = 0; int right = ncols - 1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfH1[ make_uint2(mid, rowId) ] >= z2) + right = mid; + else if(envcdfH1[ make_uint2(mid, rowId) ] < z2) + left = mid + 1; + } + float up = envcdfH1[make_uint2(left, rowId) ]; + float down = (left == 0) ? 0 : envcdfH1[make_uint2(left-1, rowId) ]; + u = ((z2 - down) / fmaxf((up - down), 1e-14) + left) / float(ncols); + colId = left; + } + break; + case 2: + { + int left = 0, right = nrows-1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfV2[ make_uint2(0, mid) ] >= z1) + right = mid; + else if(envcdfV2[ make_uint2(0, mid) ] < z1) + left = mid + 1; + } + float up = envcdfV2[make_uint2(0, left) ]; + float down = (left == 0) ? 0 : envcdfV2[make_uint2(0, left-1) ]; + v = ( (z1 - down) / fmaxf( (up - down), 1e-14) + left) / float(nrows); + rowId = left; + } + + // Sample the column + { + int left = 0; int right = ncols - 1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfH2[ make_uint2(mid, rowId) ] >= z2) + right = mid; + else if(envcdfH2[ make_uint2(mid, rowId) ] < z2) + left = mid + 1; + } + float up = envcdfH2[make_uint2(left, rowId) ]; + float down = (left == 0) ? 0 : envcdfH2[make_uint2(left-1, rowId) ]; + u = ((z2 - down) / fmaxf((up - down), 1e-14) + left) / float(ncols); + colId = left; + } + break; + case 3: + { + int left = 0, right = nrows-1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfV3[ make_uint2(0, mid) ] >= z1) + right = mid; + else if(envcdfV3[ make_uint2(0, mid) ] < z1) + left = mid + 1; + } + float up = envcdfV3[make_uint2(0, left) ]; + float down = (left == 0) ? 0 : envcdfV3[make_uint2(0, left-1) ]; + v = ( (z1 - down) / fmaxf( (up - down), 1e-14) + left) / float(nrows); + rowId = left; + } + + // Sample the column + { + int left = 0; int right = ncols - 1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfH3[ make_uint2(mid, rowId) ] >= z2) + right = mid; + else if(envcdfH3[ make_uint2(mid, rowId) ] < z2) + left = mid + 1; + } + float up = envcdfH3[make_uint2(left, rowId) ]; + float down = (left == 0) ? 0 : envcdfH3[make_uint2(left-1, rowId) ]; + u = ((z2 - down) / fmaxf((up - down), 1e-14) + left) / float(ncols); + colId = left; + } + break; + case 4: + { + int left = 0, right = nrows-1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfV4[ make_uint2(0, mid) ] >= z1) + right = mid; + else if(envcdfV4[ make_uint2(0, mid) ] < z1) + left = mid + 1; + } + float up = envcdfV4[make_uint2(0, left) ]; + float down = (left == 0) ? 
0 : envcdfV4[make_uint2(0, left-1) ]; + v = ( (z1 - down) / fmaxf( (up - down), 1e-14) + left) / float(nrows); + rowId = left; + } + + // Sample the column + { + int left = 0; int right = ncols - 1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfH4[ make_uint2(mid, rowId) ] >= z2) + right = mid; + else if(envcdfH4[ make_uint2(mid, rowId) ] < z2) + left = mid + 1; + } + float up = envcdfH4[make_uint2(left, rowId) ]; + float down = (left == 0) ? 0 : envcdfH4[make_uint2(left-1, rowId) ]; + u = ((z2 - down) / fmaxf((up - down), 1e-14) + left) / float(ncols); + colId = left; + } + break; + case 5: + { + int left = 0, right = nrows-1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfV5[ make_uint2(0, mid) ] >= z1) + right = mid; + else if(envcdfV5[ make_uint2(0, mid) ] < z1) + left = mid + 1; + } + float up = envcdfV5[make_uint2(0, left) ]; + float down = (left == 0) ? 0 : envcdfV5[make_uint2(0, left-1) ]; + v = ( (z1 - down) / fmaxf( (up - down), 1e-14) + left) / float(nrows); + rowId = left; + } + + // Sample the column + { + int left = 0; int right = ncols - 1; + while(right > left){ + int mid = (left + right) / 2; + if(envcdfH5[ make_uint2(mid, rowId) ] >= z2) + right = mid; + else if(envcdfH5[ make_uint2(mid, rowId) ] < z2) + left = mid + 1; + } + float up = envcdfH5[make_uint2(left, rowId) ]; + float down = (left == 0) ? 0 : envcdfH5[make_uint2(left-1, rowId) ]; + u = ((z2 - down) / fmaxf((up - down), 1e-14) + left) / float(ncols); + colId = left; + } + break; + } + // Turn uv coordinate into direction + //int face_index; + direction = EnvUVToDirec(u, v, f_index); + switch (f_index) { + case 0: pdfSolidEnv = envpdf0[make_uint2(colId,rowId)]; radiance = make_float3(tex2D(envmap0, u, v)); break; + case 1: pdfSolidEnv = envpdf1[make_uint2(colId,rowId)]; radiance = make_float3(tex2D(envmap1, u, v)); break; + case 2: pdfSolidEnv = envpdf2[make_uint2(colId,rowId)]; radiance = make_float3(tex2D(envmap2, u, v)); break; + case 3: pdfSolidEnv = envpdf3[make_uint2(colId,rowId)]; radiance = make_float3(tex2D(envmap3, u, v)); break; + case 4: pdfSolidEnv = envpdf4[make_uint2(colId,rowId)]; radiance = make_float3(tex2D(envmap4, u, v)); break; + case 5: pdfSolidEnv = envpdf5[make_uint2(colId,rowId)]; radiance = make_float3(tex2D(envmap5, u, v)); break; + } +} + + +RT_PROGRAM void envmap_miss(){ + if(isEnvmap == 0){ + prd_radiance.attenuation = make_float3(0.0); + } + else if(isEnvmap == 1){ + int f_index; + float2 uv = EnvDirecToUV(prd_radiance.direction, f_index); + + if(prd_radiance.depth == 0){ + switch (f_index) { + case 0: prd_radiance.radiance = make_float3(tex2D(envmapDirec0, uv.x, uv.y) ); break; + case 1: prd_radiance.radiance = make_float3(tex2D(envmapDirec1, uv.x, uv.y) ); break; + case 2: prd_radiance.radiance = make_float3(tex2D(envmapDirec2, uv.x, uv.y) ); break; + case 3: prd_radiance.radiance = make_float3(tex2D(envmapDirec3, uv.x, uv.y) ); break; + case 4: prd_radiance.radiance = make_float3(tex2D(envmapDirec4, uv.x, uv.y) ); break; + case 5: prd_radiance.radiance = make_float3(tex2D(envmapDirec5, uv.x, uv.y) ); break; + } + } + else{ + float3 radiance; + switch (f_index) { + case 0: radiance = make_float3(tex2D(envmap0, uv.x, uv.y) ); break; + case 1: radiance = make_float3(tex2D(envmap1, uv.x, uv.y) ); break; + case 2: radiance = make_float3(tex2D(envmap2, uv.x, uv.y) ); break; + case 3: radiance = make_float3(tex2D(envmap3, uv.x, uv.y) ); break; + case 4: radiance = make_float3(tex2D(envmap4, uv.x, uv.y) ); break; + case 5: radiance = 
make_float3(tex2D(envmap5, uv.x, uv.y) ); break; + } + // Multiple Importance Sampling + if(prd_radiance.pdf < 0){ + prd_radiance.radiance += radiance * prd_radiance.attenuation; + } + else{ + float pdfSolidEnv = EnvDirecToPdf(prd_radiance.direction); + float pdfSolidBRDF = prd_radiance.pdf; + float pdfSolidEnv2 = pdfSolidEnv * pdfSolidEnv; + float pdfSolidBRDF2 = pdfSolidBRDF * pdfSolidBRDF; + + float3 radianceInc = radiance * pdfSolidBRDF2 / fmaxf(pdfSolidBRDF2 + pdfSolidEnv2, 1e-14)* prd_radiance.attenuation; + prd_radiance.radiance += radianceInc; + } + } + } + prd_radiance.done = true; +} + + +RT_PROGRAM void miss(){ + prd_radiance.radiance = make_float3(0.0); + prd_radiance.done = true; +} diff --git a/cuda_code/equal_7.cu b/cuda_code/equal_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..890b375b37a175d4d1dd9439086784b00f322a5d --- /dev/null +++ b/cuda_code/equal_7.cu @@ -0,0 +1,35 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "src/delegate/tensorrt/cuda_impl/equal.cuh" +#include +#include "src/delegate/tensorrt/cuda_impl/cuda_helper.h" + +template +__global__ void EqualKernel(const T *input1, const T *input2, T *output, int element_cnt) { + for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < element_cnt; pos += blockDim.x * gridDim.x) { + output[pos] = (input1[pos] - input2[pos] < 1e-6 && input1[pos] - input2[pos] > -1e-6); + } +} + +template +void Equal(const T *input1, const T *input2, T *output, int element_cnt, cudaStream_t stream) { + EqualKernel<<>>(input1, input2, output, element_cnt); + return; +} + +template void Equal(const float *input1, const float *input2, float *output, int element_cnt, cudaStream_t stream); +template void Equal(const int *input1, const int *input2, int *output, int element_cnt, cudaStream_t stream); diff --git a/cuda_code/equi_miner_17.cu b/cuda_code/equi_miner_17.cu new file mode 100644 index 0000000000000000000000000000000000000000..3db7f7e7b06814b3b15de281b27569cbfcf1e95f --- /dev/null +++ b/cuda_code/equi_miner_17.cu @@ -0,0 +1,1002 @@ +// Equihash CUDA solver +// Copyright (c) 2016 John Tromp + +#define XINTREE +#define UNROLL +#define htole32(x) (x) +#define HAVE_DECL_HTOLE32 1 + +#include "../cpu_tromp/equi.h" +#include +#include +#include +#include +#include +#include + +#include "eqcuda.hpp" + +#include "blake2b.cu" + + +typedef uint16_t u16; +typedef uint64_t u64; + + +#ifndef RESTBITS +#define RESTBITS 4 +#endif + +// 2_log of number of buckets +#define BUCKBITS (DIGITBITS-RESTBITS) + +#ifndef SAVEMEM +#if RESTBITS == 4 +// can't save memory in such small buckets +#define SAVEMEM 1 +#elif RESTBITS >= 8 +// take advantage of law of large numbers (sum of 2^8 random numbers) +// this reduces (200,9) memory to under 144MB, with negligible discarding +#define SAVEMEM 9/14 +#endif +#endif + +// number of buckets +static const u32 NBUCKETS = 1 << BUCKBITS; +// bucket mask +static const u32 BUCKMASK = NBUCKETS - 1; +// 2_log of 
number of slots per bucket +static const u32 SLOTBITS = RESTBITS + 1 + 1; +static const u32 SLOTRANGE = 1 << SLOTBITS; +// number of slots per bucket +static const u32 NSLOTS = SLOTRANGE * SAVEMEM; +// SLOTBITS mask +static const u32 SLOTMASK = SLOTRANGE - 1; +// number of possible values of xhash (rest of n) bits +static const u32 NRESTS = 1 << RESTBITS; +// RESTBITS mask +static const u32 RESTMASK = NRESTS - 1; +// number of blocks of hashes extracted from single 512 bit blake2b output +static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE; +// nothing larger found in 100000 runs +static const u32 MAXSOLS = 8; + +// tree node identifying its children as two different slots in +// a bucket on previous layer with the same rest bits (x-tra hash) +struct tree { + u32 bid_s0_s1_x; // manual bitfields + + __device__ tree(const u32 idx, const u32 xh) { + bid_s0_s1_x = idx << RESTBITS | xh; + } + __device__ tree(const u32 idx) { + bid_s0_s1_x = idx; + } + __device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) { +#ifdef XINTREE + bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh; +#else + bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1; +#endif + } + __device__ u32 getindex() const { +#ifdef XINTREE + return bid_s0_s1_x >> RESTBITS; +#else + return bid_s0_s1_x; +#endif + } + __device__ u32 bucketid() const { +#ifdef XINTREE + return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS); +#else + return bid_s0_s1_x >> (2 * SLOTBITS); +#endif + } + __device__ u32 slotid0() const { +#ifdef XINTREE + return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK; +#else + return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK; +#endif + } + __device__ u32 slotid1() const { +#ifdef XINTREE + return (bid_s0_s1_x >> RESTBITS) & SLOTMASK; +#else + return bid_s0_s1_x & SLOTMASK; +#endif + } + __device__ u32 xhash() const { + return bid_s0_s1_x & RESTMASK; + } +}; + +union hashunit { + u32 word; + uchar bytes[sizeof(u32)]; +}; + +#define WORDS(bits) ((bits + 31) / 32) +#define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS) +#define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS) + +struct slot0 { + tree attr; + hashunit hash[HASHWORDS0]; +}; + +struct slot1 { + tree attr; + hashunit hash[HASHWORDS1]; +}; + +// a bucket is NSLOTS treenodes +typedef slot0 bucket0[NSLOTS]; +typedef slot1 bucket1[NSLOTS]; +// the N-bit hash consists of K+1 n-bit "digits" +// each of which corresponds to a layer of NBUCKETS buckets +typedef bucket0 digit0[NBUCKETS]; +typedef bucket1 digit1[NBUCKETS]; + +// size (in bytes) of hash in round 0 <= r < WK +u32 hhashsize(const u32 r) { +#ifdef XINTREE + const u32 hashbits = WN - (r + 1) * DIGITBITS; +#else + const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS; +#endif + return (hashbits + 7) / 8; +} +// size (in bytes) of hash in round 0 <= r < WK +__device__ u32 hashsize(const u32 r) { +#ifdef XINTREE + const u32 hashbits = WN - (r + 1) * DIGITBITS; +#else + const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS; +#endif + return (hashbits + 7) / 8; +} + +u32 hhashwords(u32 bytes) { + return (bytes + 3) / 4; +} + +__device__ u32 hashwords(u32 bytes) { + return (bytes + 3) / 4; +} + +// manages hash and tree data +struct htalloc { + bucket0 *trees0[(WK + 1) / 2]; + bucket1 *trees1[WK / 2]; +}; + +typedef u32 bsizes[NBUCKETS]; + +struct equi { + blake2b_state blake_ctx; + htalloc hta; + bsizes *nslots; + proof *sols; + u32 nsols; + u32 nthreads; + equi(const u32 n_threads) { + nthreads = n_threads; + } + void setheadernonce(const char 
*header, const u32 len, const char* nonce, const u32 nlen) { + setheader(&blake_ctx, header, len, nonce, nlen); + checkCudaErrors(cudaMemset(nslots, 0, NBUCKETS * sizeof(u32))); + nsols = 0; + } + __device__ u32 getnslots0(const u32 bid) { + u32 &nslot = nslots[0][bid]; + const u32 n = min(nslot, NSLOTS); + nslot = 0; + return n; + } + __device__ u32 getnslots1(const u32 bid) { + u32 &nslot = nslots[1][bid]; + const u32 n = min(nslot, NSLOTS); + nslot = 0; + return n; + } + __device__ void orderindices(u32 *indices, u32 size) { + if (indices[0] > indices[size]) { + for (u32 i = 0; i < size; i++) { + const u32 tmp = indices[i]; + indices[i] = indices[size + i]; + indices[size + i] = tmp; + } + } + } + __device__ void listindices1(const tree t, u32 *indices) { + const bucket0 &buck = hta.trees0[0][t.bucketid()]; + const u32 size = 1 << 0; + indices[0] = buck[t.slotid0()].attr.getindex(); + indices[size] = buck[t.slotid1()].attr.getindex(); + orderindices(indices, size); + } + __device__ void listindices2(const tree t, u32 *indices) { + const bucket1 &buck = hta.trees1[0][t.bucketid()]; + const u32 size = 1 << 1; + listindices1(buck[t.slotid0()].attr, indices); + listindices1(buck[t.slotid1()].attr, indices + size); + orderindices(indices, size); + } + __device__ void listindices3(const tree t, u32 *indices) { + const bucket0 &buck = hta.trees0[1][t.bucketid()]; + const u32 size = 1 << 2; + listindices2(buck[t.slotid0()].attr, indices); + listindices2(buck[t.slotid1()].attr, indices + size); + orderindices(indices, size); + } + __device__ void listindices4(const tree t, u32 *indices) { + const bucket1 &buck = hta.trees1[1][t.bucketid()]; + const u32 size = 1 << 3; + listindices3(buck[t.slotid0()].attr, indices); + listindices3(buck[t.slotid1()].attr, indices + size); + orderindices(indices, size); + } + __device__ void listindices5(const tree t, u32 *indices) { + const bucket0 &buck = hta.trees0[2][t.bucketid()]; + const u32 size = 1 << 4; + listindices4(buck[t.slotid0()].attr, indices); + listindices4(buck[t.slotid1()].attr, indices+size); + orderindices(indices, size); + } + __device__ void listindices6(const tree t, u32 *indices) { + const bucket1 &buck = hta.trees1[2][t.bucketid()]; + const u32 size = 1 << 5; + listindices5(buck[t.slotid0()].attr, indices); + listindices5(buck[t.slotid1()].attr, indices+size); + orderindices(indices, size); + } + __device__ void listindices7(const tree t, u32 *indices) { + const bucket0 &buck = hta.trees0[3][t.bucketid()]; + const u32 size = 1 << 6; + listindices6(buck[t.slotid0()].attr, indices); + listindices6(buck[t.slotid1()].attr, indices+size); + orderindices(indices, size); + } + __device__ void listindices8(const tree t, u32 *indices) { + const bucket1 &buck = hta.trees1[3][t.bucketid()]; + const u32 size = 1 << 7; + listindices7(buck[t.slotid0()].attr, indices); + listindices7(buck[t.slotid1()].attr, indices+size); + orderindices(indices, size); + } + __device__ void listindices9(const tree t, u32 *indices) { + const bucket0 &buck = hta.trees0[4][t.bucketid()]; + const u32 size = 1 << 8; + listindices8(buck[t.slotid0()].attr, indices); + listindices8(buck[t.slotid1()].attr, indices+size); + orderindices(indices, size); + } + __device__ void candidate(const tree t) { + proof prf; +#if WK==9 + listindices9(t, prf); +#elif WK==5 + listindices5(t, prf); +#else +#error not implemented +#endif + if (probdupe(prf)) + return; + u32 soli = atomicAdd(&nsols, 1); + if (soli < MAXSOLS) +#if WK==9 + listindices9(t, sols[soli]); +#elif WK==5 + listindices5(t, 
sols[soli]); +#else +#error not implemented +#endif + } + void showbsizes(u32 r) { +#if defined(HIST) || defined(SPARK) || defined(LOGSPARK) + u32 ns[NBUCKETS]; + checkCudaErrors(cudaMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), cudaMemcpyDeviceToHost)); + u32 binsizes[65]; + memset(binsizes, 0, 65 * sizeof(u32)); + for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) { + u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6); + binsizes[bsize]++; + } + for (u32 i = 0; i < 65; i++) { +#ifdef HIST + printf(" %d:%d", i, binsizes[i]); +#else +#ifdef SPARK + u32 sparks = binsizes[i] / SPARKSCALE; +#else + u32 sparks = 0; + for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++; + sparks = sparks * 7 / SPARKSCALE; +#endif + printf("\342\226%c", '\201' + sparks); +#endif + } + printf("\n"); +#endif + } + // proper dupe test is a little costly on GPU, so allow false negatives + __device__ bool probdupe(u32 *prf) { + unsigned short susp[PROOFSIZE]; + memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short)); + for (u32 i=0; i>WK; + if (msb == susp[bin]) + return true; + susp[bin] = msb; + } + return false; + } + struct htlayout { + htalloc hta; + u32 prevhashunits; + u32 nexthashunits; + u32 dunits; + u32 prevbo; + u32 nextbo; + + __device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) { + u32 nexthashbytes = hashsize(r); + nexthashunits = hashwords(nexthashbytes); + prevbo = 0; + nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3 + if (r) { + u32 prevhashbytes = hashsize(r-1); + prevhashunits = hashwords(prevhashbytes); + prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3 + dunits = prevhashunits - nexthashunits; + } + } + __device__ u32 getxhash0(const slot0* pslot) const { +#ifdef XINTREE + return pslot->attr.xhash(); +#elif WN == 200 && RESTBITS == 4 + return pslot->hash->bytes[prevbo] >> 4; +#elif WN == 200 && RESTBITS == 8 + return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4; +#elif WN == 144 && RESTBITS == 4 + return pslot->hash->bytes[prevbo] & 0xf; +#elif WN == 200 && RESTBITS == 6 + return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4; +#else +#error non implemented +#endif + } + __device__ u32 getxhash1(const slot1* pslot) const { +#ifdef XINTREE + return pslot->attr.xhash(); +#elif WN == 200 && RESTBITS == 4 + return pslot->hash->bytes[prevbo] & 0xf; +#elif WN == 200 && RESTBITS == 8 + return pslot->hash->bytes[prevbo]; +#elif WN == 144 && RESTBITS == 4 + return pslot->hash->bytes[prevbo] & 0xf; +#elif WN == 200 && RESTBITS == 6 + return pslot->hash->bytes[prevbo] & 0x3f; +#else +#error non implemented +#endif + } + __device__ bool equal(const hashunit *hash0, const hashunit *hash1) const { + return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word; + } + }; + + struct collisiondata { +#ifdef XBITMAP +#if NSLOTS > 64 +#error cant use XBITMAP with more than 64 slots +#endif + u64 xhashmap[NRESTS]; + u64 xmap; +#else +#if RESTBITS <= 6 + typedef uchar xslot; +#else + typedef u16 xslot; +#endif + static const xslot xnil = ~0; + xslot xhashslots[NRESTS]; + xslot nextxhashslot[NSLOTS]; + xslot nextslot; +#endif + u32 s0; + + __device__ void clear() { +#ifdef XBITMAP + memset(xhashmap, 0, NRESTS * sizeof(u64)); +#else + memset(xhashslots, xnil, NRESTS * sizeof(xslot)); + memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot)); +#endif + } + __device__ bool addslot(u32 s1, u32 xh) { +#ifdef XBITMAP + xmap = xhashmap[xh]; + xhashmap[xh] |= (u64)1 << s1; + s0 = ~0; + 
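+      // s0 starts at ~0 (i.e. -1) so that the first `s0 += __ffsll(xmap)` in slot()
+      // turns the 1-based bit position returned by __ffsll into a 0-based slot index.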
return true; +#else + nextslot = xhashslots[xh]; + nextxhashslot[s1] = nextslot; + xhashslots[xh] = s1; + return true; +#endif + } + __device__ bool nextcollision() const { +#ifdef XBITMAP + return xmap != 0; +#else + return nextslot != xnil; +#endif + } + __device__ u32 slot() { +#ifdef XBITMAP + const u32 ffs = __ffsll(xmap); + s0 += ffs; xmap >>= ffs; +#else + nextslot = nextxhashslot[s0 = nextslot]; +#endif + return s0; + } + }; + }; + +__global__ void digitH(equi *eq) { + uchar hash[HASHOUT]; + blake2b_state state; + equi::htlayout htl(eq, 0); + const u32 hashbytes = hashsize(0); // always 23 ? + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 block = id; block < NBLOCKS; block += eq->nthreads) { + state = eq->blake_ctx; + blake2b_gpu_hash(&state, block, hash, HASHOUT); + for (u32 i = 0; i> 4; +#endif +#elif BUCKBITS == 14 && RESTBITS == 6 + const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2; +#elif BUCKBITS == 12 && RESTBITS == 8 + const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; +#elif BUCKBITS == 20 && RESTBITS == 4 + const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4; +#ifdef XINTREE + const u32 xhash = ph[2] & 0xf; +#endif +#elif BUCKBITS == 12 && RESTBITS == 4 + const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; + const u32 xhash = ph[1] & 0xf; +#else +#error not implemented +#endif + const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1); + if (slot >= NSLOTS) + continue; + slot0 &s = eq->hta.trees0[0][bucketid][slot]; +#ifdef XINTREE + s.attr = tree(block*HASHESPERBLAKE+i, xhash); +#else + s.attr = tree(block*HASHESPERBLAKE+i); +#endif + memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes); + } + } +} + +__global__ void digitO(equi *eq, const u32 r) { + equi::htlayout htl(eq, r); + equi::collisiondata cd; + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid]; + u32 bsize = eq->getnslots0(bucketid); + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot0 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash0(pslot1))) + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot0 *pslot0 = buck + s0; + if (htl.equal(pslot0->hash, pslot1->hash)) + continue; + u32 xorbucketid; + u32 xhash; + const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; +#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) + xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) + | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4 + | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; + xhash &= 0xf; +#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4 + xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8) + | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4) + | (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4; + xhash &= 0xf; +#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 + xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4) + | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; + xhash &= 0xf; +#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 + xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8) + | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2 + | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6; +#else +#error not implemented +#endif + const u32 xorslot = 
atomicAdd(&eq->nslots[1][xorbucketid], 1); + if (xorslot >= NSLOTS) + continue; + slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot]; +#ifdef XINTREE + xs.attr = tree(bucketid, s0, s1, xhash); +#else + xs.attr = tree(bucketid, s0, s1); +#endif + for (u32 i=htl.dunits; i < htl.prevhashunits; i++) + xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; + } + } + } +} + +__global__ void digitE(equi *eq, const u32 r) { + equi::htlayout htl(eq, r); + equi::collisiondata cd; + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid]; + u32 bsize = eq->getnslots1(bucketid); + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot1 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash1(pslot1))) + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot1 *pslot0 = buck + s0; + if (htl.equal(pslot0->hash, pslot1->hash)) + continue; + u32 xorbucketid; + u32 xhash; + const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; +#if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) + xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) + | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]); + xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; +#elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4 + xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8) + | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4) + | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4; +#elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 + xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4) + | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; +#elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 + xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6) + | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2; +#else +#error not implemented +#endif + const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); + if (xorslot >= NSLOTS) + continue; + slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot]; +#ifdef XINTREE + xs.attr = tree(bucketid, s0, s1, xhash); +#else + xs.attr = tree(bucketid, s0, s1); +#endif + for (u32 i = htl.dunits; i < htl.prevhashunits; i++) + xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; + } + } + } +} + +#ifdef UNROLL +__global__ void digit_1(equi *eq) { + equi::htlayout htl(eq, 1); + equi::collisiondata cd; + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot0 *buck = htl.hta.trees0[0][bucketid]; + u32 bsize = eq->getnslots0(bucketid); + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot0 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash0(pslot1))) + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot0 *pslot0 = buck + s0; + if (htl.equal(pslot0->hash, pslot1->hash)) + continue; + const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; + const u32 bexor = __byte_perm(xor0, 0, 0x0123); + const u32 xorbucketid = bexor >> 4 & BUCKMASK; + const u32 xhash = bexor & 0xf; + const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); + if (xorslot >= NSLOTS) + continue; + slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot]; + xs.attr = tree(bucketid, s0, s1, xhash); + xs.hash[0].word = pslot0->hash[1].word ^ 
pslot1->hash[1].word; + xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; + xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; + xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; + xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word; + } + } + } +} +__global__ void digit2(equi *eq) { + equi::htlayout htl(eq, 2); + equi::collisiondata cd; + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot1 *buck = htl.hta.trees1[0][bucketid]; + u32 bsize = eq->getnslots1(bucketid); + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot1 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash1(pslot1))) + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot1 *pslot0 = buck + s0; + if (htl.equal(pslot0->hash, pslot1->hash)) + continue; + const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; + const u32 bexor = __byte_perm(xor0, 0, 0x0123); + const u32 xorbucketid = bexor >> 16; + const u32 xhash = bexor >> 12 & 0xf; + const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); + if (xorslot >= NSLOTS) + continue; + slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot]; + xs.attr = tree(bucketid, s0, s1, xhash); + xs.hash[0].word = xor0; + xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; + xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; + xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; + xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word; + } + } + } +} +__global__ void digit3(equi *eq) { + equi::htlayout htl(eq, 3); + equi::collisiondata cd; + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot0 *buck = htl.hta.trees0[1][bucketid]; + u32 bsize = eq->getnslots0(bucketid); + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot0 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash0(pslot1))) + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot0 *pslot0 = buck + s0; + if (htl.equal(pslot0->hash, pslot1->hash)) + continue; + const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; + const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; + const u32 bexor = __byte_perm(xor0, xor1, 0x1234); + const u32 xorbucketid = bexor >> 4 & BUCKMASK; + const u32 xhash = bexor & 0xf; + const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); + if (xorslot >= NSLOTS) + continue; + slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot]; + xs.attr = tree(bucketid, s0, s1, xhash); + xs.hash[0].word = xor1; + xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; + xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; + xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; + } + } + } +} +__global__ void digit4(equi *eq) { + equi::htlayout htl(eq, 4); + equi::collisiondata cd; + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot1 *buck = htl.hta.trees1[1][bucketid]; + u32 bsize = eq->getnslots1(bucketid); + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot1 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash1(pslot1))) + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot1 *pslot0 = buck + s0; + if (htl.equal(pslot0->hash, pslot1->hash)) + continue; + const u32 xor0 = pslot0->hash->word ^ 
pslot1->hash->word; + const u32 bexor = __byte_perm(xor0, 0, 0x4123); + const u32 xorbucketid = bexor >> 8; + const u32 xhash = bexor >> 4 & 0xf; + const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); + if (xorslot >= NSLOTS) + continue; + slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot]; + xs.attr = tree(bucketid, s0, s1, xhash); + xs.hash[0].word = xor0; + xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; + xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; + xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; + } + } + } +} +__global__ void digit5(equi *eq) { + equi::htlayout htl(eq, 5); + equi::collisiondata cd; + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot0 *buck = htl.hta.trees0[2][bucketid]; + u32 bsize = eq->getnslots0(bucketid); + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot0 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash0(pslot1))) + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot0 *pslot0 = buck + s0; + if (htl.equal(pslot0->hash, pslot1->hash)) + continue; + const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; + const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; + const u32 bexor = __byte_perm(xor0, xor1, 0x2345); + const u32 xorbucketid = bexor >> 4 & BUCKMASK; + const u32 xhash = bexor & 0xf; + const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); + if (xorslot >= NSLOTS) + continue; + slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot]; + xs.attr = tree(bucketid, s0, s1, xhash); + xs.hash[0].word = xor1; + xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; + xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; + } + } + } +} +__global__ void digit6(equi *eq) { + equi::htlayout htl(eq, 6); + equi::collisiondata cd; + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot1 *buck = htl.hta.trees1[2][bucketid]; + u32 bsize = eq->getnslots1(bucketid); + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot1 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash1(pslot1))) + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot1 *pslot0 = buck + s0; + if (htl.equal(pslot0->hash, pslot1->hash)) + continue; + const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; + const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; + const u32 bexor = __byte_perm(xor0, xor1, 0x2345); + const u32 xorbucketid = bexor >> 16; + const u32 xhash = bexor >> 12 & 0xf; + const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); + if (xorslot >= NSLOTS) + continue; + slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot]; + xs.attr = tree(bucketid, s0, s1, xhash); + xs.hash[0].word = xor1; + xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; + } + } + } +} +__global__ void digit7(equi *eq) { + equi::htlayout htl(eq, 7); + equi::collisiondata cd; + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot0 *buck = htl.hta.trees0[3][bucketid]; + u32 bsize = eq->getnslots0(bucketid); + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot0 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash0(pslot1))) + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot0 *pslot0 = buck + s0; + if 
(htl.equal(pslot0->hash, pslot1->hash)) + continue; + const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; + const u32 bexor = __byte_perm(xor0, 0, 0x4012); + const u32 xorbucketid = bexor >> 4 & BUCKMASK; + const u32 xhash = bexor & 0xf; + const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); + if (xorslot >= NSLOTS) + continue; + slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot]; + xs.attr = tree(bucketid, s0, s1, xhash); + xs.hash[0].word = xor0; + xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; + } + } + } +} +__global__ void digit8(equi *eq) { + equi::htlayout htl(eq, 8); + equi::collisiondata cd; + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot1 *buck = htl.hta.trees1[3][bucketid]; + u32 bsize = eq->getnslots1(bucketid); + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot1 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash1(pslot1))) + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot1 *pslot0 = buck + s0; + if (htl.equal(pslot0->hash, pslot1->hash)) + continue; + const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; + const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; + const u32 bexor = __byte_perm(xor0, xor1, 0x3456); + const u32 xorbucketid = bexor >> 16; + const u32 xhash = bexor >> 12 & 0xf; + const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); + if (xorslot >= NSLOTS) + continue; + slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot]; + xs.attr = tree(bucketid, s0, s1, xhash); + xs.hash[0].word = xor1; + } + } + } +} +#endif + +__global__ void digitK(equi *eq) { + equi::collisiondata cd; + equi::htlayout htl(eq, WK); + const u32 id = blockIdx.x * blockDim.x + threadIdx.x; + for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { + cd.clear(); + slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid]; + u32 bsize = eq->getnslots0(bucketid); // assume WK odd + for (u32 s1 = 0; s1 < bsize; s1++) { + const slot0 *pslot1 = buck + s1; + if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd + continue; + for (; cd.nextcollision();) { + const u32 s0 = cd.slot(); + const slot0 *pslot0 = buck + s0; + if (htl.equal(pslot0->hash, pslot1->hash)) { +#ifdef XINTREE + eq->candidate(tree(bucketid, s0, s1, 0)); +#else + eq->candidate(tree(bucketid, s0, s1)); +#endif + } + } + } + } +} + + +eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id) + : threadsperblock(tpb), totalblocks(blocks), device_id(id) +{ + eq = new equi(threadsperblock * totalblocks); + sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096); + solutions = (proof*)(((long long)sol_memory + 4095) & -4096); + + checkCudaErrors(cudaSetDevice(device_id)); + checkCudaErrors(cudaDeviceReset()); + checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync)); + checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); + + checkCudaErrors(cudaMalloc((void**)&heap0, sizeof(digit0))); + checkCudaErrors(cudaMalloc((void**)&heap1, sizeof(digit1))); + for (u32 r = 0; r < WK; r++) + if ((r & 1) == 0) + eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2); + else + eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2); + + checkCudaErrors(cudaMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32))); + checkCudaErrors(cudaMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof))); + + checkCudaErrors(cudaMalloc((void**)&device_eq, sizeof(equi))); +} + + +eq_cuda_context::~eq_cuda_context() +{ + 
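+  // The per-buffer cudaFree() calls are left commented out below, presumably
+  // because the cudaDeviceReset() further down already releases every
+  // allocation made on this device; only the host-side solution buffer and
+  // the equi struct still need explicit teardown here.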
/*checkCudaErrors(cudaFree(eq->nslots)); + checkCudaErrors(cudaFree(eq->sols)); + checkCudaErrors(cudaFree(eq->hta.trees0[0])); + checkCudaErrors(cudaFree(eq->hta.trees1[0]));*/ + checkCudaErrors(cudaSetDevice(device_id)); + checkCudaErrors(cudaDeviceReset()); + free(sol_memory); + delete eq; +} + + +void eq_cuda_context::solve(const char *tequihash_header, + unsigned int tequihash_header_len, + const char* nonce, + unsigned int nonce_len, + std::function cancelf, + std::function&, size_t, const unsigned char*)> solutionf, + std::function hashdonef) +{ + checkCudaErrors(cudaSetDevice(device_id)); + + eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len); + checkCudaErrors(cudaMemcpy(device_eq, eq, sizeof(equi), cudaMemcpyHostToDevice)); + + digitH << > >(device_eq); + if (cancelf()) return; +#if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL) + digit_1 << > >(device_eq); + if (cancelf()) return; + digit2 << > >(device_eq); + if (cancelf()) return; + digit3 << > >(device_eq); + if (cancelf()) return; + digit4 << > >(device_eq); + if (cancelf()) return; + digit5 << > >(device_eq); + if (cancelf()) return; + digit6 << > >(device_eq); + if (cancelf()) return; + digit7 << > >(device_eq); + if (cancelf()) return; + digit8 << > >(device_eq); +#else + for (u32 r = 1; r < WK; r++) { + r & 1 ? digitO << > >(device_eq, r) + : digitE << > >(device_eq, r); + } +#endif + if (cancelf()) return; + digitK << > >(device_eq); + + checkCudaErrors(cudaMemcpy(eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost)); + checkCudaErrors(cudaMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), cudaMemcpyDeviceToHost)); + + for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++) + { + std::vector index_vector(PROOFSIZE); + for (u32 i = 0; i < PROOFSIZE; i++) { + index_vector[i] = solutions[s][i]; + } + + solutionf(index_vector, DIGITBITS, nullptr); + if (cancelf()) return; + } + hashdonef(); +} \ No newline at end of file diff --git a/cuda_code/erode_dilate.cu b/cuda_code/erode_dilate.cu new file mode 100644 index 0000000000000000000000000000000000000000..1a4d8082600f157d5fa3f05702126d2b47a4599d --- /dev/null +++ b/cuda_code/erode_dilate.cu @@ -0,0 +1,94 @@ +#include "deltaCV/gpu/cudaImg.cuh" +#include +#include +#include +#include +#include + +//腐蚀 +__global__ void erode(unsigned char* dataIn, + unsigned char* dataOut, + short int imgRows, + short int imgCols, + short int erodeElementRows, + short int erodeElementCols) +{ + int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x); + int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y); + + int tid = xdx + ydx * imgCols; + + char val = dataIn[tid]; + + dataOut[tid] = dataIn[tid]; + + if(xdx > erodeElementCols-1 && xdx < imgCols-erodeElementCols + && ydx>erodeElementRows && ydx < imgRows-erodeElementRows) + { + for (int i = -erodeElementRows; i < erodeElementRows+1; ++i) { //行 + for (int j = -erodeElementCols; j < erodeElementCols+1; ++j) { //列 + char temp_val = dataIn[(ydx+i)*imgCols+(xdx+j)]; + if(temp_val < val) + { + dataOut[tid] = temp_val; + } + } + } + } + +} + +//膨胀 +__global__ void dilate(unsigned char* dataIn, + unsigned char* dataOut, + short int imgRows, + short int imgCols, + short int dilateElementRows, + short int dilateElementCols) +{ + int xdx = threadIdx.x + __umul24(blockIdx.x, blockDim.x); + int ydx = threadIdx.y + __umul24(blockIdx.y, blockDim.y); + + int tid = xdx + ydx * imgCols; + + char val = dataIn[tid]; + + dataOut[tid] = dataIn[tid]; + + if(xdx > dilateElementCols-1 && xdx < 
imgCols-dilateElementCols + && ydx>dilateElementRows && ydx < imgRows-dilateElementRows) + { + for (int i = -dilateElementRows; i < dilateElementRows+1; ++i) { //行 + for (int j = -dilateElementCols; j < dilateElementCols+1; ++j) { //列 + char temp_val = dataIn[(ydx+i)*imgCols+(xdx+j)]; + if(temp_val > val) + { + dataOut[tid] = temp_val; + } + } + } + } + +} + +void erode_gpu( unsigned char* dataIn, + unsigned char* dataOut, + short int imgRows, + short int imgCols, + cv::Size erodeSize, + dim3 tPerBlock, + dim3 bPerGrid) +{ + erode<<>>(dataIn,dataOut,imgRows,imgCols,(erodeSize.height-1)/2,(erodeSize.width-1)/2); +} + +void dilate_gpu(unsigned char* dataIn, + unsigned char* dataOut, + short int imgRows, + short int imgCols, + cv::Size dilateSize, + dim3 tPerBlock, + dim3 bPerGrid) +{ + dilate<<>>(dataIn,dataOut,imgRows,imgCols,(dilateSize.height-1)/2,(dilateSize.width-1)/2); +} diff --git a/cuda_code/escape_time_5_1gpu_per_proc.cu b/cuda_code/escape_time_5_1gpu_per_proc.cu new file mode 100644 index 0000000000000000000000000000000000000000..96f7cf8c1251144caf44d771aa964afc61d7b986 --- /dev/null +++ b/cuda_code/escape_time_5_1gpu_per_proc.cu @@ -0,0 +1,143 @@ +#include +#include +#include + + +/******************************************************************************/ +#define BLOCK_SIZE_X 256 +#define BLOCK_SIZE_Y 1 + +#define PROCS_PER_NODE 2 + +__constant__ int c_maxiter; +__constant__ float c_xmin; +__constant__ float c_ymin; +__constant__ float c_x_step; +__constant__ float c_y_step; +__constant__ int c_N; +__constant__ int c_width; +__constant__ int c_rowsize; +__constant__ int c_mpi_rowoffset; + +/******************************************************************************/ + +__device__ void d_smooth_fast_element_colormap(int iter, float re2, float im2, + int *rp, int *gp, int *bp) +{ + if(iter == c_maxiter) { + /* black */ + *rp = 0; // Red channel + *gp = 0; // Green channel + *bp = 0; // Blue channel + } + else { + int brightness = 256.*log2(1.75-log2(0.5)+iter-log2(log2(re2+im2)))/log2((float)c_maxiter); + + *rp = brightness; // Red channel + *gp = brightness; // Green channel + *bp = 255; // Blue channel + } +} + + +__device__ void in_cardioid_or_period2_bulb(int *iterp, float x, float y) +{ + float xdiff = x - 0.25; + float y2 = y * y; + float q = xdiff*xdiff + y2; + + // Is the point in the cardioid? + if (q * (q + xdiff) < 0.25*y2) { + *iterp = c_maxiter; + } + else if ((x+1.)*(x+1.) + y2 < 0.0625) { // Is the point in the period-2 bulb? 
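+    // (x+1)^2 + y^2 < 1/16 bounds the period-2 bulb.  Points inside it, like
+    // those inside the main cardioid tested above via q*(q + x - 1/4) < y^2/4,
+    // never escape, so maxiter is reported and the escape loop is skipped.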
+ *iterp = c_maxiter; + } + +} + + +__global__ void compute_escape_time(char *img) +{ + int offset = gridDim.x*blockDim.x*threadIdx.y + blockIdx.x*blockDim.x+threadIdx.x; + int i = offset / c_width; + int j = offset - i * c_width; + int global_offset = c_mpi_rowoffset*c_width + offset; + int global_i = global_offset / c_width; + int global_j = global_offset - global_i * c_width; + int iteration = 0; + float c_re = c_xmin + c_x_step/2 + global_j*c_x_step; + float c_im = c_ymin + c_y_step/2 + global_i*c_y_step; + float zn_re = 0.; + float zn_im = 0.; + float tmp_re; + float re2 = 0.; + float im2 = 0.; + int bailout_radius2 = 2*2; + int r, g, b; + + if (global_offset < c_N) { + // Check if point is in cardioid or in period-2 bulb + in_cardioid_or_period2_bulb(&iteration, c_re, c_im); + + while ((re2 + im2 < bailout_radius2) && (iteration < c_maxiter)) { + tmp_re = re2 - im2 + c_re; + zn_im = zn_re * zn_im; + zn_im += zn_im; // Multiply by two + zn_im += c_im; + zn_re = tmp_re; + + re2 = zn_re * zn_re; + im2 = zn_im * zn_im; + iteration++; + } + + d_smooth_fast_element_colormap(iteration, re2, im2, &r, &g, &b); + + offset = c_rowsize * i + 3 * j; // offset in the image array + img[offset++] = b; + img[offset++] = g; + img[offset] = r; + } +} + +/******************************************************************************/ + +extern "C" void kernel_wrapper(char *h_img, int d_img_size, int MAX_ITER, + float X_MIN, float Y_MIN, float h_x_step, + float h_y_step, int N, + int N_local, int mpi_row_offset, int prank, + int WIDTH, int row_size) +{ + + dim3 block_size, grid_size; + char *d_img; + int dev_id = prank % PROCS_PER_NODE; + + // Create the grid of blocks of threads + block_size.x = BLOCK_SIZE_X; block_size.y = BLOCK_SIZE_Y; + grid_size.x = N_local / (block_size.x*block_size.y) + (N_local%(block_size.x*block_size.y) == 0? 
0 : 1); + + cudaSetDevice(dev_id); + cudaMalloc((void **)&d_img, d_img_size); + cudaMemset(d_img, 0, d_img_size); + + // Copy memory to constant memory in the device + cudaMemcpyToSymbol(c_maxiter, &MAX_ITER, sizeof(int)); + cudaMemcpyToSymbol(c_xmin, &X_MIN, sizeof(float)); + cudaMemcpyToSymbol(c_ymin, &Y_MIN, sizeof(float)); + cudaMemcpyToSymbol(c_x_step, &h_x_step, sizeof(float)); + cudaMemcpyToSymbol(c_y_step, &h_y_step, sizeof(float)); + cudaMemcpyToSymbol(c_N, &N, sizeof(int)); + cudaMemcpyToSymbol(c_width, &WIDTH, sizeof(int)); + cudaMemcpyToSymbol(c_rowsize, &row_size, sizeof(int)); + cudaMemcpyToSymbol(c_mpi_rowoffset, &mpi_row_offset, sizeof(int)); + + // Call the kernel to execute on the gpu + compute_escape_time<<>>(d_img); + + // Copy the results back + cudaMemcpy(h_img, d_img, d_img_size, cudaMemcpyDeviceToHost); + + cudaFree(d_img); +} diff --git a/cuda_code/escheme.cu b/cuda_code/escheme.cu new file mode 100644 index 0000000000000000000000000000000000000000..c845eda879a29842303c5570194689f027371c92 --- /dev/null +++ b/cuda_code/escheme.cu @@ -0,0 +1,419 @@ +/* + The MIT License (MIT) + + Copyright (c) 2014 Leonardo Kewitz + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "./cuda_snippets.h" +#include "./escheme.h" + +#define BSIZE 512 +#define putf(a, b) smemcpy(a, b, sizeof(float), cudaMemcpyHostToDevice); +#define getf(a, b) smemcpy(a, b, sizeof(float), cudaMemcpyDeviceToHost); +#define CUDA true + +extern "C" unsigned int alloc(const int nn) { + cudaDeviceProp prop = getInfo(); + unsigned int gm = prop.totalGlobalMem*.8 - sizeof(float)*nn*6 + - sizeof(float)*4; + cudaDeviceReset(); + return cast(unsigned int, gm / (sizeof(node) + 6*sizeof(element))); +} + +#if CUDA +// Kernel de responsável por calcular as matrizes de contribuição de um +// elemento. +__global__ void kernel_element(int ne, element *elements) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= ne) return; + + element E = elements[i]; + + // Calcula gradN + float q1 = E.y[1]-E.y[2], q2 = E.y[2]-E.y[0], q3 = E.y[0]-E.y[1]; + float r1 = E.x[2]-E.x[1], r2 = E.x[0]-E.x[2], r3 = E.x[1]-E.x[0]; + // Calcula det(gradN) + float det = E.x[1]*E.y[2] + E.x[0]*E.y[1] + E.x[2]*E.y[0] + - E.x[0]*E.y[2] - E.x[2]*E.y[1] - E.x[1]*E.y[0]; + float cof = (E.mat/det)/2; + // Calcula a matriz de contribuições do elemento. 
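+    // In other words: assemble the element contribution ("stiffness") matrix.
+    // q1..q3 and r1..r3 are the edge differences defining the linear (P1)
+    // shape-function gradients, and det is twice the signed triangle area, so
+    // the six values below are the upper triangle of the symmetric 3x3 element
+    // matrix: matriz[0..2] are the diagonal terms and matriz[3..5] the 1-2,
+    // 1-3 and 2-3 couplings, each scaled by cof = E.mat / (2 * det).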
+ elements[i].matriz[0] = (q1*q1 + r1*r1)*cof; + elements[i].matriz[1] = (q2*q2 + r2*r2)*cof; + elements[i].matriz[2] = (q3*q3 + r3*r3)*cof; + elements[i].matriz[3] = (q1*q2 + r1*r2)*cof; + elements[i].matriz[4] = (q1*q3 + r1*r3)*cof; + elements[i].matriz[5] = (q2*q3 + r2*r3)*cof; +} + +// Kernel de pré-processamento responsável por calcular diag_sum e right_sum. +__global__ void kernel_preprocess(int ne, element *elements, float *V, + float *dsum, float *rsum) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= ne) return; + + int n1, n2, n3; + element E = elements[i]; + n1 = E.nodes[0]; n2 = E.nodes[1]; n3 = E.nodes[2]; + + atomicAdd(&dsum[n1], E.matriz[0]); + atomicAdd(&dsum[n2], E.matriz[1]); + atomicAdd(&dsum[n3], E.matriz[2]); + + atomicAdd(&rsum[n1], - E.matriz[3]*V[n2] - E.matriz[4]*V[n3]); + atomicAdd(&rsum[n2], - E.matriz[3]*V[n1] - E.matriz[5]*V[n3]); + atomicAdd(&rsum[n3], - E.matriz[4]*V[n1] - E.matriz[5]*V[n2]); +} + +// Kernel de pré-processamento responsável por calcular P = R = b - Ax. +__global__ void kernel_residue(int nn, node *nodes, float *dsum, float *rsum, + float *R, float *P, float *V) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= nn) return; + + node N = nodes[i]; + + float ri = N.calc ? rsum[N.i] - dsum[N.i]*V[N.i] : 0.0; + R[N.i] = ri; + P[N.i] = ri; +} + +// U = SS*P +__global__ void kernel_iter_element(int ne, element *elements, float *U, + float *P) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= ne) return; + + element E = elements[i]; + int n1 = E.nodes[0], n2 = E.nodes[1], n3 = E.nodes[2]; + + atomicAdd(&U[n1], E.matriz[0]*P[n1] + E.matriz[3]*P[n2] + + E.matriz[4]*P[n3]); + atomicAdd(&U[n2], E.matriz[3]*P[n1] + E.matriz[1]*P[n2] + + E.matriz[5]*P[n3]); + atomicAdd(&U[n3], E.matriz[4]*P[n1] + E.matriz[5]*P[n2] + + E.matriz[2]*P[n3]); +} + +// Corrige os valores de U para nós submetidos à condição de contorno. +__global__ void kernel_iter_element_fix(int nn, node *nodes, float *U, + float *P) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= nn) return; + + node N = nodes[i]; + if (!N.calc) + U[N.i] = P[N.i]; +} + +// vec[i] = 0.0f +__global__ void kernel_util_zero(int size, float *vec) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= size) return; + + vec[i] = 0.0; +} + +// sum += vecA[i]*vecB[i] +__global__ void kernel_util_vecsummult(int size, float *vecA, float *vecB, + float *sum) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + int ti = threadIdx.x; + __shared__ float _sum[BSIZE]; + + _sum[ti] = (i < size) ? vecA[i]*vecB[i] : 0.0f; + __syncthreads(); + + for (int s = blockDim.x/2; s > 0; s >>= 1) { + if (ti < s) + _sum[ti] += _sum[ti + s]; + __syncthreads(); + } + if (ti == 0) + atomicAdd(sum, _sum[0]); +} + +// vecA[i] = vecB[i] + scalar*vecC[i] +__global__ void kernel_util_addtovec(int size, const float scalar, float *vecA, + float *vecB, float *vecC) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + if (i >= size) return; + + vecA[i] = vecB[i] + scalar*vecC[i]; +} + +// Função externa que processa o problema na GPU. +extern "C" int runGPU(int ng, int nn, int kmax, float errmin, group *groups, + float *V, bool verbose, float *bench) { + clock_t t = clock(); + int i, k = 1; + unsigned int maxn = alloc(nn); + + // Array Sizes + size_t s_Elements = sizeof(element)*maxn*6, + s_Nodes = sizeof(node)*maxn, + s_V = sizeof(float)*nn; + + // Scalars. 
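+    // The loop below is a conjugate-gradient style iteration written entirely
+    // in terms of the element matrices (U = A*P is assembled element by
+    // element on the device):
+    //   sum1 = P.R,  sum2 = P.U,  alpha = sum1 / sum2
+    //   V <- V + alpha*P,  R <- R - alpha*U
+    //   sum3 = R.R (convergence test against errmin),  sum4 = R.U
+    //   beta = -sum4 / sum2,  P <- R + beta*P
+    // The scalars live on the host and, via _sum1.._sum4, on the device.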
+ float sum1 = 0.0f, sum2 = 0.0f, sum3 = 1.0f, sum4 = 0.0f, alpha = 0.0f, + beta = 0.0f, *_sum1, *_sum2, *_sum3, *_sum4; + + // Device Arrays. + float *_dsum, *_rsum, *_V, *_U, *_P, *_R; + group *G; + node *_nodes; + element *_elements; + smalloc(&_dsum, s_V); smalloc(&_rsum, s_V); + smalloc(&_elements, s_Elements); smalloc(&_nodes, s_Nodes); + smalloc(&_sum1, sizeof(float)); smalloc(&_sum2, sizeof(float)); + smalloc(&_sum3, sizeof(float)); smalloc(&_sum4, sizeof(float)); + smalloc(&_V, s_V); smalloc(&_U, s_V); smalloc(&_P, s_V); smalloc(&_R, s_V); + + smemcpy(_V, V, s_V, cudaMemcpyHostToDevice); + + kernel_util_zero<<<(1+nn/BSIZE), BSIZE>>>(nn, _dsum); + kernel_util_zero<<<(1+nn/BSIZE), BSIZE>>>(nn, _rsum); + cudaDeviceSynchronize(); + for (i = 0; i < ng; i++) { + G = &groups[i]; + smemcpy(_elements, G->elements, sizeof(element)*G->ne, + cudaMemcpyHostToDevice); + kernel_element<<<(1+G->ne/BSIZE), BSIZE>>>(G->ne, _elements); + cudaDeviceSynchronize(); + kernel_preprocess<<<(1+G->ne/BSIZE), BSIZE>>>(G->ne, _elements, _V, + _dsum, _rsum); + cudaDeviceSynchronize(); + } + for (i = 0; i < ng; i++) { + G = &groups[i]; + smemcpy(_nodes, G->nodes, sizeof(node)*G->nn, cudaMemcpyHostToDevice); + kernel_residue<<<(1+G->nn/BSIZE), BSIZE>>>(G->nn, _nodes, _dsum, _rsum, + _R, _P, _V); + cudaDeviceSynchronize(); + } + + while (k < kmax && fabs(sqrt(sum3)) > errmin) { + // U[] = 0 + kernel_util_zero<<<(1+nn/BSIZE), BSIZE>>>(nn, _U); + cudaDeviceSynchronize(); + for (i = 0; i < ng; i++) { + G = &groups[i]; + if (ng > 1) + smemcpy(_elements, G->elements, sizeof(element)*G->ne, + cudaMemcpyHostToDevice); + // Integra Elemento. + kernel_element<<<(1+G->ne/BSIZE), BSIZE>>>(G->ne, _elements); + cudaDeviceSynchronize(); + // U = A*R + kernel_iter_element<<<(1+G->ne/BSIZE), BSIZE>>>(G->ne, _elements, + _U, _P); + cudaDeviceSynchronize(); + } + for (i = 0; i < ng; i++) { + G = &groups[i]; + if (ng > 1) + smemcpy(_nodes, G->nodes, sizeof(node)*G->nn, + cudaMemcpyHostToDevice); + kernel_iter_element_fix<<<(1+G->nn/BSIZE), BSIZE>>>(G->nn, _nodes, + _U, _P); + cudaDeviceSynchronize(); + } + + sum1 = 0.0f; sum2 = 0.0f; + putf(_sum1, &sum1); putf(_sum2, &sum2); + kernel_util_vecsummult<<<(1+nn/BSIZE), BSIZE>>>(nn, _P, _R, _sum1); + kernel_util_vecsummult<<<(1+nn/BSIZE), BSIZE>>>(nn, _P, _U, _sum2); + cudaDeviceSynchronize(); + getf(&sum1, _sum1); getf(&sum2, _sum2); + + alpha = sum2 != 0.0 ? sum1/sum2 : 0.0; + kernel_util_addtovec<<<(1+nn/BSIZE), BSIZE>>>(nn, alpha, _V, _V, _P); + kernel_util_addtovec<<<(1+nn/BSIZE), BSIZE>>>(nn, -alpha, _R, _R, _U); + cudaDeviceSynchronize(); + + sum3 = 0.0f; sum4 = 0.0f; + putf(_sum3, &sum3); putf(_sum4, &sum4); + kernel_util_vecsummult<<<(1+nn/BSIZE), BSIZE>>>(nn, _R, _R, _sum3); + kernel_util_vecsummult<<<(1+nn/BSIZE), BSIZE>>>(nn, _R, _U, _sum4); + cudaDeviceSynchronize(); + getf(&sum3, _sum3); getf(&sum4, _sum4); + + beta = sum2 != 0.0 ? 
-sum4/sum2 : 0.0; + kernel_util_addtovec<<<(1+nn/BSIZE), BSIZE>>>(nn, beta, _P, _R, _P); + cudaDeviceSynchronize(); + + k++; + } + + smemcpy(V, _V, s_V, cudaMemcpyDeviceToHost); + + cudaFree(_V); cudaFree(_U); cudaFree(_P); cudaFree(_R); + cudaFree(_sum1); cudaFree(_sum2); cudaFree(_sum3); cudaFree(_sum4); + cudaFree(_elements); cudaFree(_nodes); + cudaFree(_dsum); cudaFree(_rsum); + + t = clock() - t; + bench[0] = cast(float, t)/CLOCKS_PER_SEC; + return k; +} +#endif + +void integ_element(element *E) { + float mat = E->mat; + // Calcula gradN + float q1 = E->y[1]-E->y[2], q2 = E->y[2]-E->y[0], q3 = E->y[0]-E->y[1]; + float r1 = E->x[2]-E->x[1], r2 = E->x[0]-E->x[2], r3 = E->x[1]-E->x[0]; + // Calcula det(gradN) + float det = E->x[1]*E->y[2] + E->x[0]*E->y[1] + E->x[2]*E->y[0] + - E->x[0]*E->y[2] - E->x[2]*E->y[1] - E->x[1]*E->y[0]; + float cof = (mat/det)/2.0; + assert(!isnan(det)); + assert(!isnan(cof)); + // Calcula a matriz de contribuições do elemento. + E->matriz[0] = (powf(q1, 2.0) + powf(r1, 2.0))*cof; + E->matriz[1] = (powf(q2, 2.0) + powf(r2, 2.0))*cof; + E->matriz[2] = (powf(q3, 2.0) + powf(r3, 2.0))*cof; + E->matriz[3] = (q1*q2 + r1*r2)*cof; + E->matriz[4] = (q1*q3 + r1*r3)*cof; + E->matriz[5] = (q2*q3 + r2*r3)*cof; +} +// Função externa que processa o problema no CPU. +extern "C" int runCPU(int ng, int nn, int kmax, float errmin, group *groups, + float *V, bool verbose, float *bench) { + clock_t t = clock(); + unsigned int i, j, k; + element *E; + group *G; + node N; + + // Pre-processamento. Calcula dsum e rsum. + int n1, n2, n3; + float *rsum = cast(float*, malloc(nn*sizeof(float))); + float *dsum = cast(float*, malloc(nn*sizeof(float))); + // Inicialização dos vetores. + for (i = 0; i < nn; i++) { + rsum[i] = 0.0f; + dsum[i] = 0.0f; + } + for (i = 0; i < ng; i++) { + G = &groups[i]; + for (j = 0; j < G->ne; j++) { + E = &G->elements[j]; + integ_element(E); + + n1 = E->nodes[0]; n2 = E->nodes[1]; n3 = E->nodes[2]; + + dsum[n1] += E->matriz[0]; + dsum[n2] += E->matriz[1]; + dsum[n3] += E->matriz[2]; + + rsum[n1] += -E->matriz[3]*V[n2] -E->matriz[4]*V[n3]; + rsum[n2] += -E->matriz[3]*V[n1] -E->matriz[5]*V[n3]; + rsum[n3] += -E->matriz[4]*V[n1] -E->matriz[5]*V[n2]; + } + } + + // CG + float ri, alpha, beta, sum1, sum2, sum3 = 1.0f, sum4; + float *r = cast(float*, malloc(nn*sizeof(float))); + float *p = cast(float*, malloc(nn*sizeof(float))); + float *u = cast(float*, malloc(nn*sizeof(float))); + + // r = b - Ax + for (i = 0; i < ng; i++) { + G = &groups[i]; + for (j = 0; j < G->nn; j++) { + N = G->nodes[j]; + ri = N.calc ? rsum[N.i] - dsum[N.i]*V[N.i] : 0.0f; + r[N.i] = ri; + p[N.i] = ri; + } + } + + k = 1; + while (k < kmax && fabs(sqrt(sum3)) > errmin) { + for (i = 0; i < nn; i++) { + u[i] = 0.0; + } + + for (i = 0; i < ng; i++) { + G = &groups[i]; + for (j = 0; j < G->ne; j++) { + E = &G->elements[j]; + integ_element(E); + + n1 = E->nodes[0]; n2 = E->nodes[1]; n3 = E->nodes[2]; + u[n1] += E->matriz[0]*p[n1] + E->matriz[3]*p[n2] + + E->matriz[4]*p[n3]; + u[n2] += E->matriz[3]*p[n1] + E->matriz[1]*p[n2] + + E->matriz[5]*p[n3]; + u[n3] += E->matriz[4]*p[n1] + E->matriz[5]*p[n2] + + E->matriz[2]*p[n3]; + } + for (j = 0; j < G->nn; j++) { + N = G->nodes[j]; + if (!N.calc) + u[N.i] = p[N.i]; + } + } + + sum1 = 0.0; sum2 = 0.0; + for (i = 0; i < nn; i++) { + sum1 += p[i]*r[i]; + sum2 += p[i]*u[i]; + } + + alpha = sum2 != 0.0 ? 
sum1/sum2 : 0.0; + for (i = 0; i < nn; i++) { + V[i] += alpha*p[i]; + r[i] -= alpha*u[i]; + } + + sum3 = 0.0; sum4 = 0.0; + for (i = 0; i < nn; i++) { + sum3 += r[i]*r[i]; + sum4 += r[i]*u[i]; + } + + beta = sum2 != 0.0 ? -sum4/sum2 : 0.0f; + for (i = 0; i < nn; i++) { + p[i] = r[i] + beta*p[i]; + } + + k++; + } + + free(dsum); + free(rsum); + free(r); + free(p); + free(u); + + t = clock() - t; + bench[0] = cast(float, t)/CLOCKS_PER_SEC; + return k; +} diff --git a/cuda_code/esdf_integrator.cu b/cuda_code/esdf_integrator.cu new file mode 100644 index 0000000000000000000000000000000000000000..7c7eeb430207787a56b73d98396240cee10d80bf --- /dev/null +++ b/cuda_code/esdf_integrator.cu @@ -0,0 +1,1214 @@ +/* +Copyright 2022 NVIDIA CORPORATION + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +#include "nvblox/gpu_hash/cuda/gpu_hash_interface.cuh" +#include "nvblox/gpu_hash/cuda/gpu_indexing.cuh" +#include "nvblox/utils/timing.h" + +#include "nvblox/integrators/esdf_integrator.h" + +namespace nvblox { + +EsdfIntegrator::~EsdfIntegrator() { + if (cuda_stream_ != nullptr) { + cudaStreamDestroy(cuda_stream_); + } +} + +void EsdfIntegrator::integrateBlocksOnGPU( + const TsdfLayer& tsdf_layer, const std::vector& block_indices, + EsdfLayer* esdf_layer) { + timing::Timer esdf_timer("esdf/integrate"); + + if (block_indices.empty()) { + return; + } + + // First, check if the stream exists. If not, create one. + if (cuda_stream_ == nullptr) { + checkCudaErrors(cudaStreamCreate(&cuda_stream_)); + } + + timing::Timer allocate_timer("esdf/integrate/allocate"); + // First, allocate all the destination blocks. + allocateBlocksOnCPU(block_indices, esdf_layer); + allocate_timer.Stop(); + + timing::Timer mark_timer("esdf/integrate/mark_sites"); + // Then, mark all the sites on GPU. + // This finds all the blocks that are eligible to be parents. + std::vector blocks_with_sites; + std::vector blocks_to_clear; + markAllSitesOnGPU(tsdf_layer, block_indices, esdf_layer, &blocks_with_sites, + &blocks_to_clear); + mark_timer.Stop(); + + std::vector cleared_blocks; + if (!blocks_to_clear.empty()) { + timing::Timer compute_timer("esdf/integrate/clear"); + clearInvalidOnGPU(blocks_to_clear, esdf_layer, &cleared_blocks); + std::vector all_clear_updated; + } + + timing::Timer compute_timer("esdf/integrate/compute"); + // Parallel block banding on GPU. + computeEsdfOnGPU(blocks_with_sites, esdf_layer); + if (!cleared_blocks.empty()) { + computeEsdfOnGPU(cleared_blocks, esdf_layer); + } + compute_timer.Stop(); +} + +void EsdfIntegrator::integrateSliceOnGPU( + const TsdfLayer& tsdf_layer, const std::vector& block_indices, + float z_min, float z_max, float z_output, EsdfLayer* esdf_layer) { + timing::Timer esdf_timer("esdf/integrate_slice"); + + if (block_indices.empty()) { + return; + } + + // First, check if the stream exists. If not, create one. + if (cuda_stream_ == nullptr) { + checkCudaErrors(cudaStreamCreate(&cuda_stream_)); + } + + timing::Timer allocate_timer("esdf/integrate_slice/allocate"); + // First, allocate all the destination blocks. 
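+  // The slice pipeline mirrors integrateBlocksOnGPU(): allocate the output
+  // ESDF blocks, mark sites using only TSDF voxels inside [z_min, z_max]
+  // projected onto the single output plane at z_output, clear voxels that
+  // were invalidated by site changes, then run the block-parallel sweep over
+  // both the updated and the cleared blocks.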
+ allocateBlocksOnCPU(block_indices, esdf_layer); + allocate_timer.Stop(); + + timing::Timer mark_timer("esdf/integrate_slice/mark_sites"); + // Then, mark all the sites on GPU. + // This finds all the blocks that are eligible to be parents. + std::vector blocks_with_sites; + std::vector blocks_to_clear; + markSitesInSliceOnGPU(tsdf_layer, block_indices, z_min, z_max, z_output, + esdf_layer, &blocks_with_sites, &blocks_to_clear); + mark_timer.Stop(); + + std::vector cleared_blocks; + if (!blocks_to_clear.empty()) { + timing::Timer compute_timer("esdf/integrate_slice/clear"); + clearInvalidOnGPU(blocks_to_clear, esdf_layer, &cleared_blocks); + std::vector all_clear_updated; + } + + timing::Timer compute_timer("esdf/integrate_slice/compute"); + // Parallel block banding on GPU. + computeEsdfOnGPU(blocks_with_sites, esdf_layer); + + if (!cleared_blocks.empty()) { + computeEsdfOnGPU(cleared_blocks, esdf_layer); + } + compute_timer.Stop(); +} + +__device__ void clearVoxelDevice(EsdfVoxel* voxel, + float max_squared_distance_vox) { + voxel->parent_direction.setZero(); + voxel->squared_distance_vox = max_squared_distance_vox; +} + +// Takes in a vector of blocks, and outputs an integer true if that block is +// meshable. +// Block size MUST be voxels_per_side x voxels_per_side x voxel_per_size. +// Grid size can be anything. +__global__ void markAllSitesKernel(int num_blocks, + const TsdfBlock** tsdf_blocks, + EsdfBlock** esdf_blocks, + float min_site_distance_m, float min_weight, + float max_squared_distance_vox, + bool* updated, bool* to_clear) { + dim3 voxel_index = threadIdx; + // This for loop allows us to have fewer threadblocks than there are + // blocks in this computation. We assume the threadblock size is constant + // though to make our lives easier. + for (int block_index = blockIdx.x; block_index < num_blocks; + block_index += gridDim.x) { + // Get the correct voxel for this index. + const TsdfVoxel* tsdf_voxel = + &tsdf_blocks[block_index] + ->voxels[voxel_index.x][voxel_index.y][voxel_index.z]; + EsdfVoxel* esdf_voxel = + &esdf_blocks[block_index] + ->voxels[voxel_index.x][voxel_index.y][voxel_index.z]; + if (tsdf_voxel->weight >= min_weight) { + // Mark as inside if the voxel distance is negative. + bool is_inside = tsdf_voxel->distance <= 0.0f; + if (esdf_voxel->is_inside && is_inside == false) { + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + to_clear[block_index] = true; + } + esdf_voxel->is_inside = is_inside; + if (is_inside && fabsf(tsdf_voxel->distance) <= min_site_distance_m) { + esdf_voxel->is_site = true; + esdf_voxel->squared_distance_vox = 0.0f; + esdf_voxel->parent_direction.setZero(); + updated[block_index] = true; + } else { + if (esdf_voxel->is_site) { + esdf_voxel->is_site = false; + // This voxel needs to be cleared. + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + to_clear[block_index] = true; + } else if (!esdf_voxel->observed) { + // This is a brand new voxel. + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + } else if (esdf_voxel->squared_distance_vox <= 1e-4) { + // This is an invalid voxel that should be cleared. + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + to_clear[block_index] = true; + } + } + esdf_voxel->observed = true; + } + } +} + +// From: +// https://stackoverflow.com/questions/17399119/how-do-i-use-atomicmax-on-floating-point-values-in-cuda +__device__ __forceinline__ float atomicMinFloat(float* addr, float value) { + float old; + old = (value >= 0) + ? 
__int_as_float(atomicMin((int*)addr, __float_as_int(value))) + : __uint_as_float( + atomicMax((unsigned int*)addr, __float_as_uint(value))); + + return old; +} + +/// Thread size MUST be 8x8x8, block size can be anything. +__global__ void markSitesInSliceKernel( + int num_input_blocks, int num_output_blocks, const TsdfBlock** tsdf_blocks, + EsdfBlock** esdf_blocks, int output_voxel_index, int input_min_voxel_index, + int input_max_voxel_index, float min_site_distance_m, float min_weight, + float max_squared_distance_vox, bool* updated, bool* cleared) { + dim3 voxel_index = threadIdx; + voxel_index.z = output_voxel_index; + int layer_index = threadIdx.z; + int num_layers = blockDim.z; + + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + + __shared__ EsdfVoxel new_values[kVoxelsPerSide][kVoxelsPerSide]; + __shared__ bool observed[kVoxelsPerSide][kVoxelsPerSide]; + __shared__ float min_distance[kVoxelsPerSide][kVoxelsPerSide]; + + // Initialize these. + if (layer_index == 0) { + observed[voxel_index.x][voxel_index.y] = false; + min_distance[voxel_index.x][voxel_index.y] = 100.0f; + } + __syncthreads(); + + // This for loop allows us to have fewer threadblocks than there are + // blocks in this computation. We assume the threadblock size is constant + // though to make our lives easier. + for (int block_index = blockIdx.x; block_index < num_output_blocks; + block_index += gridDim.x) { + // Get the correct block for this. + const TsdfBlock* tsdf_block = + tsdf_blocks[block_index + num_output_blocks * layer_index]; + // There's also null pointers in there. + if (tsdf_block != nullptr) { + // Iterate over all of the voxels in this block. + int start_index = 0; + int end_index = kVoxelsPerSide; + if (layer_index == 0) { + start_index = input_min_voxel_index; + } + if (layer_index == num_layers - 1) { + end_index = input_max_voxel_index; + } + for (int i = start_index; i < end_index; i++) { + const TsdfVoxel* tsdf_voxel = + &tsdf_block->voxels[voxel_index.x][voxel_index.y][i]; + // EsdfVoxel* new_voxel = &new_values[voxel_index.x][voxel_index.y]; + // Get the correct voxel for this index. + if (tsdf_voxel->weight >= min_weight) { + observed[voxel_index.x][voxel_index.y] = true; + atomicMinFloat(&min_distance[voxel_index.x][voxel_index.y], + tsdf_voxel->distance); + } + } + } + + // sync threads across everyone trying to update this voxel + __syncthreads(); + + // Ok now only if we're layer 0 do we compare the new and old values and + // decide what to output. + if (layer_index == 0) { + EsdfVoxel* esdf_voxel = + &esdf_blocks[block_index] + ->voxels[voxel_index.x][voxel_index.y][voxel_index.z]; + + // Case 0: Just skip it if it's unobserved. We don't care. + if (!observed[voxel_index.x][voxel_index.y]) { + continue; + } + // Determine if the new value puts us inside or in a site. + bool is_inside = min_distance[voxel_index.x][voxel_index.y] <= 0.0f; + bool is_site = fabsf(min_distance[voxel_index.x][voxel_index.y]) <= + min_site_distance_m && + is_inside; + + // First handle the case where the voxel is a site. + if (is_site) { + if (esdf_voxel->is_site) { + // Ok whatever. Add to the site list. + // Its existing values are fine. + updated[block_index] = true; + } else { + // Wasn't a site before, is now. + esdf_voxel->observed = true; + esdf_voxel->is_site = true; + clearVoxelDevice(esdf_voxel, 0.0f); + updated[block_index] = true; + } + } else { + // Here we have to double-check what's going on. + // If it was a site before, and isn't anymore, we have to clear it. 
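+        // "Clear" here means clearVoxelDevice(): the parent direction is
+        // zeroed and the squared distance is lifted back to the maximum, and
+        // flagging cleared[block_index] queues this block so clearInvalidOnGPU
+        // can later propagate the invalidation to voxels whose parents routed
+        // through it.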
+ if (esdf_voxel->is_site) { + esdf_voxel->is_site = false; + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + cleared[block_index] = true; + } + // Otherwise just leave it alone unless it's brand new. + if (!esdf_voxel->observed) { + esdf_voxel->observed = true; + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + } else if (esdf_voxel->is_inside != is_inside) { + // In case the sidedness swapped, clear the voxel. + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + cleared[block_index] = true; + } else if (esdf_voxel->squared_distance_vox <= 0.0f) { + // This is somehow invalidly marked as a site despite the fact + // it shouldn't be. + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + cleared[block_index] = true; + } + } + // Make the sidedness match. + esdf_voxel->is_inside = is_inside; + } + } +} + +__device__ void sweepSingleBand(Index3D voxel_index, int sweep_axis, + float max_squared_distance_vox, + EsdfBlock* esdf_block) { + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + Index3D last_site; + bool site_found; + // Sweep sweep sweep. + // First we sweep forward, then backwards. + for (int i = 0; i < 2; i++) { + last_site = Index3D::Zero(); + site_found = false; + int direction = 1; + int start_voxel = 0; + int end_voxel = kVoxelsPerSide; + if (i == 1) { + direction = -1; + start_voxel = kVoxelsPerSide - 1; + end_voxel = -1; + } + + for (voxel_index(sweep_axis) = start_voxel; + voxel_index(sweep_axis) != end_voxel; + voxel_index(sweep_axis) += direction) { + EsdfVoxel* esdf_voxel = + &esdf_block + ->voxels[voxel_index.x()][voxel_index.y()][voxel_index.z()]; + if (!esdf_voxel->observed) { + continue; + } + // If this voxel is itself a site, then mark this for future voxels. + if (esdf_voxel->is_site) { + last_site = voxel_index; + site_found = true; + } else if (!site_found) { + // If this voxel isn't a site but we haven't found a site yet, + // then if this voxel is valid we set it as the site. + if (esdf_voxel->squared_distance_vox < max_squared_distance_vox) { + site_found = true; + last_site = esdf_voxel->parent_direction + voxel_index; + } + } else { + // If we've found the site, then should just decide what to do + // here. + Index3D potential_direction = last_site - voxel_index; + float potential_distance = potential_direction.squaredNorm(); + // Either it hasn't been set at all or it's closer to the site + // than to its current value. + if (esdf_voxel->squared_distance_vox > potential_distance) { + esdf_voxel->parent_direction = potential_direction; + esdf_voxel->squared_distance_vox = potential_distance; + } else if (esdf_voxel->squared_distance_vox < + max_squared_distance_vox) { + // If the current value is a better site, then set it as a site. + last_site = esdf_voxel->parent_direction + voxel_index; + } + } + } + } +} +__device__ bool updateSingleNeighbor(const EsdfBlock* esdf_block, + const Index3D& voxel_index, + const Index3D& neighbor_voxel_index, + int axis, int direction, + float max_squared_distance_vox, + EsdfBlock* neighbor_block) { + const EsdfVoxel* esdf_voxel = + &esdf_block->voxels[voxel_index.x()][voxel_index.y()][voxel_index.z()]; + EsdfVoxel* neighbor_voxel = + &neighbor_block + ->voxels[neighbor_voxel_index.x()][neighbor_voxel_index.y()] + [neighbor_voxel_index.z()]; + if (!esdf_voxel->observed || !neighbor_voxel->observed || + neighbor_voxel->is_site || + esdf_voxel->squared_distance_vox >= max_squared_distance_vox) { + return false; + } + // Determine if we can update this. 
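+  // The source voxel sits on the face shared with the neighbor block, so its
+  // parent direction, re-expressed from the neighbor voxel's position, is the
+  // same vector shifted one voxel back along `axis`.  If that candidate beats
+  // the neighbor's current squared distance, the neighbor adopts the new
+  // parent and distance (a lower-envelope update across the block boundary).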
+ Eigen::Vector3i potential_direction = esdf_voxel->parent_direction; + potential_direction(axis) -= direction; + float potential_distance = potential_direction.squaredNorm(); + // TODO: might be some concurrency issues here, have to be a bit careful + // on the corners/edges. + if (neighbor_voxel->squared_distance_vox > potential_distance) { + neighbor_voxel->parent_direction = potential_direction; + neighbor_voxel->squared_distance_vox = potential_distance; + return true; + } + return false; +} + +__device__ bool clearSingleNeighbor(const EsdfBlock* esdf_block, + const Index3D& voxel_index, + const Index3D& neighbor_voxel_index, + int axis, int direction, + float max_squared_distance_vox, + EsdfBlock* neighbor_block) { + const EsdfVoxel* esdf_voxel = + &esdf_block->voxels[voxel_index.x()][voxel_index.y()][voxel_index.z()]; + EsdfVoxel* neighbor_voxel = + &neighbor_block + ->voxels[neighbor_voxel_index.x()][neighbor_voxel_index.y()] + [neighbor_voxel_index.z()]; + + if (esdf_voxel->squared_distance_vox < max_squared_distance_vox || + !esdf_voxel->observed || neighbor_voxel->is_site || + neighbor_voxel->squared_distance_vox >= max_squared_distance_vox) { + return false; + } + // Determine if we can update this. + Index3D parent_voxel_dir = neighbor_voxel->parent_direction; + if ((direction > 0 && parent_voxel_dir(axis) > 0) || + (direction < 0 && parent_voxel_dir(axis) < 0)) { + return false; + } + + clearVoxelDevice(neighbor_voxel, max_squared_distance_vox); + return true; +} + +/// Thread size MUST be 8x8xN (where N is a number of blocks up to 8), block +/// size can be anything. +__global__ void sweepBlockBandKernel(int num_blocks, EsdfBlock** esdf_blocks, + float max_squared_distance_vox) { + // We go one axis at a time, syncing threads in between. + dim3 thread_index = threadIdx; + thread_index.z = 0; + + for (int block_index = blockIdx.x * blockDim.z + threadIdx.z; + block_index < num_blocks; block_index += gridDim.x * blockDim.z) { + // For simplicity we have to have the same number of blocks in the CUDA + // kernel call as we have actual blocks. + EsdfBlock* esdf_block = esdf_blocks[block_index]; + Index3D voxel_index(0, thread_index.x, thread_index.y); + + // X axis done. + sweepSingleBand(voxel_index, 0, max_squared_distance_vox, esdf_block); + __syncthreads(); + + // Y axis done. + voxel_index << thread_index.x, 0, thread_index.y; + sweepSingleBand(voxel_index, 1, max_squared_distance_vox, esdf_block); + __syncthreads(); + + // Z axis done. + voxel_index << thread_index.x, thread_index.y, 0; + sweepSingleBand(voxel_index, 2, max_squared_distance_vox, esdf_block); + __syncthreads(); + } +} + +/// Thread size MUST be 8x8xN, where N is the number of blocks processed at +/// a time, block size can be anything. +__global__ void updateLocalNeighborBandsKernel(int num_blocks, int i, + EsdfBlock** esdf_blocks, + int* neighbor_table, + EsdfBlock** neighbor_pointers, + float max_squared_distance_vox, + bool* updated_neighbors) { + // We go one axis at a time, syncing threads in between. + dim3 thread_index = threadIdx; + thread_index.z = 0; + + constexpr int kNumNeighbors = 6; + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + + for (int block_index = blockIdx.x * blockDim.z + threadIdx.z; + block_index < num_blocks; block_index += gridDim.x * blockDim.z) { + EsdfBlock* esdf_block = esdf_blocks[block_index]; + Index3D voxel_index; + Index3D neighbor_voxel_index; + // Each thread updates 1 neighbors, set by "i". + // Get the neighbor block. 
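+    // The neighbor table stores kNumNeighbors = 6 entries per block, one per
+    // face neighbor, ordered so that entry i maps to axis i/2 (x, y, z) and to
+    // the negative direction when i is odd; a negative entry means that
+    // neighbor block is not allocated and the face is skipped.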
+ int neighbor_index = neighbor_table[block_index * kNumNeighbors + i]; + if (neighbor_index < 0) { + continue; + } + EsdfBlock* neighbor_block = neighbor_pointers[neighbor_index]; + // Now we have the neighbor block... Let's figure out which voxels we + // should look at. + int axis = i / 2; + int direction = i % 2 ? -1 : 1; + + // Fill in the axes. + if (axis == 0) { + voxel_index << 0, thread_index.x, thread_index.y; + } else if (axis == 1) { + voxel_index << thread_index.x, 0, thread_index.y; + } else if (axis == 2) { + voxel_index << thread_index.x, thread_index.y, 0; + } + neighbor_voxel_index = voxel_index; + // If we're looking backwards... + if (direction < 0) { + voxel_index(axis) = 0; + neighbor_voxel_index(axis) = kVoxelsPerSide - 1; + } else { + voxel_index(axis) = kVoxelsPerSide - 1; + neighbor_voxel_index(axis) = 0; + } + + bool updated = updateSingleNeighbor( + esdf_block, voxel_index, neighbor_voxel_index, axis, direction, + max_squared_distance_vox, neighbor_block); + if (updated) { + updated_neighbors[neighbor_index] = true; + } + } +} + +/// Thread size MUST be 8x8x8, block size can be anything. +__global__ void clearWithinBlockKernel(int num_blocks, EsdfBlock** esdf_blocks, + float max_squared_distance_vox) { + dim3 voxel_index = threadIdx; + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + + // Allow block size to be whatever. + for (int block_index = blockIdx.x; block_index < num_blocks; + block_index += gridDim.x) { + // Get the voxel. + EsdfVoxel* esdf_voxel = + &esdf_blocks[block_index] + ->voxels[voxel_index.x][voxel_index.y][voxel_index.z]; + // Check if its parent is in the same block. + if (!esdf_voxel->observed || esdf_voxel->is_site || + esdf_voxel->squared_distance_vox >= max_squared_distance_vox) { + continue; + } + // Get the parent. + Index3D parent_index = + Index3D(voxel_index.x, voxel_index.y, voxel_index.z) + + esdf_voxel->parent_direction; + + // Check if the voxel is within the same block. + if (parent_index.x() < 0 || parent_index.x() >= kVoxelsPerSide || + parent_index.y() < 0 || parent_index.y() >= kVoxelsPerSide || + parent_index.z() < 0 || parent_index.z() >= kVoxelsPerSide) { + continue; + } + + // Ok check if the parent index is a site. + if (!esdf_blocks[block_index] + ->voxels[parent_index.x()][parent_index.y()][parent_index.z()] + .is_site) { + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + } + } +} + +/// Thread size MUST be 8x8x8, block size can be anything. +__global__ void clearInternalVoxelsKernel(int num_blocks, + EsdfBlock** esdf_blocks, + float max_squared_distance_vox) { + dim3 voxel_index = threadIdx; + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + + // Allow block size to be whatever. + for (int block_index = blockIdx.x; block_index < num_blocks; + block_index += gridDim.x) { + // Get the voxel. + EsdfVoxel* esdf_voxel = + &esdf_blocks[block_index] + ->voxels[voxel_index.x][voxel_index.y][voxel_index.z]; + if (!esdf_voxel->observed || esdf_voxel->is_site || + esdf_voxel->squared_distance_vox >= max_squared_distance_vox) { + continue; + } + // Get the parent. + Index3D parent_index = + Index3D(voxel_index.x, voxel_index.y, voxel_index.z) + + esdf_voxel->parent_direction; + + // Check if we're our own parent. This is definitely wrong since we're not + // a site. + if (esdf_voxel->parent_direction.isZero()) { + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + continue; + } + + // Get the closest index to the parent within the same block. + // We just get the nearest neighbor. 
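+    // The parent may lie outside this block; clamping each coordinate to
+    // [0, kVoxelsPerSide - 1] picks the in-block voxel nearest to it.  If that
+    // proxy is unobserved or already at/beyond the maximum distance, the chain
+    // back to a site is presumably broken, so the voxel is reset below.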
+ Index3D closest_parent(min(max(parent_index.x(), 0), kVoxelsPerSide - 1), + min(max(parent_index.y(), 0), kVoxelsPerSide - 1), + min(max(parent_index.z(), 0), kVoxelsPerSide - 1)); + + // Ok check if the parent index is a site. + // TODO: Check if we need the observed rule or not... + const EsdfVoxel& neighbor_voxel = + esdf_blocks[block_index]->voxels[closest_parent.x()][closest_parent.y()] + [closest_parent.z()]; + if (!neighbor_voxel.observed || + neighbor_voxel.squared_distance_vox >= max_squared_distance_vox) { + clearVoxelDevice(esdf_voxel, max_squared_distance_vox); + } + } +} + +/// Thread size MUST be 8x8xN, where N is the number of blocks processed at +/// a time, block size can be anything. +__global__ void clearLocalNeighborBandsKernel(int num_blocks, int i, + EsdfBlock** esdf_blocks, + int* neighbor_table, + EsdfBlock** neighbor_pointers, + float max_squared_distance_vox, + bool* updated_neighbors) { + // We go one axis at a time, syncing threads in between. + dim3 thread_index = threadIdx; + thread_index.z = 0; + + constexpr int kNumNeighbors = 6; + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + + for (int block_index = blockIdx.x * blockDim.z + threadIdx.z; + block_index < num_blocks; block_index += gridDim.x * blockDim.z) { + EsdfBlock* esdf_block = esdf_blocks[block_index]; + Index3D voxel_index; + Index3D neighbor_voxel_index; + // Each thread updates 1 neighbors, set by "i". + // Get the neighbor block. + int neighbor_index = neighbor_table[block_index * kNumNeighbors + i]; + if (neighbor_index < 0) { + continue; + } + EsdfBlock* neighbor_block = neighbor_pointers[neighbor_index]; + // Now we have the neighbor block... Let's figure out which voxels we + // should look at. + int axis = i / 2; + int direction = i % 2 ? -1 : 1; + + // Fill in the axes. + if (axis == 0) { + voxel_index << 0, thread_index.x, thread_index.y; + } else if (axis == 1) { + voxel_index << thread_index.x, 0, thread_index.y; + } else if (axis == 2) { + voxel_index << thread_index.x, thread_index.y, 0; + } + neighbor_voxel_index = voxel_index; + // If we're looking backwards... + if (direction < 0) { + voxel_index(axis) = 0; + neighbor_voxel_index(axis) = kVoxelsPerSide - 1; + } else { + voxel_index(axis) = kVoxelsPerSide - 1; + neighbor_voxel_index(axis) = 0; + } + + bool updated = clearSingleNeighbor( + esdf_block, voxel_index, neighbor_voxel_index, axis, direction, + max_squared_distance_vox, neighbor_block); + if (updated) { + updated_neighbors[neighbor_index] = true; + } + } +} + +void EsdfIntegrator::markAllSitesOnGPU( + const TsdfLayer& tsdf_layer, const std::vector& block_indices, + EsdfLayer* esdf_layer, std::vector* blocks_with_sites, + std::vector* cleared_blocks) { + CHECK_NOTNULL(esdf_layer); + CHECK_NOTNULL(blocks_with_sites); + + // Caching. + const float voxel_size = tsdf_layer.voxel_size(); + const float max_distance_vox = max_distance_m_ / voxel_size; + const float max_squared_distance_vox = max_distance_vox * max_distance_vox; + // Cache the minimum distance in metric size. + const float min_site_distance_m = min_site_distance_vox_ * voxel_size; + + int num_blocks = block_indices.size(); + + // Get all of the block pointers we need. + tsdf_pointers_host_.resize(num_blocks); + block_pointers_host_.resize(num_blocks); + + // Have an updated output variable as well. + updated_blocks_device_.resize(num_blocks); + updated_blocks_device_.setZero(); + cleared_blocks_device_.resize(num_blocks); + cleared_blocks_device_.setZero(); + + // Populate all the input vectors. 
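+  // Launch setup: raw TSDF/ESDF block pointers are gathered into the host
+  // vectors, mirrored to the device by assignment, and the kernel is launched
+  // with one CUDA block per ESDF block and an 8x8x8 thread block (one thread
+  // per voxel).  The per-block updated/cleared flags come back as bool arrays
+  // and are repacked into index vectors on the host afterwards.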
+ for (size_t i = 0; i < num_blocks; i++) { + const Index3D& block_index = block_indices[i]; + EsdfBlock::Ptr esdf_block = esdf_layer->getBlockAtIndex(block_index); + TsdfBlock::ConstPtr tsdf_block = tsdf_layer.getBlockAtIndex(block_index); + + if (!esdf_block || !tsdf_block) { + LOG(ERROR) << "Somehow trying to update non-existent blocks!"; + continue; + } + + tsdf_pointers_host_[i] = tsdf_block.get(); + block_pointers_host_[i] = esdf_block.get(); + } + + // Copy what we need over to the device. + tsdf_pointers_device_ = tsdf_pointers_host_; + block_pointers_device_ = block_pointers_host_; + + // Call the kernel. + int dim_block = num_blocks; + constexpr int kVoxelsPerSide = EsdfBlock::kVoxelsPerSide; + dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); + markAllSitesKernel<<>>( + num_blocks, tsdf_pointers_device_.data(), block_pointers_device_.data(), + min_site_distance_m, min_weight_, max_squared_distance_vox, + updated_blocks_device_.data(), cleared_blocks_device_.data()); + checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); + checkCudaErrors(cudaPeekAtLastError()); + + // Copy out. + updated_blocks_host_ = updated_blocks_device_; + cleared_blocks_host_ = cleared_blocks_device_; + + // Get the output vector. + // TODO(helen): swap this to a kernel operation. + for (size_t i = 0; i < num_blocks; i++) { + if (updated_blocks_host_[i]) { + blocks_with_sites->push_back(block_indices[i]); + } + if (cleared_blocks_host_[i]) { + cleared_blocks->push_back(block_indices[i]); + } + } +} + +// 2D slice version of the markAllSites function above. +void EsdfIntegrator::markSitesInSliceOnGPU( + const TsdfLayer& tsdf_layer, const std::vector& block_indices, + float min_z, float max_z, float output_z, EsdfLayer* esdf_layer, + std::vector* updated_blocks, + std::vector* cleared_blocks) { + CHECK_NOTNULL(esdf_layer); + CHECK_NOTNULL(updated_blocks); + CHECK_NOTNULL(cleared_blocks); + + // Caching. + const float voxel_size = tsdf_layer.voxel_size(); + const float max_distance_vox = max_distance_m_ / voxel_size; + const float max_squared_distance_vox = max_distance_vox * max_distance_vox; + // Cache the minimum distance in metric size. + const float min_site_distance_m = min_site_distance_vox_ * voxel_size; + + // We are going to subsample the block_indices. + // We need to figure out all the output blocks, which will be a subset + // of the input blocks. At the same time we need to get all of the stacks + // of input blocks at all levels. + // We are going to pull some "clever" stuff: the input block list will be + // of length N * n_input_blocks, where "N" is the number of vertical + // layers there could be that fall into the min z to max z range. + + // Ok first figure out how many layers we could have. + Index3D min_block_index; + Index3D min_voxel_index; + getBlockAndVoxelIndexFromPositionInLayer(tsdf_layer.block_size(), + Vector3f(0.0f, 0.0f, min_z), + &min_block_index, &min_voxel_index); + const int min_block_index_z = min_block_index.z(); + const int min_voxel_index_z = min_voxel_index.z(); + Index3D max_block_index; + Index3D max_voxel_index; + getBlockAndVoxelIndexFromPositionInLayer(tsdf_layer.block_size(), + Vector3f(0.0f, 0.0f, max_z), + &max_block_index, &max_voxel_index); + const int max_block_index_z = max_block_index.z(); + const int max_voxel_index_z = max_voxel_index.z(); + + // There is always at least 1 layer. + int num_vertical_layers = max_block_index_z - min_block_index_z + 1; + + // And figure out what the index of the output voxel is. 
+ // std::pair output_block_and_voxel_index + Index3D output_block_index; + Index3D output_voxel_index; + getBlockAndVoxelIndexFromPositionInLayer( + tsdf_layer.block_size(), Vector3f(0.0f, 0.0f, output_z), + &output_block_index, &output_voxel_index); + const int output_block_index_z = output_block_index.z(); + const int output_voxel_index_z = output_voxel_index.z(); + + // Next get a list of all the valid input blocks. + Index3DSet output_block_set; + for (const Index3D& block_index : block_indices) { + if (block_index.z() >= min_block_index_z && + block_index.z() <= max_block_index_z) { + output_block_set.insert( + Index3D(block_index.x(), block_index.y(), output_block_index_z)); + } + } + + // Ok now we have all the indices we actually need. + // Just have to get their pointers and we're good. + size_t num_blocks = output_block_set.size(); + if (num_blocks == 0) { + return; + } + + std::vector input_blocks(num_blocks * num_vertical_layers); + std::vector output_blocks(num_blocks); + tsdf_pointers_host_.resize(num_blocks * num_vertical_layers); + tsdf_pointers_host_.setZero(); + block_pointers_host_.resize(num_blocks); + + size_t i = 0; + for (const Index3D& block_index : output_block_set) { + // This is for the output block, which we allocate along the way. + output_blocks[i] = block_index; + block_pointers_host_[i] = + esdf_layer->allocateBlockAtIndex(block_index).get(); + + // Go through all the relevant input pointers: + Index3D input_block_index = block_index; + + int j = 0; + for (input_block_index.z() = min_block_index_z; + input_block_index.z() <= max_block_index_z; input_block_index.z()++) { + input_blocks[i + num_blocks * j] = input_block_index; + // This can be null. It's fine. + tsdf_pointers_host_[i + num_blocks * j] = + tsdf_layer.getBlockAtIndex(input_block_index).get(); + j++; + } + i++; + } + + // Copy what we need over to the device. + tsdf_pointers_device_ = tsdf_pointers_host_; + block_pointers_device_ = block_pointers_host_; + + // Finally, set up the updated and cleared vectors. + updated_blocks_device_.resize(num_blocks); + updated_blocks_device_.setZero(); + cleared_blocks_device_.resize(num_blocks); + cleared_blocks_device_.setZero(); + + // Call the kernel! + int dim_block = num_blocks; + constexpr int kVoxelsPerSide = EsdfBlock::kVoxelsPerSide; + dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, num_vertical_layers); + markSitesInSliceKernel<<>>( + num_blocks, num_blocks, tsdf_pointers_device_.data(), + block_pointers_device_.data(), output_voxel_index_z, min_voxel_index_z, + max_voxel_index_z, min_site_distance_m, min_weight_, + max_squared_distance_vox, updated_blocks_device_.data(), + cleared_blocks_device_.data()); + checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); + checkCudaErrors(cudaPeekAtLastError()); + + // Copy out. + updated_blocks_host_ = updated_blocks_device_; + cleared_blocks_host_ = cleared_blocks_device_; + + // Pack the outputs. The rest of the functions should work as before. + for (size_t i = 0; i < output_blocks.size(); i++) { + if (updated_blocks_host_[i]) { + updated_blocks->push_back(output_blocks[i]); + } + if (cleared_blocks_host_[i]) { + cleared_blocks->push_back(output_blocks[i]); + } + } +} + +void EsdfIntegrator::clearInvalidOnGPU( + const std::vector& blocks_to_clear, EsdfLayer* esdf_layer, + std::vector* updated_blocks) { + CHECK_NOTNULL(esdf_layer); + CHECK_NOTNULL(updated_blocks); + + // Caching. 
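+  // ESDF distances are stored as squared distances in voxel units, so the
+  // metric truncation radius is converted once:
+  //   max_squared_distance_vox = (max_distance_m_ / voxel_size)^2.
+  // Voxels at or beyond this value are treated as not yet reached by any site.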
+ const float voxel_size = esdf_layer->voxel_size(); + const float max_distance_vox = max_distance_m_ / voxel_size; + const float max_squared_distance_vox = max_distance_vox * max_distance_vox; + + int num_blocks = blocks_to_clear.size(); + block_pointers_host_.resize(num_blocks); + + // Have an updated output variable as well. + updated_blocks_device_.resize(num_blocks); + updated_blocks_device_.setZero(); + + // Populate all the input vectors. + for (size_t i = 0; i < num_blocks; i++) { + const Index3D& block_index = blocks_to_clear[i]; + block_pointers_host_[i] = esdf_layer->getBlockAtIndex(block_index).get(); + } + + block_pointers_device_ = block_pointers_host_; + + // Alright now run a kernel to clear all the voxels within a block. + constexpr int kVoxelsPerSide = EsdfBlock::kVoxelsPerSide; + dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); + clearWithinBlockKernel<<>>( + num_blocks, block_pointers_device_.data(), max_squared_distance_vox); + checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); + checkCudaErrors(cudaPeekAtLastError()); + + // Then clear all the neighbors. + Index3DSet all_cleared_blocks; + std::copy(blocks_to_clear.begin(), blocks_to_clear.end(), + std::inserter(all_cleared_blocks, all_cleared_blocks.end())); + + std::vector clear_list = blocks_to_clear; + std::vector new_clear_list; + VLOG(3) << "Blocks to clear: " << blocks_to_clear.size(); + while (!clear_list.empty()) { + clearBlockNeighbors(clear_list, esdf_layer, &new_clear_list); + std::copy(new_clear_list.begin(), new_clear_list.end(), + std::inserter(all_cleared_blocks, all_cleared_blocks.end())); + std::swap(clear_list, new_clear_list); + new_clear_list.clear(); + VLOG(3) << "Clear list size: " << clear_list.size(); + } + + for (const Index3D& index : all_cleared_blocks) { + updated_blocks->push_back(index); + } +} + +void EsdfIntegrator::clearBlockNeighbors(std::vector& clear_list, + EsdfLayer* esdf_layer, + std::vector* new_clear_list) { + int num_blocks = clear_list.size(); + + if (num_blocks == 0) { + return; + } + constexpr int kNumNeighbors = 6; + const float voxel_size = esdf_layer->voxel_size(); + const float max_distance_vox = max_distance_m_ / voxel_size; + const float max_squared_distance_vox = max_distance_vox * max_distance_vox; + + constexpr int kVoxelsPerSide = EsdfBlock::kVoxelsPerSide; + dim3 dim_threads_per_voxel(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); + + // Step 0: block pointers. + block_pointers_host_.resize(num_blocks); + for (size_t i = 0; i < num_blocks; i++) { + const Index3D& block_index = clear_list[i]; + block_pointers_host_[i] = esdf_layer->getBlockAtIndex(block_index).get(); + } + block_pointers_device_ = block_pointers_host_; + + // Step 0a: fix up the blocks so their neighbors are valid. + clearInternalVoxelsKernel<<>>( + num_blocks, block_pointers_device_.data(), max_squared_distance_vox); + checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); + + // Step one: set up the neighbor table. + std::vector neighbor_indices; + neighbor_table_host_.resize(num_blocks * kNumNeighbors); + neighbor_table_host_.setZero(); + neighbor_pointers_host_.resize(0); + + createNeighborTable(clear_list, esdf_layer, &neighbor_indices, + &neighbor_pointers_host_, &neighbor_table_host_); + + // Step two: run the neighbor updating kernel. 
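+  // As in updateLocalNeighborBandsOnGPU, the kernel below is launched once per
+  // neighbor direction so that two source blocks never write to the same
+  // neighbor block within the same pass.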
+ updated_blocks_device_.resize(neighbor_indices.size()); + updated_blocks_device_.setZero(); + + neighbor_pointers_device_ = neighbor_pointers_host_; + neighbor_table_device_ = neighbor_table_host_; + + constexpr int kNumBlocksPerCudaBlock = 8; + int dim_block = std::max( + static_cast( + std::ceil(num_blocks / static_cast(kNumBlocksPerCudaBlock))), + 1); + dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kNumBlocksPerCudaBlock); + // We have to do the neighbors one at a time basically for concurrency + // issues. + // No clue if the concurrency issues hold for the clearing operation. + // But this is easier to copy-and-paste. + for (int i = 0; i < kNumNeighbors; i++) { + clearLocalNeighborBandsKernel<<>>( + num_blocks, i, block_pointers_device_.data(), + neighbor_table_device_.data(), neighbor_pointers_device_.data(), + max_squared_distance_vox, updated_blocks_device_.data()); + } + checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); + checkCudaErrors(cudaPeekAtLastError()); + + // Repack into output vector. + updated_blocks_host_ = updated_blocks_device_; + block_pointers_host_.resize(0); + + new_clear_list->clear(); + for (size_t i = 0; i < neighbor_indices.size(); i++) { + if (updated_blocks_host_[i]) { + new_clear_list->push_back(neighbor_indices[i]); + block_pointers_host_.push_back(neighbor_pointers_host_[i]); + } + } + + // Step three: clear any remaining voxels on the interior of the blocks + int num_updated_blocks = new_clear_list->size(); + if (num_updated_blocks == 0) { + return; + } + + block_pointers_device_ = block_pointers_host_; + clearInternalVoxelsKernel<<>>(block_pointers_device_.size(), + block_pointers_device_.data(), + max_squared_distance_vox); + checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); + checkCudaErrors(cudaPeekAtLastError()); +} + +void EsdfIntegrator::computeEsdfOnGPU( + const std::vector& blocks_with_sites, EsdfLayer* esdf_layer) { + CHECK_NOTNULL(esdf_layer); + // Cache everything. + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + const float voxel_size = esdf_layer->block_size() / kVoxelsPerSide; + const float max_distance_vox = max_distance_m_ / voxel_size; + const float max_squared_distance_vox = max_distance_vox * max_distance_vox; + + block_pointers_host_.resize(blocks_with_sites.size()); + for (size_t i = 0; i < blocks_with_sites.size(); i++) { + block_pointers_host_[i] = + esdf_layer->getBlockAtIndex(blocks_with_sites[i]).get(); + } + + // First we go over all of the blocks with sites. + // We compute all the proximal sites inside the block first. + block_pointers_device_ = block_pointers_host_; + sweepBlockBandOnGPU(block_pointers_device_, max_squared_distance_vox); + + // Get the neighbors of all the blocks with sites. 
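+  // Then propagate outwards iteratively: update the neighbor bands, sweep the
+  // newly touched blocks, and repeat until an iteration updates no blocks.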
+ std::vector blocks_to_run = blocks_with_sites; + std::vector updated_blocks; + + int i = 0; + while (!blocks_to_run.empty()) { + updateLocalNeighborBandsOnGPU(blocks_to_run, block_pointers_device_, + max_squared_distance_vox, esdf_layer, + &updated_blocks, &neighbor_pointers_device_); + VLOG(3) << "Iteration: " << i + << " Number of updated blocks: " << updated_blocks.size() + << " blocks with sites: " << blocks_with_sites.size(); + i++; + sweepBlockBandOnGPU(neighbor_pointers_device_, max_squared_distance_vox); + blocks_to_run = std::move(updated_blocks); + block_pointers_device_ = neighbor_pointers_device_; + } +} + +void EsdfIntegrator::sweepBlockBandOnGPU( + device_vector& block_pointers, float max_squared_distance_vox) { + if (block_pointers.empty()) { + return; + } + timing::Timer sweep_timer("esdf/integrate/compute/sweep"); + + // Caching. + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + const int num_blocks = block_pointers.size(); + + // Call the kernel. + // We do 2-dimensional sweeps in this kernel. Each thread does 3 sweeps. + // We do 8 blocks at a time. + constexpr int kNumBlocksPerCudaBlock = 8; + int dim_block = std::max( + static_cast( + std::ceil(num_blocks / static_cast(kNumBlocksPerCudaBlock))), + 1); + dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kNumBlocksPerCudaBlock); + sweepBlockBandKernel<<>>( + num_blocks, block_pointers.data(), max_squared_distance_vox); + checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); + checkCudaErrors(cudaPeekAtLastError()); +} + +void EsdfIntegrator::updateLocalNeighborBandsOnGPU( + const std::vector& block_indices, + device_vector& block_pointers, float max_squared_distance_vox, + EsdfLayer* esdf_layer, std::vector* updated_blocks, + device_vector* updated_block_pointers) { + if (block_indices.empty()) { + return; + } + + timing::Timer neighbors_timer("esdf/integrate/compute/neighbors"); + + CHECK_EQ(block_indices.size(), block_pointers.size()); + + constexpr int kNumNeighbors = 6; + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + const int num_blocks = block_pointers.size(); + + timing::Timer table_timer("esdf/integrate/compute/neighbors/table"); + + // This one is quite a bit more complicated. + // For each block, we need to get its 6 neighbors. + std::vector neighbor_indices; + neighbor_table_host_.resize(num_blocks * kNumNeighbors); + neighbor_table_host_.setZero(); + neighbor_pointers_host_.resize(0); + + createNeighborTable(block_indices, esdf_layer, &neighbor_indices, + &neighbor_pointers_host_, &neighbor_table_host_); + + table_timer.Stop(); + + // Set up an updated map. + updated_blocks_device_.resize(neighbor_indices.size()); + updated_blocks_device_.setZero(); + + neighbor_pointers_device_ = neighbor_pointers_host_; + neighbor_table_device_ = neighbor_table_host_; + + timing::Timer kernel_timer("esdf/integrate/compute/neighbors/kernel"); + + // Ok now we have to give all this stuff to the kernel. + // TODO(helen): you get weird-ass concurrency issues if this is not 1. + constexpr int kNumBlocksPerCudaBlock = 8; + int dim_block = std::max( + static_cast( + std::ceil(num_blocks / static_cast(kNumBlocksPerCudaBlock))), + 1); + dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kNumBlocksPerCudaBlock); + // We have to do the neighbors one at a time basically for concurrency + // issues. 
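+  // Neighbor i follows the table convention in createNeighborTable:
+  // axis = i / 2, sign = i % 2 ? -1 : +1, i.e. i = 0..5 maps to
+  // +x, -x, +y, -y, +z, -z.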
+ for (int i = 0; i < kNumNeighbors; i++) { + updateLocalNeighborBandsKernel<<>>( + num_blocks, i, block_pointers.data(), neighbor_table_device_.data(), + neighbor_pointers_device_.data(), max_squared_distance_vox, + updated_blocks_device_.data()); + } + checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); + checkCudaErrors(cudaPeekAtLastError()); + + kernel_timer.Stop(); + + // Unpack the kernel results. + // TODO(helen): swap this to a kernel operation. + updated_blocks->clear(); + updated_blocks_host_ = updated_blocks_device_; + block_pointers_host_.resize(0); + + for (size_t i = 0; i < neighbor_indices.size(); i++) { + if (updated_blocks_host_[i]) { + updated_blocks->push_back(neighbor_indices[i]); + block_pointers_host_.push_back(neighbor_pointers_host_[i]); + } + } + *updated_block_pointers = block_pointers_host_; +} + +void EsdfIntegrator::createNeighborTable( + const std::vector& block_indices, EsdfLayer* esdf_layer, + std::vector* neighbor_indices, + host_vector* neighbor_pointers, + host_vector* neighbor_table) { + // TODO(helen): make this extensible to different number of neighbors. + constexpr int kNumNeighbors = 6; + int num_blocks = block_indices.size(); + + // Hash map mapping the neighbor index to the pointers above. + Index3DHashMapType::type neighbor_map; + + // Direction Shorthand: axis = neighbor_index/2 + // direction = neighbor_index%2 ? -1 : 1 + Index3D direction = Index3D::Zero(); + for (int block_number = 0; block_number < num_blocks; block_number++) { + const Index3D& block_index = block_indices[block_number]; + for (int neighbor_number = 0; neighbor_number < kNumNeighbors; + neighbor_number++) { + direction.setZero(); + // Change just one axis of the direction. + direction(neighbor_number / 2) = neighbor_number % 2 ? -1 : 1; + // Check if this is already in our hash. + Index3D neighbor_index = block_index + direction; + auto res = neighbor_map.find(neighbor_index); + if (res != neighbor_map.end()) { + (*neighbor_table)[block_number * kNumNeighbors + neighbor_number] = + res->second; + } else { + // Doesn't exist in the neighbor list yet. 
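+        // If the neighbor block is allocated it gets the next table slot;
+        // otherwise store -1 so the kernels skip that face.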
+ EsdfBlock::Ptr esdf_block = esdf_layer->getBlockAtIndex(neighbor_index); + if (esdf_block) { + int next_index = neighbor_indices->size(); + neighbor_indices->push_back(neighbor_index); + neighbor_pointers->push_back(esdf_block.get()); + neighbor_map[neighbor_index] = next_index; + (*neighbor_table)[block_number * kNumNeighbors + neighbor_number] = + next_index; + } else { + (*neighbor_table)[block_number * kNumNeighbors + neighbor_number] = + -1; + neighbor_map[neighbor_index] = -1; + } + } + } + } + CHECK_EQ(neighbor_table->size(), kNumNeighbors * block_indices.size()); + CHECK_EQ(neighbor_indices->size(), neighbor_pointers->size()); +} + +} // namespace nvblox \ No newline at end of file diff --git a/cuda_code/event_based_learning.cu b/cuda_code/event_based_learning.cu new file mode 100644 index 0000000000000000000000000000000000000000..db8bbb9c79a1f5fb1046d5715051c526069a4bdd --- /dev/null +++ b/cuda_code/event_based_learning.cu @@ -0,0 +1,1696 @@ +#include +#include +#include +#include +#include "header.h" +#include +#include +#include +#include +#include +#include "CImg.h" +#include +#include +#include +#include "cifar10_reader.hpp" +#include + +#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } +inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) +{ + if (code != cudaSuccess) + { + fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); + //if (abort) exit(code); + } +} + +void run_event_based_learning(string index_prefix, float input_float, float input_float_2, int input_int, int input_int_2, string input_img){ + + int resume_learning = 0; + CNN_struct *network_config = new CNN_struct; + network_config_generator(3, network_config); + Neuron *NeuronList_temp = new Neuron[1]; + CNN_struct *d_network_config; + cudaMalloc((void **)&d_network_config,sizeof(CNN_struct)); + cudaMemcpy(d_network_config,network_config,sizeof(CNN_struct),cudaMemcpyHostToDevice); + int total_depth_number = 0; + for(int i=0;ilayer[i].depth; + cout<<"depth number: "<layer[i].depth<layer[i].neuron_num; + if(i!=0) total_spiking_num += network_config->layer[i].neuron_num; + } + total_spiking_num += 5; + total_neuron_num += 5; + //total_neuron_num = 20000; + cout<img_load_max){ //manually set the maximum number of images to be loaded once is 60000 + cout<<"Using batch loading"< folder_list; + int input_folder_cnt = 0; + int current_total_read_event = 0; + if(input_image_channel==1 || input_image_channel==2){ + + + current_total_read_event = IBM_DVS128_event_based(image_file, events_host, img_load_max, img_load_max); + cudaMemcpy(events_GPU,events_host,img_load_max*sizeof(Event_Camera_Input),cudaMemcpyHostToDevice); + + }else{ + printf("Input channel error."); + return; + + } + clock_t load_end = clock(); + cout< Neuron y Spike! 
In the array index [(x-1)*SIZE+(y-1)] => 1 + //curandState_t *states; + //float *random_number_list = new float[SIZE]; + float *log_v_host = new float[MAX_TIME]; + float *log_spike_host = new float[total_depth_number]; + + float *log_total_spike_host = new float[SIZE]; + for(int i=0; i < SIZE; i++){ + log_total_spike_host[i] = 0; + } + int *spike_flag = new int[CNN_total_layer_num]; + for(int i=0; i < CNN_total_layer_num; i++){ + spike_flag[i] = 0; + } + for(int i=0; ilayer[start_layer].first_depth_id - 0.1; + float end_depth = network_config->layer[start_layer].last_depth_id + 0.1; + + reset_weight(NeuronList, start_depth, end_depth, 1, spiking_neuron_num); + + } + //read_neuron_list(NeuronList, 1, "spike_cnn.txt"); + }else{ + read_neuron_list(NeuronList, 1, "spike_cnn.txt"); + //read_neuron_list(NeuronList, 1, "device2_output_network.txt"); + + } + //printf("read out one neuron depth: %f", NeuronList[116000].param[7]); + //write_neuron_list(NeuronList, "learning_output_confirm.txt", SIZE); + //check_neuron(NeuronList, 800, 820); + + + //Neuron *old_device_neurons; + //unsigned char *snapse_timer_device; + float *log_v; + float *log_spike; + float *log_spike_default; + float *log_total_spike; + int *spike_flag_device; + + + printf("2\n"); + //random number function: + + float rand_list_size_to_total_connection_ratio = 1; + int rand_numb_size = SPIKING_NEURON_NUM*MAX_CONNECTION; + + int SIZE_PER_SIDE = sqrt(rand_numb_size)+1; + dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock ); + dim3 dimGrid( (SIZE_PER_SIDE/dimBlock.x+1), (SIZE_PER_SIDE/dimBlock.y+1)); + dim3 print_grid(1); + dim3 print_block(1); + dim3 dimBlock_unit( 1, 1 ); + dim3 dimGrid_unit(1, 1); + + int SIZE_PER_SIDE_whole_network = sqrt(spiking_neuron_num)+1; + dim3 dimBlock_whole_network( ThreadsPerBlock*2, ThreadsPerBlock ); + dim3 dimGrid_whole_network( (SIZE_PER_SIDE_whole_network/dimBlock.x+1), (SIZE_PER_SIDE_whole_network/dimBlock.y+1)); + printf("2.1\n"); + + curandState_t *states; + +// if (STOCHASTIC_STDP) rand_init<<>>(time(0), rand_numb_size, states); +// float *random_number_list = new float[rand_numb_size]; +// float *random_number_list_device; +// SIZE_PER_SIDE = sqrt(rand_numb_size)+1; +// dim3 dimBlock_synapse( ThreadsPerBlock, ThreadsPerBlock ); +// dim3 dimGrid_synapse( (SIZE_PER_SIDE/dimBlock.x+1), (SIZE_PER_SIDE/dimBlock.y+1)); + +// cudaMalloc((void **)&random_number_list_device,rand_numb_size*sizeof(float)); +// cudaMemcpy(random_number_list_device,random_number_list,rand_numb_size*sizeof(float),cudaMemcpyHostToDevice); +// if (STOCHASTIC_STDP) random<<>>(random_number_list_device, rand_numb_size, states); + + curandGenerator_t gen_uniform; + float *random_number_list_device; + if(STOCHASTIC_STDP || STOCHASTIC_ROUNDING || DEVICE_VARIATION){ + cudaMalloc((void **)&states, rand_numb_size * sizeof(curandState_t)); + cudaMalloc((void **)&random_number_list_device,rand_numb_size*sizeof(float)); + curandCreateGenerator(&gen_uniform, CURAND_RNG_PSEUDO_DEFAULT); + curandSetPseudoRandomGeneratorSeed(gen_uniform, time(0)); + curandGenerateUniform(gen_uniform, random_number_list_device, rand_numb_size); + } + + + curandGenerator_t gen_normal; + float *random_number_normal_device; + float normal_mean = 0; + float normal_sd = 5.0; + if(STOCHASTIC_STDP || STOCHASTIC_ROUNDING || DEVICE_VARIATION){ + cudaMalloc((void **)&random_number_normal_device,rand_numb_size*sizeof(float)); + curandCreateGenerator(&gen_normal, CURAND_RNG_PSEUDO_DEFAULT); + curandSetPseudoRandomGeneratorSeed(gen_normal, time(0)); + 
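+		// Pre-generate a pool of N(0, 5.0) samples; these are later passed to
+		// synapse_drive_cnn_v2 alongside the uniform pool, presumably for the
+		// stochastic-rounding / device-variation paths gated by the flags above.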
curandGenerateNormal(gen_normal, random_number_normal_device, rand_numb_size, normal_mean, normal_sd); + } + +// printf("2.11\n"); + //Setting up input instance matrix: + float **d_input_instance; + float **d_convolution_result; + float **h_input_instance; + float **h_convolution_result; + float *probe = new float[1000]; + int instance_array_size = CNN_total_layer_num; + cudaMalloc(&d_input_instance, instance_array_size*sizeof(float *)); + int convolution_result_size = CNN_total_layer_num - 1; + cudaMalloc(&d_convolution_result, convolution_result_size*sizeof(float *)); + h_input_instance = (float**)malloc(instance_array_size * sizeof(float*)); + h_convolution_result = (float**)malloc(convolution_result_size * sizeof(float*)); + CNN_util(network_config, d_input_instance, d_convolution_result, h_input_instance, h_convolution_result, 0); + printf("2.2\n"); +// float **add = &h_convolution_result[0]; +// printf("Address On GPU: %p\n", add); + + //========Setting up device neuron list============ + + Neuron *Neuron_list_device; + Input_neuron *Input_neuronlist_device; + cudaMalloc((void **)&Neuron_list_device, spiking_neuron_num*sizeof(Neuron)); + cudaMalloc((void **)&Input_neuronlist_device, input_neuron_num*sizeof(Input_neuron)); + //cudaMalloc((void **)&old_device_neurons, SIZE*sizeof(Neuron)); + + //cudaMalloc((void **)&states, SIZE * sizeof(curandState_t)); + cudaMalloc((void **)&log_v, MAX_TIME * sizeof(float)); + cudaMalloc((void **)&log_spike, total_depth_number * sizeof(float)); + cudaMalloc((void **)&log_spike_default, total_depth_number * sizeof(float)); + //cudaMalloc((void **)&log_total_spike, SIZE * sizeof(float)); + gpuErrchk( cudaMalloc((void **)&log_total_spike, SIZE * sizeof(float)) ); + cudaMalloc((void **)&spike_flag_device, instance_array_size*sizeof(int)); + //rand_init<<>>(time(0), states); + printf("2.3\n"); + cudaMemcpy(Neuron_list_device,NeuronList,spiking_neuron_num*sizeof(Neuron),cudaMemcpyHostToDevice); + cudaMemcpy(Input_neuronlist_device,Input_neuronlist,input_neuron_num*sizeof(Input_neuron),cudaMemcpyHostToDevice); + //cudaMemcpy(old_device_neurons,NeuronList,SIZE*sizeof(Neuron),cudaMemcpyHostToDevice); + //cudaMemcpy(random_number_list_device, random_number_list, SIZE*sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(log_v,log_v_host,MAX_TIME*sizeof(float),cudaMemcpyHostToDevice); + cudaMemcpy(log_spike,log_spike_host,total_depth_number*sizeof(float),cudaMemcpyHostToDevice); + cudaMemcpy(log_spike_default,log_spike_host,total_depth_number*sizeof(float),cudaMemcpyHostToDevice); + gpuErrchk( cudaMemcpy(log_total_spike,log_total_spike_host,SIZE*sizeof(float),cudaMemcpyHostToDevice) ); + cudaMemcpy(spike_flag_device,spike_flag,instance_array_size*sizeof(int),cudaMemcpyHostToDevice); + printf("3.0\n"); + //cout<<"network size: "< myvector; + for (int i=0; i seq_vector_head; + std::vector seq_vector; + for (int i=0; i>>(d_network_config, h_filter_array[0], 1); + //read_filter_GPU<<<1, 1>>>(d_network_config, d_filter_array); + +// int reiter_run = 1; + + int time = 0; + int training_img_index = 0; + + //============now load all convolution settings=========== + for(int layer_iter=0;layer_iterlayer[1].first_depth_id - 0.1; + float end_depth = network_config->layer[1].last_depth_id + 0.1; + cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -62.2); + //return; + int event_count = 0; + while (time0) { + write_neuron_list(NeuronList, interval_file_name, spiking_neuron_num); + 
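+				// Refresh the host-side filter pointer table and write the current
+				// filters out as well, so the periodic checkpoint captures both the
+				// network state and the filters at this timestep.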
cudaMemcpy(h_filter_array, d_filter_array, filter_array_size* sizeof(float*), cudaMemcpyDeviceToHost); + filter_util(network_config, NeuronList, network_size, input_neuron_num, h_filter_array, d_filter_array, to_string(time), 1); //write filter to file + } + } + + if(time%time_per_event){ + event_count++; + while(events_host[event_count].valid==False && event_count=current_total_read_event){ + cout<input_file_id_max) current_input_file_id = 1; + + if (current_input_file_id<10) { + image_file = "/hdd2/extra_home/xshe6/Event_camera/event_based/user0" + to_string(current_input_file_id) + "_event_based.csv"; + } + else{ + image_file = "/hdd2/extra_home/xshe6/Event_camera/event_based/user" + to_string(current_input_file_id) + "_event_based.csv"; + } + + current_total_read_event = 0; + current_total_read_event = IBM_DVS128_event_based(image_file, events_host, img_load_max, img_load_max); + cout<<"Total loaded:"<< current_total_read_event<layer[1].first_depth_id - 0.1; + float end_depth = network_config->layer[1].last_depth_id + 0.1; + //cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 5, -5.07); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 4, 0.453); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 0, -0.02); + change_threshold<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -63.2); +// cout<<"Changing param of long-term neuron, start: "<< start_depth+32<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth+32, end_depth, 5, -1.6); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+32, end_depth, 4, 0.16); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+32, end_depth, 0, -0.001); + //change_threshold<<>>(Neuron_list_device, spiking_neuron_num, start_depth+32, end_depth, -56.2); + +// start_depth = network_config->layer[2].first_depth_id - 0.1; +// end_depth = network_config->layer[2].last_depth_id + 0.1; +// cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -61.0); + + + //training_time_each_img = training_time_each_img*1.3; + cudaDeviceSynchronize(); + + }else if(time==second_layer_time){ + + gpuErrchk( cudaMemcpy(log_total_spike_host,log_total_spike,SIZE*sizeof(float),cudaMemcpyDeviceToHost) ); + ofstream myfile ((index_prefix+"second_stage_device2_spike_of_neuron_out.csv")); + if (myfile.is_open()){ + //myfile << "This is a new test\n"; + for(int i=0; i < SIZE; i++){ + //printf("_%f_", log_v_host[i]); + myfile << log_total_spike_host[i] << ", "; + } + myfile.close(); + } + + float start_depth = network_config->layer[1].first_depth_id - 0.1; + float end_depth = network_config->layer[1].last_depth_id + 0.1; + //cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -64); + + + start_depth = network_config->layer[2].first_depth_id - 0.1; + end_depth = network_config->layer[2].last_depth_id + 0.1; + //cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 5, -5.07); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 4, 0.453); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 0, -0.02); + change_threshold<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -63); +// 
cout<<"Changing param, start: "<< start_depth+32<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth+64, end_depth, 5, -1.6); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+64, end_depth, 4, 0.16); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+64, end_depth, 0, -0.001); + +// start_depth = network_config->layer[3].first_depth_id - 0.1; +// end_depth = network_config->layer[3].last_depth_id + 0.1; +// cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -60.0); + + }else if(time==third_layer_time){ + + gpuErrchk( cudaMemcpy(log_total_spike_host,log_total_spike,SIZE*sizeof(float),cudaMemcpyDeviceToHost) ); + ofstream myfile ((index_prefix+"third_stage_device2_spike_of_neuron_out.csv")); + if (myfile.is_open()){ + //myfile << "This is a new test\n"; + for(int i=0; i < SIZE; i++){ + //printf("_%f_", log_v_host[i]); + myfile << log_total_spike_host[i] << ", "; + } + myfile.close(); + } + + float start_depth = network_config->layer[1].first_depth_id - 0.1; + float end_depth = network_config->layer[1].last_depth_id + 0.1; + //cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -68.2); + +// + start_depth = network_config->layer[3].first_depth_id - 0.1; + end_depth = network_config->layer[3].last_depth_id + 0.1; + cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 5, -5.07); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 4, 0.453); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 0, -0.02); +// change_threshold<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -63); +// cout<<"Changing param, start: "<< start_depth+32<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth+128, end_depth, 5, -1.6); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+128, end_depth, 4, 0.16); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+128, end_depth, 0, -0.001); + + start_depth = network_config->layer[4].first_depth_id - 0.1; + end_depth = network_config->layer[4].last_depth_id + 0.1; +// cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -60.0); + + cout<<"Parameter Changing complete.\n"; + cudaDeviceSynchronize(); + } + + if(time%tenpercent_iter == 0){ + iter_log = clock(); + cout<layer[layer_iter].neuron_num; + int convolution_result_index = layer_iter - 1; + if (layer_iter==0) {//fault at convolution kernel and spiking cnn + convolution_result_index = 0; + spiking_cnn_main_event_based(Neuron_list_device, Input_neuronlist_device, events_GPU, event_count, network_config, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, input_float, time, false); + convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==1){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, 
input_signal_width, 0.7*input_float, time, true); + synapse_drive_cnn_v2(Neuron_list_device, Input_neuronlist_device, network_config, d_network_config, d_filter_array, layer_iter, \ + spiking_neuron_num, input_neuron_num, syn_timer_max, connection_size, random_number_list_device, random_number_normal_device, states, -1.0, -1.0, log_total_spike);//STDP + } + } + //=================TRY WITH LAYER wise inhibition===================== + if(depth_wise_inhibition) { + + }else if(through_depth_inhibition){ + + }else if(apply_local_inhibition){ + + } + else{ + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 1, inhibition_time, d_network_config, spike_flag_device); + } + if(HOMEOSTASIS_ENABLE){ + if(time%HOMEOSTASIS_UPDATE_FREQUENCY == 0 && time != 0){ + //spiking_learning_drive(Neuron_list_device, network_size, inhibition_time, log_total_spike, target_frequency, time, log_spike, 0, 1); + } + } + }else if(timelayer[layer_iter].neuron_num; + int convolution_result_index = layer_iter - 1; + if (layer_iter==0) {//fault at convolution kernel and spiking cnn + convolution_result_index = 0; + spiking_cnn_main_event_based(Neuron_list_device, Input_neuronlist_device, events_GPU, event_count, network_config, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, input_float, time, false); + convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==1){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 0.5*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==2){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 2*input_float, time, true); + synapse_drive_cnn_v2(Neuron_list_device, Input_neuronlist_device, network_config, d_network_config, d_filter_array, layer_iter, spiking_neuron_num, input_neuron_num, \ + syn_timer_max, connection_size, random_number_list_device, random_number_normal_device, states, -1.0, -1.0, log_total_spike);//STDP + } + + } + //=================TRY WITH LAYER wise inhibition===================== + if(depth_wise_inhibition) { + + }else if(forced_lateral_inhibition_at_last_layer && CNN_total_layer_num==3){//if this is the last layer, use lateral_inhibition + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + }else if(through_depth_inhibition){ + + }else if(apply_local_inhibition && CNN_total_layer_num!=3){ + + } + else{ + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + } + if(HOMEOSTASIS_ENABLE){ + if(time%HOMEOSTASIS_UPDATE_FREQUENCY == 0 && time != 0){ + //spiking_learning_drive(Neuron_list_device, network_size, inhibition_time, log_total_spike, 
target_frequency, time, log_spike, 0, 1); + } + } + + }else if(timelayer[layer_iter].neuron_num; + int convolution_result_index = layer_iter - 1; + if (layer_iter==0) {//fault at convolution kernel and spiking cnn + convolution_result_index = 0; + spiking_cnn_main_event_based(Neuron_list_device, Input_neuronlist_device, events_GPU, event_count, network_config, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, input_float, time, false); + convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==1){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 0.5*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==2){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 0.5*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, \ + h_convolution_result, probe); + }else if(layer_iter==3){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 2*input_float, time, true); + synapse_drive_cnn_v2(Neuron_list_device, Input_neuronlist_device, network_config, d_network_config, d_filter_array, layer_iter, spiking_neuron_num, input_neuron_num, \ + syn_timer_max, connection_size, random_number_list_device, random_number_normal_device, states, -1.0, -1.0, log_total_spike);//STDP + } + + } + //=================TRY WITH LAYER wise inhibition===================== + if(depth_wise_inhibition) { + + }else if(forced_lateral_inhibition_at_last_layer && CNN_total_layer_num==3){//if this is the last layer, use lateral_inhibition + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + }else if(through_depth_inhibition){ + + }else if(apply_local_inhibition && CNN_total_layer_num!=3){ + + } + else{ + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + } + if(HOMEOSTASIS_ENABLE){ + if(time%HOMEOSTASIS_UPDATE_FREQUENCY == 0 && time != 0){ + //spiking_learning_drive(Neuron_list_device, network_size, inhibition_time, log_total_spike, target_frequency, time, log_spike, 0, 1); + } + } + + }else{ + for(int layer_iter=0;layer_iterlayer[layer_iter].neuron_num; + int convolution_result_index = layer_iter - 1; + if (layer_iter==0) {//fault at convolution kernel and spiking cnn + convolution_result_index = 0; + spiking_cnn_main_event_based(Neuron_list_device, Input_neuronlist_device, events_GPU, 
event_count, network_config, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, input_float, time, false); + convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==1 ){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 3*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==2){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 0.5*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==3){ + bool last_layer_inhib = !last_layer_teach; + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 4*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + //synapse_drive_cnn_v2(Neuron_list_device, Input_neuronlist_device, network_config, d_network_config, d_filter_array, layer_iter, spiking_neuron_num, input_neuron_num, syn_timer_max, connection_size, random_number_list_device, random_number_normal_device, states, -1.0, -1.0, log_total_spike);//STDP + }else if(layer_iter==4){ + bool last_layer_inhib = !last_layer_teach; + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 15*input_float, time, true); + synapse_drive_cnn_v2(Neuron_list_device, Input_neuronlist_device, network_config, d_network_config, d_filter_array, layer_iter, spiking_neuron_num, input_neuron_num, syn_timer_max, connection_size, random_number_list_device, random_number_normal_device, states, -1.0, -1.0, log_total_spike);//STDP + } + + } + //=================TRY WITH LAYER wise inhibition===================== + if(depth_wise_inhibition) { + + }else if(forced_lateral_inhibition_at_last_layer && CNN_total_layer_num==4){//if this is the last layer, use lateral_inhibition + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + }else if(through_depth_inhibition){ + + }else if(apply_local_inhibition && CNN_total_layer_num!=3){ + + } + else{ + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + } + 
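+			// Optional homeostasis step; the spiking_learning_drive call inside is
+			// commented out, so this branch is currently a no-op.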
if(HOMEOSTASIS_ENABLE){ + if(time%HOMEOSTASIS_UPDATE_FREQUENCY == 0 && time != 0){ + //spiking_learning_drive(Neuron_list_device, network_size, inhibition_time, log_total_spike, target_frequency, time, log_spike, 0, 1); + } + } + + } +// printf("T: %d_",time); +// if(time==100) break; + time ++; + } + //spiking_learning_drive(Neuron_list_device, network_size, inhibition_time, 2); + //cudaDeviceSynchronize(); + + filter_util(network_config, Neuron_list_device, spiking_neuron_num, input_neuron_num, h_filter_array, d_filter_array, index_prefix, 2); + cudaMemcpy(NeuronList,Neuron_list_device,spiking_neuron_num*sizeof(Neuron),cudaMemcpyDeviceToHost); + cudaMemcpy(log_v_host,log_v,MAX_TIME*sizeof(float),cudaMemcpyDeviceToHost); + cudaMemcpy(log_spike_host,log_spike,total_depth_number*sizeof(float),cudaMemcpyDeviceToHost); + gpuErrchk( cudaMemcpy(log_total_spike_host,log_total_spike,SIZE*sizeof(float),cudaMemcpyDeviceToHost) ); + + + //print out the synapse conductance data + //data_check(NeuronList,log_total_spike,SIZE, mnist_start_index, mnist_end_index, 2); + + ofstream myfile ((index_prefix+"device2_spike_of_neuron_out.csv")); + if (myfile.is_open()){ + //myfile << "This is a new test\n"; + for(int i=0; i < SIZE; i++){ + //printf("_%f_", log_v_host[i]); + myfile << log_total_spike_host[i] << ", "; + } + myfile.close(); + } + + ofstream myfile_p ((index_prefix+"probe.csv")); + if (myfile_p.is_open()){ + //myfile << "This is a new test\n"; + for(int i=0; i < 1000; i++){ + //printf("_%f_", log_v_host[i]); + myfile_p << probe[i] << ", "; + } + myfile_p.close(); + } + +// +// ofstream myfile_0 ((index_prefix+"device2_out_v.csv")); +// if (myfile_0.is_open()){ +// //myfile << "This is a new test\n"; +// for(int i=0; i < MAX_TIME; i++){ +// //printf("_%f_", log_v_host[i]); +// myfile_0 << log_v_host[i] << ", "; +// } +// myfile.close(); +// } +// +// ofstream myfile_2 ((index_prefix+"device2_spike_of_one.csv")); +// if (myfile_2.is_open()){ +// //myfile << "This is a new test\n"; +// for(int i=0; i < MAX_TIME; i++){ +// //printf("_%f_", log_v_host[i]); +// myfile_2 << log_spike_host[i] << ", "; +// } +// myfile_2.close(); +// } + + + + cudaMemcpy(h_filter_array, d_filter_array, filter_array_size* sizeof(float*), cudaMemcpyDeviceToHost); + filter_util(network_config, NeuronList, network_size, input_neuron_num, h_filter_array, d_filter_array, index_prefix, 1); //write filter to file + write_neuron_list(NeuronList, (index_prefix+"device2_output_network.txt"), spiking_neuron_num); + data_check(NeuronList,log_total_spike,SIZE, mnist_start_index, mnist_end_index, 2, ""); + //===clean up=== + //delete[] random_number_list; + delete[] log_v_host; + delete[] NeuronList; + delete[] log_spike_host; + delete[] log_total_spike_host; + delete[] events_host; + delete[] NeuronList_temp; + delete[] one_mnist_img; + delete[] probe; +// delete[] random_number_list; + delete[] mnist_label; + delete[] spike_flag; + delete[] num_one_digit_img; + //cudaFree(states); + cudaFree(log_v); + cudaFree(log_spike); + cudaFree(log_total_spike); + cudaFree(Neuron_list_device); + //cudaFree(old_device_neurons); + cudaFree(random_number_list_device); + cudaFree(d_network_config); + cudaFree(states); + cudaFree(spike_flag_device); + cudaFree(log_spike_default); + +} + + + +void run_event_based_learning_hsnn(string index_prefix, float input_float, float input_float_2, int input_int, int input_int_2, string input_img, int resume_learning, int start_layer){ + int depth_list[3]; + if (start_layer==1){ + depth_list[0]=16; depth_list[1]=32; 
depth_list[2]=32; + } + else if (start_layer==2){ + depth_list[0]=32; depth_list[1]=32; depth_list[2]=32; + } + else if (start_layer==3){ + depth_list[0]=32; depth_list[1]=64; depth_list[2]=32; + } + //cout<layer[i].depth; + cout<<"depth number: "<layer[i].depth<layer[i].neuron_num; + if(i!=0) total_spiking_num += network_config->layer[i].neuron_num; + } + total_spiking_num += 5; + total_neuron_num += 5; + //total_neuron_num = 20000; + cout<img_load_max){ //manually set the maximum number of images to be loaded once is 60000 + cout<<"Using batch loading"< folder_list; + int input_folder_cnt = 0; + int current_total_read_event = 0; + if(input_image_channel==1 || input_image_channel==2){ + + + current_total_read_event = IBM_DVS128_event_based(image_file, events_host, img_load_max, img_load_max); + cudaMemcpy(events_GPU,events_host,img_load_max*sizeof(Event_Camera_Input),cudaMemcpyHostToDevice); + + }else{ + printf("Input channel error."); + return; + + } + clock_t load_end = clock(); + cout< Neuron y Spike! In the array index [(x-1)*SIZE+(y-1)] => 1 + //curandState_t *states; + //float *random_number_list = new float[SIZE]; + float *log_v_host = new float[MAX_TIME]; + float *log_spike_host = new float[total_depth_number]; + + float *log_total_spike_host = new float[SIZE]; + for(int i=0; i < SIZE; i++){ + log_total_spike_host[i] = 0; + } + int *spike_flag = new int[CNN_total_layer_num]; + for(int i=0; i < CNN_total_layer_num; i++){ + spike_flag[i] = 0; + } + for(int i=0; ilayer[start_layer].first_depth_id - 0.1; + float end_depth = network_config->layer[start_layer].last_depth_id + 0.1; + + reset_weight(NeuronList, start_depth, end_depth, 1, spiking_neuron_num); + + } + //read_neuron_list(NeuronList, 1, "spike_cnn.txt"); + }else{ + read_neuron_list(NeuronList, 1, "spike_cnn.txt"); + //read_neuron_list(NeuronList, 1, "device2_output_network.txt"); + + } + //printf("read out one neuron depth: %f", NeuronList[116000].param[7]); + //write_neuron_list(NeuronList, "learning_output_confirm.txt", SIZE); + //check_neuron(NeuronList, 800, 820); + + + //Neuron *old_device_neurons; + //unsigned char *snapse_timer_device; + float *log_v; + float *log_spike; + float *log_spike_default; + float *log_total_spike; + int *spike_flag_device; + + + printf("2\n"); + //random number function: + + float rand_list_size_to_total_connection_ratio = 1; + int rand_numb_size = SPIKING_NEURON_NUM*MAX_CONNECTION; + + int SIZE_PER_SIDE = sqrt(rand_numb_size)+1; + dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock ); + dim3 dimGrid( (SIZE_PER_SIDE/dimBlock.x+1), (SIZE_PER_SIDE/dimBlock.y+1)); + dim3 print_grid(1); + dim3 print_block(1); + dim3 dimBlock_unit( 1, 1 ); + dim3 dimGrid_unit(1, 1); + + int SIZE_PER_SIDE_whole_network = sqrt(spiking_neuron_num)+1; + dim3 dimBlock_whole_network( ThreadsPerBlock*2, ThreadsPerBlock ); + dim3 dimGrid_whole_network( (SIZE_PER_SIDE_whole_network/dimBlock.x+1), (SIZE_PER_SIDE_whole_network/dimBlock.y+1)); + printf("2.1\n"); + + curandState_t *states; + +// if (STOCHASTIC_STDP) rand_init<<>>(time(0), rand_numb_size, states); +// float *random_number_list = new float[rand_numb_size]; +// float *random_number_list_device; +// SIZE_PER_SIDE = sqrt(rand_numb_size)+1; +// dim3 dimBlock_synapse( ThreadsPerBlock, ThreadsPerBlock ); +// dim3 dimGrid_synapse( (SIZE_PER_SIDE/dimBlock.x+1), (SIZE_PER_SIDE/dimBlock.y+1)); + +// cudaMalloc((void **)&random_number_list_device,rand_numb_size*sizeof(float)); +// 
cudaMemcpy(random_number_list_device,random_number_list,rand_numb_size*sizeof(float),cudaMemcpyHostToDevice); +// if (STOCHASTIC_STDP) random<<>>(random_number_list_device, rand_numb_size, states); + + curandGenerator_t gen_uniform; + float *random_number_list_device; + if(STOCHASTIC_STDP || STOCHASTIC_ROUNDING || DEVICE_VARIATION){ + cudaMalloc((void **)&states, rand_numb_size * sizeof(curandState_t)); + cudaMalloc((void **)&random_number_list_device,rand_numb_size*sizeof(float)); + curandCreateGenerator(&gen_uniform, CURAND_RNG_PSEUDO_DEFAULT); + curandSetPseudoRandomGeneratorSeed(gen_uniform, time(0)); + curandGenerateUniform(gen_uniform, random_number_list_device, rand_numb_size); + } + + + curandGenerator_t gen_normal; + float *random_number_normal_device; + float normal_mean = 0; + float normal_sd = 5.0; + if(STOCHASTIC_STDP || STOCHASTIC_ROUNDING || DEVICE_VARIATION){ + cudaMalloc((void **)&random_number_normal_device,rand_numb_size*sizeof(float)); + curandCreateGenerator(&gen_normal, CURAND_RNG_PSEUDO_DEFAULT); + curandSetPseudoRandomGeneratorSeed(gen_normal, time(0)); + curandGenerateNormal(gen_normal, random_number_normal_device, rand_numb_size, normal_mean, normal_sd); + } + +// printf("2.11\n"); + //Setting up input instance matrix: + float **d_input_instance; + float **d_convolution_result; + float **h_input_instance; + float **h_convolution_result; + float *probe = new float[1000]; + int instance_array_size = CNN_total_layer_num; + cudaMalloc(&d_input_instance, instance_array_size*sizeof(float *)); + int convolution_result_size = CNN_total_layer_num - 1; + cudaMalloc(&d_convolution_result, convolution_result_size*sizeof(float *)); + h_input_instance = (float**)malloc(instance_array_size * sizeof(float*)); + h_convolution_result = (float**)malloc(convolution_result_size * sizeof(float*)); + CNN_util(network_config, d_input_instance, d_convolution_result, h_input_instance, h_convolution_result, 0); + printf("2.2\n"); +// float **add = &h_convolution_result[0]; +// printf("Address On GPU: %p\n", add); + + //========Setting up device neuron list============ + + Neuron *Neuron_list_device; + Input_neuron *Input_neuronlist_device; + cudaMalloc((void **)&Neuron_list_device, spiking_neuron_num*sizeof(Neuron)); + cudaMalloc((void **)&Input_neuronlist_device, input_neuron_num*sizeof(Input_neuron)); + //cudaMalloc((void **)&old_device_neurons, SIZE*sizeof(Neuron)); + + //cudaMalloc((void **)&states, SIZE * sizeof(curandState_t)); + cudaMalloc((void **)&log_v, MAX_TIME * sizeof(float)); + cudaMalloc((void **)&log_spike, total_depth_number * sizeof(float)); + cudaMalloc((void **)&log_spike_default, total_depth_number * sizeof(float)); + //cudaMalloc((void **)&log_total_spike, SIZE * sizeof(float)); + gpuErrchk( cudaMalloc((void **)&log_total_spike, SIZE * sizeof(float)) ); + cudaMalloc((void **)&spike_flag_device, instance_array_size*sizeof(int)); + //rand_init<<>>(time(0), states); + printf("2.3\n"); + cudaMemcpy(Neuron_list_device,NeuronList,spiking_neuron_num*sizeof(Neuron),cudaMemcpyHostToDevice); + cudaMemcpy(Input_neuronlist_device,Input_neuronlist,input_neuron_num*sizeof(Input_neuron),cudaMemcpyHostToDevice); + //cudaMemcpy(old_device_neurons,NeuronList,SIZE*sizeof(Neuron),cudaMemcpyHostToDevice); + //cudaMemcpy(random_number_list_device, random_number_list, SIZE*sizeof(float), cudaMemcpyHostToDevice); + cudaMemcpy(log_v,log_v_host,MAX_TIME*sizeof(float),cudaMemcpyHostToDevice); + cudaMemcpy(log_spike,log_spike_host,total_depth_number*sizeof(float),cudaMemcpyHostToDevice); + 
cudaMemcpy(log_spike_default,log_spike_host,total_depth_number*sizeof(float),cudaMemcpyHostToDevice); + gpuErrchk( cudaMemcpy(log_total_spike,log_total_spike_host,SIZE*sizeof(float),cudaMemcpyHostToDevice) ); + cudaMemcpy(spike_flag_device,spike_flag,instance_array_size*sizeof(int),cudaMemcpyHostToDevice); + printf("3.0\n"); + //cout<<"network size: "< myvector; + for (int i=0; i seq_vector_head; + std::vector seq_vector; + for (int i=0; i>>(d_network_config, h_filter_array[0], 1); + //read_filter_GPU<<<1, 1>>>(d_network_config, d_filter_array); + +// int reiter_run = 1; + + int time = 0; + int training_img_index = 0; + + //============now load all convolution settings=========== + for(int layer_iter=0;layer_iterlayer[1].first_depth_id - 0.1; + float end_depth = network_config->layer[1].last_depth_id + 0.1; + cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -62.2); + //return; + int event_count = 0; + while (time0) { + write_neuron_list(NeuronList, interval_file_name, spiking_neuron_num); + cudaMemcpy(h_filter_array, d_filter_array, filter_array_size* sizeof(float*), cudaMemcpyDeviceToHost); + filter_util(network_config, NeuronList, network_size, input_neuron_num, h_filter_array, d_filter_array, to_string(time), 1); //write filter to file + } + } + + if(time%time_per_event){ + event_count++; + while(events_host[event_count].valid==False && event_count=current_total_read_event){ + cout<input_file_id_max) current_input_file_id = 1; + + if (current_input_file_id<10) { + image_file = "/hdd2/extra_home/xshe6/Event_camera/event_based/user0" + to_string(current_input_file_id) + "_event_based.csv"; + } + else{ + image_file = "/hdd2/extra_home/xshe6/Event_camera/event_based/user" + to_string(current_input_file_id) + "_event_based.csv"; + } + + current_total_read_event = 0; + current_total_read_event = IBM_DVS128_event_based(image_file, events_host, img_load_max, img_load_max); + cout<<"Total loaded:"<< current_total_read_event<layer[1].first_depth_id - 0.1; + float end_depth = network_config->layer[1].last_depth_id + 0.1; + //cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 5, -2.07); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 4, 0.453); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 0, 0.02); + change_threshold<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -65.2); + cout<<"Changing param of long-term neuron, start: "<< start_depth+16<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth+depth_list[0]/2, end_depth, 5, -1.6); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+depth_list[0]/2, end_depth, 4, 0.4); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+depth_list[0]/2, end_depth, 0, 0.001); + change_threshold<<>>(Neuron_list_device, spiking_neuron_num, start_depth+depth_list[0]/2, end_depth, -64.2); + + + //training_time_each_img = training_time_each_img*1.3; + cudaDeviceSynchronize(); + + }else if(time==second_layer_time){ + + gpuErrchk( cudaMemcpy(log_total_spike_host,log_total_spike,SIZE*sizeof(float),cudaMemcpyDeviceToHost) ); + ofstream myfile ((index_prefix+"second_stage_device2_spike_of_neuron_out.csv")); + if (myfile.is_open()){ + //myfile << "This is a new test\n"; + for(int i=0; i < SIZE; i++){ + //printf("_%f_", log_v_host[i]); + myfile << log_total_spike_host[i] << ", "; + } + 
myfile.close(); + } + + float start_depth = network_config->layer[1].first_depth_id - 0.1; + float end_depth = network_config->layer[1].last_depth_id + 0.1; + //cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -64); + + + start_depth = network_config->layer[2].first_depth_id - 0.1; + end_depth = network_config->layer[2].last_depth_id + 0.1; + //cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 5, -2.07); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 4, 0.453); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 0, 0.02); + change_threshold<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -65.2); + cout<<"Changing param of long-term neuron, start: "<< start_depth+16<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth+depth_list[1]/2, end_depth, 5, -1.6); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+depth_list[1]/2, end_depth, 4, 0.4); + update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+depth_list[1]/2, end_depth, 0, 0.001); + change_threshold<<>>(Neuron_list_device, spiking_neuron_num, start_depth+depth_list[1]/2, end_depth, -64.2); +// start_depth = network_config->layer[3].first_depth_id - 0.1; +// end_depth = network_config->layer[3].last_depth_id + 0.1; +// cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -60.0); + + }else if(time==third_layer_time){ + + gpuErrchk( cudaMemcpy(log_total_spike_host,log_total_spike,SIZE*sizeof(float),cudaMemcpyDeviceToHost) ); + ofstream myfile ((index_prefix+"third_stage_device2_spike_of_neuron_out.csv")); + if (myfile.is_open()){ + //myfile << "This is a new test\n"; + for(int i=0; i < SIZE; i++){ + //printf("_%f_", log_v_host[i]); + myfile << log_total_spike_host[i] << ", "; + } + myfile.close(); + } + + float start_depth = network_config->layer[1].first_depth_id - 0.1; + float end_depth = network_config->layer[1].last_depth_id + 0.1; + //cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -68.2); + +// + start_depth = network_config->layer[3].first_depth_id - 0.1; + end_depth = network_config->layer[3].last_depth_id + 0.1; + cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 5, -5.07); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 4, 0.453); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, 0, -0.02); +// change_threshold<<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -63); +// cout<<"Changing param, start: "<< start_depth+32<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth+128, end_depth, 5, -1.6); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+128, end_depth, 4, 0.16); +// update_param<<>>(Neuron_list_device, spiking_neuron_num, start_depth+128, end_depth, 0, -0.001); + + start_depth = network_config->layer[4].first_depth_id - 0.1; + end_depth = network_config->layer[4].last_depth_id + 0.1; +// cout<<"Changing threshold, start: "<< start_depth<<" end: "<>>(Neuron_list_device, spiking_neuron_num, start_depth, end_depth, -60.0); + + cout<<"Parameter Changing complete.\n"; + cudaDeviceSynchronize(); 
+ } + + if(time%tenpercent_iter == 0){ + iter_log = clock(); + cout<layer[layer_iter].neuron_num; + int convolution_result_index = layer_iter - 1; + if (layer_iter==0) {//fault at convolution kernel and spiking cnn + convolution_result_index = 0; + spiking_cnn_main_event_based(Neuron_list_device, Input_neuronlist_device, events_GPU, event_count, network_config, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, input_float, time, false); + convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==1){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 0.5*input_float, time, true); + synapse_drive_cnn_v2(Neuron_list_device, Input_neuronlist_device, network_config, d_network_config, d_filter_array, layer_iter, \ + spiking_neuron_num, input_neuron_num, syn_timer_max, connection_size, random_number_list_device, random_number_normal_device, states, -1.0, -1.0, log_total_spike);//STDP + } + } + + if(depth_wise_inhibition) { + //implemented in spiking_cnn_main_event_based + }else if(through_depth_inhibition){ + //implemented in spiking_cnn_main_event_based + }else if(apply_local_inhibition){ + //implemented in spiking_cnn_main_event_based + } + else{ + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 1, inhibition_time, d_network_config, spike_flag_device); + } + if(HOMEOSTASIS_ENABLE){ + if(time%HOMEOSTASIS_UPDATE_FREQUENCY == 0 && time != 0){ + //spiking_learning_drive(Neuron_list_device, network_size, inhibition_time, log_total_spike, target_frequency, time, log_spike, 0, 1); + } + } + }else if(timelayer[layer_iter].neuron_num; + int convolution_result_index = layer_iter - 1; + if (layer_iter==0) {//fault at convolution kernel and spiking cnn + convolution_result_index = 0; + spiking_cnn_main_event_based(Neuron_list_device, Input_neuronlist_device, events_GPU, event_count, network_config, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, input_float, time, false); + convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==1){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 0.5*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==2){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, input_float, time, true); + synapse_drive_cnn_v2(Neuron_list_device, 
Input_neuronlist_device, network_config, d_network_config, d_filter_array, layer_iter, spiking_neuron_num, input_neuron_num, \ + syn_timer_max, connection_size, random_number_list_device, random_number_normal_device, states, -1.0, -1.0, log_total_spike);//STDP + } + + } + + if(depth_wise_inhibition) { + + }else if(forced_lateral_inhibition_at_last_layer && CNN_total_layer_num==3){//if this is the last layer, use lateral_inhibition + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + }else if(through_depth_inhibition){ + + }else if(apply_local_inhibition && CNN_total_layer_num!=3){ + + } + else{ + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + } + if(HOMEOSTASIS_ENABLE){ + if(time%HOMEOSTASIS_UPDATE_FREQUENCY == 0 && time != 0){ + //spiking_learning_drive(Neuron_list_device, network_size, inhibition_time, log_total_spike, target_frequency, time, log_spike, 0, 1); + } + } + + }else if(timelayer[layer_iter].neuron_num; + int convolution_result_index = layer_iter - 1; + if (layer_iter==0) {//fault at convolution kernel and spiking cnn + convolution_result_index = 0; + spiking_cnn_main_event_based(Neuron_list_device, Input_neuronlist_device, events_GPU, event_count, network_config, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, input_float, time, false); + convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==1){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 0.5*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==2){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 0.5*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, \ + h_convolution_result, probe); + }else if(layer_iter==3){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 2*input_float, time, true); + synapse_drive_cnn_v2(Neuron_list_device, Input_neuronlist_device, network_config, d_network_config, d_filter_array, layer_iter, spiking_neuron_num, input_neuron_num, \ + syn_timer_max, connection_size, random_number_list_device, random_number_normal_device, states, -1.0, -1.0, log_total_spike);//STDP + } + + } + //=================TRY WITH LAYER wise inhibition===================== + if(depth_wise_inhibition) { + + }else if(forced_lateral_inhibition_at_last_layer && 
CNN_total_layer_num==3){//if this is the last layer, use lateral_inhibition + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + }else if(through_depth_inhibition){ + + }else if(apply_local_inhibition && CNN_total_layer_num!=3){ + + } + else{ + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + } + if(HOMEOSTASIS_ENABLE){ + if(time%HOMEOSTASIS_UPDATE_FREQUENCY == 0 && time != 0){ + //spiking_learning_drive(Neuron_list_device, network_size, inhibition_time, log_total_spike, target_frequency, time, log_spike, 0, 1); + } + } + + }else{ + for(int layer_iter=0;layer_iterlayer[layer_iter].neuron_num; + int convolution_result_index = layer_iter - 1; + if (layer_iter==0) {//fault at convolution kernel and spiking cnn + convolution_result_index = 0; + spiking_cnn_main_event_based(Neuron_list_device, Input_neuronlist_device, events_GPU, event_count, network_config, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, input_float, time, false); + convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==1 ){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 3*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==2){ + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 0.5*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + }else if(layer_iter==3){ + bool last_layer_inhib = !last_layer_teach; + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, log_total_spike, spike_flag_device, input_signal_width, 4*input_float, time, false); + if (layer_iter!=(CNN_total_layer_num-1)) convolution_kernel(convolution_settings[layer_iter], layer_iter, h_input_instance, h_filter_array, h_convolution_result, probe); + //synapse_drive_cnn_v2(Neuron_list_device, Input_neuronlist_device, network_config, d_network_config, d_filter_array, layer_iter, spiking_neuron_num, input_neuron_num, syn_timer_max, connection_size, random_number_list_device, random_number_normal_device, states, -1.0, -1.0, log_total_spike);//STDP + }else if(layer_iter==4){ + bool last_layer_inhib = !last_layer_teach; + spiking_cnn_main(Neuron_list_device, Input_neuronlist_device, d_network_config, random_number_list_device, d_convolution_result, d_input_instance, \ + layer_iter, network_size, input_neuron_num, log_v, log_spike, 
log_total_spike, spike_flag_device, input_signal_width, 15*input_float, time, true); + synapse_drive_cnn_v2(Neuron_list_device, Input_neuronlist_device, network_config, d_network_config, d_filter_array, layer_iter, spiking_neuron_num, input_neuron_num, syn_timer_max, connection_size, random_number_list_device, random_number_normal_device, states, -1.0, -1.0, log_total_spike);//STDP + } + + } + //=================TRY WITH LAYER wise inhibition===================== + if(depth_wise_inhibition) { + + }else if(forced_lateral_inhibition_at_last_layer && CNN_total_layer_num==4){//if this is the last layer, use lateral_inhibition + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + }else if(through_depth_inhibition){ + + }else if(apply_local_inhibition && CNN_total_layer_num!=3){ + + } + else{ + lateral_inhibition_mother_thread<<>>(Neuron_list_device, spiking_neuron_num, 2, inhibition_time, d_network_config, spike_flag_device); + } + if(HOMEOSTASIS_ENABLE){ + if(time%HOMEOSTASIS_UPDATE_FREQUENCY == 0 && time != 0){ + //spiking_learning_drive(Neuron_list_device, network_size, inhibition_time, log_total_spike, target_frequency, time, log_spike, 0, 1); + } + } + + } +// printf("T: %d_",time); +// if(time==100) break; + time ++; + //break; + } + //spiking_learning_drive(Neuron_list_device, network_size, inhibition_time, 2); + //cudaDeviceSynchronize(); + + filter_util(network_config, Neuron_list_device, network_size, input_neuron_num, h_filter_array, d_filter_array, index_prefix, 2); + cudaMemcpy(NeuronList,Neuron_list_device,spiking_neuron_num*sizeof(Neuron),cudaMemcpyDeviceToHost); + cudaMemcpy(log_v_host,log_v,MAX_TIME*sizeof(float),cudaMemcpyDeviceToHost); + cudaMemcpy(log_spike_host,log_spike,total_depth_number*sizeof(float),cudaMemcpyDeviceToHost); + gpuErrchk( cudaMemcpy(log_total_spike_host,log_total_spike,SIZE*sizeof(float),cudaMemcpyDeviceToHost) ); + + + //print out the synapse conductance data + //data_check(NeuronList,log_total_spike,SIZE, mnist_start_index, mnist_end_index, 2); + + ofstream myfile ((index_prefix+"device2_spike_of_neuron_out.csv")); + if (myfile.is_open()){ + //myfile << "This is a new test\n"; + for(int i=0; i < SIZE; i++){ + //printf("_%f_", log_v_host[i]); + myfile << log_total_spike_host[i] << ", "; + } + myfile.close(); + } + + ofstream myfile_p ((index_prefix+"probe.csv")); + if (myfile_p.is_open()){ + //myfile << "This is a new test\n"; + for(int i=0; i < 1000; i++){ + //printf("_%f_", log_v_host[i]); + myfile_p << probe[i] << ", "; + } + myfile_p.close(); + } + +// +// ofstream myfile_0 ((index_prefix+"device2_out_v.csv")); +// if (myfile_0.is_open()){ +// //myfile << "This is a new test\n"; +// for(int i=0; i < MAX_TIME; i++){ +// //printf("_%f_", log_v_host[i]); +// myfile_0 << log_v_host[i] << ", "; +// } +// myfile.close(); +// } +// +// ofstream myfile_2 ((index_prefix+"device2_spike_of_one.csv")); +// if (myfile_2.is_open()){ +// //myfile << "This is a new test\n"; +// for(int i=0; i < MAX_TIME; i++){ +// //printf("_%f_", log_v_host[i]); +// myfile_2 << log_spike_host[i] << ", "; +// } +// myfile_2.close(); +// } + + + + cudaMemcpy(h_filter_array, d_filter_array, filter_array_size* sizeof(float*), cudaMemcpyDeviceToHost); + filter_util(network_config, NeuronList, network_size, input_neuron_num, h_filter_array, d_filter_array, index_prefix, 1); //write filter to file + write_neuron_list(NeuronList, (index_prefix+"device2_output_network.txt"), spiking_neuron_num); + 
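// End-of-run teardown: device-side logs are copied back with
// cudaMemcpy(..., cudaMemcpyDeviceToHost), dumped to CSV, the learned filters
// and the final neuron list are serialized, and then every host (delete[]) and
// device (cudaFree) allocation is released. gpuErrchk is presumably the usual
// CUDA error-check wrapper; a common definition (an assumption, it is not
// shown in this file) is:
//
//   #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
//   inline void gpuAssert(cudaError_t code, const char* file, int line) {
//       if (code != cudaSuccess)
//           fprintf(stderr, "GPUassert: %s %s %d\n",
//                   cudaGetErrorString(code), file, line);
//   }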
data_check(NeuronList,log_total_spike,SIZE, mnist_start_index, mnist_end_index, 2, ""); + //===clean up=== + //delete[] random_number_list; + delete[] log_v_host; + delete[] NeuronList; + delete[] log_spike_host; + delete[] log_total_spike_host; + delete[] events_host; + delete[] NeuronList_temp; + delete[] one_mnist_img; + delete[] probe; +// delete[] random_number_list; + delete[] mnist_label; + delete[] spike_flag; + delete[] num_one_digit_img; + //cudaFree(states); + cudaFree(log_v); + cudaFree(log_spike); + cudaFree(log_total_spike); + cudaFree(Neuron_list_device); + //cudaFree(old_device_neurons); + cudaFree(random_number_list_device); + cudaFree(d_network_config); + cudaFree(states); + cudaFree(spike_flag_device); + cudaFree(log_spike_default); + +} + diff --git a/cuda_code/expandLabels.cu b/cuda_code/expandLabels.cu new file mode 100644 index 0000000000000000000000000000000000000000..eef0b600ebb795a1ae8beafc03cfacf37b965d41 --- /dev/null +++ b/cuda_code/expandLabels.cu @@ -0,0 +1,53 @@ +//****************************************************************************** +// Created by Edward Connell +// Copyright (c) 2016 Connell Research. All rights reserved. +// +#include "include/CudaKernels.h" + +//------------------------------------------------------------------------------ +// device kernel +template +__global__ void expandLabels_kernel(size_t N, const T* labels, size_t labelStride, + T *expanded, size_t expNumCols, + size_t expRowStride, size_t expColStride) +{ + CUDA_KERNEL_LOOP(i, N) { + size_t valueIndex = (size_t)labels[i * labelStride]; + expanded[(i * expRowStride) + valueIndex] = 1; + } +} + +//------------------------------------------------------------------------------ +// Swift importable C functions +cudaError_t cudaExpandLabels( + cudaDataType_t dataType, + const void* labels, size_t labelStride, + void *expanded, const size_t* expandedExtent, const size_t* expandedStrides, + cudaStream_t stream) +{ + CudaKernelPreCheck(stream); + + // clear + size_t count = expandedExtent[0] * expandedStrides[0] * DataTypeSize(dataType); + cudaMemsetAsync(expanded, 0, count, stream); + + // set label columns to 1 + size_t N = expandedExtent[0]; + switch(dataType) { + case CUDA_R_32F: + expandLabels_kernel <<>> (N, + (float *)labels, labelStride, + (float *)expanded, expandedExtent[1], expandedStrides[0], expandedStrides[1]); + break; + + case CUDA_R_64F: + expandLabels_kernel <<>> (N, + (double *)labels, labelStride, + (double *)expanded, expandedExtent[1], expandedStrides[0], expandedStrides[1]); + break; + + default: assert(false); + }; + + return CudaKernelPostCheck(stream); +} diff --git a/cuda_code/expand_kernel_5.cu b/cuda_code/expand_kernel_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..1fcfb0b178314523b8223ef333d7e28a571f5965 --- /dev/null +++ b/cuda_code/expand_kernel_5.cu @@ -0,0 +1,216 @@ +/* +Copyright 2020 The OneFlow Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +#include "oneflow/core/device/cuda_util.h" +#include "oneflow/core/cuda/atomic.cuh" +#include "oneflow/user/kernels/expand_kernel_utils.h" + +namespace oneflow { + +namespace { + +const int32_t NDIMS = 16; +struct STRIDES { + int32_t val[NDIMS]; +}; + +template +__global__ void ExpandCudaKernel(const T* in_ptr, const STRIDES in_stride, + const STRIDES expand_stride, const int32_t dims, + const int32_t elements, T* out_ptr) { + int32_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; + int32_t step = gridDim.x * blockDim.x; + while (gid < elements) { + int32_t offset = OffsetToNdIndexToOffset(gid, in_stride.val, expand_stride.val, dims); + out_ptr[gid] = in_ptr[offset]; + gid += step; + } +} + +template +__global__ void ExpandGradCudaKernel(const T* out_diff_ptr, const STRIDES out_stride, + const STRIDES expand_stride, const int32_t dims, + const int32_t elements, T* in_diff_ptr) { + int32_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; + int32_t step = gridDim.x * blockDim.x; + while (gid < elements) { + int32_t offset = OffsetToNdIndexToOffset(gid, out_stride.val, expand_stride.val, dims); + cuda::atomic::Add(&in_diff_ptr[offset], out_diff_ptr[gid]); + gid += step; + } +} + +template +__global__ void InitPtr(const int32_t elements, T* ptr) { + int32_t gid = (blockDim.x * blockIdx.x) + threadIdx.x; + int32_t step = gridDim.x * blockDim.x; + while (gid < elements) { + ptr[gid] = static_cast(0); + gid += step; + } +} + +template +struct GpuExpandFunctor final { + void operator()(ep::Stream* stream, const T* in_ptr, const STRIDES in_stride, + const STRIDES expand_stride, const int32_t dims, const int32_t elements, + T* out_ptr) { + RUN_CUDA_KERNEL((ExpandCudaKernel), stream, elements, in_ptr, in_stride, expand_stride, dims, + elements, out_ptr); + } +}; + +template<> +void GpuExpandFunctor::operator()(ep::Stream* stream, const float16* in_ptr, + const STRIDES in_stride, const STRIDES expand_stride, + const int32_t dims, const int32_t elements, + float16* out_ptr) { + RUN_CUDA_KERNEL((ExpandCudaKernel), stream, elements, reinterpret_cast(in_ptr), + in_stride, expand_stride, dims, elements, reinterpret_cast(out_ptr)); +} + +template +struct GpuExpandGradFunctor final { + void operator()(ep::Stream* stream, const T* in_ptr, const STRIDES in_stride, + const STRIDES expand_stride, const int32_t dims, const int32_t elements, + const int32_t out_elements, T* out_ptr) { + RUN_CUDA_KERNEL((InitPtr), stream, out_elements, out_elements, out_ptr); + RUN_CUDA_KERNEL((ExpandGradCudaKernel), stream, elements, in_ptr, in_stride, expand_stride, + dims, elements, out_ptr); + } +}; + +template<> +void GpuExpandGradFunctor::operator()(ep::Stream* stream, const float16* in_ptr, + const STRIDES in_stride, const STRIDES expand_stride, + const int32_t dims, const int32_t elements, + const int32_t out_elements, float16* out_ptr) { + RUN_CUDA_KERNEL((InitPtr), stream, out_elements, out_elements, + reinterpret_cast(out_ptr)); + RUN_CUDA_KERNEL((ExpandGradCudaKernel), stream, elements, + reinterpret_cast(in_ptr), in_stride, expand_stride, dims, elements, + reinterpret_cast(out_ptr)); +} + +} // namespace + +template +class GpuExpandKernel final : public user_op::OpKernel { + public: + GpuExpandKernel() = default; + ~GpuExpandKernel() = default; + + private: + using user_op::OpKernel::Compute; + void Compute(user_op::KernelComputeContext* ctx) const override { + const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); + user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); + const std::vector& 
logical_expand_shape = + ctx->Attr>("logical_expand_shape"); + + std::vector in_shape; + in_shape.resize(in->shape().NumAxes()); + for (int i = 0; i < in->shape().NumAxes(); ++i) { in_shape[i] = in->shape().At(i); } + + std::vector out_shape; + std::vector stride; + CHECK_JUST(getOutShapeAndStrideForFp(in_shape, logical_expand_shape, out_shape, stride)); + + const T* in_ptr = in->dptr(); + T* out_ptr = out->mut_dptr(); + const int32_t out_dims = out->shape().NumAxes(); + const int32_t out_size = out->shape().elem_cnt(); + + STRIDES expand_stride; + for (int i = 0; i < out_dims; ++i) { expand_stride.val[i] = stride[i]; } + STRIDES out_stride; + InitStride(out_stride.val, out_shape.data(), out_dims); + GpuExpandFunctor()(ctx->stream(), in_ptr, out_stride, expand_stride, out_dims, out_size, + out_ptr); + } + + bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } +}; + +#define REGISTER_EXPAND_KERNEL(dtype) \ + REGISTER_USER_KERNEL("expand").SetCreateFn>().SetIsMatchedHob( \ + (user_op::HobDeviceType() == DeviceType::kCUDA) \ + && (user_op::HobDataType("in", 0) == GetDataType::value)) + +REGISTER_EXPAND_KERNEL(float); +REGISTER_EXPAND_KERNEL(double); +REGISTER_EXPAND_KERNEL(float16); +REGISTER_EXPAND_KERNEL(bool); +REGISTER_EXPAND_KERNEL(uint8_t); +REGISTER_EXPAND_KERNEL(int8_t); +REGISTER_EXPAND_KERNEL(int32_t); +REGISTER_EXPAND_KERNEL(int64_t); + +template +class GpuExpandGradKernel final : public user_op::OpKernel { + public: + GpuExpandGradKernel() = default; + ~GpuExpandGradKernel() = default; + + private: + using user_op::OpKernel::Compute; + void Compute(user_op::KernelComputeContext* ctx) const override { + const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); + user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); + const std::vector& logical_out_shape = + ctx->Attr>("logical_out_shape"); + const std::vector& logical_expand_shape = + ctx->Attr>("logical_expand_shape"); + + std::vector in_shape; + in_shape.resize(in->shape().NumAxes()); + for (int i = 0; i < in->shape().NumAxes(); ++i) { in_shape[i] = in->shape().At(i); } + std::vector out_shape; + std::vector stride; + CHECK_JUST(getOutShapeAndStrideForBp(logical_out_shape, logical_expand_shape, in_shape, + out_shape, stride)); + + const T* in_ptr = in->dptr(); + T* out_ptr = out->mut_dptr(); + + const int32_t in_dims = in->shape().NumAxes(); + const int32_t in_size = in->shape().elem_cnt(); + const int32_t out_size = out->shape().elem_cnt(); + + STRIDES expand_stride; + for (int i = 0; i < in_dims; ++i) { expand_stride.val[i] = stride[i]; } + STRIDES in_stride; + InitStride(in_stride.val, in_shape.data(), in_dims); + + GpuExpandGradFunctor()(ctx->stream(), in_ptr, in_stride, expand_stride, in_dims, in_size, + out_size, out_ptr); + } + + bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } +}; + +#define REGISTER_EXPAND_GRAD_KERNEL(dtype) \ + REGISTER_USER_KERNEL("expand_grad") \ + .SetCreateFn>() \ + .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kCUDA) \ + && (user_op::HobDataType("in", 0) == GetDataType::value)) + +REGISTER_EXPAND_GRAD_KERNEL(float); +REGISTER_EXPAND_GRAD_KERNEL(double); +REGISTER_EXPAND_GRAD_KERNEL(float16); +REGISTER_EXPAND_GRAD_KERNEL(int32_t); +REGISTER_EXPAND_GRAD_KERNEL(int64_t); + +} // namespace oneflow diff --git a/cuda_code/experiment.cu b/cuda_code/experiment.cu new file mode 100644 index 0000000000000000000000000000000000000000..54056ef24f6c88446778c203890ccf9a1fc03ea3 --- /dev/null +++ b/cuda_code/experiment.cu @@ -0,0 +1,10 @@ +// 
https://llvm.org/docs/CompileCudaWithLLVM.html +// clang++-7 experiment.cu --cuda-gpu-arch=sm_35 -std=c++14 -L/usr/local/cuda/lib64 -lcudart_static -ldl -lrt -pthread + +extern "C" __global__ void add(int *a, int *b, int *c) { + // auto i = threadIdx.x; + // c[i] = a[i] + b[i]; + printf("%d %f\n", 123, 456.0); +} + +int main() {} \ No newline at end of file diff --git a/cuda_code/experimental_projective_tsdf_integrators.cu b/cuda_code/experimental_projective_tsdf_integrators.cu new file mode 100644 index 0000000000000000000000000000000000000000..c1b965bd4ad4c1585a9fc2a535046933109e3957 --- /dev/null +++ b/cuda_code/experimental_projective_tsdf_integrators.cu @@ -0,0 +1,344 @@ +/* +Copyright 2022 NVIDIA CORPORATION + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +#include "nvblox/experiments/integrators/experimental_projective_tsdf_integrators.h" + +#include "nvblox/experiments/integrators/cuda/experimental_integrator_input_frames.cuh" + +namespace nvblox { +namespace experiments { + +__device__ inline float interpolateDepthTexture( + cudaTextureObject_t depth_texture, const Eigen::Vector2f& u_px) { + return tex2D(depth_texture, u_px.x() + 0.5, u_px.y() + 0.5); +} + +__device__ inline bool interpolateDepthImage(const float* image, int rows, + int cols, Eigen::Vector2f u_px, + float* value_ptr) { + // If the projected point does not lie on the image plane, fail. (Here "on the + // image plane" means having pixel centers surrounding the query point, ie no + // extrapolation). + if ((u_px.x() < 0.0f) || (u_px.y() < 0.0f) || + (u_px.x() > static_cast(cols) - 1.0f) || + (u_px.y() > static_cast(rows) - 1.0f)) { + return false; + } + // Interpolation of a grid on with 1 pixel spacing. + // https://en.wikipedia.org/wiki/Bilinear_interpolation#On_the_unit_square + // Get the pixel coordinates of the pixel on the low side + const Index2D u_low_side_px = (u_px).cast(); + // Get the 4-neighbours values and put them in a matrix + // clang-format off + const Eigen::Matrix2f value_matrix = + (Eigen::Matrix2f() << + image::access(u_low_side_px.y(), u_low_side_px.x(), cols, image), + image::access(u_low_side_px.y() + 1, u_low_side_px.x(), cols, image), + image::access(u_low_side_px.y(), u_low_side_px.x() + 1, cols, image), + image::access(u_low_side_px.y() + 1, u_low_side_px.x() + 1, cols, image)) + .finished(); + // clang-format on + // Offset of the requested point to the low side center. 
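// Spelled out, with x0/y0 = u_low_side_px, dx = u_offset.x(), dy = u_offset.y()
// and f(r, c) the image value at row r, column c, the expression below is the
// standard unit-square bilinear interpolation:
//
//   value = (1-dx)*(1-dy)*f(y0,   x0  ) + (1-dx)*dy*f(y0+1, x0  )
//         +  dx   *(1-dy)*f(y0,   x0+1) +  dx   *dy*f(y0+1, x0+1)
//
// which is exactly the x_vec.transpose() * value_matrix * y_vec product
// computed a few lines further down.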
+ const Eigen::Vector2f u_offset = (u_px - u_low_side_px.cast()); + const Eigen::Vector2f x_vec(1.0f - u_offset.x(), u_offset.x()); + const Eigen::Vector2f y_vec(1.0f - u_offset.y(), u_offset.y()); + *value_ptr = x_vec.transpose() * value_matrix * y_vec; + return true; +} + +__global__ void intergrateBlocksTextureBasedInterpolation( + const Index3D* block_indices_device_ptr, const Camera* camera_device_ptr, + cudaTextureObject_t depth_texture, const Eigen::Matrix3f* R_C_L_device_ptr, + const Eigen::Vector3f* t_C_L_device_ptr, const float block_size, + const float truncation_distance_m, const float max_weight, + VoxelBlock** block_device_ptrs) { + // Linear index of thread within block + const int thread_index_linear = + threadIdx.x + blockDim.x * (threadIdx.y + (blockDim.y * threadIdx.z)); + + // Get the data which is common between all threads in a block into shared + // memory + // TODO(alexmillane): We could also get the camera into shared memory. But + // maybe let's profile things first and see what is actually affecting the + // performance. + __shared__ Eigen::Matrix3f R_C_L; + if (thread_index_linear < 9) { + R_C_L.data()[thread_index_linear] = + R_C_L_device_ptr->data()[thread_index_linear]; + } + __shared__ Eigen::Vector3f t_C_L; + if (thread_index_linear >= 9 && thread_index_linear < 12) { + t_C_L.data()[thread_index_linear - 9] = + t_C_L_device_ptr->data()[thread_index_linear - 9]; + } + __syncthreads(); + + // The indices of the voxel this thread will work on + // blockIdx.x - The index of the block we're working on (blockIdx.y/z + // should be zero) + // threadIdx.x/y/z - The indices of the voxel within the block (we + // expect the threadBlockDims == voxelBlockDims) + const Index3D block_idx = block_indices_device_ptr[blockIdx.x]; + const Index3D voxel_idx(threadIdx.z, threadIdx.y, threadIdx.x); + + // Get the Voxel we'll update in this thread + // NOTE(alexmillane): Note that we've reverse the voxel indexing order such + // that adjacent threads (x-major) access adjacent memory locations in the + // block (z-major). + TsdfVoxel* voxel_ptr = + &(block_device_ptrs[blockIdx.x] + ->voxels[threadIdx.z][threadIdx.y][threadIdx.x]); + + // Voxel center point + const Vector3f p_voxel_center_L = getCenterPostionFromBlockIndexAndVoxelIndex( + block_size, block_idx, voxel_idx); + // To camera frame + const Vector3f p_voxel_center_C = R_C_L * p_voxel_center_L + t_C_L; + + // Project to image plane + Eigen::Vector2f u_px; + if (!camera_device_ptr->project(p_voxel_center_C, &u_px)) { + return; + } + + // If the projected point does not lie on the image plane, fail. (Here "on the + // image plane" means having pixel centers surrounding the query point, ie no + // extrapolation). + if ((u_px.x() < 0.0f) || (u_px.y() < 0.0f) || + (u_px.x() > static_cast(camera_device_ptr->width()) - 1.0f) || + (u_px.y() > static_cast(camera_device_ptr->height()) - 1.0f)) { + return; + } + + // Get the MEASURED depth of the SURFACE, by interpolating the depth image + const float surface_depth_mesured = + interpolateDepthTexture(depth_texture, u_px); + + // Get the MEASURED depth of the VOXEL + const float voxel_distance_measured = + surface_depth_mesured - p_voxel_center_C.z(); + + // If we're behind the negative truncation distance, just continue. 
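// voxel_distance_measured is the signed distance from the voxel centre to the
// measured surface along the camera z axis: positive in front of the surface,
// negative behind it. Voxels further behind the surface than the truncation
// band are skipped; the rest are fused below with the usual weighted running
// average
//
//   d_new = (w_meas * d_meas + w_old * d_old) / (w_meas + w_old),  w_meas = 1,
//
// clamped to [-truncation_distance_m, +truncation_distance_m], with the fused
// weight capped at max_weight.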
+ if (voxel_distance_measured < -truncation_distance_m) { + return; + } + + // Read CURRENT voxel values (from global GPU memory) + const float voxel_distance_current = voxel_ptr->distance; + const float voxel_weight_current = voxel_ptr->weight; + + // NOTE(alexmillane): We could try to use CUDA math functions to speed up + // below + // https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE + + // Fuse + constexpr float measurement_weight = 1.0f; + const float fused_distance = (voxel_distance_measured * measurement_weight + + voxel_distance_current * voxel_weight_current) / + (measurement_weight + voxel_weight_current); + + // Write back to voxel (to global GPU memory) + voxel_ptr->distance = fused_distance > 0.0f + ? fmin(truncation_distance_m, fused_distance) + : fmax(-truncation_distance_m, fused_distance); + voxel_ptr->weight = + fmin(measurement_weight + voxel_weight_current, max_weight); +} + +__global__ void intergrateBlocksGlobalBasedInterpolation( + const Index3D* block_indices_device_ptr, const Camera* camera_device_ptr, + const float* image, int rows, int cols, + const Eigen::Matrix3f* R_C_L_device_ptr, + const Eigen::Vector3f* t_C_L_device_ptr, const float block_size, + const float truncation_distance_m, const float max_weight, + VoxelBlock** block_device_ptrs) { + // Linear index of thread within block + const int thread_index_linear = + threadIdx.x + blockDim.x * (threadIdx.y + (blockDim.y * threadIdx.z)); + + // Get the data which is common between all threads in a block into shared + // memory + // TODO(alexmillane): We could also get the camera into shared memory. But + // maybe let's profile things first and see what is actually affecting the + // performance. + __shared__ Eigen::Matrix3f R_C_L; + if (thread_index_linear < 9) { + R_C_L.data()[thread_index_linear] = + R_C_L_device_ptr->data()[thread_index_linear]; + } + __shared__ Eigen::Vector3f t_C_L; + if (thread_index_linear >= 9 && thread_index_linear < 12) { + t_C_L.data()[thread_index_linear - 9] = + t_C_L_device_ptr->data()[thread_index_linear - 9]; + } + __syncthreads(); + + // The indices of the voxel this thread will work on + // blockIdx.x - The index of the block we're working on (blockIdx.y/z + // should be zero) + // threadIdx.x/y/z - The indices of the voxel within the block (we + // expect the threadBlockDims == voxelBlockDims) + const Index3D block_idx = block_indices_device_ptr[blockIdx.x]; + const Index3D voxel_idx(threadIdx.z, threadIdx.y, threadIdx.x); + + // Get the Voxel we'll update in this thread + // NOTE(alexmillane): Note that we've reverse the voxel indexing order such + // that adjacent threads (x-major) access adjacent memory locations in the + // block (z-major). 
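// Put differently: threadIdx.x, the fastest-varying thread index, selects the
// last (fastest-varying) array index of voxels[][][], so consecutive threads
// of a warp touch adjacent TsdfVoxel entries and the global-memory reads and
// writes below stay grouped within the warp.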
+ TsdfVoxel* voxel_ptr = + &(block_device_ptrs[blockIdx.x] + ->voxels[threadIdx.z][threadIdx.y][threadIdx.x]); + + // Voxel center point + const Vector3f p_voxel_center_L = getCenterPostionFromBlockIndexAndVoxelIndex( + block_size, block_idx, voxel_idx); + // To camera frame + const Vector3f p_voxel_center_C = R_C_L * p_voxel_center_L + t_C_L; + + // Project to image plane + Eigen::Vector2f u_px; + if (!camera_device_ptr->project(p_voxel_center_C, &u_px)) { + return; + } + + // Get the MEASURED depth of the SURFACE, by interpolating the depth image + float surface_depth_mesured; + if (!interpolateDepthImage(image, rows, cols, u_px, &surface_depth_mesured)) { + return; + } + + // Get the MEASURED depth of the VOXEL + const float voxel_distance_measured = + surface_depth_mesured - p_voxel_center_C.z(); + + // If we're behind the negative truncation distance, just continue. + if (voxel_distance_measured < -truncation_distance_m) { + return; + } + + // Read CURRENT voxel values (from global GPU memory) + const float voxel_distance_current = voxel_ptr->distance; + const float voxel_weight_current = voxel_ptr->weight; + + // NOTE(alexmillane): We could try to use CUDA math functions to speed up + // below + // https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE + + // Fuse + constexpr float measurement_weight = 1.0f; + const float fused_distance = (voxel_distance_measured * measurement_weight + + voxel_distance_current * voxel_weight_current) / + (measurement_weight + voxel_weight_current); + + // Write back to voxel (to global GPU memory) + voxel_ptr->distance = fused_distance > 0.0f + ? fmin(truncation_distance_m, fused_distance) + : fmax(-truncation_distance_m, fused_distance); + voxel_ptr->weight = + fmin(measurement_weight + voxel_weight_current, max_weight); +} + +ProjectiveTsdfIntegratorExperimentsBase:: + ProjectiveTsdfIntegratorExperimentsBase() + : ProjectiveTsdfIntegrator() { + checkCudaErrors(cudaStreamCreate(&integration_stream_)); +} + +ProjectiveTsdfIntegratorExperimentsBase:: + ~ProjectiveTsdfIntegratorExperimentsBase() { + finish(); + checkCudaErrors(cudaStreamDestroy(integration_stream_)); +} + +void ProjectiveTsdfIntegratorExperimentsBase::finish() const { + cudaStreamSynchronize(integration_stream_); +} + +void ProjectiveTsdfIntegratorExperimentsTexture::updateBlocks( + const std::vector& block_indices, const DepthImage& depth_frame, + const Transform& T_L_C, const Camera& camera, + const float truncation_distance_m, TsdfLayer* layer_ptr) { + CHECK_NOTNULL(layer_ptr); + + // Create an integrator frame + // Internally this object starts (asynchronous) transfers of it's inputs to + // device memory. Kernels called the passed stream can therefore utilize the + // input frame's device-side members. 
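// Texture-path variant: the input frame is expected to upload the depth image
// into a cudaTextureObject_t, and interpolateDepthTexture() samples it with
// tex2D at (u + 0.5, v + 0.5). Presumably the texture is created with
// unnormalized coordinates and cudaFilterModeLinear, so the +0.5 shift
// addresses texel centres and the hardware performs the bilinear filtering
// that interpolateDepthImage() implements by hand for the global-memory path.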
+ const IntegratorInputFrameExperimentsTexture input( + block_indices, depth_frame, T_L_C, camera, truncation_distance_m, + max_weight_, layer_ptr, integration_stream_); + + // Kernel call - One ThreadBlock launched per VoxelBlock + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + const dim3 kThreadsPerBlock(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); + const int num_blocks = input.num_blocks; + // clang-format off + intergrateBlocksTextureBasedInterpolation<<>>( + input.block_indices_device_ptr, + input.camera_device_ptr, + input.depth_texture.texture_object(), + input.R_C_L_device_ptr, + input.t_C_L_device_ptr, + input.block_size, + input.truncation_distance_m, + input.max_weight, + input.block_device_ptrs); + // clang-format on + checkCudaErrors(cudaPeekAtLastError()); + + // Finish processing of the frame before returning control + finish(); +} + +void ProjectiveTsdfIntegratorExperimentsGlobal::updateBlocks( + const std::vector& block_indices, const DepthImage& depth_frame, + const Transform& T_L_C, const Camera& camera, + const float truncation_distance_m, TsdfLayer* layer_ptr) { + CHECK_NOTNULL(layer_ptr); + + // Create an integrator frame + // Internally this object starts (asynchronous) transfers of it's inputs to + // device memory. Kernels called the passed stream can therefore utilize the + // input frame's device-side members. + const IntegratorInputFrameExperimentsGlobal input( + block_indices, depth_frame, T_L_C, camera, truncation_distance_m, + max_weight_, layer_ptr, integration_stream_); + + // Kernel call - One ThreadBlock launched per VoxelBlock + constexpr int kVoxelsPerSide = VoxelBlock::kVoxelsPerSide; + const dim3 kThreadsPerBlock(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); + const int num_blocks = input.num_blocks; + // clang-format off + intergrateBlocksGlobalBasedInterpolation<<>>( + input.block_indices_device_ptr, + input.camera_device_ptr, + input.depth_frame_unified_ptr, + input.depth_frame_rows, + input.depth_frame_cols, + input.R_C_L_device_ptr, + input.t_C_L_device_ptr, + input.block_size, + input.truncation_distance_m, + input.max_weight, + input.block_device_ptrs); + // clang-format on + checkCudaErrors(cudaPeekAtLastError()); + + // Finish processing of the frame before returning control + finish(); +} + +} // namespace experiments +} // namespace nvblox diff --git a/cuda_code/extract_image_patches_impl_2.cu b/cuda_code/extract_image_patches_impl_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..9a9ae88737190c92329001612d4fec9a4f5f1d85 --- /dev/null +++ b/cuda_code/extract_image_patches_impl_2.cu @@ -0,0 +1,102 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "backend/kernel_compiler/gpu/cuda_impl/extract_image_patches_impl.cuh" + +template +__global__ void ExtractImagePatches(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, + int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, + int64_t patch_stride, int64_t other_stride, int64_t input_row_size, + int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, + int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, + int64_t output_depth, const T *input, T *output) { + for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < output_size; pos += blockDim.x * gridDim.x) { + const int64_t batch_index = need_batch ? (static_cast(pos) / other_stride) : 0; + const int64_t inner_index = + need_batch ? (static_cast(pos) - batch_index * other_stride) : static_cast(pos); + // inner index + const int64_t patch_index = inner_index / patch_stride; + const int64_t patch_offset = (inner_index - patch_index * patch_stride) / output_depth; + // row + const int64_t row_index = patch_index / output_cols; + const int64_t row_offset = patch_offset / row_stride; + const int64_t input_row = row_index * stride_row + row_offset * rate_row - row_padding_top; + if (input_row < 0 || input_row >= input_row_size) { + output[pos] = static_cast(0); + continue; + } + // col + const int64_t col_index = patch_index - row_index * output_cols; + const int64_t col_offset = patch_offset - row_offset * row_stride; + const int64_t input_col = col_index * stride_col + col_offset * rate_col - col_padding_left; + if (input_col < 0 || input_col >= input_col_size) { + output[pos] = static_cast(0); + continue; + } + // depth + const int64_t depth = inner_index - (inner_index / output_depth) * output_depth; + // input index + const int64_t input_index = + depth + input_col * col_input_stride + input_row * row_input_stride + batch_index * patch_input_stride; + output[pos] = input[static_cast(input_index)]; + } + return; +} + +template +void CalExtractImagePatchesNHWC(size_t output_size, int64_t stride_row, int64_t stride_col, int64_t rate_row, + int64_t rate_col, int64_t output_cols, bool need_batch, int64_t row_stride, + int64_t patch_stride, int64_t other_stride, int64_t input_row_size, + int64_t input_col_size, int64_t row_padding_top, int64_t col_padding_left, + int64_t col_input_stride, int64_t row_input_stride, int64_t patch_input_stride, + int64_t output_depth, const T *input, T *output, cudaStream_t stream) { + ExtractImagePatches<<>>( + output_size, stride_row, stride_col, rate_row, rate_col, output_cols, need_batch, row_stride, patch_stride, + other_stride, input_row_size, input_col_size, row_padding_top, col_padding_left, col_input_stride, row_input_stride, + patch_input_stride, output_depth, input, output); +} + +template void CalExtractImagePatchesNHWC(size_t output_size, int64_t stride_row, int64_t stride_col, + int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, + int64_t row_stride, int64_t patch_stride, int64_t other_stride, + int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, + int64_t col_padding_left, int64_t col_input_stride, + int64_t row_input_stride, int64_t patch_input_stride, + int64_t output_depth, const int *input, int *output, cudaStream_t stream); +template void CalExtractImagePatchesNHWC(size_t output_size, int64_t stride_row, int64_t stride_col, + int64_t rate_row, int64_t rate_col, int64_t output_cols, + bool need_batch, int64_t row_stride, int64_t 
patch_stride, + int64_t other_stride, int64_t input_row_size, int64_t input_col_size, + int64_t row_padding_top, int64_t col_padding_left, + int64_t col_input_stride, int64_t row_input_stride, + int64_t patch_input_stride, int64_t output_depth, const float *input, + float *output, cudaStream_t stream); +template void CalExtractImagePatchesNHWC(size_t output_size, int64_t stride_row, int64_t stride_col, + int64_t rate_row, int64_t rate_col, int64_t output_cols, bool need_batch, + int64_t row_stride, int64_t patch_stride, int64_t other_stride, + int64_t input_row_size, int64_t input_col_size, int64_t row_padding_top, + int64_t col_padding_left, int64_t col_input_stride, + int64_t row_input_stride, int64_t patch_input_stride, + int64_t output_depth, const half *input, half *output, + cudaStream_t stream); +template void CalExtractImagePatchesNHWC(size_t output_size, int64_t stride_row, int64_t stride_col, + int64_t rate_row, int64_t rate_col, int64_t output_cols, + bool need_batch, int64_t row_stride, int64_t patch_stride, + int64_t other_stride, int64_t input_row_size, int64_t input_col_size, + int64_t row_padding_top, int64_t col_padding_left, + int64_t col_input_stride, int64_t row_input_stride, + int64_t patch_input_stride, int64_t output_depth, const double *input, + double *output, cudaStream_t stream); diff --git a/cuda_code/extractor.cu b/cuda_code/extractor.cu new file mode 100644 index 0000000000000000000000000000000000000000..3acfbdd186e77aadc5b99f452e7b0f6412711239 --- /dev/null +++ b/cuda_code/extractor.cu @@ -0,0 +1,59 @@ + +#include +#include +#include +#include +#include "../hardware_limits.hh" +#include + + +__device__ unsigned int get_index_of(unsigned int x, unsigned int y, unsigned int size_x) +{ + unsigned int index = x + y * size_x; + return index; +} + +__global__ void extract_zone_and_replicate_k(unsigned char *img, unsigned char *output, + unsigned int size_x, unsigned int size_y, unsigned int size_x_y_roi, + unsigned int y_roi) +{ + unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; + //unsigned int actual_pos = tid + starting_index; //position in the image buffer + + //x y corresponding to thread in the input img + //x y for tid 0 start at roi starting point + //unsigned int y_img = actual_pos / size_x; + //unsigned int x_img = actual_pos - (y_img * size_x); + unsigned int x_base = tid; + unsigned int y_base = y_roi; + if (tid < size_x - size_x_y_roi) + { + unsigned long output_index = tid * size_x_y_roi * size_x_y_roi; + for (unsigned int y = y_base; y < y_base + size_x_y_roi; y++) + { + for (unsigned int x = x_base; x < x_base + size_x_y_roi; x++) + { + output[output_index] = img[x + y * size_x]; + output_index = output_index + 1; + } + } + } +} + +void extract_and_replicate(unsigned char *img_gpu, unsigned int size_x, unsigned int size_y, unsigned int roi_size_x_y, + unsigned char *output_gpu_buffer, unsigned int y_roi) +{ + unsigned int blocks_to_compare = size_x - roi_size_x_y; + unsigned int threads = 0; + unsigned int blocks = 0; + get_optimized_thread_blocks(&threads,&blocks, blocks_to_compare); + //printf("threads %i\n", threads); + //printf("blocks %i \n", blocks); + + + extract_zone_and_replicate_k<<>>(img_gpu, output_gpu_buffer, size_x, size_y, roi_size_x_y, y_roi); + +} + + + diff --git a/cuda_code/factories_test_19.cu b/cuda_code/factories_test_19.cu new file mode 100644 index 0000000000000000000000000000000000000000..f904c404251254dafb9733133b05585d5be8227b --- /dev/null +++ b/cuda_code/factories_test_19.cu @@ -0,0 +1,200 @@ +/* + * Copyright 
(c) 2019-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct StringsFactoriesTest : public cudf::test::BaseFixture { +}; + +TEST_F(StringsFactoriesTest, CreateColumnFromPair) +{ + std::vector h_test_strings{"the quick brown fox jumps over the lazy dog", + "the fat cat lays next to the other accénted cat", + "a slow moving turtlé cannot catch the bird", + "which can be composéd together to form a more complete", + "thé result does not include the value in the sum in", + "", + nullptr, + "absent stop words"}; + + cudf::size_type memsize = 0; + for (auto itr = h_test_strings.begin(); itr != h_test_strings.end(); ++itr) + memsize += *itr ? (cudf::size_type)strlen(*itr) : 0; + cudf::size_type count = (cudf::size_type)h_test_strings.size(); + thrust::host_vector h_buffer(memsize); + thrust::device_vector d_buffer(memsize); + thrust::host_vector> strings(count); + thrust::host_vector h_offsets(count + 1); + cudf::size_type offset = 0; + cudf::size_type nulls = 0; + h_offsets[0] = 0; + for (cudf::size_type idx = 0; idx < count; ++idx) { + const char* str = h_test_strings[idx]; + if (!str) { + strings[idx] = thrust::pair{nullptr, 0}; + nulls++; + } else { + cudf::size_type length = (cudf::size_type)strlen(str); + memcpy(h_buffer.data() + offset, str, length); + strings[idx] = + thrust::pair{d_buffer.data().get() + offset, length}; + offset += length; + } + h_offsets[idx + 1] = offset; + } + rmm::device_vector> d_strings(strings); + CUDA_TRY(cudaMemcpy(d_buffer.data().get(), h_buffer.data(), memsize, cudaMemcpyHostToDevice)); + auto column = cudf::make_strings_column(d_strings); + EXPECT_EQ(column->type(), cudf::data_type{cudf::type_id::STRING}); + EXPECT_EQ(column->null_count(), nulls); + if (nulls) { + EXPECT_TRUE(column->nullable()); + EXPECT_TRUE(column->has_nulls()); + } + EXPECT_EQ(2, column->num_children()); + + cudf::strings_column_view strings_view(column->view()); + EXPECT_EQ(strings_view.size(), count); + EXPECT_EQ(strings_view.offsets().size(), count + 1); + EXPECT_EQ(strings_view.chars().size(), memsize); + + // check string data + auto strings_data = cudf::strings::create_offsets(strings_view); + thrust::host_vector h_chars_data(strings_data.first); + thrust::host_vector h_offsets_data(strings_data.second); + EXPECT_EQ(memcmp(h_buffer.data(), h_chars_data.data(), h_buffer.size()), 0); + EXPECT_EQ( + memcmp(h_offsets.data(), h_offsets_data.data(), h_offsets.size() * sizeof(cudf::size_type)), 0); +} + +TEST_F(StringsFactoriesTest, CreateColumnFromOffsets) +{ + std::vector h_test_strings{"the quick brown fox jumps over the lazy dog", + "the fat cat lays next to the other accénted cat", + "a slow moving turtlé cannot catch the bird", + "which can be composéd together to form a more complete", + "thé result does not include the value in the sum in", + "", + nullptr, + "absent stop words"}; + + cudf::size_type memsize = 0; + 
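// The loops below assemble the three host-side pieces of a strings column by
// hand: a chars buffer with all string bytes laid out back to back, an offsets
// array of count+1 entries where offsets[i+1] - offsets[i] is the length of
// row i (so offsets[count] == memsize), and a validity bitmask with one bit
// per row that leaves the nullptr entry marked as null. make_strings_column()
// is then expected to reproduce exactly this layout on the device, which the
// checks at the end of the test verify via create_offsets().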
for (auto itr = h_test_strings.begin(); itr != h_test_strings.end(); ++itr) + memsize += *itr ? (cudf::size_type)strlen(*itr) : 0; + cudf::size_type count = (cudf::size_type)h_test_strings.size(); + std::vector h_buffer(memsize); + std::vector h_offsets(count + 1); + cudf::size_type offset = 0; + h_offsets[0] = offset; + cudf::bitmask_type h_null_mask = 0; + cudf::size_type null_count = 0; + for (cudf::size_type idx = 0; idx < count; ++idx) { + h_null_mask = (h_null_mask << 1); + const char* str = h_test_strings[idx]; + if (str) { + cudf::size_type length = (cudf::size_type)strlen(str); + memcpy(h_buffer.data() + offset, str, length); + offset += length; + h_null_mask |= 1; + } else + null_count++; + h_offsets[idx + 1] = offset; + } + std::vector h_nulls{h_null_mask}; + rmm::device_vector d_buffer(h_buffer); + rmm::device_vector d_offsets(h_offsets); + rmm::device_vector d_nulls(h_nulls); + auto column = cudf::make_strings_column(d_buffer, d_offsets, d_nulls, null_count); + EXPECT_EQ(column->type(), cudf::data_type{cudf::type_id::STRING}); + EXPECT_EQ(column->null_count(), null_count); + EXPECT_EQ(2, column->num_children()); + + cudf::strings_column_view strings_view(column->view()); + EXPECT_EQ(strings_view.size(), count); + EXPECT_EQ(strings_view.offsets().size(), count + 1); + EXPECT_EQ(strings_view.chars().size(), memsize); + + // check string data + auto strings_data = cudf::strings::create_offsets(strings_view); + thrust::host_vector h_chars_data(strings_data.first); + thrust::host_vector h_offsets_data(strings_data.second); + EXPECT_EQ(memcmp(h_buffer.data(), h_chars_data.data(), h_buffer.size()), 0); + EXPECT_EQ( + memcmp(h_offsets.data(), h_offsets_data.data(), h_offsets.size() * sizeof(cudf::size_type)), 0); +} + +TEST_F(StringsFactoriesTest, CreateScalar) +{ + std::string value = "test string"; + auto s = cudf::make_string_scalar(value); + auto string_s = static_cast(s.get()); + + EXPECT_EQ(string_s->to_string(), value); + EXPECT_TRUE(string_s->is_valid()); + EXPECT_TRUE(s->is_valid()); +} + +TEST_F(StringsFactoriesTest, EmptyStringsColumn) +{ + rmm::device_vector d_chars; + rmm::device_vector d_offsets(1, 0); + rmm::device_vector d_nulls; + + auto results = cudf::make_strings_column(d_chars, d_offsets, d_nulls, 0); + cudf::test::expect_strings_empty(results->view()); + + rmm::device_vector> d_strings; + results = cudf::make_strings_column(d_strings); + cudf::test::expect_strings_empty(results->view()); +} + +TEST_F(StringsFactoriesTest, CreateOffsets) +{ + std::vector strings = {"this", "is", "a", "column", "of", "strings"}; + cudf::test::strings_column_wrapper sw = {strings.begin(), strings.end()}; + cudf::column_view col(sw); + std::vector indices{0, 2, 3, 6}; + auto result = cudf::slice(col, indices); + + std::vector> expecteds{ + std::vector{"this", "is"}, // [0,2) + std::vector{"column", "of", "strings"} // [3,6) + }; + for (size_t idx = 0; idx < result.size(); idx++) { + auto strings_data = cudf::strings::create_offsets(cudf::strings_column_view(result[idx])); + thrust::host_vector h_chars(strings_data.first); + thrust::host_vector h_offsets(strings_data.second); + auto expected_strings = expecteds[idx]; + for (size_t jdx = 0; jdx < h_offsets.size() - 1; ++jdx) { + auto offset = h_offsets[jdx]; + auto length = h_offsets[jdx + 1] - offset; + std::string str(h_chars.data() + offset, length); + EXPECT_EQ(str, expected_strings[jdx]); + } + } +} diff --git a/cuda_code/factorization_kernels_8.cu b/cuda_code/factorization_kernels_8.cu new file mode 100644 index 
0000000000000000000000000000000000000000..4b246b2bcf1c501f96fcdbd6bb571a0fea488f74 --- /dev/null +++ b/cuda_code/factorization_kernels_8.cu @@ -0,0 +1,202 @@ +/************************************************************* +Copyright (c) 2017-2020, the Ginkgo authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*************************************************************/ + +#include "core/factorization/factorization_kernels.hpp" + + +#include +#include + + +#include "core/components/prefix_sum.hpp" +#include "core/matrix/csr_builder.hpp" +#include "cuda/base/config.hpp" +#include "cuda/base/types.hpp" +#include "cuda/components/cooperative_groups.cuh" +#include "cuda/components/intrinsics.cuh" +#include "cuda/components/searching.cuh" +#include "cuda/components/thread_ids.cuh" + + +namespace gko { +namespace kernels { +namespace cuda { +/** + * @brief The factorization namespace. 
+ * + * @ingroup factor + */ +namespace factorization { + + +constexpr int default_block_size{512}; + + +#include "common/factorization/factorization_kernels.hpp.inc" + + +template +void add_diagonal_elements(std::shared_ptr exec, + matrix::Csr *mtx, + bool is_sorted) +{ + // TODO: Runtime can be optimized by choosing a appropriate size for the + // subwarp dependent on the matrix properties + constexpr int subwarp_size = config::warp_size; + auto mtx_size = mtx->get_size(); + auto num_rows = static_cast(mtx_size[0]); + auto num_cols = static_cast(mtx_size[1]); + size_type row_ptrs_size = num_rows + 1; + + Array row_ptrs_addition(exec, row_ptrs_size); + Array needs_change_host{exec->get_master(), 1}; + needs_change_host.get_data()[0] = false; + Array needs_change_device{exec, 1}; + needs_change_device = needs_change_host; + + auto cuda_old_values = as_cuda_type(mtx->get_const_values()); + auto cuda_old_col_idxs = as_cuda_type(mtx->get_const_col_idxs()); + auto cuda_old_row_ptrs = as_cuda_type(mtx->get_row_ptrs()); + auto cuda_row_ptrs_add = as_cuda_type(row_ptrs_addition.get_data()); + + const dim3 block_dim{default_block_size, 1, 1}; + const dim3 grid_dim{ + static_cast(ceildiv(num_rows, block_dim.x / subwarp_size)), 1, + 1}; + if (is_sorted) { + kernel::find_missing_diagonal_elements + <<>>( + num_rows, num_cols, cuda_old_col_idxs, cuda_old_row_ptrs, + cuda_row_ptrs_add, + as_cuda_type(needs_change_device.get_data())); + } else { + kernel::find_missing_diagonal_elements + <<>>( + num_rows, num_cols, cuda_old_col_idxs, cuda_old_row_ptrs, + cuda_row_ptrs_add, + as_cuda_type(needs_change_device.get_data())); + } + needs_change_host = needs_change_device; + if (!needs_change_host.get_const_data()[0]) { + return; + } + + prefix_sum(exec, cuda_row_ptrs_add, row_ptrs_size); + exec->synchronize(); + + auto total_additions = + exec->copy_val_to_host(cuda_row_ptrs_add + row_ptrs_size - 1); + size_type new_num_elems = static_cast(total_additions) + + mtx->get_num_stored_elements(); + + + Array new_values{exec, new_num_elems}; + Array new_col_idxs{exec, new_num_elems}; + auto cuda_new_values = as_cuda_type(new_values.get_data()); + auto cuda_new_col_idxs = as_cuda_type(new_col_idxs.get_data()); + + kernel::add_missing_diagonal_elements + <<>>(num_rows, cuda_old_values, cuda_old_col_idxs, + cuda_old_row_ptrs, cuda_new_values, + cuda_new_col_idxs, cuda_row_ptrs_add); + + const dim3 grid_dim_row_ptrs_update{ + static_cast(ceildiv(num_rows, block_dim.x)), 1, 1}; + kernel::update_row_ptrs<<>>( + num_rows + 1, cuda_old_row_ptrs, cuda_row_ptrs_add); + + matrix::CsrBuilder mtx_builder{mtx}; + mtx_builder.get_value_array() = std::move(new_values); + mtx_builder.get_col_idx_array() = std::move(new_col_idxs); +} + +GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( + GKO_DECLARE_FACTORIZATION_ADD_DIAGONAL_ELEMENTS_KERNEL); + + +template +void initialize_row_ptrs_l_u( + std::shared_ptr exec, + const matrix::Csr *system_matrix, + IndexType *l_row_ptrs, IndexType *u_row_ptrs) +{ + const size_type num_rows{system_matrix->get_size()[0]}; + + const dim3 block_size{default_block_size, 1, 1}; + const uint32 number_blocks = + ceildiv(num_rows, static_cast(block_size.x)); + const dim3 grid_dim{number_blocks, 1, 1}; + + kernel::count_nnz_per_l_u_row<<>>( + num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()), + as_cuda_type(system_matrix->get_const_col_idxs()), + as_cuda_type(system_matrix->get_const_values()), + as_cuda_type(l_row_ptrs), as_cuda_type(u_row_ptrs)); + + prefix_sum(exec, l_row_ptrs, num_rows + 1); + 
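// count_nnz_per_l_u_row has just written, for every row, the number of entries
// that will be stored in the L and U factors into l_row_ptrs/u_row_ptrs. The
// two prefix sums then turn those per-row counts into CSR row pointers, e.g.
// counts [2, 1, 3] become row pointers [0, 2, 3, 6]: row_ptrs[i+1] -
// row_ptrs[i] recovers each row's count and the final entry is the total
// number of stored entries, the same convention add_diagonal_elements above
// relies on when it reads the last element of its scanned array as the total.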
prefix_sum(exec, u_row_ptrs, num_rows + 1); +} + +GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( + GKO_DECLARE_FACTORIZATION_INITIALIZE_ROW_PTRS_L_U_KERNEL); + + +template +void initialize_l_u(std::shared_ptr exec, + const matrix::Csr *system_matrix, + matrix::Csr *csr_l, + matrix::Csr *csr_u) +{ + const size_type num_rows{system_matrix->get_size()[0]}; + const dim3 block_size{default_block_size, 1, 1}; + const dim3 grid_dim{static_cast(ceildiv( + num_rows, static_cast(block_size.x))), + 1, 1}; + + kernel::initialize_l_u<<>>( + num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()), + as_cuda_type(system_matrix->get_const_col_idxs()), + as_cuda_type(system_matrix->get_const_values()), + as_cuda_type(csr_l->get_const_row_ptrs()), + as_cuda_type(csr_l->get_col_idxs()), as_cuda_type(csr_l->get_values()), + as_cuda_type(csr_u->get_const_row_ptrs()), + as_cuda_type(csr_u->get_col_idxs()), as_cuda_type(csr_u->get_values())); +} + +GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( + GKO_DECLARE_FACTORIZATION_INITIALIZE_L_U_KERNEL); + + +} // namespace factorization +} // namespace cuda +} // namespace kernels +} // namespace gko diff --git a/cuda_code/fadd_l1d_30_70_64p.cu b/cuda_code/fadd_l1d_30_70_64p.cu new file mode 100644 index 0000000000000000000000000000000000000000..71bf810d83d8daefe8185d18ba3fc0e7fdba4b7c --- /dev/null +++ b/cuda_code/fadd_l1d_30_70_64p.cu @@ -0,0 +1,475 @@ +#include +#include +#include +//#include +#include + +#define SHARED_MEM_ELEMENTS 1024 +#define GLOBAL_MEM_ELEMENTS 4096 + +int num_blocks; +int num_threads_per_block; +int num_iterations; +int divergence; + +float* h_A; +float* h_B; +float* h_C; +float* h_res; +float* d_A; +float* d_B; +float* d_C; +float* d_res; + +__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { + + int block_id; + int warp_id; + int i; + + int index; + int tid = blockDim.x * blockIdx.x + threadIdx.x; + + void **ptr_array = (void **)my_ptr_array; + unsigned long long *array = (unsigned long long *)my_array; + + if (tid == 0) { +// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; + int num_warps_per_block = num_threads_per_block_k / 32; + //int elements_per_warp = elements_per_block / num_warps_per_block; + int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; + +// for (block_id = 0; block_id < num_blocks_k; block_id++) { + for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { + for (i = 0; i < elements_per_warp; i++) { + //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); + index = (warp_id * elements_per_warp); + ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))]; + } + } + +/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { + ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; + } +*/ + for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { + //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; + array[i] = (unsigned long long)ptr_array[i]; + } + } + + __syncthreads(); + +} + +__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { + +// unsigned long long int start_time, end_time; + unsigned long long int sum_time = 0; + int i, k; + + int tid = blockDim.x * blockIdx.x + threadIdx.x; + + int block_id = blockIdx.x; + int warp_id = threadIdx.x / 32; + int 
warp_thread_id = threadIdx.x % 32; + +// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; + int num_warps_per_block = num_threads_per_block_k / 32; +// int elements_per_warp = elements_per_block / num_warps_per_block; + int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; + + //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; + int index1 = (warp_id * elements_per_warp) + warp_thread_id; + + void **ptr_array = (void **)my_ptr_array; + unsigned long long int *array = (unsigned long long int *)my_array; + + void **tmp_ptr; + + //tmp_ptr = (void *)sdata; + //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); + //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); + tmp_ptr = (void **)(&(ptr_array[index1])); + + double f1, f2, f3; + f1 = 1.1; + f2 = 2.5; + if (warp_thread_id < divergence) { +/* __asm volatile ( + ".reg .f32 %r114;\n\t" + "mov.f32 %r114, 2.2;\n\t" + ); +*/ + + for (int l = 0; l < iterations; l++) { + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + 
(unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = 
f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + f1 = f1 + (unsigned long long)(*tmp_ptr); + tmp_ptr = (void**)(*tmp_ptr); + } + } +// __syncthreads(); + + // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) + duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); + +// __syncthreads(); +} + +void usage() { + std::cout << "Usage ./binary " "threads active per warp" << std::endl; +} + +void parametric_measure_shared(int N, int iterations, int stride) { + + cudaProfilerStop(); + int i; + unsigned long long int * h_a; + unsigned long long int * d_a; + + unsigned long long ** h_ptr_a; + unsigned long long ** d_ptr_a; + + unsigned long long * duration; + unsigned long long * latency; + + cudaError_t error_id; + + /* allocate array on CPU */ + h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); + + h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); + + latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); + + /* initialize array elements on CPU */ + + + for (i = 0; i < N; i++) { + h_ptr_a[i] = (unsigned long long *)&h_a[i]; + } + for (i = 0; i < N; i++) { + h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; + } + + /* allocate arrays on GPU */ + cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); + cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); + cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); + + cudaThreadSynchronize (); + error_id = cudaGetLastError(); + if (error_id != cudaSuccess) { + printf("Error 1 is %s\n", cudaGetErrorString(error_id)); + } + + /* copy array elements from CPU to GPU */ + cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice); + cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice); + cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); + + cudaThreadSynchronize (); + + error_id = cudaGetLastError(); + if (error_id != cudaSuccess) { + printf("Error 2 is %s\n", cudaGetErrorString(error_id)); + } + + init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); + cudaDeviceSynchronize(); + + /* launch kernel*/ + //dim3 Db = dim3(13); + //dim3 Dg = dim3(768,1,1); + + //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); + // int sharedMemSize = sizeof(unsigned long long int) * N ; + + cudaEvent_t start, stop; + float time; + + 
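+    // Time the pointer-chasing kernel with CUDA events; cudaEventElapsedTime reports
+    // the result in milliseconds, which is the value printed at the end of this function.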
cudaEventCreate(&start); + cudaEventCreate(&stop); + + cudaEventRecord(start, 0); + cudaProfilerStart(); + + cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1); + //shared_latency <<>>(d_a, N, iterations, duration); + //shared_latency <<>>(d_a, N, num_iterations, duration, stride, divergence); + shared_latency <<>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); + + cudaDeviceSynchronize(); + ///cudaThreadSynchronize (); + + cudaProfilerStop(); + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + + cudaEventElapsedTime(&time, start, stop); + + error_id = cudaGetLastError(); + if (error_id != cudaSuccess) { + printf("Error 3 is %s\n", cudaGetErrorString(error_id)); + } + + /* copy results from GPU to CPU */ + + cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost); + cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); + + cudaThreadSynchronize (); + + /* print results*/ + + + unsigned long long max_dur = latency[0]; + unsigned long long min_dur = latency[0]; + unsigned long long avg_lat = latency[0]; + for (int i = 1; i < num_threads_per_block * num_blocks; i++) { + avg_lat += latency[i]; + if (latency[i] > max_dur) { + max_dur = latency[i]; + } else if (latency[i] < min_dur) { + min_dur = latency[i]; + } + } + + + // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); + + printf("%f\n", time); + + + /* free memory on GPU */ + cudaFree(d_a); + cudaFree(d_ptr_a); + cudaFree(duration); + cudaThreadSynchronize (); + + /*free memory on CPU */ + free(h_a); + free(h_ptr_a); + free(latency); + + +} + + +int main(int argc, char **argv) +{ + int N; + + if (argc != 6) { + usage(); + exit(1); + } + + num_blocks = atoi(argv[1]); + num_threads_per_block = atoi(argv[2]); + num_iterations = atoi(argv[3]); + divergence = atoi(argv[4]); + int stride = atoi(argv[5]); + + N = GLOBAL_MEM_ELEMENTS; + parametric_measure_shared(N, 10, stride); + + return 0; +} diff --git a/cuda_code/fastWalshTransform_kernel_1.cu b/cuda_code/fastWalshTransform_kernel_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..54354207a70c67353c675cbf31ca8ee310fe8a13 --- /dev/null +++ b/cuda_code/fastWalshTransform_kernel_1.cu @@ -0,0 +1,161 @@ +/* + * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. + * + * NVIDIA Corporation and its licensors retain all intellectual property and + * proprietary rights in and to this software and related documentation and + * any modifications thereto. Any use, reproduction, disclosure, or + * distribution + * of this software and related documentation without an express license + * agreement from NVIDIA Corporation is strictly prohibited. 
+ * + */ + +#ifndef FWT_KERNEL_CUH +#define FWT_KERNEL_CUH +#ifndef fwt_kernel_cuh +#define fwt_kernel_cuh + +#include "../benchmark_common.h" + +/////////////////////////////////////////////////////////////////////////////// +// Elementary(for vectors less than elementary size) in-shared memory +// combined radix-2 + radix-4 Fast Walsh Transform +/////////////////////////////////////////////////////////////////////////////// +#define ELEMENTARY_LOG2SIZE 11 + +__global__ void fwtBatch1Kernel(float* d_Output, float* d_Input, int log2N) { + const int N = 1 << log2N; + const int base = blockIdx.x << log2N; + + //(2 ** 11) * 4 bytes == 8KB -- maximum s_data[] size for G80 + extern __shared__ float s_data[]; + float* d_Src = d_Input + base; + float* d_Dst = d_Output + base; + + for (int pos = threadIdx.x; pos < N; pos += blockDim.x) + s_data[pos] = d_Src[pos]; + + // Main radix-4 stages + const int pos = threadIdx.x; + for (int stride = N >> 2; stride > 0; stride >>= 2) { + int lo = pos & (stride - 1); + int i0 = ((pos - lo) << 2) + lo; + int i1 = i0 + stride; + int i2 = i1 + stride; + int i3 = i2 + stride; + + __syncthreads(); + float D0 = s_data[i0]; + float D1 = s_data[i1]; + float D2 = s_data[i2]; + float D3 = s_data[i3]; + + float T; + T = D0; + D0 = D0 + D2; + D2 = T - D2; + T = D1; + D1 = D1 + D3; + D3 = T - D3; + T = D0; + s_data[i0] = D0 + D1; + s_data[i1] = T - D1; + T = D2; + s_data[i2] = D2 + D3; + s_data[i3] = T - D3; + } + + // Do single radix-2 stage for odd power of two + if (log2N & 1) { + __syncthreads(); + for (int pos = threadIdx.x; pos < N / 2; pos += blockDim.x) { + int i0 = pos << 1; + int i1 = i0 + 1; + + float D0 = s_data[i0]; + float D1 = s_data[i1]; + s_data[i0] = D0 + D1; + s_data[i1] = D0 - D1; + } + } + + __syncthreads(); + for (int pos = threadIdx.x; pos < N; pos += blockDim.x) + d_Dst[pos] = s_data[pos]; +} + +//////////////////////////////////////////////////////////////////////////////// +// Single in-global memory radix-4 Fast Walsh Transform pass +// (for strides exceeding elementary vector size) +//////////////////////////////////////////////////////////////////////////////// +__global__ void fwtBatch2Kernel(float* d_Output, float* d_Input, int stride) { + const int pos = blockIdx.x * blockDim.x + threadIdx.x; + const int N = blockDim.x * gridDim.x * 4; + + float* d_Src = d_Input + blockIdx.y * N; + float* d_Dst = d_Output + blockIdx.y * N; + + int lo = pos & (stride - 1); + int i0 = ((pos - lo) << 2) + lo; + int i1 = i0 + stride; + int i2 = i1 + stride; + int i3 = i2 + stride; + + float D0 = d_Src[i0]; + float D1 = d_Src[i1]; + float D2 = d_Src[i2]; + float D3 = d_Src[i3]; + + float T; + T = D0; + D0 = D0 + D2; + D2 = T - D2; + T = D1; + D1 = D1 + D3; + D3 = T - D3; + T = D0; + d_Dst[i0] = D0 + D1; + d_Dst[i1] = T - D1; + T = D2; + d_Dst[i2] = D2 + D3; + d_Dst[i3] = T - D3; +} + +//////////////////////////////////////////////////////////////////////////////// +// Put everything together: batched Fast Walsh Transform CPU front-end +//////////////////////////////////////////////////////////////////////////////// +void fwtBatchGPU(float* d_Data, int M, int log2N, cudaStream_t stream_app) { + const int THREAD_N = 256; + + int N = 1 << log2N; + dim3 grid((1 << log2N) / (4 * THREAD_N), M, 1); + for (; log2N > ELEMENTARY_LOG2SIZE; log2N -= 2, N >>= 2, M <<= 2) { + fwtBatch2Kernel<<>>(d_Data, d_Data, N / 4); + cutilCheckMsg("fwtBatch2Kernel() execution failed\n"); + } + + fwtBatch1Kernel<<>>(d_Data, d_Data, + log2N); + + cutilCheckMsg("fwtBatch1Kernel() execution 
failed\n"); +} + +//////////////////////////////////////////////////////////////////////////////// +// Modulate two arrays +//////////////////////////////////////////////////////////////////////////////// +__global__ void modulateKernel(float* d_A, float* d_B, int N) { + int tid = blockIdx.x * blockDim.x + threadIdx.x; + int numThreads = blockDim.x * gridDim.x; + float rcpN = 1.0f / (float)N; + + for (int pos = tid; pos < N; pos += numThreads) + d_A[pos] *= d_B[pos] * rcpN; +} + +// Interface to modulateKernel() +void modulateGPU(float* d_A, float* d_B, int N, cudaStream_t stream_app) { + modulateKernel<<<128, 256, 0, stream_app>>>(d_A, d_B, N); +} + +#endif +#endif diff --git a/cuda_code/fast_global_registration.cu b/cuda_code/fast_global_registration.cu new file mode 100644 index 0000000000000000000000000000000000000000..14caed477b259d5f1ec9d430fbc77e95e78f535a --- /dev/null +++ b/cuda_code/fast_global_registration.cu @@ -0,0 +1,450 @@ +/** + * Copyright (c) 2020 Neka-Nat + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ **/ +#include +#include +#include + +#include "cupoch/geometry/bruteforce_nn.h" +#include "cupoch/geometry/pointcloud.h" +#include "cupoch/registration/fast_global_registration.h" +#include "cupoch/registration/registration.h" +#include "cupoch/utility/platform.h" + +namespace cupoch { +namespace registration { +namespace { + +struct compute_tuple_constraint_functor { + compute_tuple_constraint_functor( + size_t ncorr, + const thrust::tuple* corres_cross, + const Eigen::Vector3f* point_cloud_vec_fi_points, + const Eigen::Vector3f* point_cloud_vec_fj_points, + thrust::tuple* corres_tuple, + float scale) + : ncorr_(ncorr), + corres_cross_(corres_cross), + point_cloud_vec_fi_points_(point_cloud_vec_fi_points), + point_cloud_vec_fj_points_(point_cloud_vec_fj_points), + corres_tuple_(corres_tuple), + scale_(scale){}; + const int ncorr_; + const thrust::tuple* corres_cross_; + const Eigen::Vector3f* point_cloud_vec_fi_points_; + const Eigen::Vector3f* point_cloud_vec_fj_points_; + thrust::tuple* corres_tuple_; + const float scale_; + __device__ void operator()(size_t idx) { + int rand0, rand1, rand2; + int idi0, idi1, idi2, idj0, idj1, idj2; + thrust::default_random_engine eng; + thrust::uniform_int_distribution dist(0, ncorr_ - 1); + eng.discard(idx); + rand0 = dist(eng); + rand1 = dist(eng); + rand2 = dist(eng); + idi0 = thrust::get<0>(corres_cross_[rand0]); + idj0 = thrust::get<1>(corres_cross_[rand0]); + idi1 = thrust::get<0>(corres_cross_[rand1]); + idj1 = thrust::get<1>(corres_cross_[rand1]); + idi2 = thrust::get<0>(corres_cross_[rand2]); + idj2 = thrust::get<1>(corres_cross_[rand2]); + + // collect 3 points from i-th fragment + Eigen::Vector3f pti0 = point_cloud_vec_fi_points_[idi0]; + Eigen::Vector3f pti1 = point_cloud_vec_fi_points_[idi1]; + Eigen::Vector3f pti2 = point_cloud_vec_fi_points_[idi2]; + float li0 = (pti0 - pti1).norm(); + float li1 = (pti1 - pti2).norm(); + float li2 = (pti2 - pti0).norm(); + + // collect 3 points from j-th fragment + Eigen::Vector3f ptj0 = point_cloud_vec_fj_points_[idj0]; + Eigen::Vector3f ptj1 = point_cloud_vec_fj_points_[idj1]; + Eigen::Vector3f ptj2 = point_cloud_vec_fj_points_[idj2]; + float lj0 = (ptj0 - ptj1).norm(); + float lj1 = (ptj1 - ptj2).norm(); + float lj2 = (ptj2 - ptj0).norm(); + + // check tuple constraint + bool cond = (li0 * scale_ < lj0) && (lj0 < li0 / scale_) && + (li1 * scale_ < lj1) && (lj1 < li1 / scale_) && + (li2 * scale_ < lj2) && (lj2 < li2 / scale_); + thrust::tuple invalid_idx = thrust::make_tuple(-1, -1); + corres_tuple_[3 * idx] = + (cond) ? thrust::make_tuple(idi0, idj0) : invalid_idx; + corres_tuple_[3 * idx + 1] = + (cond) ? thrust::make_tuple(idi1, idj1) : invalid_idx; + corres_tuple_[3 * idx + 2] = + (cond) ? 
thrust::make_tuple(idi2, idj2) : invalid_idx; + } +}; + +template +utility::device_vector> AdvancedMatching( + const std::vector& point_cloud_vec, + const std::vector>& features_vec, + const FastGlobalRegistrationOption& option) { + // STEP 0) Swap source and target if necessary + int fi = 0, fj = 1; + utility::LogDebug("Advanced matching : [{:d} - {:d}]", fi, fj); + bool swapped = false; + if (point_cloud_vec[fj].points_.size() > + point_cloud_vec[fi].points_.size()) { + int temp = fi; + fi = fj; + fj = temp; + swapped = true; + } + + // STEP 1) Initial matching + int nPti = int(point_cloud_vec[fi].points_.size()); + int nPtj = int(point_cloud_vec[fj].points_.size()); + utility::device_vector corresK; + utility::device_vector dis; + utility::device_vector> corres; + corres.resize(nPti + nPtj); + geometry::BruteForceNN(features_vec[fi].data_, features_vec[fj].data_, + corresK, dis); + thrust::copy(make_tuple_iterator(corresK.begin(), + thrust::make_counting_iterator(0)), + make_tuple_iterator( + corresK.end(), + thrust::make_counting_iterator(corresK.size())), + corres.begin()); + geometry::BruteForceNN(features_vec[fj].data_, features_vec[fi].data_, + corresK, dis); + thrust::copy(make_tuple_iterator(thrust::make_counting_iterator(0), + corresK.begin()), + make_tuple_iterator( + thrust::make_counting_iterator(corresK.size()), + corresK.end()), + corres.begin() + nPtj); + thrust::sort(utility::exec_policy(0)->on(0), corres.begin(), corres.end()); + utility::LogDebug("points are remained : {:d}", corres.size()); + + // STEP 2) CROSS CHECK + utility::LogDebug("\t[cross check] "); + utility::device_vector> corres_cross(corres.size()); + utility::device_vector counts(corres.size()); + auto end1 = thrust::reduce_by_key(utility::exec_policy(0)->on(0), + corres.begin(), corres.end(), + thrust::make_constant_iterator(1), + corres_cross.begin(), counts.begin()); + auto end2 = + thrust::remove_if(corres_cross.begin(), end1.first, counts.begin(), + [] __device__(int cnt) { return cnt < 2; }); + corres_cross.resize(thrust::distance(corres_cross.begin(), end2)); + utility::LogDebug("points are remained : {:d}", corres_cross.size()); + + // STEP 3) TUPLE CONSTRAINT + utility::LogDebug("\t[tuple constraint] "); + float scale = option.tuple_scale_; + size_t ncorr = corres_cross.size(); + size_t number_of_trial = ncorr * 100; + + utility::device_vector> corres_tuple( + 3 * number_of_trial); + compute_tuple_constraint_functor func( + ncorr, thrust::raw_pointer_cast(corres_cross.data()), + thrust::raw_pointer_cast(point_cloud_vec[fi].points_.data()), + thrust::raw_pointer_cast(point_cloud_vec[fj].points_.data()), + thrust::raw_pointer_cast(corres_tuple.data()), scale); + thrust::for_each(thrust::make_counting_iterator(0), + thrust::make_counting_iterator(number_of_trial), func); + auto end3 = thrust::remove_if( + corres_tuple.begin(), corres_tuple.end(), + [] __device__(const thrust::tuple& corr) { + return thrust::get<0>(corr) < 0; + }); + size_t n_res = thrust::distance(corres_tuple.begin(), end3); + corres_tuple.resize(std::min((int)n_res, option.maximum_tuple_count_)); + utility::LogDebug("{:d} tuples ({:d} trial, {:d} actual).", + corres_tuple.size(), number_of_trial, n_res); + + if (swapped) { + thrust::for_each(corres_tuple.begin(), corres_tuple.end(), + [] __device__(thrust::tuple & corr) { + thrust::swap(thrust::get<0>(corr), + thrust::get<1>(corr)); + }); + } + utility::LogDebug("\t[final] matches {:d}.", (int)corres_tuple.size()); + return corres_tuple; +} + +// Normalize scale of points. 
X' = (X-\mu)/scale +std::tuple, float, float> NormalizePointCloud( + std::vector& point_cloud_vec, + const FastGlobalRegistrationOption& option) { + int num = 2; + float scale = 0; + std::vector pcd_mean_vec; + float scale_global, scale_start; + Eigen::Vector3f means[2]; + thrust::system::cuda::unique_eager_future reduces[2]; + thrust::system::cuda::unique_eager_event foreach[2]; + + for (int i = 0; i < num; ++i) { + reduces[i] = thrust::async::reduce( + utility::exec_policy(utility::GetStream(i)) + ->on(utility::GetStream(i)), + point_cloud_vec[i].points_.begin(), + point_cloud_vec[i].points_.end(), + Eigen::Vector3f(0.0, 0.0, 0.0), + thrust::plus()); + } + for (int i = 0; i < num; ++i) { + means[i] = reduces[i].get() / point_cloud_vec[i].points_.size(); + foreach + [i] = thrust::async::for_each( + utility::exec_policy(utility::GetStream(i)) + ->on(utility::GetStream(i)), + point_cloud_vec[i].points_.begin(), + point_cloud_vec[i].points_.end(), + [mean = means[i]] __device__(Eigen::Vector3f & pt) { + pt -= mean; + }); + } + for (int i = 0; i < num; ++i) { + utility::LogDebug("normalize points :: mean = [{:f} {:f} {:f}]", + means[i](0), means[i](1), means[i](2)); + pcd_mean_vec.push_back(means[i]); + foreach + [i].wait(); + scale = thrust::transform_reduce( + utility::exec_policy(0)->on(0), + point_cloud_vec[i].points_.begin(), + point_cloud_vec[i].points_.end(), + [] __device__(const Eigen::Vector3f& pt) { return pt.norm(); }, + scale, thrust::maximum()); + } + + if (option.use_absolute_scale_) { + scale_global = 1.0; + scale_start = scale; + } else { + scale_global = scale; + scale_start = 1.0; + } + utility::LogDebug("normalize points :: global scale : {:f}", scale_global); + + for (int i = 0; i < num; ++i) { + foreach + [i] = thrust::async::for_each( + utility::exec_policy(utility::GetStream(i)) + ->on(utility::GetStream(i)), + point_cloud_vec[i].points_.begin(), + point_cloud_vec[i].points_.end(), + [scale_global] __device__(Eigen::Vector3f & pt) { + pt /= scale_global; + }); + } + foreach + [0].wait(); + foreach + [1].wait(); + return std::make_tuple(pcd_mean_vec, scale_global, scale_start); +} + +struct compute_jacobian_functor { + compute_jacobian_functor(float par) : par_(par){}; + const float par_; + __device__ thrust::tuple operator()( + const thrust::tuple& x) const { + Eigen::Vector3f p, q; + p = thrust::get<0>(x); + q = thrust::get<1>(x); + Eigen::Vector3f rpq = p - q; + float temp = par_ / (rpq.dot(rpq) + par_); + float s = temp * temp; + float r = 0; + + Eigen::Matrix6f JTJ = Eigen::Matrix6f::Zero(); + Eigen::Vector6f JTr = Eigen::Vector6f::Zero(); + Eigen::Vector6f J = Eigen::Vector6f::Zero(); + J(1) = -q(2); + J(2) = q(1); + J(3) = -1; + r = rpq(0); + JTJ += J * J.transpose() * s; + JTr += J * r * s; + + J.setZero(); + J(2) = -q(0); + J(0) = q(2); + J(4) = -1; + r = rpq(1); + JTJ += J * J.transpose() * s; + JTr += J * r * s; + + J.setZero(); + J(0) = -q(1); + J(1) = q(0); + J(5) = -1; + r = rpq(2); + JTJ += J * J.transpose() * s; + JTr += J * r * s; + return thrust::make_tuple(JTJ, JTr); + } +}; + +Eigen::Matrix4f OptimizePairwiseRegistration( + const std::vector& point_cloud_vec, + const utility::device_vector>& corres, + float scale_start, + const FastGlobalRegistrationOption& option) { + utility::LogDebug("Pairwise rigid pose optimization"); + float par = scale_start; + int numIter = option.iteration_number_; + + int i = 0, j = 1; + geometry::PointCloud point_cloud_copy_j = point_cloud_vec[j]; + + if (corres.size() < 10) return Eigen::Matrix4f::Identity(); + 
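+    // Gauss-Newton-style loop: each sweep accumulates a 6x6 normal system (JTJ, JTr) over the
+    // correspondence tuples, weighting every residual by s = (par / (||p - q||^2 + par))^2, then
+    // solves for a 6-DoF pose update and applies it to the working copy of the second cloud.
+    // When decrease_mu_ is set, 'par' is divided by division_factor_ every 4th iteration
+    // (graduated non-convexity) while it stays above maximum_correspondence_distance_.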
Eigen::Matrix4f trans = Eigen::Matrix4f::Identity(); + + for (int itr = 0; itr < numIter; itr++) { + Eigen::Matrix6f JTJ = Eigen::Matrix6f::Zero(); + Eigen::Vector6f JTr = Eigen::Vector6f::Zero(); + compute_jacobian_functor func(par); + thrust::tie(JTJ, JTr) = thrust::transform_reduce( + utility::exec_policy(0)->on(0), + make_tuple_iterator( + thrust::make_permutation_iterator( + point_cloud_vec[i].points_.begin(), + thrust::make_transform_iterator( + corres.begin(), + tuple_get_functor<0, int, int, int>())), + thrust::make_permutation_iterator( + point_cloud_copy_j.points_.begin(), + thrust::make_transform_iterator( + corres.begin(), + tuple_get_functor<1, int, int, + int>()))), + make_tuple_iterator( + thrust::make_permutation_iterator( + point_cloud_vec[i].points_.begin(), + thrust::make_transform_iterator( + corres.end(), + tuple_get_functor<0, int, int, int>())), + thrust::make_permutation_iterator( + point_cloud_copy_j.points_.begin(), + thrust::make_transform_iterator( + corres.end(), + tuple_get_functor<1, int, int, + int>()))), + func, thrust::make_tuple(JTJ, JTr), + add_tuple_functor()); + bool success; + Eigen::Vector6f result; + thrust::tie(success, result) = + utility::SolveLinearSystemPSD<6>(-JTJ, JTr); + Eigen::Matrix4f delta = utility::TransformVector6fToMatrix4f(result); + trans = delta * trans; + point_cloud_copy_j.Transform(delta); + + // graduated non-convexity. + if (option.decrease_mu_) { + if (itr % 4 == 0 && par > option.maximum_correspondence_distance_) { + par /= option.division_factor_; + } + } + } + return trans; +} + +// Below line indicates how the transformation matrix aligns two point clouds +// e.g. T * point_cloud_vec[1] is aligned with point_cloud_vec[0]. +Eigen::Matrix4f GetInvTransformationOriginalScale( + const Eigen::Matrix4f& transformation, + const std::vector& pcd_mean_vec, + float scale_global) { + Eigen::Matrix3f R = transformation.block<3, 3>(0, 0); + Eigen::Vector3f t = transformation.block<3, 1>(0, 3); + Eigen::Matrix4f transtemp = Eigen::Matrix4f::Zero(); + transtemp.block<3, 3>(0, 0) = R.transpose(); + transtemp.block<3, 1>(0, 3) = + -R.transpose() * + (-R * pcd_mean_vec[1] + t * scale_global + pcd_mean_vec[0]); + transtemp(3, 3) = 1; + return transtemp; +} + +} // namespace + +template +RegistrationResult FastGlobalRegistration( + const geometry::PointCloud& source, + const geometry::PointCloud& target, + const Feature& source_feature, + const Feature& target_feature, + const FastGlobalRegistrationOption& option /* = + FastGlobalRegistrationOption()*/) { + if (!source.HasPoints() || !target.HasPoints() || + source_feature.IsEmpty() || target_feature.IsEmpty()) { + utility::LogError("Invalid source or target pointcloud."); + return RegistrationResult(); + } + std::vector point_cloud_vec; + geometry::PointCloud source_orig = source; + geometry::PointCloud target_orig = target; + point_cloud_vec.push_back(source); + point_cloud_vec.push_back(target); + + std::vector> features_vec; + features_vec.push_back(source_feature); + features_vec.push_back(target_feature); + + float scale_global, scale_start; + std::vector pcd_mean_vec; + std::tie(pcd_mean_vec, scale_global, scale_start) = + NormalizePointCloud(point_cloud_vec, option); + utility::device_vector> corres; + corres = AdvancedMatching(point_cloud_vec, features_vec, option); + Eigen::Matrix4f transformation; + transformation = OptimizePairwiseRegistration(point_cloud_vec, corres, + scale_global, option); + + // as the original code T * point_cloud_vec[1] is aligned with + // 
point_cloud_vec[0] matrix inverse is applied here. + return EvaluateRegistration( + source_orig, target_orig, option.maximum_correspondence_distance_, + GetInvTransformationOriginalScale(transformation, pcd_mean_vec, + scale_global)); +} + +template RegistrationResult FastGlobalRegistration<33>( + const geometry::PointCloud& source, + const geometry::PointCloud& target, + const Feature<33>& source_feature, + const Feature<33>& target_feature, + const FastGlobalRegistrationOption& option); + +template RegistrationResult FastGlobalRegistration<352>( + const geometry::PointCloud& source, + const geometry::PointCloud& target, + const Feature<352>& source_feature, + const Feature<352>& target_feature, + const FastGlobalRegistrationOption& option); + +} // namespace registration +} // namespace cupoch \ No newline at end of file diff --git a/cuda_code/fast_int_div_4.cu b/cuda_code/fast_int_div_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..e84127cb4981993f94fc58936876339f7024985f --- /dev/null +++ b/cuda_code/fast_int_div_4.cu @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2020-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "test_utils.h" + +namespace MLCommon { + +TEST(FastIntDiv, CpuTest) +{ + for (int i = 0; i < 100; ++i) { + // get a positive divisor + int divisor; + do { + divisor = rand(); + } while (divisor <= 0); + FastIntDiv fid(divisor); + // run it against a few random numbers and compare the outputs + for (int i = 0; i < 10000; ++i) { + auto num = rand(); + auto correct = num / divisor; + auto computed = num / fid; + ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num; + num = rand(); + correct = num % divisor; + computed = num % fid; + ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num; + num = -num; + correct = num / divisor; + computed = num / fid; + ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num; + num = rand(); + correct = num % divisor; + computed = num % fid; + ASSERT_EQ(correct, computed) << " divisor=" << divisor << " num=" << num; + } + } +} + +__global__ void fastIntDivTestKernel( + int* computed, int* correct, const int* in, FastIntDiv fid, int divisor, int len) +{ + auto tid = threadIdx.x + blockIdx.x * blockDim.x; + if (tid < len) { + computed[tid] = in[tid] % fid; + correct[tid] = in[tid] % divisor; + computed[len + tid] = -in[tid] % fid; + correct[len + tid] = -in[tid] % divisor; + } +} + +TEST(FastIntDiv, GpuTest) +{ + static const int len = 100000; + static const int TPB = 128; + int *computed, *correct, *in; + raft::allocate(computed, len * 2); + raft::allocate(correct, len * 2); + raft::allocate(in, len); + for (int i = 0; i < 100; ++i) { + // get a positive divisor + int divisor; + do { + divisor = rand(); + } while (divisor <= 0); + FastIntDiv fid(divisor); + // run it against a few random numbers and compare the outputs + int* h_in = new int[len]; + for (int i = 0; i < len; ++i) { + h_in[i] = 
rand(); + } + raft::update_device(in, h_in, len, 0); + int nblks = raft::ceildiv(len, TPB); + fastIntDivTestKernel<<>>(computed, correct, in, fid, divisor, len); + CUDA_CHECK(cudaStreamSynchronize(0)); + ASSERT_TRUE(devArrMatch(correct, computed, len * 2, raft::Compare())) + << " divisor=" << divisor; + } +} + +FastIntDiv dummyFunc(int num) +{ + FastIntDiv fd(num); + return fd; +} + +TEST(FastIntDiv, IncorrectUsage) +{ + ASSERT_THROW(dummyFunc(-1), raft::exception); + ASSERT_THROW(dummyFunc(0), raft::exception); +} + +} // namespace MLCommon diff --git a/cuda_code/fct_ale_b1_vertical.cu b/cuda_code/fct_ale_b1_vertical.cu new file mode 100644 index 0000000000000000000000000000000000000000..5139230ac45600ed27d92cd1894a34d7c14b236c --- /dev/null +++ b/cuda_code/fct_ale_b1_vertical.cu @@ -0,0 +1,16 @@ +/* Block size X: 32 */ +__global__ void fct_ale_b1_vertical(const int maxLevels, const int * __restrict__ nLevels, const double * __restrict__ fct_adf_v, double * __restrict__ fct_plus, double * __restrict__ fct_minus) +{ +const int node = (blockIdx.x * maxLevels); + +for ( int level = threadIdx.x; level < nLevels[blockIdx.x] - 1; level += 32 ) +{ + double fct_adf_v_level = 0.0; + double fct_adf_v_nlevel = 0.0; + int item = blockIdx.x * (maxLevels + 1) + level; + fct_adf_v_level = fct_adf_v[item]; + fct_adf_v_nlevel = fct_adf_v[item + 1]; + fct_plus[node + level] = fmax(0.0, fct_adf_v_level) + fmax(0.0, -fct_adf_v_nlevel); + fct_minus[node + level] = fmin(0.0, fct_adf_v_level) + fmin(0.0, -fct_adf_v_nlevel); +} +} \ No newline at end of file diff --git a/cuda_code/fdwt53_3.cu b/cuda_code/fdwt53_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..442ef099ec065cb22feb60c2627aa445e1ccd2f6 --- /dev/null +++ b/cuda_code/fdwt53_3.cu @@ -0,0 +1,362 @@ +/// +/// @file fdwt53.cu +/// @brief CUDA implementation of forward 5/3 2D DWT. +/// @author Martin Jirman (207962@mail.muni.cz) +/// @date 2011-02-04 13:23 +/// +/// +/// Copyright (c) 2011 Martin Jirman +/// All rights reserved. +/// +/// Redistribution and use in source and binary forms, with or without +/// modification, are permitted provided that the following conditions are met: +/// +/// * Redistributions of source code must retain the above copyright +/// notice, this list of conditions and the following disclaimer. +/// * Redistributions in binary form must reproduce the above copyright +/// notice, this list of conditions and the following disclaimer in the +/// documentation and/or other materials provided with the distribution. +/// +/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +/// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +/// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +/// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +/// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +/// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +/// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +/// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +/// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +/// POSSIBILITY OF SUCH DAMAGE. 
+/// + + +#include "cuda_common.h" +#include "transform_buffer.h" +#include "io.h" + +namespace dwt_cuda { + + + /// Wraps buffer and methods needed for computing one level of 5/3 FDWT + /// using sliding window approach. + /// @tparam WIN_SIZE_X width of sliding window + /// @tparam WIN_SIZE_Y height of sliding window + template + class FDWT53 { + private: + + /// Info needed for processing of one input column. + /// @tparam CHECKED_LOADER true if column's loader should check boundaries + /// false if there are no near boudnaries to check + template + struct FDWT53Column { + /// loader for the column + VerticalDWTPixelLoader loader; + + /// offset of the column in shared buffer + int offset; + + // backup of first 3 loaded pixels (not transformed) + int pixel0, pixel1, pixel2; + + /// Sets all fields to anything to prevent 'uninitialized' warnings. + __device__ void clear() { + offset = pixel0 = pixel1 = pixel2 = 0; + loader.clear(); + } + }; + + + /// Type of shared memory buffer for 5/3 FDWT transforms. + typedef TransformBuffer FDWT53Buffer; + + /// Actual shared buffer used for forward 5/3 DWT. + FDWT53Buffer buffer; + + /// Difference between indices of two vertical neighbors in buffer. + enum { STRIDE = FDWT53Buffer::VERTICAL_STRIDE }; + + + /// Forward 5/3 DWT predict operation. + struct Forward53Predict { + __device__ void operator() (const int p, int & c, const int n) const { + c -= (p + n) / 2; // F.8, page 126, ITU-T Rec. T.800 final draft + } + }; + + + /// Forward 5/3 DWT update operation. + struct Forward53Update { + __device__ void operator() (const int p, int & c, const int n) const { + c += (p + n + 2) / 4; // F.9, page 126, ITU-T Rec. T.800 final draft + } + }; + + + /// Initializes one column: computes offset of the column in shared memory + /// buffer, initializes loader and finally uses it to load first 3 pixels. 
+ /// @tparam CHECKED true if loader of the column checks boundaries + /// @param column (uninitialized) column info to be initialized + /// @param input input image + /// @param sizeX width of the input image + /// @param sizeY height of the input image + /// @param colIndex x-axis coordinate of the column (relative to the left + /// side of this threadblock's block of input pixels) + /// @param firstY y-axis coordinate of first image row to be transformed + template + __device__ void initColumn(FDWT53Column & column, + const int * const input, + const int sizeX, const int sizeY, + const int colIndex, const int firstY) { + // get offset of the column with index 'cId' + column.offset = buffer.getColumnOffset(colIndex); + + // coordinates of the first pixel to be loaded + const int firstX = blockIdx.x * WIN_SIZE_X + colIndex; + + if(blockIdx.y == 0) { + // topmost block - apply mirroring rules when loading first 3 rows + column.loader.init(sizeX, sizeY, firstX, firstY); + + // load pixels in mirrored way + column.pixel2 = column.loader.loadFrom(input); // loaded pixel #0 + column.pixel1 = column.loader.loadFrom(input); // loaded pixel #1 + column.pixel0 = column.loader.loadFrom(input); // loaded pixel #2 + + // reinitialize loader to start with pixel #1 again + column.loader.init(sizeX, sizeY, firstX, firstY + 1); + } else { + // non-topmost row - regular loading: + column.loader.init(sizeX, sizeY, firstX, firstY - 2); + + // load 3 rows into the column + column.pixel0 = column.loader.loadFrom(input); + column.pixel1 = column.loader.loadFrom(input); + column.pixel2 = column.loader.loadFrom(input); + // Now, the next pixel, which will be loaded by loader, is pixel #1. + } + } + + + /// Loads and vertically transforms given column. Assumes that first 3 + /// pixels are already loaded in column fields pixel0 ... pixel2. + /// @tparam CHECKED true if loader of the column checks boundaries + /// @param column column to be loaded and vertically transformed + /// @param input pointer to input image data + template + __device__ void loadAndVerticallyTransform(FDWT53Column & column, + const int * const input) { + // take 3 loaded pixels and put them into shared memory transform buffer + buffer[column.offset + 0 * STRIDE] = column.pixel0; + buffer[column.offset + 1 * STRIDE] = column.pixel1; + buffer[column.offset + 2 * STRIDE] = column.pixel2; + + // load remaining pixels to be able to vertically transform the window + for(int i = 3; i < (3 + WIN_SIZE_Y); i++) { + buffer[column.offset + i * STRIDE] = column.loader.loadFrom(input); + } + + // remember last 3 pixels for use in next iteration + column.pixel0 = buffer[column.offset + (WIN_SIZE_Y + 0) * STRIDE]; + column.pixel1 = buffer[column.offset + (WIN_SIZE_Y + 1) * STRIDE]; + column.pixel2 = buffer[column.offset + (WIN_SIZE_Y + 2) * STRIDE]; + + // vertically transform the column in transform buffer + buffer.forEachVerticalOdd(column.offset, Forward53Predict()); + buffer.forEachVerticalEven(column.offset, Forward53Update()); + } + + + /// Actual implementation of 5/3 FDWT. 
+ /// @tparam CHECK_LOADS true if input loader must check boundaries + /// @tparam CHECK_WRITES true if output writer must check boundaries + /// @param in input image + /// @param out output buffer + /// @param sizeX width of the input image + /// @param sizeY height of the input image + /// @param winSteps number of sliding window steps + template + __device__ void transform(const int * const in, int * const out, + const int sizeX, const int sizeY, + const int winSteps) { + // info about one main and one boundary columns processed by this thread + FDWT53Column column; + FDWT53Column boundaryColumn; // only few threads use this + + // Initialize all column info: initialize loaders, compute offset of + // column in shared buffer and initialize loader of column. + const int firstY = blockIdx.y * WIN_SIZE_Y * winSteps; + initColumn(column, in, sizeX, sizeY, threadIdx.x, firstY); + + // first 3 threads initialize boundary columns, others do not use them + boundaryColumn.clear(); + if(threadIdx.x < 3) { + // index of boundary column (relative x-axis coordinate of the column) + const int colId = threadIdx.x + ((threadIdx.x == 0) ? WIN_SIZE_X : -3); + + // initialize the column + initColumn(boundaryColumn, in, sizeX, sizeY, colId, firstY); + } + + // index of column which will be written into output by this thread + const int outColumnIndex = parityIdx(); + + // offset of column which will be written by this thread into output + const int outColumnOffset = buffer.getColumnOffset(outColumnIndex); + + // initialize output writer for this thread + const int outputFirstX = blockIdx.x * WIN_SIZE_X + outColumnIndex; + VerticalDWTBandWriter writer; + writer.init(sizeX, sizeY, outputFirstX, firstY); + + // Sliding window iterations: + // Each iteration assumes that first 3 pixels of each column are loaded. + for(int w = 0; w < winSteps; w++) { + // For each column (including boundary columns): load and vertically + // transform another WIN_SIZE_Y lines. + loadAndVerticallyTransform(column, in); + if(threadIdx.x < 3) { + loadAndVerticallyTransform(boundaryColumn, in); + } + + // wait for all columns to be vertically transformed and transform all + // output rows horizontally + __syncthreads(); + buffer.forEachHorizontalOdd(2, WIN_SIZE_Y, Forward53Predict()); + __syncthreads(); + buffer.forEachHorizontalEven(2, WIN_SIZE_Y, Forward53Update()); + + // wait for all output rows to be transformed horizontally and write + // them into output buffer + __syncthreads(); + for(int r = 2; r < (2 + WIN_SIZE_Y); r += 2) { + // Write low coefficients from output column into low band ... + writer.writeLowInto(out, buffer[outColumnOffset + r * STRIDE]); + // ... and high coeficients into the high band. + writer.writeHighInto(out, buffer[outColumnOffset + (r+1) * STRIDE]); + } + + // before proceeding to next iteration, wait for all output columns + // to be written into the output + __syncthreads(); + } + } + + + public: + /// Determines, whether this block's pixels touch boundary and selects + /// right version of algorithm according to it - for many threadblocks, it + /// selects version which does not deal with boundary mirroring and thus is + /// slightly faster. 
+ /// @param in input image + /// @param out output buffer + /// @param sx width of the input image + /// @param sy height of the input image + /// @param steps number of sliding window steps + __device__ static void run(const int * const in, int * const out, + const int sx, const int sy, const int steps) { + // object with transform buffer in shared memory + __shared__ FDWT53 fdwt53; + + // Compute limits of this threadblock's block of pixels and use them to + // determine, whether this threadblock will have to deal with boundary. + // (1 in next expressions is for radius of impulse response of 9/7 FDWT.) + const int maxX = (blockIdx.x + 1) * WIN_SIZE_X + 1; + const int maxY = (blockIdx.y + 1) * WIN_SIZE_Y * steps + 1; + const bool atRightBoudary = maxX >= sx; + const bool atBottomBoudary = maxY >= sy; + + // Select specialized version of code according to distance of this + // threadblock's pixels from image boundary. + if(atBottomBoudary) { + // near bottom boundary => check both writing and reading + fdwt53.transform(in, out, sx, sy, steps); + } else if(atRightBoudary) { + // near right boundary only => check writing only + fdwt53.transform(in, out, sx, sy, steps); + } else { + // no nearby boundary => check nothing + fdwt53.transform(in, out, sx, sy, steps); + } + } + + }; // end of class FDWT53 + + + + /// Main GPU 5/3 FDWT entry point. + /// @tparam WIN_SX width of sliding window to be used + /// @tparam WIN_SY height of sliding window to be used + /// @param input input image + /// @param output output buffer + /// @param sizeX width of the input image + /// @param sizeY height of the input image + /// @param winSteps number of sliding window steps + template + __launch_bounds__(WIN_SX, CTMIN(SHM_SIZE/sizeof(FDWT53), 8)) + __global__ void fdwt53Kernel(const int * const input, int * const output, + const int sizeX, const int sizeY, + const int winSteps) { + FDWT53::run(input, output, sizeX, sizeY, winSteps); + } + + + + /// Only computes optimal number of sliding window steps, + /// number of threadblocks and then lanches the 5/3 FDWT kernel. + /// @tparam WIN_SX width of sliding window + /// @tparam WIN_SY height of sliding window + /// @param in input image + /// @param out output buffer + /// @param sx width of the input image + /// @param sy height of the input image + template + void launchFDWT53Kernel (int * in, int * out, int sx, int sy) { + // compute optimal number of steps of each sliding window + const int steps = divRndUp(sy, 15 * WIN_SY); + + // prepare grid size + dim3 gSize(divRndUp(sx, WIN_SX), divRndUp(sy, WIN_SY * steps)); + + // run kernel, possibly measure time and finally check the call + PERF_BEGIN + fdwt53Kernel<<>>(in, out, sx, sy, steps); + PERF_END(" FDWT53", sx, sy) + CudaDWTTester::checkLastKernelCall("FDWT 5/3 kernel"); + } + + + + /// Forward 5/3 2D DWT. See common rules (above) for more details. + /// @param in Expected to be normalized into range [-128, 127]. + /// Will not be preserved (will be overwritten). 
+ /// @param out output buffer on GPU + /// @param sizeX width of input image (in pixels) + /// @param sizeY height of input image (in pixels) + /// @param levels number of recursive DWT levels + void fdwt53(int * in, int * out, int sizeX, int sizeY, int levels) { + // select right width of kernel for the size of the image + if(sizeX >= 960) { + launchFDWT53Kernel<192, 8>(in, out, sizeX, sizeY); + } else if (sizeX >= 480) { + launchFDWT53Kernel<128, 8>(in, out, sizeX, sizeY); + } else { + launchFDWT53Kernel<64, 8>(in, out, sizeX, sizeY); + } + + // if this was not the last level, continue recursively with other levels + if(levels > 1) { + // copy output's LL band back into input buffer + const int llSizeX = divRndUp(sizeX, 2); + const int llSizeY = divRndUp(sizeY, 2); + memCopy(in, out, llSizeX, llSizeY); + + // run remaining levels of FDWT + fdwt53(in, out, llSizeX, llSizeY, levels - 1); + } + } + + + +} // end of namespace dwt_cuda diff --git a/cuda_code/filter_act_color_scale0_ckimg0_3.cu b/cuda_code/filter_act_color_scale0_ckimg0_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..886c7f870ea06689b8a39165eff73701b007e842 --- /dev/null +++ b/cuda_code/filter_act_color_scale0_ckimg0_3.cu @@ -0,0 +1,37 @@ +/** + * \file dnn/src/cuda/local/cuda-convnet2/filter_acts/filter_act_color_scale0_ckimg0.cu + * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + */ +/** + * Copyright 2014 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * -------------------------------------------------------------------------- + * * This file has been modified by Megvii ("Megvii Modifications"). + * * All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved. + * -------------------------------------------------------------------------- + */ +#include "filter_act_color.cuh" +namespace megdnn { +namespace cuda { + +FILTER_COLOR(false, false) +} // namespace cuda +} // namespace megdnn diff --git a/cuda_code/filters_1.cu b/cuda_code/filters_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..cc0eb232be69d5b10c9ef350b7bd9eddc83665ef --- /dev/null +++ b/cuda_code/filters_1.cu @@ -0,0 +1,456 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "opencv2/gpu/devmem2d.hpp" +#include "saturate_cast.hpp" +#include "safe_call.hpp" +#include "cuda_shared.hpp" +#include "vecmath.hpp" + +using namespace cv::gpu; + +#ifndef FLT_MAX +#define FLT_MAX 3.402823466e+30F +#endif + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Linear filters + +#define MAX_KERNEL_SIZE 16 + +namespace filter_krnls +{ + __constant__ float cLinearKernel[MAX_KERNEL_SIZE]; +} + +namespace cv { namespace gpu { namespace filters +{ + void loadLinearKernel(const float kernel[], int ksize) + { + cudaSafeCall( cudaMemcpyToSymbol(filter_krnls::cLinearKernel, kernel, ksize * sizeof(float)) ); + } +}}} + +namespace filter_krnls +{ + template + __global__ void linearRowFilter(const T* src, size_t src_step, D* dst, size_t dst_step, int anchor, int width, int height) + { + __shared__ T smem[BLOCK_DIM_Y * BLOCK_DIM_X * 3]; + + const int blockStartX = blockDim.x * blockIdx.x; + const int blockStartY = blockDim.y * blockIdx.y; + + const int threadX = blockStartX + threadIdx.x; + const int prevThreadX = threadX - blockDim.x; + const int nextThreadX = threadX + blockDim.x; + + const int threadY = blockStartY + threadIdx.y; + + T* sDataRow = smem + threadIdx.y * blockDim.x * 3; + + if (threadY < height) + { + const T* rowSrc = src + threadY * src_step; + + sDataRow[threadIdx.x + blockDim.x] = threadX < width ? rowSrc[threadX] : VecTraits::all(0); + + sDataRow[threadIdx.x] = prevThreadX >= 0 ? rowSrc[prevThreadX] : VecTraits::all(0); + + sDataRow[(blockDim.x << 1) + threadIdx.x] = nextThreadX < width ? 
rowSrc[nextThreadX] : VecTraits::all(0); + + __syncthreads(); + + if (threadX < width) + { + typedef typename TypeVec::cn>::vec_t sum_t; + sum_t sum = VecTraits::all(0); + + sDataRow += threadIdx.x + blockDim.x - anchor; + + #pragma unroll + for(int i = 0; i < KERNEL_SIZE; ++i) + sum = sum + sDataRow[i] * cLinearKernel[i]; + + dst[threadY * dst_step + threadX] = saturate_cast(sum); + } + } + } +} + +namespace cv { namespace gpu { namespace filters +{ + template + void linearRowFilter_caller(const DevMem2D_& src, const DevMem2D_& dst, int anchor) + { + const int BLOCK_DIM_X = 16; + const int BLOCK_DIM_Y = 16; + + dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y); + dim3 blocks(divUp(src.cols, BLOCK_DIM_X), divUp(src.rows, BLOCK_DIM_Y)); + + filter_krnls::linearRowFilter<<>>(src.data, src.step/src.elemSize(), + dst.data, dst.step/dst.elemSize(), anchor, src.cols, src.rows); + + cudaSafeCall( cudaThreadSynchronize() ); + } + + template + void linearRowFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor) + { + typedef void (*caller_t)(const DevMem2D_& src, const DevMem2D_& dst, int anchor); + static const caller_t callers[] = + {linearRowFilter_caller<0 , T, D>, linearRowFilter_caller<1 , T, D>, + linearRowFilter_caller<2 , T, D>, linearRowFilter_caller<3 , T, D>, + linearRowFilter_caller<4 , T, D>, linearRowFilter_caller<5 , T, D>, + linearRowFilter_caller<6 , T, D>, linearRowFilter_caller<7 , T, D>, + linearRowFilter_caller<8 , T, D>, linearRowFilter_caller<9 , T, D>, + linearRowFilter_caller<10, T, D>, linearRowFilter_caller<11, T, D>, + linearRowFilter_caller<12, T, D>, linearRowFilter_caller<13, T, D>, + linearRowFilter_caller<14, T, D>, linearRowFilter_caller<15, T, D>}; + + loadLinearKernel(kernel, ksize); + + callers[ksize]((DevMem2D_)src, (DevMem2D_)dst, anchor); + } + + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearRowFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); +}}} + +namespace filter_krnls +{ + template + __global__ void linearColumnFilter(const T* src, size_t src_step, D* dst, size_t dst_step, int anchor, int width, int height) + { + __shared__ T smem[BLOCK_DIM_Y * BLOCK_DIM_X * 3]; + + const int blockStartX = blockDim.x * blockIdx.x; + const int blockStartY = blockDim.y * blockIdx.y; + + const int threadX = blockStartX + threadIdx.x; + + const int threadY = blockStartY + threadIdx.y; + const int prevThreadY = threadY - blockDim.y; + const int nextThreadY = threadY + blockDim.y; + + const int 
smem_step = blockDim.x; + + T* sDataColumn = smem + threadIdx.x; + + if (threadX < width) + { + const T* colSrc = src + threadX; + + sDataColumn[(threadIdx.y + blockDim.y) * smem_step] = threadY < height ? colSrc[threadY * src_step] : VecTraits::all(0); + + sDataColumn[threadIdx.y * smem_step] = prevThreadY >= 0 ? colSrc[prevThreadY * src_step] : VecTraits::all(0); + + sDataColumn[(threadIdx.y + (blockDim.y << 1)) * smem_step] = nextThreadY < height ? colSrc[nextThreadY * src_step] : VecTraits::all(0); + + __syncthreads(); + + if (threadY < height) + { + typedef typename TypeVec::cn>::vec_t sum_t; + sum_t sum = VecTraits::all(0); + + sDataColumn += (threadIdx.y + blockDim.y - anchor)* smem_step; + + #pragma unroll + for(int i = 0; i < KERNEL_SIZE; ++i) + sum = sum + sDataColumn[i * smem_step] * cLinearKernel[i]; + + dst[threadY * dst_step + threadX] = saturate_cast(sum); + } + } + } +} + +namespace cv { namespace gpu { namespace filters +{ + template + void linearColumnFilter_caller(const DevMem2D_& src, const DevMem2D_& dst, int anchor) + { + const int BLOCK_DIM_X = 16; + const int BLOCK_DIM_Y = 16; + + dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y); + dim3 blocks(divUp(src.cols, BLOCK_DIM_X), divUp(src.rows, BLOCK_DIM_Y)); + + filter_krnls::linearColumnFilter<<>>(src.data, src.step/src.elemSize(), + dst.data, dst.step/dst.elemSize(), anchor, src.cols, src.rows); + + cudaSafeCall( cudaThreadSynchronize() ); + } + + template + void linearColumnFilter_gpu(const DevMem2D& src, const DevMem2D& dst, const float kernel[], int ksize, int anchor) + { + typedef void (*caller_t)(const DevMem2D_& src, const DevMem2D_& dst, int anchor); + static const caller_t callers[] = + {linearColumnFilter_caller<0 , T, D>, linearColumnFilter_caller<1 , T, D>, + linearColumnFilter_caller<2 , T, D>, linearColumnFilter_caller<3 , T, D>, + linearColumnFilter_caller<4 , T, D>, linearColumnFilter_caller<5 , T, D>, + linearColumnFilter_caller<6 , T, D>, linearColumnFilter_caller<7 , T, D>, + linearColumnFilter_caller<8 , T, D>, linearColumnFilter_caller<9 , T, D>, + linearColumnFilter_caller<10, T, D>, linearColumnFilter_caller<11, T, D>, + linearColumnFilter_caller<12, T, D>, linearColumnFilter_caller<13, T, D>, + linearColumnFilter_caller<14, T, D>, linearColumnFilter_caller<15, T, D>}; + + loadLinearKernel(kernel, ksize); + + callers[ksize]((DevMem2D_)src, (DevMem2D_)dst, anchor); + } + + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearColumnFilter_gpu(const DevMem2D&, const DevMem2D&, const float[], int , int); + template void linearColumnFilter_gpu(const DevMem2D&, 
const DevMem2D&, const float[], int , int); +}}} + +///////////////////////////////////////////////////////////////////////////////////////////////// +// Bilateral filters + +namespace bf_krnls +{ + __constant__ float* ctable_color; + __constant__ float* ctable_space; + __constant__ size_t ctable_space_step; + + __constant__ int cndisp; + __constant__ int cradius; + + __constant__ short cedge_disc; + __constant__ short cmax_disc; +} + +namespace cv { namespace gpu { namespace bf +{ + void load_constants(float* table_color, const DevMem2Df& table_space, int ndisp, int radius, short edge_disc, short max_disc) + { + cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::ctable_color, &table_color, sizeof(table_color)) ); + cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::ctable_space, &table_space.data, sizeof(table_space.data)) ); + size_t table_space_step = table_space.step / sizeof(float); + cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::ctable_space_step, &table_space_step, sizeof(size_t)) ); + + cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cndisp, &ndisp, sizeof(int)) ); + cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cradius, &radius, sizeof(int)) ); + + cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cedge_disc, &edge_disc, sizeof(short)) ); + cudaSafeCall( cudaMemcpyToSymbol(bf_krnls::cmax_disc, &max_disc, sizeof(short)) ); + } +}}} + +namespace bf_krnls +{ + template + struct DistRgbMax + { + static __device__ uchar calc(const uchar* a, const uchar* b) + { + uchar x = abs(a[0] - b[0]); + uchar y = abs(a[1] - b[1]); + uchar z = abs(a[2] - b[2]); + return (max(max(x, y), z)); + } + }; + + template <> + struct DistRgbMax<1> + { + static __device__ uchar calc(const uchar* a, const uchar* b) + { + return abs(a[0] - b[0]); + } + }; + + template + __global__ void bilateral_filter(int t, T* disp, size_t disp_step, const uchar* img, size_t img_step, int h, int w) + { + const int y = blockIdx.y * blockDim.y + threadIdx.y; + const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1); + + T dp[5]; + + if (y > 0 && y < h - 1 && x > 0 && x < w - 1) + { + dp[0] = *(disp + (y ) * disp_step + x + 0); + dp[1] = *(disp + (y-1) * disp_step + x + 0); + dp[2] = *(disp + (y ) * disp_step + x - 1); + dp[3] = *(disp + (y+1) * disp_step + x + 0); + dp[4] = *(disp + (y ) * disp_step + x + 1); + + if(abs(dp[1] - dp[0]) >= cedge_disc || abs(dp[2] - dp[0]) >= cedge_disc || abs(dp[3] - dp[0]) >= cedge_disc || abs(dp[4] - dp[0]) >= cedge_disc) + { + const int ymin = max(0, y - cradius); + const int xmin = max(0, x - cradius); + const int ymax = min(h - 1, y + cradius); + const int xmax = min(w - 1, x + cradius); + + float cost[] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f}; + + const uchar* ic = img + y * img_step + channels * x; + + for(int yi = ymin; yi <= ymax; yi++) + { + const T* disp_y = disp + yi * disp_step; + + for(int xi = xmin; xi <= xmax; xi++) + { + const uchar* in = img + yi * img_step + channels * xi; + + uchar dist_rgb = DistRgbMax::calc(in, ic); + + const float weight = ctable_color[dist_rgb] * (ctable_space + abs(y-yi)* ctable_space_step)[abs(x-xi)]; + + const T disp_reg = disp_y[xi]; + + cost[0] += min(cmax_disc, abs(disp_reg - dp[0])) * weight; + cost[1] += min(cmax_disc, abs(disp_reg - dp[1])) * weight; + cost[2] += min(cmax_disc, abs(disp_reg - dp[2])) * weight; + cost[3] += min(cmax_disc, abs(disp_reg - dp[3])) * weight; + cost[4] += min(cmax_disc, abs(disp_reg - dp[4])) * weight; + } + } + + float minimum = FLT_MAX; + int id = 0; + + if (cost[0] < minimum) + { + minimum = cost[0]; + id = 0; + } + if (cost[1] < 
minimum) + { + minimum = cost[1]; + id = 1; + } + if (cost[2] < minimum) + { + minimum = cost[2]; + id = 2; + } + if (cost[3] < minimum) + { + minimum = cost[3]; + id = 3; + } + if (cost[4] < minimum) + { + minimum = cost[4]; + id = 4; + } + + *(disp + y * disp_step + x) = dp[id]; + } + } + } +} + +namespace cv { namespace gpu { namespace bf +{ + template + void bilateral_filter_caller(const DevMem2D_& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream) + { + dim3 threads(32, 8, 1); + dim3 grid(1, 1, 1); + grid.x = divUp(disp.cols, threads.x << 1); + grid.y = divUp(disp.rows, threads.y); + + switch (channels) + { + case 1: + for (int i = 0; i < iters; ++i) + { + bf_krnls::bilateral_filter<1><<>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols); + bf_krnls::bilateral_filter<1><<>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols); + } + break; + case 3: + for (int i = 0; i < iters; ++i) + { + bf_krnls::bilateral_filter<3><<>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols); + bf_krnls::bilateral_filter<3><<>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols); + } + break; + default: + cv::gpu::error("Unsupported channels count", __FILE__, __LINE__); + } + + if (stream != 0) + cudaSafeCall( cudaThreadSynchronize() ); + } + + void bilateral_filter_gpu(const DevMem2D& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream) + { + bilateral_filter_caller(disp, img, channels, iters, stream); + } + + void bilateral_filter_gpu(const DevMem2D_& disp, const DevMem2D& img, int channels, int iters, cudaStream_t stream) + { + bilateral_filter_caller(disp, img, channels, iters, stream); + } +}}} diff --git a/cuda_code/find_9.cu b/cuda_code/find_9.cu new file mode 100644 index 0000000000000000000000000000000000000000..4fe6f4dca60d3fe5436542b86641503d15f0552e --- /dev/null +++ b/cuda_code/find_9.cu @@ -0,0 +1,246 @@ +#include +#include +#include + + +template +struct equal_to_value_pred +{ + T value; + + equal_to_value_pred(T value) : value(value) {} + + __host__ __device__ + bool operator()(T v) const { return v == value; } +}; + + +template +struct not_equal_to_value_pred +{ + T value; + + not_equal_to_value_pred(T value) : value(value) {} + + __host__ __device__ + bool operator()(T v) const { return v != value; } +}; + + +template +struct less_than_value_pred +{ + T value; + + less_than_value_pred(T value) : value(value) {} + + __host__ __device__ + bool operator()(T v) const { return v < value; } +}; + + +template +__global__ void find_kernel(ExecutionPolicy exec, Iterator first, Iterator last, T value, Iterator2 result) +{ + *result = thrust::find(exec, first, last, value); +} + + +template +void TestFindDevice(ExecutionPolicy exec) +{ + size_t n = 100; + + thrust::host_vector h_data = unittest::random_integers(n); + thrust::device_vector d_data = h_data; + + typename thrust::host_vector::iterator h_iter; + + typedef typename thrust::device_vector::iterator iter_type; + thrust::device_vector d_result(1); + + h_iter = thrust::find(h_data.begin(), h_data.end(), int(0)); + + find_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), int(0), d_result.begin()); + { + cudaError_t const err = cudaDeviceSynchronize(); + ASSERT_EQUAL(cudaSuccess, err); + } + + ASSERT_EQUAL(h_iter - h_data.begin(), (iter_type)d_result[0] - d_data.begin()); + + for(size_t i = 1; i < n; i *= 2) + { + int sample = h_data[i]; + + h_iter = thrust::find(h_data.begin(), h_data.end(), 
sample); + + find_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), sample, d_result.begin()); + { + cudaError_t const err = cudaDeviceSynchronize(); + ASSERT_EQUAL(cudaSuccess, err); + } + + ASSERT_EQUAL(h_iter - h_data.begin(), (iter_type)d_result[0] - d_data.begin()); + } +} + + +void TestFindDeviceSeq() +{ + TestFindDevice(thrust::seq); +}; +DECLARE_UNITTEST(TestFindDeviceSeq); + + +void TestFindDeviceDevice() +{ + TestFindDevice(thrust::device); +}; +DECLARE_UNITTEST(TestFindDeviceDevice); + + +template +__global__ void find_if_kernel(ExecutionPolicy exec, Iterator first, Iterator last, Predicate pred, Iterator2 result) +{ + *result = thrust::find_if(exec, first, last, pred); +} + + +template +void TestFindIfDevice(ExecutionPolicy exec) +{ + size_t n = 100; + + thrust::host_vector h_data = unittest::random_integers(n); + thrust::device_vector d_data = h_data; + + typename thrust::host_vector::iterator h_iter; + + typedef typename thrust::device_vector::iterator iter_type; + thrust::device_vector d_result(1); + + h_iter = thrust::find_if(h_data.begin(), h_data.end(), equal_to_value_pred(0)); + + find_if_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), equal_to_value_pred(0), d_result.begin()); + { + cudaError_t const err = cudaDeviceSynchronize(); + ASSERT_EQUAL(cudaSuccess, err); + } + + ASSERT_EQUAL(h_iter - h_data.begin(), (iter_type)d_result[0] - d_data.begin()); + + for (size_t i = 1; i < n; i *= 2) + { + int sample = h_data[i]; + + h_iter = thrust::find_if(h_data.begin(), h_data.end(), equal_to_value_pred(sample)); + + find_if_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), equal_to_value_pred(sample), d_result.begin()); + { + cudaError_t const err = cudaDeviceSynchronize(); + ASSERT_EQUAL(cudaSuccess, err); + } + + ASSERT_EQUAL(h_iter - h_data.begin(), (iter_type)d_result[0] - d_data.begin()); + } +} + + +void TestFindIfDeviceSeq() +{ + TestFindIfDevice(thrust::seq); +}; +DECLARE_UNITTEST(TestFindIfDeviceSeq); + + +void TestFindIfDeviceDevice() +{ + TestFindIfDevice(thrust::device); +}; +DECLARE_UNITTEST(TestFindIfDeviceDevice); + + +template +__global__ void find_if_not_kernel(ExecutionPolicy exec, Iterator first, Iterator last, Predicate pred, Iterator2 result) +{ + *result = thrust::find_if_not(exec, first, last, pred); +} + + +template +void TestFindIfNotDevice(ExecutionPolicy exec) +{ + size_t n = 100; + thrust::host_vector h_data = unittest::random_integers(n); + thrust::device_vector d_data = h_data; + + typename thrust::host_vector::iterator h_iter; + + typedef typename thrust::device_vector::iterator iter_type; + thrust::device_vector d_result(1); + + h_iter = thrust::find_if_not(h_data.begin(), h_data.end(), not_equal_to_value_pred(0)); + + find_if_not_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), not_equal_to_value_pred(0), d_result.begin()); + { + cudaError_t const err = cudaDeviceSynchronize(); + ASSERT_EQUAL(cudaSuccess, err); + } + + ASSERT_EQUAL(h_iter - h_data.begin(), (iter_type)d_result[0] - d_data.begin()); + + for(size_t i = 1; i < n; i *= 2) + { + int sample = h_data[i]; + + h_iter = thrust::find_if_not(h_data.begin(), h_data.end(), not_equal_to_value_pred(sample)); + + find_if_not_kernel<<<1,1>>>(exec, d_data.begin(), d_data.end(), not_equal_to_value_pred(sample), d_result.begin()); + { + cudaError_t const err = cudaDeviceSynchronize(); + ASSERT_EQUAL(cudaSuccess, err); + } + + ASSERT_EQUAL(h_iter - h_data.begin(), (iter_type)d_result[0] - d_data.begin()); + } +} + + +void TestFindIfNotDeviceSeq() +{ + TestFindIfNotDevice(thrust::seq); +}; 
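// ---------------------------------------------------------------------------
// Illustrative sketch (added for exposition; not part of the original Thrust
// test suite). It assumes only the Thrust facilities and the predicate types
// already used in this file. It shows the host-side counterpart of the
// device-side kernels above: thrust::find locates the first element equal to
// a value, while thrust::find_if_not locates the first element for which the
// predicate does NOT hold.
inline void example_find_host_usage()
{
    thrust::device_vector<int> v(4);
    v[0] = 7; v[1] = 0; v[2] = 3; v[3] = 0;

    // first element equal to 0 lives at index 1
    thrust::device_vector<int>::iterator it =
        thrust::find(thrust::device, v.begin(), v.end(), 0);

    // first element for which (x != 0) is false is also the first 0, index 1
    thrust::device_vector<int>::iterator it_not = thrust::find_if_not(
        thrust::device, v.begin(), v.end(), not_equal_to_value_pred<int>(0));

    // both iterator offsets equal 1; the casts just silence unused warnings
    (void)(it - v.begin());
    (void)(it_not - v.begin());
}
// ---------------------------------------------------------------------------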
+DECLARE_UNITTEST(TestFindIfNotDeviceSeq); + + +void TestFindIfNotDeviceDevice() +{ + TestFindIfNotDevice(thrust::device); +}; +DECLARE_UNITTEST(TestFindIfNotDeviceDevice); + + +void TestFindCudaStreams() +{ + thrust::device_vector vec(5); + vec[0] = 1; + vec[1] = 2; + vec[2] = 3; + vec[3] = 3; + vec[4] = 5; + + cudaStream_t s; + cudaStreamCreate(&s); + + ASSERT_EQUAL(thrust::find(thrust::cuda::par.on(s), vec.begin(), vec.end(), 0) - vec.begin(), 5); + ASSERT_EQUAL(thrust::find(thrust::cuda::par.on(s), vec.begin(), vec.end(), 1) - vec.begin(), 0); + ASSERT_EQUAL(thrust::find(thrust::cuda::par.on(s), vec.begin(), vec.end(), 2) - vec.begin(), 1); + ASSERT_EQUAL(thrust::find(thrust::cuda::par.on(s), vec.begin(), vec.end(), 3) - vec.begin(), 2); + ASSERT_EQUAL(thrust::find(thrust::cuda::par.on(s), vec.begin(), vec.end(), 4) - vec.begin(), 5); + ASSERT_EQUAL(thrust::find(thrust::cuda::par.on(s), vec.begin(), vec.end(), 5) - vec.begin(), 4); + + cudaStreamDestroy(s); +} +DECLARE_UNITTEST(TestFindCudaStreams); + diff --git a/cuda_code/flatten_13.cu b/cuda_code/flatten_13.cu new file mode 100644 index 0000000000000000000000000000000000000000..3632e421735a657582e9cc010ca1ed477cb438fd --- /dev/null +++ b/cuda_code/flatten_13.cu @@ -0,0 +1,77 @@ +/******************************************************************************* + * Copyright (c) 2015-2018 Skymind, Inc. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0. + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + ******************************************************************************/ + +// +// @author raver119@gmail.com +// @author Yurii Shyrma, created on 27.11.2018 +// + +#include + +namespace nd4j { + +//////////////////////////////////////////////////////////////////////// +template +__global__ void flattenKernel( + Nd4jPointer *extraPointers, + int dOffset, + char order, + void *vz, Nd4jLong *zShapeInfo, + void *vy, Nd4jLong *yShapeInfo) { + + auto z = reinterpret_cast(vz); + auto y = reinterpret_cast(vy); + + __shared__ Nd4jLong lenY, yOrder, zEWS, yEWS; + + if (threadIdx.x == 0) { + + yEWS = shape::elementWiseStride(yShapeInfo); + zEWS = shape::elementWiseStride(zShapeInfo); + lenY = shape::length(yShapeInfo); + } + __syncthreads(); + + Nd4jLong tid = blockIdx.x * blockDim.x + threadIdx.x; + + if (zEWS >= 1 && yEWS >= 1 && yOrder == order) { + + for (int i = tid; i < lenY; i += gridDim.x * blockDim.x) + z[i * zEWS + dOffset] = y[i * yEWS]; + } + else { + + for(auto i = tid; i < lenY; i += gridDim.x * blockDim.x) + z[i * zEWS + dOffset] = y[shape::getIndexOrderOffset(i, yShapeInfo, lenY, order)]; + } +} + +//////////////////////////////////////////////////////////////////////// +template +__host__ void flattenKernelGeneric(dim3& launchDims, cudaStream_t *stream, + Nd4jPointer *extraPointers, + int dOffset, + char order, + void *vz, Nd4jLong *zShapeInfo, + void *vy, Nd4jLong *yShapeInfo) { + + flattenKernel<<>>(extraPointers, dOffset, order, vz, zShapeInfo, vy, yShapeInfo); +} + +BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT flattenKernelGeneric, (dim3& launchDims, cudaStream_t *stream, Nd4jPointer *extraPointers, int dOffset, char order, void *vz, Nd4jLong *zShapeInfo, void *vy, Nd4jLong *yShapeInfo), LIBND4J_TYPES); + + +} \ No newline at end of file diff --git a/cuda_code/flatten_18.cu b/cuda_code/flatten_18.cu new file mode 100644 index 0000000000000000000000000000000000000000..8e7be078b591c8c5c5a14721bb26e8aad7f2f55a --- /dev/null +++ b/cuda_code/flatten_18.cu @@ -0,0 +1,17 @@ +#include "cudakernel/memory/flatten.h" +#include "ppl/nn/common/tensor_shape.h" +#include "ppl/common/retcode.h" +#include "ppl/common/types.h" +#include + +ppl::common::RetCode PPLCUDAFlattenForwardImp( + cudaStream_t stream, + const ppl::nn::TensorShape* input_shape, + const void* input, + const ppl::nn::TensorShape* output_shape, + void* output) +{ + int64_t num_elems_output = output_shape->GetElementsIncludingPadding(); + cudaMemcpyAsync(output, input, ppl::common::GetSizeOfDataType(input_shape->GetDataType()) * num_elems_output, cudaMemcpyDeviceToDevice, stream); + return ppl::common::RC_SUCCESS; +} \ No newline at end of file diff --git a/cuda_code/flip_kernel_6.cu b/cuda_code/flip_kernel_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..d9f04fb3ab1b7b1eeb05b11f754a41cf30cc779f --- /dev/null +++ b/cuda_code/flip_kernel_6.cu @@ -0,0 +1,137 @@ +/* +Copyright 2020 The OneFlow Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +#include "oneflow/core/framework/framework.h" +#include "oneflow/core/kernel/new_kernel_util.h" +#include "oneflow/core/common/nd_index_offset_helper.h" + +namespace oneflow { + +namespace { + +const int32_t NDIMS = 16; +struct SIZE_V { + int32_t val[NDIMS]; +}; + +struct VIS { + bool val[NDIMS] = {false}; +}; + +template +__global__ void FlipGpuForward(const int32_t element, const int64_t total_dims, + const SIZE_V stride_contiguous_v, const SIZE_V sizes_v, + const VIS vis, SIZE_V strides_v, const T* in_dptr, T* out_dptr) { + CUDA_1D_KERNEL_LOOP(i, element) { + int32_t cur_indices = i; + int32_t rem = 0; + int32_t dst_offset = 0; + for (int32_t d = 0; d < total_dims; d++) { + int32_t temp = cur_indices; + cur_indices = cur_indices / stride_contiguous_v.val[d]; + rem = temp - cur_indices * stride_contiguous_v.val[d]; + dst_offset += vis.val[d] ? (sizes_v.val[d] - 1 - cur_indices) * strides_v.val[d] + : cur_indices * strides_v.val[d]; + cur_indices = rem; + } + out_dptr[i] = in_dptr[dst_offset]; + } +} + +} // namespace + +template +class FlipGpuKernel final : public user_op::OpKernel { + public: + FlipGpuKernel() = default; + ~FlipGpuKernel() = default; + + private: + using user_op::OpKernel::Compute; + void Compute(user_op::KernelComputeContext* ctx) const override { + const user_op::Tensor* x_tensor = ctx->Tensor4ArgNameAndIndex("x", 0); + user_op::Tensor* y_tensor = ctx->Tensor4ArgNameAndIndex("y", 0); + const int32_t elem_cnt = y_tensor->shape().elem_cnt(); + + const int32_t total_dims = y_tensor->shape().NumAxes(); + + std::vector dims = ctx->Attr>("dims"); + VIS vis; + for (auto x : dims) { vis.val[x] = true; } + + SIZE_V sizes_v; + for (int32_t i = 0; i < total_dims; i++) { sizes_v.val[i] = y_tensor->shape().At(i); } + + // TODO(bbuf) delete strides caluculate, after tensor strides supported + SIZE_V strides_v; + strides_v.val[total_dims - 1] = 1; + for (int32_t i = total_dims - 2; i >= 0; i--) { + strides_v.val[i] = strides_v.val[i + 1] * y_tensor->shape().At(i + 1); + } + + SIZE_V stride_contiguous_v; + + for (int32_t i = total_dims - 1; i >= 0; i--) { + if (i == total_dims - 1) { + stride_contiguous_v.val[i] = 1; + } else { + stride_contiguous_v.val[i] = + std::max(x_tensor->shape().At(i + 1), 1) * stride_contiguous_v.val[i + 1]; + } + } + RUN_CUDA_KERNEL((FlipGpuForward), ctx->stream(), elem_cnt, elem_cnt, total_dims, + stride_contiguous_v, sizes_v, vis, strides_v, x_tensor->dptr(), + y_tensor->mut_dptr()); + } + bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } +}; + +template +class FlipGrad1DGpuKernel final : public user_op::OpKernel { + public: + FlipGrad1DGpuKernel() = default; + ~FlipGrad1DGpuKernel() = default; + + private: + using user_op::OpKernel::Compute; + void Compute(user_op::KernelComputeContext* ctx) const override { + user_op::Tensor* dx_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0); + Memset(ctx->stream(), dx_tensor->mut_dptr(), 0, + dx_tensor->shape().elem_cnt() * sizeof(T)); + const user_op::Tensor* dy_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0); + Memcpy( + ctx->stream(), dx_tensor->mut_dptr(), dy_tensor->dptr(), + dy_tensor->shape().elem_cnt() * GetSizeOfDataType(dy_tensor->data_type())); + } + bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } +}; + +#define REGISTER_FLIP_GPU_KERNEL(dtype) \ + REGISTER_USER_KERNEL("flip").SetCreateFn>().SetIsMatchedHob( \ + (user_op::HobDeviceType() == DeviceType::kGPU) \ + && (user_op::HobDataType("y", 0) == GetDataType::value)); \ + REGISTER_USER_KERNEL("flip_grad") \ + 
.SetCreateFn>() \ + .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \ + && (user_op::HobDataType("dx", 0) == GetDataType::value)); + +REGISTER_FLIP_GPU_KERNEL(float) +REGISTER_FLIP_GPU_KERNEL(double) +REGISTER_FLIP_GPU_KERNEL(uint8_t) +REGISTER_FLIP_GPU_KERNEL(int8_t) +REGISTER_FLIP_GPU_KERNEL(int32_t) +REGISTER_FLIP_GPU_KERNEL(int64_t) + +} // namespace oneflow diff --git a/cuda_code/float16_test_6.cu b/cuda_code/float16_test_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..8be774441fe7c00a7d01c198df409ab269ff850a --- /dev/null +++ b/cuda_code/float16_test_6.cu @@ -0,0 +1,419 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/platform/float16.h" + +#define GLOG_NO_ABBREVIATED_SEVERITIES // msvc conflict logging with windows.h +#include +#include +#include +#include + +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/platform/eigen_ext.h" +#include "paddle/fluid/platform/enforce.h" + +#define ARITHMETIC_KERNEL(op_type, sign) \ + __global__ void op_type(const half *in1, const half *in2, half *out) { \ + out[0] = in1[0] sign in2[0]; \ + } + +#define COMPOUND_KERNEL(op_type, sign) \ + __global__ void op_type(half *in1, const half *in2) { in1[0] sign in2[0]; } + +#define COMPARISON_KERNEL(op_type, sign) \ + __global__ void op_type(const half *in1, const half *in2, bool *out) { \ + out[0] = in1[0] sign in2[0]; \ + } + +#ifdef PADDLE_WITH_HIP +#define ARITHMETIC_KERNEL_LAUNCH(op_type) \ + void Test##op_type(float v_in1, float v_in2, float v_out) { \ + LOG(INFO) << "Test " << #op_type << " on GPU!"; \ + half *in1, *in2, *out; \ + half *d_in1, *d_in2, *d_out; \ + int size = sizeof(half); \ + hipMalloc(reinterpret_cast(&d_in1), size); \ + hipMalloc(reinterpret_cast(&d_in2), size); \ + hipMalloc(reinterpret_cast(&d_out), size); \ + in1 = reinterpret_cast(malloc(size)); \ + in2 = reinterpret_cast(malloc(size)); \ + out = reinterpret_cast(malloc(size)); \ + in1[0] = float16(v_in1).to_half(); \ + in2[0] = float16(v_in2).to_half(); \ + hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); \ + hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); \ + hipLaunchKernelGGL(op_type, dim3(1), dim3(1), 0, 0, d_in1, d_in2, d_out); \ + hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost); \ + EXPECT_EQ(static_cast(float16(out[0])), v_out); \ + free(in1); \ + free(in2); \ + free(out); \ + hipFree(d_in1); \ + hipFree(d_in2); \ + hipFree(d_out); \ + } + +#define COMPOUND_KERNEL_LAUNCH(op_type) \ + void Test##op_type(float v_in1, float v_in2, float v_out) { \ + LOG(INFO) << "Test " << #op_type << " on GPU!"; \ + half *in1, *in2; \ + half *d_in1, *d_in2; \ + int size = sizeof(half); \ + hipMalloc(reinterpret_cast(&d_in1), size); \ + hipMalloc(reinterpret_cast(&d_in2), size); \ + in1 = reinterpret_cast(malloc(size)); \ + in2 = reinterpret_cast(malloc(size)); \ + in1[0] = float16(v_in1).to_half(); \ + in2[0] = float16(v_in2).to_half(); \ + 
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); \ + hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); \ + hipLaunchKernelGGL(op_type, dim3(1), dim3(1), 0, 0, d_in1, d_in2); \ + hipMemcpy(in1, d_in1, size, hipMemcpyDeviceToHost); \ + EXPECT_EQ(static_cast(float16(in1[0])), v_out); \ + free(in1); \ + free(in2); \ + hipFree(d_in1); \ + hipFree(d_in2); \ + } + +#define COMPARISON_KERNEL_LAUNCH(op_type) \ + void Test##op_type(float v_in1, float v_in2, bool v_out) { \ + LOG(INFO) << "Test " << #op_type << " on GPU!"; \ + half *in1, *in2; \ + half *d_in1, *d_in2; \ + bool *out, *d_out; \ + int size = sizeof(half); \ + hipMalloc(reinterpret_cast(&d_in1), size); \ + hipMalloc(reinterpret_cast(&d_in2), size); \ + hipMalloc(reinterpret_cast(&d_out), 1); \ + in1 = reinterpret_cast(malloc(size)); \ + in2 = reinterpret_cast(malloc(size)); \ + out = reinterpret_cast(malloc(1)); \ + in1[0] = float16(v_in1).to_half(); \ + in2[0] = float16(v_in2).to_half(); \ + hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); \ + hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); \ + hipLaunchKernelGGL(op_type, dim3(1), dim3(1), 0, 0, d_in1, d_in2, d_out); \ + hipMemcpy(out, d_out, 1, hipMemcpyDeviceToHost); \ + EXPECT_EQ(out[0], v_out); \ + free(in1); \ + free(in2); \ + free(out); \ + hipFree(d_in1); \ + hipFree(d_in2); \ + hipFree(d_out); \ + } +#else +#define ARITHMETIC_KERNEL_LAUNCH(op_type) \ + void Test##op_type(float v_in1, float v_in2, float v_out) { \ + LOG(INFO) << "Test " << #op_type << " on GPU!"; \ + half *in1, *in2, *out; \ + half *d_in1, *d_in2, *d_out; \ + int size = sizeof(half); \ + cudaMalloc(reinterpret_cast(&d_in1), size); \ + cudaMalloc(reinterpret_cast(&d_in2), size); \ + cudaMalloc(reinterpret_cast(&d_out), size); \ + in1 = reinterpret_cast(malloc(size)); \ + in2 = reinterpret_cast(malloc(size)); \ + out = reinterpret_cast(malloc(size)); \ + in1[0] = float16(v_in1).to_half(); \ + in2[0] = float16(v_in2).to_half(); \ + cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ + cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \ + op_type<<<1, 1>>>(d_in1, d_in2, d_out); \ + cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); \ + EXPECT_EQ(static_cast(float16(out[0])), v_out); \ + free(in1); \ + free(in2); \ + free(out); \ + cudaFree(d_in1); \ + cudaFree(d_in2); \ + cudaFree(d_out); \ + } + +#define COMPOUND_KERNEL_LAUNCH(op_type) \ + void Test##op_type(float v_in1, float v_in2, float v_out) { \ + LOG(INFO) << "Test " << #op_type << " on GPU!"; \ + half *in1, *in2; \ + half *d_in1, *d_in2; \ + int size = sizeof(half); \ + cudaMalloc(reinterpret_cast(&d_in1), size); \ + cudaMalloc(reinterpret_cast(&d_in2), size); \ + in1 = reinterpret_cast(malloc(size)); \ + in2 = reinterpret_cast(malloc(size)); \ + in1[0] = float16(v_in1).to_half(); \ + in2[0] = float16(v_in2).to_half(); \ + cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ + cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \ + op_type<<<1, 1>>>(d_in1, d_in2); \ + cudaMemcpy(in1, d_in1, size, cudaMemcpyDeviceToHost); \ + EXPECT_EQ(static_cast(float16(in1[0])), v_out); \ + free(in1); \ + free(in2); \ + cudaFree(d_in1); \ + cudaFree(d_in2); \ + } + +#define COMPARISON_KERNEL_LAUNCH(op_type) \ + void Test##op_type(float v_in1, float v_in2, bool v_out) { \ + LOG(INFO) << "Test " << #op_type << " on GPU!"; \ + half *in1, *in2; \ + half *d_in1, *d_in2; \ + bool *out, *d_out; \ + int size = sizeof(half); \ + cudaMalloc(reinterpret_cast(&d_in1), size); \ + cudaMalloc(reinterpret_cast(&d_in2), size); \ + 
cudaMalloc(reinterpret_cast(&d_out), 1); \ + in1 = reinterpret_cast(malloc(size)); \ + in2 = reinterpret_cast(malloc(size)); \ + out = reinterpret_cast(malloc(1)); \ + in1[0] = float16(v_in1).to_half(); \ + in2[0] = float16(v_in2).to_half(); \ + cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \ + cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \ + op_type<<<1, 1>>>(d_in1, d_in2, d_out); \ + cudaMemcpy(out, d_out, 1, cudaMemcpyDeviceToHost); \ + EXPECT_EQ(out[0], v_out); \ + free(in1); \ + free(in2); \ + free(out); \ + cudaFree(d_in1); \ + cudaFree(d_in2); \ + cudaFree(d_out); \ + } +#endif + +#ifdef PADDLE_CUDA_FP16 +namespace paddle { +namespace platform { + +#if defined(PADDLE_WITH_HIP) +ARITHMETIC_KERNEL(Add, +) +ARITHMETIC_KERNEL(Sub, -) +ARITHMETIC_KERNEL(Mul, *) +ARITHMETIC_KERNEL(Div, /) + +ARITHMETIC_KERNEL_LAUNCH(Add) +ARITHMETIC_KERNEL_LAUNCH(Sub) +ARITHMETIC_KERNEL_LAUNCH(Mul) +ARITHMETIC_KERNEL_LAUNCH(Div) + +// Negative sign kernel +__global__ void Neg(half *in) { in[0] = -in[0]; } + +void TestNeg(float v_in, float v_out) { + LOG(INFO) << "Test Neg on GPU!"; + half *in, *d_in; + int size = sizeof(half); +#ifdef PADDLE_WITH_HIP + hipMalloc(reinterpret_cast(&d_in), size); +#else + cudaMalloc(reinterpret_cast(&d_in), size); +#endif + in = reinterpret_cast(malloc(size)); + in[0] = float16(v_in).to_half(); +#ifdef PADDLE_WITH_HIP + hipMemcpy(d_in, in, size, hipMemcpyHostToDevice); +#else + cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); +#endif + Neg<<<1, 1>>>(d_in); +#ifdef PADDLE_WITH_HIP + hipMemcpy(in, d_in, size, hipMemcpyDeviceToHost); +#else + cudaMemcpy(in, d_in, size, cudaMemcpyDeviceToHost); +#endif + EXPECT_EQ(static_cast(float16(in[0])), v_out); + free(in); +#ifdef PADDLE_WITH_HIP + hipFree(d_in); +#else + cudaFree(d_in); +#endif +} + +COMPOUND_KERNEL(AddAssign, +=) +COMPOUND_KERNEL(SubAssign, -=) +COMPOUND_KERNEL(MulAssign, *=) +COMPOUND_KERNEL(DivAssign, /=) + +COMPOUND_KERNEL_LAUNCH(AddAssign) +COMPOUND_KERNEL_LAUNCH(SubAssign) +COMPOUND_KERNEL_LAUNCH(MulAssign) +COMPOUND_KERNEL_LAUNCH(DivAssign) + +COMPARISON_KERNEL(Equal, ==) +COMPARISON_KERNEL(NotEqual, !=) +COMPARISON_KERNEL(Less, <) +COMPARISON_KERNEL(LessEqual, <=) +COMPARISON_KERNEL(Greater, >) +COMPARISON_KERNEL(GreaterEqual, >=) + +COMPARISON_KERNEL_LAUNCH(Equal) +COMPARISON_KERNEL_LAUNCH(NotEqual) +COMPARISON_KERNEL_LAUNCH(Less) +COMPARISON_KERNEL_LAUNCH(LessEqual) +COMPARISON_KERNEL_LAUNCH(Greater) +COMPARISON_KERNEL_LAUNCH(GreaterEqual) + +TEST(float16, arithmetic_on_gpu) { + TestAdd(1, 2, 3); + TestSub(2, 1, 1); + TestMul(2, 3, 6); + TestDiv(6, 2, 3); + TestNeg(1, -1); +} + +TEST(float16, compound_on_gpu) { + TestAddAssign(1, 2, 3); + TestSubAssign(2, 1, 1); + TestMulAssign(2, 3, 6); + TestDivAssign(6, 2, 3); +} + +TEST(float16, comparision_on_gpu) { + TestEqual(1, 1, true); + TestEqual(1, 2, false); + TestNotEqual(2, 3, true); + TestNotEqual(2, 2, false); + TestLess(3, 4, true); + TestLess(3, 3, false); + TestLessEqual(3, 3, true); + TestLessEqual(3, 2, false); + TestGreater(4, 3, true); + TestGreater(4, 4, false); + TestGreaterEqual(4, 4, true); + TestGreaterEqual(4, 5, false); +} +#endif // CUDA_VERSION + +TEST(float16, conversion_on_gpu) { + // Explicit conversion to and from cuda half + EXPECT_EQ(float16(float16(1.0f).to_half()).x, 0x3c00); + EXPECT_EQ(float16(float16(0.5f).to_half()).x, 0x3800); + EXPECT_EQ(float16(float16(0.33333f).to_half()).x, 0x3555); + EXPECT_EQ(float16(float16(0.0f).to_half()).x, 0x0000); + EXPECT_EQ(float16(float16(-0.0f).to_half()).x, 0x8000); + 
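  // Worked bit-pattern note (added for exposition): IEEE 754 binary16 packs
  // 1 sign bit, 5 exponent bits (bias 15) and 10 fraction bits, so
  //   1.0f = 1.0 * 2^0  -> 0 01111 0000000000 = 0x3c00
  //   0.5f = 1.0 * 2^-1 -> 0 01110 0000000000 = 0x3800
  // The largest finite half is (2 - 2^-10) * 2^15 = 65504, stored as
  // 0 11110 1111111111 = 0x7bff, while 65536.0f exceeds the half range and
  // converts to +inf = 0 11111 0000000000 = 0x7c00; the next two checks
  // verify exactly these two encodings.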
EXPECT_EQ(float16(float16(65504.0f).to_half()).x, 0x7bff); + EXPECT_EQ(float16(float16(65536.0f).to_half()).x, 0x7c00); + + // Assignment operator + float16 v_assign; + v_assign = float16(1.0f).to_half(); + EXPECT_EQ(v_assign.x, 0x3c00); +} + +TEST(float16, lod_tensor_on_gpu) { + framework::LoDTensor src_tensor; + framework::LoDTensor gpu_tensor; + framework::LoDTensor dst_tensor; + + float16 *src_ptr = src_tensor.mutable_data( + framework::make_ddim({2, 2}), CPUPlace()); + + float16 arr[4] = {float16(1.0f), float16(0.5f), float16(0.33333f), + float16(0.0f)}; + memcpy(src_ptr, arr, 4 * sizeof(float16)); + + // CPU LoDTensor to GPU LoDTensor + CUDAPlace gpu_place(0); + CUDADeviceContext gpu_ctx(gpu_place); + framework::TensorCopy(src_tensor, gpu_place, gpu_ctx, &gpu_tensor); + + // GPU LoDTensor to CPU LoDTensor + framework::TensorCopy(gpu_tensor, CPUPlace(), gpu_ctx, &dst_tensor); + + // Sync before comparing LoDTensors + gpu_ctx.Wait(); + const float16 *dst_ptr = dst_tensor.data(); + ASSERT_NE(src_ptr, dst_ptr); + for (size_t i = 0; i < 4; ++i) { + EXPECT_EQ(src_ptr[i].x, dst_ptr[i].x); + } +} + +template +struct Functor { + bool operator()(const T &val) { + return std::type_index(typeid(T)) == + std::type_index(typeid(platform::float16)); + } +}; + +TEST(float16, typeid) { + // the framework heavily used typeid hash + Functor functor; + float16 a = float16(.0f); + Functor functor2; + int b(0); + + // compile time assert + PADDLE_ENFORCE_EQ( + functor(a), true, + platform::errors::Unavailable("The float16 support in GPU failed.")); + PADDLE_ENFORCE_EQ( + functor2(b), false, + platform::errors::Unavailable("The float16 support in GPU failed.")); +} + +// GPU test +TEST(float16, isinf) { + float16 a; + a.x = 0x7c00; + float16 b = float16(INFINITY); + // underflow to 0 + float16 native_a(5e-40f); + EXPECT_EQ(std::isinf(a), true); + EXPECT_EQ(std::isinf(b), true); +#ifndef _WIN32 + // overflow to inf + float16 native_b(5e40f); + EXPECT_EQ(std::isinf(native_b), true); +#endif + EXPECT_EQ(native_a, float16(0)); +} + +TEST(float16, isnan) { + float16 a; + a.x = 0x7fff; + float16 b = float16(NAN); + float16 c = float16(5e40); + // inf * +-0 will get a nan + float16 d = c * float16(0); + EXPECT_EQ(std::isnan(a), true); + EXPECT_EQ(std::isnan(b), true); + EXPECT_EQ(std::isnan(d), true); +} + +TEST(float16, cast) { + float16 a; + a.x = 0x0070; + auto b = a; + { + // change semantic, keep the same value + float16 c = reinterpret_cast(reinterpret_cast(b)); + EXPECT_EQ(b, c); + } + + { + // use uint32 low 16 bit store float16 + uint32_t c = reinterpret_cast(b); + float16 d; + d.x = c; + EXPECT_EQ(b, d); + } +} + +} // namespace platform +} // namespace paddle +#endif // PADDLE_CUDA_FP16 diff --git a/cuda_code/float_encoding.cu b/cuda_code/float_encoding.cu new file mode 100644 index 0000000000000000000000000000000000000000..1d0858efd9a08eebd4fc709b08f30336e96053f1 --- /dev/null +++ b/cuda_code/float_encoding.cu @@ -0,0 +1,95 @@ +/* + * float_encoding.cu + * + * Created on: 30 paź 2015 + * Author: Karol Dzitkowski + */ + +#include +#include +#include +#include "core/macros.h" +#include +#include + +namespace ddj +{ + template + SharedCudaPtrVector FloatEncoding::Encode(SharedCudaPtr data) + { + CUDA_ASSERT_RETURN( cudaGetLastError() ); + LOG4CPLUS_INFO_FMT(_logger, "FLOAT encoding START: data size = %lu", data->size()); + + if(data->size() <= 0) + return SharedCudaPtrVector{ + CudaPtr::make_shared(), + CudaPtr::make_shared() + }; + + int precision = CudaArrayStatistics().Precision(data); + SharedCudaPtr 
resultData; + FloatingPointToIntegerOperator op { precision }; + + // Make sure we won't overflow + bool transform = false; + if(precision < MAX_PRECISION) + { + auto minMax = CudaArrayStatistics().MinMax(data); + int scaleFactor = std::pow(10, precision); + if((std::get<0>(minMax) * scaleFactor) > std::numeric_limits::min() && + (std::get<1>(minMax) * scaleFactor) < std::numeric_limits::max()) + transform = true; + else precision = MAX_PRECISION; + } + + if(transform) + resultData = CudaArrayTransform().Transform(data, op); + else + resultData = CastSharedCudaPtr(data->copy()); + + auto resultMetadata = CudaPtr::make_shared(sizeof(int)); + resultMetadata->fillFromHost((char*)&precision, sizeof(int)); + + CUDA_ASSERT_RETURN( cudaGetLastError() ); + LOG4CPLUS_INFO(_logger, "FLOAT enoding END"); + + return SharedCudaPtrVector { resultMetadata, MoveSharedCudaPtr(resultData) }; + } + + template + SharedCudaPtr FloatEncoding::Decode(SharedCudaPtrVector input) + { + LOG4CPLUS_INFO_FMT( + _logger, + "FLOAT decoding START: input[0] size = %lu, input[1] size = %lu", + input[0]->size(), input[1]->size() + ); + + if(input[1]->size() <= 0) + return CudaPtr::make_shared(); + + auto metadata = input[0]; + auto data = MoveSharedCudaPtr(input[1]); + + int precision; + CUDA_CALL( cudaMemcpy(&precision, metadata->get(), sizeof(int), CPY_DTH) ); + + SharedCudaPtr result; + IntegerToFloatingPointOperator op { precision }; + if(precision < MAX_PRECISION) + result = CudaArrayTransform().Transform(data, op); + else + result = CastSharedCudaPtr(data->copy()); + + CUDA_ASSERT_RETURN( cudaGetLastError() ); + LOG4CPLUS_INFO(_logger, "FLOAT decoding END"); + + return result; + } + +#define FLOAT_ENCODING_SPEC(X) \ + template SharedCudaPtrVector FloatEncoding::Encode(SharedCudaPtr data); \ + template SharedCudaPtr FloatEncoding::Decode(SharedCudaPtrVector data); +FOR_EACH(FLOAT_ENCODING_SPEC, char, short, double, float, int, long, long long, unsigned int) + +} /* namespace ddj */ diff --git a/cuda_code/forward_14.cu b/cuda_code/forward_14.cu new file mode 100644 index 0000000000000000000000000000000000000000..b6c5e8cf87ac39e4f7a7b5da57a62e418577dc7e --- /dev/null +++ b/cuda_code/forward_14.cu @@ -0,0 +1,1763 @@ +/** + * \file dnn/src/cuda/warp_perspective/forward.cu + * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. 
+ */ +#include "src/cuda/warp_perspective/common.h" + +#include +#include "megdnn/dtype.h" +#include "src/common/rounding_converter.cuh" +#include "src/cuda/error_info.cuh" +#include "src/cuda/integer_subbyte_utils.cuh" +#include "src/cuda/utils.cuh" +#include "src/cuda/warp_perspective/common.cuh" + +using namespace megdnn; +using namespace cuda; +using namespace warp_perspective; +using namespace integer_subbyte; + +namespace { + +template +struct CtypeHelper; + +template <> +struct CtypeHelper { + static constexpr int bit_width = 32; +}; +template <> +struct CtypeHelper { + static constexpr int bit_width = 16; +}; +template <> +struct CtypeHelper { + static constexpr int bit_width = 8; +}; +template <> +struct CtypeHelper { + static constexpr int bit_width = 8; +}; +template <> +struct CtypeHelper { + static constexpr int bit_width = 4; +}; +template <> +struct CtypeHelper { + static constexpr int bit_width = 4; +}; + +template +struct DirectSrcVisitor { + const void* ptr; + + __device__ __forceinline__ const ctype* get(int batch, int im_size) { + return (ctype*)((char*)ptr + static_cast(batch) * static_cast(im_size) * CtypeHelper::bit_width / 8); + } + + void move_batch(size_t batch, size_t im_size) { + ptr = (char*)ptr + batch * im_size * CtypeHelper::bit_width / 8; + } +}; + +template +struct IndexedSrcVisitor { + const void* ptr; + const int* idx; + int N_SRC; + + AsyncErrorInfo* error_info; + void* error_tracker; + + __device__ __forceinline__ const ctype* get(int batch, int im_size) { + int orig_batch = batch; + batch = idx[batch]; + if (batch < 0 || batch >= N_SRC) { + set_async_error_info( + error_info, error_tracker, + "mat_idx out of bound: mat_idx[%d]=%d src_batch=%d", orig_batch, + batch, N_SRC); + batch = 0; + } + return (ctype*)((char*)ptr + static_cast(batch) * static_cast(im_size) * CtypeHelper::bit_width / 8); + } + + void move_batch(size_t batch, size_t) { idx += batch; } +}; + +template < + typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter> +__global__ void kern_general( + SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C, + int IH, int IW, int OH, int OW) { + Getter getter; + OutputConverter output_converter; + int ow = blockIdx.x * blockDim.x + threadIdx.x; + int oh = blockIdx.y * blockDim.y + threadIdx.y; + const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); + dst += blockIdx.z * C * OH * OW; + mat += blockIdx.z * 3 * 3; + if (ow < OW && oh < OH) { + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; + int iw0 = getter(floor(iw) + 0, IW); + int iw1 = getter(floor(iw) + 1, IW); + int ih0 = getter(floor(ih) + 0, IH); + int ih1 = getter(floor(ih) + 1, IH); + float palpha = ih - floor(ih); + float pbeta = iw - floor(iw); + float nalpha = 1.0f - palpha; + float nbeta = 1.0f - pbeta; + for (int c = 0; c < C; ++c) { + dst[oh * OW + ow] = output_converter( + sptr[ih0 * IW + iw0] * nalpha * nbeta + + sptr[ih0 * IW + iw1] * nalpha * pbeta + + sptr[ih1 * IW + iw0] * palpha * nbeta + + sptr[ih1 * IW + iw1] * palpha * pbeta); + sptr += IH * IW; + dst += OH * OW; + } + } +} + +template < + typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter> +__global__ void kern_general_nchw4( + SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C, + int IH, int IW, int OH, int OW) { + Getter getter; + OutputConverter output_converter; + int ow = 
blockIdx.x * blockDim.x + threadIdx.x; + int oh = blockIdx.y * blockDim.y + threadIdx.y; + const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); + dst += blockIdx.z * C * OH * OW; + mat += blockIdx.z * 3 * 3; + if (ow < OW && oh < OH) { + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; + int iw0 = getter(floor(iw) + 0, IW); + int iw1 = getter(floor(iw) + 1, IW); + int ih0 = getter(floor(ih) + 0, IH); + int ih1 = getter(floor(ih) + 1, IH); + float palpha = ih - floor(ih); + float pbeta = iw - floor(iw); + float nalpha = 1.0f - palpha; + float nbeta = 1.0f - pbeta; + int o_coor = (oh * OW + ow) << 2; + int i_coor_00 = (ih0 * IW + iw0) << 2; + int i_coor_01 = (ih0 * IW + iw1) << 2; + int i_coor_10 = (ih1 * IW + iw0) << 2; + int i_coor_11 = (ih1 * IW + iw1) << 2; + for (int c0 = 0, nr_chan = C / 4; c0 < nr_chan; ++c0) { +#pragma unroll + for (int c1 = 0; c1 < 4; ++c1) { + dst[o_coor + c1] = output_converter( + sptr[i_coor_00 + c1] * nalpha * nbeta + + sptr[i_coor_01 + c1] * nalpha * pbeta + + sptr[i_coor_10 + c1] * palpha * nbeta + + sptr[i_coor_11 + c1] * palpha * pbeta); + } + sptr += IH * IW * 4; + dst += OH * OW * 4; + } + } +} + +template +MEGDNN_DEVICE __forceinline__ int pack_output_func( + OutputConverter& output_converter, int (&s00)[8], int (&s01)[8], int (&s10)[8], + int (&s11)[8], float w00, float w01, float w10, float w11) { +#define warp_perspective_transform(idx) \ + static_cast( \ + output_converter( \ + s00[idx] * w00 + s01[idx] * w01 + s10[idx] * w10 + s11[idx] * w11) \ + .as_storage()) + + return transform_int8_to_b4x8( + warp_perspective_transform(0), warp_perspective_transform(1), + warp_perspective_transform(2), warp_perspective_transform(3), + warp_perspective_transform(4), warp_perspective_transform(5), + warp_perspective_transform(6), warp_perspective_transform(7)); +#undef warp_perspective_transform +} + +template < + typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter> +__global__ void kern_general_nchw64( + SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C, + int IH, int IW, int OH, int OW) { + constexpr bool signedness = std::is_same::value; + Getter getter; + OutputConverter output_converter; + int ow = blockIdx.x * blockDim.x + threadIdx.x; + int c1 = ow % 2; + ow = ow / 2; + int oh = blockIdx.y * blockDim.y + threadIdx.y; + const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); + dst += blockIdx.z * C * OH * OW / 2; + mat += blockIdx.z * 3 * 3; + const int4* sptr_int4 = reinterpret_cast(sptr); + int4* dst_int4 = reinterpret_cast(dst); + if (ow < OW && oh < OH) { + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; + int iw0 = getter(floor(iw) + 0, IW); + int iw1 = getter(floor(iw) + 1, IW); + int ih0 = getter(floor(ih) + 0, IH); + int ih1 = getter(floor(ih) + 1, IH); + float palpha = ih - floor(ih); + float pbeta = iw - floor(iw); + float nalpha = 1.0f - palpha; + float nbeta = 1.0f - pbeta; + float w00 = nalpha * nbeta; + float w01 = nalpha * pbeta; + float w10 = palpha * nbeta; + float w11 = palpha * pbeta; + int o_coor = (oh * OW + ow) << 1; + int i_coor_00 = (ih0 * IW + iw0) << 1; + int i_coor_01 = (ih0 * IW + iw1) << 1; + int i_coor_10 = (ih1 * IW + iw0) << 1; + int i_coor_11 = (ih1 * IW + iw1) << 1; + int s00[8], 
s01[8], s10[8], s11[8]; + int4 s[4], d; + for (int c0 = 0, nr_chan = C / 64; c0 < nr_chan; ++c0) { + s[0] = __ldg(sptr_int4 + i_coor_00 + c1); + s[1] = __ldg(sptr_int4 + i_coor_01 + c1); + s[2] = __ldg(sptr_int4 + i_coor_10 + c1); + s[3] = __ldg(sptr_int4 + i_coor_11 + c1); + + transform_b4x8_to_int8(s00, s[0].x); + transform_b4x8_to_int8(s01, s[1].x); + transform_b4x8_to_int8(s10, s[2].x); + transform_b4x8_to_int8(s11, s[3].x); + d.x = pack_output_func( + output_converter, s00, s01, s10, s11, w00, w01, w10, w11); + + transform_b4x8_to_int8(s00, s[0].y); + transform_b4x8_to_int8(s01, s[1].y); + transform_b4x8_to_int8(s10, s[2].y); + transform_b4x8_to_int8(s11, s[3].y); + d.y = pack_output_func( + output_converter, s00, s01, s10, s11, w00, w01, w10, w11); + + transform_b4x8_to_int8(s00, s[0].z); + transform_b4x8_to_int8(s01, s[1].z); + transform_b4x8_to_int8(s10, s[2].z); + transform_b4x8_to_int8(s11, s[3].z); + d.z = pack_output_func( + output_converter, s00, s01, s10, s11, w00, w01, w10, w11); + + transform_b4x8_to_int8(s00, s[0].w); + transform_b4x8_to_int8(s01, s[1].w); + transform_b4x8_to_int8(s10, s[2].w); + transform_b4x8_to_int8(s11, s[3].w); + d.w = pack_output_func( + output_converter, s00, s01, s10, s11, w00, w01, w10, w11); + + dst_int4[o_coor + c1] = d; + sptr_int4 += IH * IW * 2; + dst_int4 += OH * OW * 2; + } + } +} + +template +__global__ void kern_const_border( + SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C, + int IH, int IW, int OH, int OW, ctype bval) { + OutputConverter output_converter; + int ow = blockIdx.x * blockDim.x + threadIdx.x; + int oh = blockIdx.y * blockDim.y + threadIdx.y; + const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); + dst += blockIdx.z * C * OH * OW; + mat += blockIdx.z * 3 * 3; + if (ow < OW && oh < OH) { + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; + int iw0 = floor(iw) + 0; + int iw1 = floor(iw) + 1; + int ih0 = floor(ih) + 0; + int ih1 = floor(ih) + 1; + bool okw0 = (iw0 >= 0 && iw0 < IW); + bool okw1 = (iw1 >= 0 && iw1 < IW); + bool okh0 = (ih0 >= 0 && ih0 < IH); + bool okh1 = (ih1 >= 0 && ih1 < IH); + float palpha = ih - floor(ih); + float pbeta = iw - floor(iw); + float nalpha = 1.0f - palpha; + float nbeta = 1.0f - pbeta; + for (int c = 0; c < C; ++c) { + ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval); + ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval); + ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval); + ctype v11 = (okh1 && okw1 ? 
sptr[ih1 * IW + iw1] : bval); + ctype val = output_converter( + v00 * nalpha * nbeta + v01 * nalpha * pbeta + v10 * palpha * nbeta + + v11 * palpha * pbeta); + dst[oh * OW + ow] = val; + sptr += IH * IW; + dst += OH * OW; + } + } +} + +template +__global__ void kern_const_border_nchw4( + SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C, + int IH, int IW, int OH, int OW, ctype bval) { + OutputConverter output_converter; + int ow = blockIdx.x * blockDim.x + threadIdx.x; + int oh = blockIdx.y * blockDim.y + threadIdx.y; + const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); + dst += blockIdx.z * C * OH * OW; + mat += blockIdx.z * 3 * 3; + if (ow < OW && oh < OH) { + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; + int iw0 = floor(iw) + 0; + int iw1 = floor(iw) + 1; + int ih0 = floor(ih) + 0; + int ih1 = floor(ih) + 1; + bool okw0 = (iw0 >= 0 && iw0 < IW); + bool okw1 = (iw1 >= 0 && iw1 < IW); + bool okh0 = (ih0 >= 0 && ih0 < IH); + bool okh1 = (ih1 >= 0 && ih1 < IH); + float palpha = ih - floor(ih); + float pbeta = iw - floor(iw); + float nalpha = 1.0f - palpha; + float nbeta = 1.0f - pbeta; + int i_coor_00 = (ih0 * IW + iw0) << 2; + int i_coor_01 = (ih0 * IW + iw1) << 2; + int i_coor_10 = (ih1 * IW + iw0) << 2; + int i_coor_11 = (ih1 * IW + iw1) << 2; + int o_coor = (oh * OW + ow) << 2; + for (int c0 = 0, nr_chan = C / 4; c0 < nr_chan; ++c0) { +#pragma unroll + for (int c1 = 0; c1 < 4; ++c1) { + ctype v00 = (okh0 && okw0 ? sptr[i_coor_00 + c1] : bval); + ctype v01 = (okh0 && okw1 ? sptr[i_coor_01 + c1] : bval); + ctype v10 = (okh1 && okw0 ? sptr[i_coor_10 + c1] : bval); + ctype v11 = (okh1 && okw1 ? 
sptr[i_coor_11 + c1] : bval); + ctype val = output_converter( + v00 * nalpha * nbeta + v01 * nalpha * pbeta + + v10 * palpha * nbeta + v11 * palpha * pbeta); + dst[o_coor + c1] = val; + } + sptr += IH * IW * 4; + dst += OH * OW * 4; + } + } +} + +template +__global__ void kern_const_border_nchw64( + SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C, + int IH, int IW, int OH, int OW, ctype bval) { + constexpr bool signedness = std::is_same::value; + OutputConverter output_converter; + int ow = blockIdx.x * blockDim.x + threadIdx.x; + int c1 = ow % 2; + ow = ow / 2; + int oh = blockIdx.y * blockDim.y + threadIdx.y; + const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); + dst += blockIdx.z * C * OH * OW / 2; + mat += blockIdx.z * 3 * 3; + const int4* sptr_int4 = reinterpret_cast(sptr); + int4* dst_int4 = reinterpret_cast(dst); + if (ow < OW && oh < OH) { + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; + int iw0 = floor(iw) + 0; + int iw1 = floor(iw) + 1; + int ih0 = floor(ih) + 0; + int ih1 = floor(ih) + 1; + bool okw0 = (iw0 >= 0 && iw0 < IW); + bool okw1 = (iw1 >= 0 && iw1 < IW); + bool okh0 = (ih0 >= 0 && ih0 < IH); + bool okh1 = (ih1 >= 0 && ih1 < IH); + float palpha = ih - floor(ih); + float pbeta = iw - floor(iw); + float nalpha = 1.0f - palpha; + float nbeta = 1.0f - pbeta; + float w00 = nalpha * nbeta; + float w01 = nalpha * pbeta; + float w10 = palpha * nbeta; + float w11 = palpha * pbeta; + int o_coor = (oh * OW + ow) << 1; + int i_coor_00 = (ih0 * IW + iw0) << 1; + int i_coor_01 = (ih0 * IW + iw1) << 1; + int i_coor_10 = (ih1 * IW + iw0) << 1; + int i_coor_11 = (ih1 * IW + iw1) << 1; + bool flag00 = okh0 && okw0, flag01 = okh0 && okw1, flag10 = okh1 && okw0, + flag11 = okh1 && okw1; + int8_t bval_4 = bval.as_storage() & 0xF; + int bval_8 = transform_int8_to_b4x8( + bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4); + int4 bval_int4; + bval_int4.x = bval_8; + bval_int4.y = bval_8; + bval_int4.z = bval_8; + bval_int4.w = bval_8; + int s00[8], s01[8], s10[8], s11[8]; + int4 s[4], d; + for (int c0 = 0, nr_chan = C / 64; c0 < nr_chan; ++c0) { + if (flag00) { + s[0] = __ldg(sptr_int4 + i_coor_00 + c1); + } else { + s[0] = bval_int4; + } + if (flag01) { + s[1] = __ldg(sptr_int4 + i_coor_01 + c1); + } else { + s[1] = bval_int4; + } + if (flag10) { + s[2] = __ldg(sptr_int4 + i_coor_10 + c1); + } else { + s[2] = bval_int4; + } + if (flag11) { + s[3] = __ldg(sptr_int4 + i_coor_11 + c1); + } else { + s[3] = bval_int4; + } + + transform_b4x8_to_int8(s00, s[0].x); + transform_b4x8_to_int8(s01, s[1].x); + transform_b4x8_to_int8(s10, s[2].x); + transform_b4x8_to_int8(s11, s[3].x); + d.x = pack_output_func( + output_converter, s00, s01, s10, s11, w00, w01, w10, w11); + + transform_b4x8_to_int8(s00, s[0].y); + transform_b4x8_to_int8(s01, s[1].y); + transform_b4x8_to_int8(s10, s[2].y); + transform_b4x8_to_int8(s11, s[3].y); + d.y = pack_output_func( + output_converter, s00, s01, s10, s11, w00, w01, w10, w11); + + transform_b4x8_to_int8(s00, s[0].z); + transform_b4x8_to_int8(s01, s[1].z); + transform_b4x8_to_int8(s10, s[2].z); + transform_b4x8_to_int8(s11, s[3].z); + d.z = pack_output_func( + output_converter, s00, s01, s10, s11, w00, w01, w10, w11); + + transform_b4x8_to_int8(s00, s[0].w); + transform_b4x8_to_int8(s01, s[1].w); + transform_b4x8_to_int8(s10, s[2].w); + transform_b4x8_to_int8(s11, s[3].w); + d.w = 
pack_output_func( + output_converter, s00, s01, s10, s11, w00, w01, w10, w11); + + dst_int4[o_coor + c1] = d; + sptr_int4 += IH * IW * 2; + dst_int4 += OH * OW * 2; + } + } +} + +template +struct KernCoreNHWC { + MEGDNN_DEVICE __forceinline__ static void func( + char* dst_ptr, const char* src_ptr0, const char* src_ptr1, + const char* src_ptr2, const char* src_ptr3, const int offset, float w00, + float w01, float w10, float w11, OutputConverter& output_converter, + const bool src0_ok, const bool src1_ok, const bool src2_ok, + const bool src3_ok, const ctype bval) { + static_assert(pack_c == 1, "static_assert pack_c == 1"); + ctype v00 = src0_ok ? *(ctype*)(src_ptr0 + offset) : bval; + ctype v01 = src1_ok ? *(ctype*)(src_ptr1 + offset) : bval; + ctype v10 = src2_ok ? *(ctype*)(src_ptr2 + offset) : bval; + ctype v11 = src3_ok ? *(ctype*)(src_ptr3 + offset) : bval; + ctype res = output_converter(v00 * w00 + v01 * w01 + v10 * w10 + v11 * w11); + *(ctype*)(dst_ptr + offset) = res; + } +}; + +template +struct KernCoreNHWC { + MEGDNN_DEVICE __forceinline__ static void func( + char* dst_ptr, const char* src_ptr0, const char* src_ptr1, + const char* src_ptr2, const char* src_ptr3, const int offset, float w00, + float w01, float w10, float w11, OutputConverter& output_converter, + const bool src0_ok, const bool src1_ok, const bool src2_ok, + const bool src3_ok, const ctype bval) { + static_assert( + std::is_same::value || + std::is_same::value, + "assert qu4 or q4"); + constexpr bool signedness = std::is_same::value; + int8_t bval_4 = bval.as_storage() & 0xF; + const int bval_int = transform_int8_to_b4x8( + bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4); + int src_ori[4]; + src_ori[0] = src0_ok ? *(int*)(src_ptr0 + offset) : bval_int; + src_ori[1] = src1_ok ? *(int*)(src_ptr1 + offset) : bval_int; + src_ori[2] = src2_ok ? *(int*)(src_ptr2 + offset) : bval_int; + src_ori[3] = src3_ok ? *(int*)(src_ptr3 + offset) : bval_int; + int src[4][8]; + transform_b4x8_to_int8(src[0], src_ori[0]); + transform_b4x8_to_int8(src[1], src_ori[1]); + transform_b4x8_to_int8(src[2], src_ori[2]); + transform_b4x8_to_int8(src[3], src_ori[3]); + int res = pack_output_func( + output_converter, src[0], src[1], src[2], src[3], w00, w01, w10, w11); + *(int*)(dst_ptr + offset) = res; + } +}; + +template +struct KernCoreNHWC { + MEGDNN_DEVICE __forceinline__ static void func( + char* dst_ptr, const char* src_ptr0, const char* src_ptr1, + const char* src_ptr2, const char* src_ptr3, const int offset, float w00, + float w01, float w10, float w11, OutputConverter& output_converter, + const bool src0_ok, const bool src1_ok, const bool src2_ok, + const bool src3_ok, const ctype bval) { + static_assert( + std::is_same::value || + std::is_same::value, + "assert qu4 or q4"); + constexpr bool signedness = std::is_same::value; + int8_t bval_4 = bval.as_storage() & 0xF; + const int bval_int_temp = transform_int8_to_b4x8( + bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4); + const int2 bval_int{bval_int_temp, bval_int_temp}; + + int2 src_ori[4]; + src_ori[0] = src0_ok ? *(int2*)(src_ptr0 + offset) : bval_int; + src_ori[1] = src1_ok ? *(int2*)(src_ptr1 + offset) : bval_int; + src_ori[2] = src2_ok ? *(int2*)(src_ptr2 + offset) : bval_int; + src_ori[3] = src3_ok ? 
*(int2*)(src_ptr3 + offset) : bval_int; + int src[8][8]; + transform_b4x8_to_int8(src[0], src_ori[0].x); + transform_b4x8_to_int8(src[1], src_ori[1].x); + transform_b4x8_to_int8(src[2], src_ori[2].x); + transform_b4x8_to_int8(src[3], src_ori[3].x); + + transform_b4x8_to_int8(src[4], src_ori[0].y); + transform_b4x8_to_int8(src[5], src_ori[1].y); + transform_b4x8_to_int8(src[6], src_ori[2].y); + transform_b4x8_to_int8(src[7], src_ori[3].y); + + int2 res; + res.x = pack_output_func( + output_converter, src[0], src[1], src[2], src[3], w00, w01, w10, w11); + res.y = pack_output_func( + output_converter, src[4], src[5], src[6], src[7], w00, w01, w10, w11); + *(int2*)(dst_ptr + offset) = res; + } +}; + +template < + typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter, + int pack_c> +__global__ void kern_general_nhwc( + SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C, + int IH, int IW, int OH, int OW) { + Getter getter; + OutputConverter output_converter; + constexpr int bit_width = CtypeHelper::bit_width; + int ow = blockIdx.x * blockDim.x + threadIdx.x; + int oh = blockIdx.y * blockDim.y + threadIdx.y; + const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); + dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8); + mat += blockIdx.z * 3 * 3; + if (ow < OW && oh < OH) { + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; + int iw0 = getter(floor(iw) + 0, IW); + int iw1 = getter(floor(iw) + 1, IW); + int ih0 = getter(floor(ih) + 0, IH); + int ih1 = getter(floor(ih) + 1, IH); + float palpha = ih - floor(ih); + float pbeta = iw - floor(iw); + float nalpha = 1.0f - palpha; + float nbeta = 1.0f - pbeta; + float w00 = nalpha * nbeta; + float w01 = nalpha * pbeta; + float w10 = palpha * nbeta; + float w11 = palpha * pbeta; + const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8; + const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8; + const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8; + const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8; + char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8; + + for (int c = 0; c < C; c += pack_c) { + KernCoreNHWC::func( + dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3, c * bit_width / 8, + w00, w01, w10, w11, output_converter, true, true, true, true, + (ctype)0); + } + } +} + +template < + typename ctype, typename Getter, typename SrcVisitor, typename OutputConverter, + int pack_c> +__global__ void kern_general_nhwc_const( + SrcVisitor src, const float* __restrict mat, ctype* __restrict dst, int C, + int IH, int IW, int OH, int OW, ctype bval) { + Getter getter; + OutputConverter output_converter; + constexpr int bit_width = CtypeHelper::bit_width; + int ow = blockIdx.x * blockDim.x + threadIdx.x; + int oh = blockIdx.y * blockDim.y + threadIdx.y; + const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); + dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8); + mat += blockIdx.z * 3 * 3; + if (ow < OW && oh < OH) { + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; + int iw0 = getter(floor(iw) + 0, IW); + int iw1 = getter(floor(iw) + 1, IW); + int ih0 = getter(floor(ih) + 0, IH); + int ih1 = 
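
// kern_general_nhwc leaves border handling to the Getter template parameter: a small
// functor that folds an out-of-range coordinate back into [0, size). The dispatchers
// further down instantiate the kernel once per border mode (replicate, reflect, wrap,
// ...). A replicate/clamp-to-edge getter would look roughly like the stand-in below;
// the real ReplicateGetter, ReflectGetter, etc. live in megdnn's shared CUDA helpers:
struct ReplicateGetterSketch {
    __device__ __forceinline__ int operator()(int i, int size) const {
        return min(max(i, 0), size - 1);
    }
};
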
getter(floor(ih) + 1, IH); + float palpha = ih - floor(ih); + float pbeta = iw - floor(iw); + float nalpha = 1.0f - palpha; + float nbeta = 1.0f - pbeta; + float w00 = nalpha * nbeta; + float w01 = nalpha * pbeta; + float w10 = palpha * nbeta; + float w11 = palpha * pbeta; + const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8; + const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8; + const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8; + const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8; + char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8; + bool okw0 = (iw0 >= 0 && iw0 < IW); + bool okw1 = (iw1 >= 0 && iw1 < IW); + bool okh0 = (ih0 >= 0 && ih0 < IH); + bool okh1 = (ih1 >= 0 && ih1 < IH); + bool src0_ok = okh0 && okw0; + bool src1_ok = okh0 && okw1; + bool src2_ok = okh1 && okw0; + bool src3_ok = okh1 && okw1; + for (int c = 0; c < C; c += pack_c) { + KernCoreNHWC::func( + dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3, c * bit_width / 8, + w00, w01, w10, w11, output_converter, src0_ok, src1_ok, src2_ok, + src3_ok, bval); + } + } +} + +template +void dispatch_with_visitor( + bool is_nhwc, SrcVisitor src, const float* mat, ctype* dst, int N, int C, + int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode, + cudaStream_t stream) { + constexpr int pack_c = 1; + const int BY = 16, BX = 32; +#define DISPATCH(Getter) \ + do { \ + if (is_nhwc) { \ + kern_general_nhwc< \ + ctype, Getter, SrcVisitor, rounding::RoundingConverter, \ + pack_c><<>>( \ + src, mat, dst, C, IH, IW, OH, OW); \ + } else { \ + kern_general< \ + ctype, Getter, SrcVisitor, rounding::RoundingConverter> \ + <<>>( \ + src, mat, dst, C, IH, IW, OH, OW); \ + } \ + } while (0) + + const int max_batch_size = 65535; + while (N) { + size_t curr_batch_size = N < max_batch_size ? N : max_batch_size; + dim3 threads(BX, BY); + dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size); + + switch (bmode) { + case BORDER_REPLICATE: + DISPATCH(ReplicateGetter); + break; + case BORDER_REFLECT: + DISPATCH(ReflectGetter); + break; + case BORDER_REFLECT_101: + DISPATCH(Reflect101Getter); + break; + case BORDER_WRAP: + DISPATCH(WrapGetter); + break; +#undef DISPATCH + case BORDER_CONSTANT: + if (is_nhwc) { + kern_general_nhwc_const< + ctype, ConstGetter, SrcVisitor, + rounding::RoundingConverter, pack_c> + <<>>( + src, mat, dst, C, IH, IW, OH, OW, bval); + } else { + kern_const_border< + ctype, SrcVisitor, rounding::RoundingConverter> + <<>>( + src, mat, dst, C, IH, IW, OH, OW, bval); + } + break; + default: + break; + } + + N -= curr_batch_size; + src.move_batch(curr_batch_size, C * IH * IW); + mat += curr_batch_size * 3 * 3; + dst += curr_batch_size * C * OH * OW; + } +} + +template +void dispatch_with_visitor_nhwc_bit4( + SrcVisitor src, const float* mat, ctype* dst, int N, int C, int IH, int IW, + int OH, int OW, ctype bval, BorderMode bmode, cudaStream_t stream) { + const int BY = 16, BX = 32; +#define DISPATCH(Getter) \ + do { \ + kern_general_nhwc< \ + ctype, Getter, SrcVisitor, rounding::RoundingConverter, pack_c> \ + <<>>(src, mat, dst, C, IH, IW, OH, OW); \ + } while (0) + + const int max_batch_size = 65535; + while (N) { + size_t curr_batch_size = N < max_batch_size ? 
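
// dispatch_with_visitor maps one output image to each blockIdx.z, and CUDA caps
// gridDim.z at 65535, so the batch is processed in chunks: launch up to 65535 images,
// then advance the source visitor, the 3x3 matrices and the destination pointer before
// the next launch. The driver loop, stripped of the border-mode switch (illustrative,
// with launch_chunk standing in for the actual kernel launch):
template <typename LaunchChunk>
void run_in_z_chunks_sketch(int N, LaunchChunk launch_chunk) {
    const int max_batch_size = 65535;  // hardware limit on gridDim.z
    for (int offset = 0; offset < N; offset += max_batch_size) {
        int curr = (N - offset < max_batch_size) ? (N - offset) : max_batch_size;
        launch_chunk(offset, curr);  // caller advances src/mat/dst by `offset` images
    }
}
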
N : max_batch_size; + dim3 threads(BX, BY); + dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size); + + switch (bmode) { + case BORDER_REPLICATE: + DISPATCH(ReplicateGetter); + break; + case BORDER_REFLECT: + DISPATCH(ReflectGetter); + break; + case BORDER_REFLECT_101: + DISPATCH(Reflect101Getter); + break; + case BORDER_WRAP: + DISPATCH(WrapGetter); + break; + case BORDER_CONSTANT: { + kern_general_nhwc_const< + ctype, ConstGetter, SrcVisitor, + rounding::RoundingConverter, pack_c> + <<>>( + src, mat, dst, C, IH, IW, OH, OW, bval); + } break; + default: + break; + } +#undef DISPATCH + + N -= curr_batch_size; + src.move_batch(curr_batch_size, C * IH * IW / 2); + mat += curr_batch_size * 3 * 3; + dst += curr_batch_size * C * OH * OW / 2; + } +} + +template +void dispatch_with_visitor_nchw4( + SrcVisitor src, const float* mat, ctype* dst, int N, int C, int IH, int IW, + int OH, int OW, ctype bval, BorderMode bmode, cudaStream_t stream) { + const int BY = 16, BX = 32; +#define DISPATCH(Getter) \ + do { \ + kern_general_nchw4< \ + ctype, Getter, SrcVisitor, rounding::RoundingConverter> \ + <<>>(src, mat, dst, C, IH, IW, OH, OW); \ + } while (0) + + const int max_batch_size = 65535; + while (N) { + size_t curr_batch_size = N < max_batch_size ? N : max_batch_size; + dim3 threads(BX, BY); + dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size); + + switch (bmode) { + case BORDER_REPLICATE: + DISPATCH(ReplicateGetter); + break; + case BORDER_REFLECT: + DISPATCH(ReflectGetter); + break; + case BORDER_REFLECT_101: + DISPATCH(Reflect101Getter); + break; + case BORDER_WRAP: + DISPATCH(WrapGetter); + break; +#undef DISPATCH + case BORDER_CONSTANT: + kern_const_border_nchw4< + ctype, SrcVisitor, rounding::RoundingConverter> + <<>>( + src, mat, dst, C, IH, IW, OH, OW, bval); + break; + default: + break; + } + + N -= curr_batch_size; + src.move_batch(curr_batch_size, C * IH * IW); + mat += curr_batch_size * 3 * 3; + dst += curr_batch_size * C * OH * OW; + } +} + +template +void dispatch_with_visitor_nchw64( + SrcVisitor src, const float* mat, ctype* dst, int N, int C, int IH, int IW, + int OH, int OW, ctype bval, BorderMode bmode, cudaStream_t stream) { + const int BY = 16, BX = 32; +#define DISPATCH(Getter) \ + do { \ + kern_general_nchw64< \ + ctype, Getter, SrcVisitor, rounding::RoundingConverter> \ + <<>>(src, mat, dst, C, IH, IW, OH, OW); \ + } while (0) + + const int max_batch_size = 65535; + while (N) { + size_t curr_batch_size = N < max_batch_size ? 
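
// The nchw4 kernels assume the NCHW4 layout, i.e. a logical (N, C, H, W) tensor stored
// as (N, C/4, H, W, 4) with groups of four channels innermost; that is why the spatial
// offsets are shifted left by 2 and the inner channel loop walks c1 = 0..3. An
// illustrative index helper (not part of megdnn) for an element (n, c, h, w):
__host__ __device__ inline int nchw4_offset_sketch(
        int n, int c, int h, int w, int C, int H, int W) {
    return ((((n * (C / 4) + c / 4) * H + h) * W + w) << 2) + (c & 3);
}
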
N : max_batch_size; + dim3 threads(BX, BY); + dim3 blocks((OW * 2 + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size); + + switch (bmode) { + case BORDER_REPLICATE: + DISPATCH(ReplicateGetter); + break; + case BORDER_REFLECT: + DISPATCH(ReflectGetter); + break; + case BORDER_REFLECT_101: + DISPATCH(Reflect101Getter); + break; + case BORDER_WRAP: + DISPATCH(WrapGetter); + break; +#undef DISPATCH + case BORDER_CONSTANT: + kern_const_border_nchw64< + ctype, SrcVisitor, rounding::RoundingConverter> + <<>>( + src, mat, dst, C, IH, IW, OH, OW, bval); + break; + default: + break; + } + + N -= curr_batch_size; + src.move_batch(curr_batch_size, C * IH * IW / 2); + mat += curr_batch_size * 3 * 3; + dst += curr_batch_size * C * OH * OW / 2; + } +} + +template +struct CudaTypeCvt; + +template <> +struct CudaTypeCvt { + CudaDTypeParamImpl m_src_param; + CudaTypeCvt(CudaDTypeParamImpl src_param) { m_src_param = src_param; }; + inline __device__ int8_t operator()(uint8_t val) { + return val - m_src_param.zero_point; + } +}; + +template <> +struct CudaTypeCvt { + CudaDTypeParamImpl m_src_param; + CudaTypeCvt(CudaDTypeParamImpl src_param) { m_src_param = src_param; }; + __device__ __forceinline__ float operator()(uint8_t val) { + return m_src_param.dequantize(dt_quint8(val)); + } +}; + +#define INST(dst_ctype, vec_dst_type) \ + template < \ + typename src_dtype, typename src_ctype, typename Getter, \ + typename SrcVisitor> \ + __global__ void kern_general_quint8_nhw_nchw4( \ + SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \ + int IH, int IW, int OH, int OW, \ + CudaTypeCvt type_cvt) { \ + Getter getter; \ + rounding::RoundingConverter warp_out_converter; \ + int ow = blockIdx.x * blockDim.x + threadIdx.x; \ + int oh = blockIdx.y * blockDim.y + threadIdx.y; \ + const src_ctype* __restrict sptr = src.get(blockIdx.z, IH * IW); \ + dst += blockIdx.z * OH * OW * 4; \ + mat += blockIdx.z * 3 * 3; \ + if (ow < OW && oh < OH) { \ + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \ + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \ + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \ + int iw0 = getter(floor(iw) + 0, IW); \ + int iw1 = getter(floor(iw) + 1, IW); \ + int ih0 = getter(floor(ih) + 0, IH); \ + int ih1 = getter(floor(ih) + 1, IH); \ + float palpha = ih - floor(ih); \ + float pbeta = iw - floor(iw); \ + float nalpha = 1.0f - palpha; \ + float nbeta = 1.0f - pbeta; \ + vec_dst_type result; \ + src_ctype val_x = warp_out_converter( \ + sptr[ih0 * IW + iw0] * nalpha * nbeta + \ + sptr[ih0 * IW + iw1] * nalpha * pbeta + \ + sptr[ih1 * IW + iw0] * palpha * nbeta + \ + sptr[ih1 * IW + iw1] * palpha * pbeta); \ + result.x = type_cvt(val_x); \ + result.y = result.z = result.w = 0; \ + *((vec_dst_type*)dst + oh * OW + ow) = result; \ + } \ + } + +INST(int8_t, char4) +#undef INST + +#define INST(dst_ctype, vec_dst_type) \ + template \ + __global__ void kern_const_border_quint8_nhw_nchw4( \ + SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \ + int IH, int IW, int OH, int OW, src_ctype bval, \ + CudaTypeCvt type_cvt) { \ + rounding::RoundingConverter warp_out_converter; \ + int ow = blockIdx.x * blockDim.x + threadIdx.x; \ + int oh = blockIdx.y * blockDim.y + threadIdx.y; \ + const src_ctype* __restrict sptr = src.get(blockIdx.z, IH * IW); \ + dst += blockIdx.z * OH * OW * 4; \ + mat += blockIdx.z * 3 * 3; \ + if (ow < OW && oh < OH) { \ + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \ + float iw = (mat[0] * ow + 
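
// The CudaTypeCvt specializations below convert an asymmetric quint8 sample either to
// a zero-centred int8 (subtract the zero point) or to float (dequantize with scale and
// zero point). The same two conversions as plain functions, with scale and zero_point
// taken as explicit parameters purely for illustration:
__device__ __forceinline__ int8_t quint8_to_int8_sketch(uint8_t v, uint8_t zero_point) {
    return static_cast<int8_t>(static_cast<int>(v) - static_cast<int>(zero_point));
}
__device__ __forceinline__ float quint8_dequantize_sketch(
        uint8_t v, float scale, uint8_t zero_point) {
    return scale * (static_cast<int>(v) - static_cast<int>(zero_point));
}
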
mat[1] * oh + mat[2]) / denominator; \ + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \ + int iw0 = floor(iw) + 0; \ + int iw1 = floor(iw) + 1; \ + int ih0 = floor(ih) + 0; \ + int ih1 = floor(ih) + 1; \ + bool okw0 = (iw0 >= 0 && iw0 < IW); \ + bool okw1 = (iw1 >= 0 && iw1 < IW); \ + bool okh0 = (ih0 >= 0 && ih0 < IH); \ + bool okh1 = (ih1 >= 0 && ih1 < IH); \ + float palpha = ih - floor(ih); \ + float pbeta = iw - floor(iw); \ + float nalpha = 1.0f - palpha; \ + float nbeta = 1.0f - pbeta; \ + vec_dst_type result; \ + src_ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval); \ + src_ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval); \ + src_ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval); \ + src_ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval); \ + src_ctype val_x = warp_out_converter( \ + v00 * nalpha * nbeta + v01 * nalpha * pbeta + \ + v10 * palpha * nbeta + v11 * palpha * pbeta); \ + result.x = type_cvt(val_x); \ + result.y = result.z = result.w = 0; \ + *((vec_dst_type*)dst + oh * OW + ow) = result; \ + } \ + } + +INST(int8_t, char4) +#undef INST + +#define INST(dst_ctype, vec_dst_type) \ + template < \ + typename src_dtype, typename src_ctype, typename Getter, \ + typename SrcVisitor> \ + __global__ void kern_general_quint8_n3hw_nchw4( \ + SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \ + int IH, int IW, int OH, int OW, \ + CudaTypeCvt type_cvt) { \ + Getter getter; \ + rounding::RoundingConverter warp_out_converter; \ + int ow = blockIdx.x * blockDim.x + threadIdx.x; \ + int oh = blockIdx.y * blockDim.y + threadIdx.y; \ + const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \ + dst += blockIdx.z * OH * OW * 4; \ + mat += blockIdx.z * 3 * 3; \ + if (ow < OW && oh < OH) { \ + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \ + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \ + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \ + int iw0 = getter(floor(iw) + 0, IW); \ + int iw1 = getter(floor(iw) + 1, IW); \ + int ih0 = getter(floor(ih) + 0, IH); \ + int ih1 = getter(floor(ih) + 1, IH); \ + float palpha = ih - floor(ih); \ + float pbeta = iw - floor(iw); \ + float nalpha = 1.0f - palpha; \ + float nbeta = 1.0f - pbeta; \ + vec_dst_type result; \ + src_ctype val_x = warp_out_converter( \ + sptr[ih0 * IW + iw0] * nalpha * nbeta + \ + sptr[ih0 * IW + iw1] * nalpha * pbeta + \ + sptr[ih1 * IW + iw0] * palpha * nbeta + \ + sptr[ih1 * IW + iw1] * palpha * pbeta); \ + src_ctype val_y = warp_out_converter( \ + sptr[IW * IH + ih0 * IW + iw0] * nalpha * nbeta + \ + sptr[IW * IH + ih0 * IW + iw1] * nalpha * pbeta + \ + sptr[IW * IH + ih1 * IW + iw0] * palpha * nbeta + \ + sptr[IW * IH + ih1 * IW + iw1] * palpha * pbeta); \ + src_ctype val_z = warp_out_converter( \ + sptr[2 * IW * IH + ih0 * IW + iw0] * nalpha * nbeta + \ + sptr[2 * IW * IH + ih0 * IW + iw1] * nalpha * pbeta + \ + sptr[2 * IW * IH + ih1 * IW + iw0] * palpha * nbeta + \ + sptr[2 * IW * IH + ih1 * IW + iw1] * palpha * pbeta); \ + result.x = type_cvt(val_x); \ + result.y = type_cvt(val_y); \ + result.z = type_cvt(val_z); \ + result.w = 0; \ + *((vec_dst_type*)dst + oh * OW + ow) = result; \ + } \ + } + +INST(int8_t, char4) +#undef INST + +#define INST(dst_ctype, vec_dst_type) \ + template \ + __global__ void kern_const_border_quint8_n3hw_nchw4( \ + SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \ + int IH, int IW, int OH, int OW, src_ctype bval, \ + CudaTypeCvt type_cvt) { \ + 
rounding::RoundingConverter warp_out_converter; \ + int ow = blockIdx.x * blockDim.x + threadIdx.x; \ + int oh = blockIdx.y * blockDim.y + threadIdx.y; \ + const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \ + dst += blockIdx.z * OH * OW * 4; \ + mat += blockIdx.z * 3 * 3; \ + if (ow < OW && oh < OH) { \ + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \ + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \ + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \ + int iw0 = floor(iw) + 0; \ + int iw1 = floor(iw) + 1; \ + int ih0 = floor(ih) + 0; \ + int ih1 = floor(ih) + 1; \ + bool okw0 = (iw0 >= 0 && iw0 < IW); \ + bool okw1 = (iw1 >= 0 && iw1 < IW); \ + bool okh0 = (ih0 >= 0 && ih0 < IH); \ + bool okh1 = (ih1 >= 0 && ih1 < IH); \ + float palpha = ih - floor(ih); \ + float pbeta = iw - floor(iw); \ + float nalpha = 1.0f - palpha; \ + float nbeta = 1.0f - pbeta; \ + vec_dst_type result; \ + src_ctype v00, v01, v10, v11; \ + v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval); \ + v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval); \ + v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval); \ + v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval); \ + src_ctype val_x = warp_out_converter( \ + v00 * nalpha * nbeta + v01 * nalpha * pbeta + \ + v10 * palpha * nbeta + v11 * palpha * pbeta); \ + v00 = (okh0 && okw0 ? sptr[IH * IW + ih0 * IW + iw0] : bval); \ + v01 = (okh0 && okw1 ? sptr[IH * IW + ih0 * IW + iw1] : bval); \ + v10 = (okh1 && okw0 ? sptr[IH * IW + ih1 * IW + iw0] : bval); \ + v11 = (okh1 && okw1 ? sptr[IH * IW + ih1 * IW + iw1] : bval); \ + src_ctype val_y = warp_out_converter( \ + v00 * nalpha * nbeta + v01 * nalpha * pbeta + \ + v10 * palpha * nbeta + v11 * palpha * pbeta); \ + v00 = (okh0 && okw0 ? sptr[2 * IH * IW + ih0 * IW + iw0] : bval); \ + v01 = (okh0 && okw1 ? sptr[2 * IH * IW + ih0 * IW + iw1] : bval); \ + v10 = (okh1 && okw0 ? sptr[2 * IH * IW + ih1 * IW + iw0] : bval); \ + v11 = (okh1 && okw1 ? 
sptr[2 * IH * IW + ih1 * IW + iw1] : bval); \ + src_ctype val_z = warp_out_converter( \ + v00 * nalpha * nbeta + v01 * nalpha * pbeta + \ + v10 * palpha * nbeta + v11 * palpha * pbeta); \ + result.x = type_cvt(val_x); \ + result.y = type_cvt(val_y); \ + result.z = type_cvt(val_z); \ + result.w = 0; \ + *((vec_dst_type*)dst + oh * OW + ow) = result; \ + } \ + } + +INST(int8_t, char4) +#undef INST + +#define INST(dst_ctype, vec_dst_type) \ + template < \ + typename src_dtype, typename src_ctype, typename Getter, \ + typename SrcVisitor> \ + __global__ void kern_general_quint8_nhw3_nchw4( \ + SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \ + int IH, int IW, int OH, int OW, \ + CudaTypeCvt type_cvt) { \ + Getter getter; \ + rounding::RoundingConverter warp_out_converter; \ + int ow = blockIdx.x * blockDim.x + threadIdx.x; \ + int oh = blockIdx.y * blockDim.y + threadIdx.y; \ + const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \ + dst += blockIdx.z * OH * OW * 4; \ + mat += blockIdx.z * 3 * 3; \ + if (ow < OW && oh < OH) { \ + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \ + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \ + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \ + int iw0 = getter(floor(iw) + 0, IW); \ + int iw1 = getter(floor(iw) + 1, IW); \ + int ih0 = getter(floor(ih) + 0, IH); \ + int ih1 = getter(floor(ih) + 1, IH); \ + float palpha = ih - floor(ih); \ + float pbeta = iw - floor(iw); \ + float nalpha = 1.0f - palpha; \ + float nbeta = 1.0f - pbeta; \ + vec_dst_type result; \ + src_ctype val_x = warp_out_converter( \ + sptr[(ih0 * IW + iw0) * 3] * nalpha * nbeta + \ + sptr[(ih0 * IW + iw1) * 3] * nalpha * pbeta + \ + sptr[(ih1 * IW + iw0) * 3] * palpha * nbeta + \ + sptr[(ih1 * IW + iw1) * 3] * palpha * pbeta); \ + src_ctype val_y = warp_out_converter( \ + sptr[(ih0 * IW + iw0) * 3 + 1] * nalpha * nbeta + \ + sptr[(ih0 * IW + iw1) * 3 + 1] * nalpha * pbeta + \ + sptr[(ih1 * IW + iw0) * 3 + 1] * palpha * nbeta + \ + sptr[(ih1 * IW + iw1) * 3 + 1] * palpha * pbeta); \ + src_ctype val_z = warp_out_converter( \ + sptr[(ih0 * IW + iw0) * 3 + 2] * nalpha * nbeta + \ + sptr[(ih0 * IW + iw1) * 3 + 2] * nalpha * pbeta + \ + sptr[(ih1 * IW + iw0) * 3 + 2] * palpha * nbeta + \ + sptr[(ih1 * IW + iw1) * 3 + 2] * palpha * pbeta); \ + result.x = type_cvt(val_x); \ + result.y = type_cvt(val_y); \ + result.z = type_cvt(val_z); \ + result.w = 0; \ + *((vec_dst_type*)dst + oh * OW + ow) = result; \ + } \ + } + +INST(int8_t, char4) +#undef INST + +#define INST(dst_ctype, vec_dst_type) \ + template \ + __global__ void kern_const_border_quint8_nhw3_nchw4( \ + SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \ + int IH, int IW, int OH, int OW, src_ctype bval, \ + CudaTypeCvt type_cvt) { \ + rounding::RoundingConverter warp_out_converter; \ + int ow = blockIdx.x * blockDim.x + threadIdx.x; \ + int oh = blockIdx.y * blockDim.y + threadIdx.y; \ + const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \ + dst += blockIdx.z * OH * OW * 4; \ + mat += blockIdx.z * 3 * 3; \ + if (ow < OW && oh < OH) { \ + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \ + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \ + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \ + int iw0 = floor(iw) + 0; \ + int iw1 = floor(iw) + 1; \ + int ih0 = floor(ih) + 0; \ + int ih1 = floor(ih) + 1; \ + bool okw0 = (iw0 >= 0 && iw0 < IW); \ + bool okw1 = (iw1 >= 0 && iw1 < IW); \ 
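
// The quint8 "dimshuffle + typecvt" kernels fuse three steps: the bilinear warp, the
// uint8 -> int8 zero-point shift, and repacking 1 or 3 input channels into a char4
// NCHW4 pixel whose unused lanes are zeroed. The packing step on its own
// (illustrative helper, not part of the library):
__device__ __forceinline__ char4 pack_rgb_to_nchw4_sketch(int8_t r, int8_t g, int8_t b) {
    char4 out;
    out.x = r;
    out.y = g;
    out.z = b;
    out.w = 0;  // padding channel so that C becomes a multiple of 4
    return out;
}
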
+ bool okh0 = (ih0 >= 0 && ih0 < IH); \ + bool okh1 = (ih1 >= 0 && ih1 < IH); \ + float palpha = ih - floor(ih); \ + float pbeta = iw - floor(iw); \ + float nalpha = 1.0f - palpha; \ + float nbeta = 1.0f - pbeta; \ + vec_dst_type result; \ + src_ctype v00, v01, v10, v11; \ + v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3] : bval); \ + v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3] : bval); \ + v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3] : bval); \ + v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3] : bval); \ + src_ctype val_x = warp_out_converter( \ + v00 * nalpha * nbeta + v01 * nalpha * pbeta + \ + v10 * palpha * nbeta + v11 * palpha * pbeta); \ + v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3 + 1] : bval); \ + v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3 + 1] : bval); \ + v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3 + 1] : bval); \ + v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3 + 1] : bval); \ + src_ctype val_y = warp_out_converter( \ + v00 * nalpha * nbeta + v01 * nalpha * pbeta + \ + v10 * palpha * nbeta + v11 * palpha * pbeta); \ + v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3 + 2] : bval); \ + v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3 + 2] : bval); \ + v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3 + 2] : bval); \ + v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3 + 2] : bval); \ + src_ctype val_z = warp_out_converter( \ + v00 * nalpha * nbeta + v01 * nalpha * pbeta + \ + v10 * palpha * nbeta + v11 * palpha * pbeta); \ + result.x = type_cvt(val_x); \ + result.y = type_cvt(val_y); \ + result.z = type_cvt(val_z); \ + result.w = 0; \ + *((vec_dst_type*)dst + oh * OW + ow) = result; \ + } \ + } + +INST(int8_t, char4) +#undef INST + +template < + typename src_dtype, typename src_ctype, typename dst_ctype, typename SrcVisitor> +void dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4( + bool is_nhwc, SrcVisitor src, const float* mat, dst_ctype* dst, int N, int C, + int IH, int IW, int OH, int OW, src_ctype bval, + CudaDTypeParamImpl param, BorderMode bmode, cudaStream_t stream) { + const int BY = 16, BX = 32; + CudaTypeCvt type_cvt(param); +#define DISPATCH(Getter) \ + do { \ + if (C == 1) { \ + kern_general_quint8_nhw_nchw4 \ + <<>>( \ + src, mat, dst, IH, IW, OH, OW, type_cvt); \ + } else if (is_nhwc) { \ + kern_general_quint8_nhw3_nchw4 \ + <<>>( \ + src, mat, dst, IH, IW, OH, OW, type_cvt); \ + } else { \ + kern_general_quint8_n3hw_nchw4 \ + <<>>( \ + src, mat, dst, IH, IW, OH, OW, type_cvt); \ + } \ + } while (0) + + const int max_batch_size = 65535; + while (N) { + size_t curr_batch_size = N < max_batch_size ? 
N : max_batch_size; + dim3 threads(BX, BY); + dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size); + + switch (bmode) { + case BORDER_REPLICATE: + DISPATCH(ReplicateGetter); + break; + case BORDER_REFLECT: + DISPATCH(ReflectGetter); + break; + case BORDER_REFLECT_101: + DISPATCH(Reflect101Getter); + break; + case BORDER_WRAP: + DISPATCH(WrapGetter); + break; +#undef DISPATCH + case BORDER_CONSTANT: + if (C == 1) { + kern_const_border_quint8_nhw_nchw4 + <<>>( + src, mat, dst, IH, IW, OH, OW, bval, type_cvt); + } else if (is_nhwc) { + kern_const_border_quint8_nhw3_nchw4< + src_dtype, src_ctype, SrcVisitor> + <<>>( + src, mat, dst, IH, IW, OH, OW, bval, type_cvt); + } else { + kern_const_border_quint8_n3hw_nchw4< + src_dtype, src_ctype, SrcVisitor> + <<>>( + src, mat, dst, IH, IW, OH, OW, bval, type_cvt); + } + break; + default: + break; + } + + N -= curr_batch_size; + src.move_batch(curr_batch_size, C * IH * IW); + mat += curr_batch_size * 3 * 3; + dst += curr_batch_size * 4 * OH * OW; + } +} + +#define INST(dst_ctype) \ + template < \ + typename src_dtype, typename src_ctype, typename Getter, \ + typename SrcVisitor> \ + __global__ void kern_general_quint8_nchw( \ + SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \ + int C, int IH, int IW, int OH, int OW, \ + CudaTypeCvt type_cvt) { \ + Getter getter; \ + rounding::RoundingConverter warp_out_converter; \ + int ow = blockIdx.x * blockDim.x + threadIdx.x; \ + int oh = blockIdx.y * blockDim.y + threadIdx.y; \ + const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \ + dst += blockIdx.z * C * OH * OW; \ + mat += blockIdx.z * 3 * 3; \ + if (ow < OW && oh < OH) { \ + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \ + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \ + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \ + int iw0 = getter(floor(iw) + 0, IW); \ + int iw1 = getter(floor(iw) + 1, IW); \ + int ih0 = getter(floor(ih) + 0, IH); \ + int ih1 = getter(floor(ih) + 1, IH); \ + float palpha = ih - floor(ih); \ + float pbeta = iw - floor(iw); \ + float nalpha = 1.0f - palpha; \ + float nbeta = 1.0f - pbeta; \ + for (int c = 0; c < C; ++c) { \ + src_ctype val = warp_out_converter( \ + sptr[ih0 * IW + iw0] * nalpha * nbeta + \ + sptr[ih0 * IW + iw1] * nalpha * pbeta + \ + sptr[ih1 * IW + iw0] * palpha * nbeta + \ + sptr[ih1 * IW + iw1] * palpha * pbeta); \ + dst_ctype result; \ + result = type_cvt(val); \ + dst[oh * OW + ow] = result; \ + sptr += IH * IW; \ + dst += OH * OW; \ + } \ + } \ + } + +INST(float) +#undef INST + +#define INST(dst_ctype) \ + template \ + __global__ void kern_const_border_quint8_nchw( \ + SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \ + int C, int IH, int IW, int OH, int OW, src_ctype bval, \ + CudaTypeCvt type_cvt) { \ + rounding::RoundingConverter warp_out_converter; \ + int ow = blockIdx.x * blockDim.x + threadIdx.x; \ + int oh = blockIdx.y * blockDim.y + threadIdx.y; \ + const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \ + dst += blockIdx.z * C * OH * OW; \ + mat += blockIdx.z * 3 * 3; \ + if (ow < OW && oh < OH) { \ + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \ + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \ + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \ + int iw0 = floor(iw) + 0; \ + int iw1 = floor(iw) + 1; \ + int ih0 = floor(ih) + 0; \ + int ih1 = floor(ih) + 1; \ + bool okw0 = (iw0 >= 0 && iw0 < IW); \ + bool okw1 = (iw1 
>= 0 && iw1 < IW); \ + bool okh0 = (ih0 >= 0 && ih0 < IH); \ + bool okh1 = (ih1 >= 0 && ih1 < IH); \ + float palpha = ih - floor(ih); \ + float pbeta = iw - floor(iw); \ + float nalpha = 1.0f - palpha; \ + float nbeta = 1.0f - pbeta; \ + for (int c = 0; c < C; ++c) { \ + src_ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval); \ + src_ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval); \ + src_ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval); \ + src_ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval); \ + src_ctype val = warp_out_converter( \ + v00 * nalpha * nbeta + v01 * nalpha * pbeta + \ + v10 * palpha * nbeta + v11 * palpha * pbeta); \ + dst_ctype result; \ + result = type_cvt(val); \ + dst[oh * OW + ow] = result; \ + sptr += IH * IW; \ + dst += OH * OW; \ + } \ + } \ + } + +INST(float) +#undef INST + +#define INST(dst_ctype) \ + template < \ + typename src_dtype, typename src_ctype, typename Getter, \ + typename SrcVisitor> \ + __global__ void kern_general_quint8_nhwc_nchw( \ + SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \ + int C, int IH, int IW, int OH, int OW, \ + CudaTypeCvt type_cvt) { \ + Getter getter; \ + rounding::RoundingConverter warp_out_converter; \ + int ow = blockIdx.x * blockDim.x + threadIdx.x; \ + int oh = blockIdx.y * blockDim.y + threadIdx.y; \ + const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \ + dst += blockIdx.z * C * OH * OW; \ + mat += blockIdx.z * 3 * 3; \ + if (ow < OW && oh < OH) { \ + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \ + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \ + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \ + int iw0 = getter(floor(iw) + 0, IW); \ + int iw1 = getter(floor(iw) + 1, IW); \ + int ih0 = getter(floor(ih) + 0, IH); \ + int ih1 = getter(floor(ih) + 1, IH); \ + float palpha = ih - floor(ih); \ + float pbeta = iw - floor(iw); \ + float nalpha = 1.0f - palpha; \ + float nbeta = 1.0f - pbeta; \ + for (int c = 0; c < C; ++c) { \ + src_ctype val = warp_out_converter( \ + sptr[(ih0 * IW + iw0) * C + c] * nalpha * nbeta + \ + sptr[(ih0 * IW + iw1) * C + c] * nalpha * pbeta + \ + sptr[(ih1 * IW + iw0) * C + c] * palpha * nbeta + \ + sptr[(ih1 * IW + iw1) * C + c] * palpha * pbeta); \ + dst_ctype result; \ + result = type_cvt(val); \ + dst[oh * OW + ow] = result; \ + dst += OH * OW; \ + } \ + } \ + } + +INST(float) +#undef INST + +#define INST(dst_ctype) \ + template \ + __global__ void kern_const_border_quint8_nhwc_nchw( \ + SrcVisitor src, const float* __restrict mat, dst_ctype* __restrict dst, \ + int C, int IH, int IW, int OH, int OW, src_ctype bval, \ + CudaTypeCvt type_cvt) { \ + rounding::RoundingConverter warp_out_converter; \ + int ow = blockIdx.x * blockDim.x + threadIdx.x; \ + int oh = blockIdx.y * blockDim.y + threadIdx.y; \ + const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \ + dst += blockIdx.z * C * OH * OW; \ + mat += blockIdx.z * 3 * 3; \ + if (ow < OW && oh < OH) { \ + float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \ + float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \ + float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \ + int iw0 = floor(iw) + 0; \ + int iw1 = floor(iw) + 1; \ + int ih0 = floor(ih) + 0; \ + int ih1 = floor(ih) + 1; \ + bool okw0 = (iw0 >= 0 && iw0 < IW); \ + bool okw1 = (iw1 >= 0 && iw1 < IW); \ + bool okh0 = (ih0 >= 0 && ih0 < IH); \ + bool okh1 = (ih1 >= 0 && ih1 < IH); \ + float palpha = ih - floor(ih); \ + float pbeta = 
iw - floor(iw); \ + float nalpha = 1.0f - palpha; \ + float nbeta = 1.0f - pbeta; \ + for (int c = 0; c < C; ++c) { \ + src_ctype v00 = \ + (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * C + c] : bval); \ + src_ctype v01 = \ + (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * C + c] : bval); \ + src_ctype v10 = \ + (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * C + c] : bval); \ + src_ctype v11 = \ + (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * C + c] : bval); \ + float val = warp_out_converter( \ + v00 * nalpha * nbeta + v01 * nalpha * pbeta + \ + v10 * palpha * nbeta + v11 * palpha * pbeta); \ + dst_ctype result; \ + result = type_cvt(val); \ + dst[oh * OW + ow] = result; \ + dst += OH * OW; \ + } \ + } \ + } + +INST(float) +#undef INST + +template < + typename src_dtype, typename src_ctype, typename dst_ctype, typename SrcVisitor> +void dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw( + bool is_nhwc, SrcVisitor src, const float* mat, dst_ctype* dst, int N, int C, + int IH, int IW, int OH, int OW, src_ctype bval, + CudaDTypeParamImpl param, BorderMode bmode, cudaStream_t stream) { + const int BY = 16, BX = 32; + CudaTypeCvt type_cvt(param); +#define DISPATCH(Getter) \ + do { \ + if (is_nhwc) { \ + kern_general_quint8_nhwc_nchw \ + <<>>( \ + src, mat, dst, C, IH, IW, OH, OW, type_cvt); \ + } else { \ + kern_general_quint8_nchw \ + <<>>( \ + src, mat, dst, C, IH, IW, OH, OW, type_cvt); \ + } \ + } while (0) + + const int max_batch_size = 65535; + while (N) { + size_t curr_batch_size = N < max_batch_size ? N : max_batch_size; + dim3 threads(BX, BY); + dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size); + + switch (bmode) { + case BORDER_REPLICATE: + DISPATCH(ReplicateGetter); + break; + case BORDER_REFLECT: + DISPATCH(ReflectGetter); + break; + case BORDER_REFLECT_101: + DISPATCH(Reflect101Getter); + break; + case BORDER_WRAP: + DISPATCH(WrapGetter); + break; +#undef DISPATCH + case BORDER_CONSTANT: + if (is_nhwc) { + kern_const_border_quint8_nhwc_nchw + <<>>( + src, mat, dst, C, IH, IW, OH, OW, bval, type_cvt); + } else { + kern_const_border_quint8_nchw + <<>>( + src, mat, dst, C, IH, IW, OH, OW, bval, type_cvt); + } + break; + default: + break; + } + + N -= curr_batch_size; + src.move_batch(curr_batch_size, C * IH * IW); + mat += curr_batch_size * 3 * 3; + dst += curr_batch_size * C * OH * OW; + } +} + +} // anonymous namespace + +namespace megdnn { +namespace cuda { +namespace warp_perspective { + +template +void forward_proxy( + bool is_nhwc, const ctype* src, const float* mat, const int* mat_idx, + ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW, + ctype bval, BorderMode bmode, megcore::AsyncErrorInfo* error_info, + void* error_tracker, cudaStream_t stream) { + if (mat_idx) { + IndexedSrcVisitor visitor; + visitor.ptr = src; + visitor.idx = mat_idx; + visitor.N_SRC = N_SRC; + visitor.error_info = error_info; + visitor.error_tracker = error_tracker; + dispatch_with_visitor( + is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, + stream); + } else { + DirectSrcVisitor visitor; + visitor.ptr = src; + dispatch_with_visitor( + is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, + stream); + } + after_kernel_launch(); +} + +template +void forward_proxy_nhwc_bit4( + const ctype* src, const float* mat, const int* mat_idx, ctype* dst, int N_SRC, + int N_MAT, int C, int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode, + megcore::AsyncErrorInfo* error_info, void* error_tracker, cudaStream_t stream) { + if (mat_idx) { + IndexedSrcVisitor 
visitor; + visitor.ptr = src; + visitor.idx = mat_idx; + visitor.N_SRC = N_SRC; + visitor.error_info = error_info; + visitor.error_tracker = error_tracker; + dispatch_with_visitor_nhwc_bit4, pack_c>( + visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream); + } else { + DirectSrcVisitor visitor; + visitor.ptr = src; + dispatch_with_visitor_nhwc_bit4, pack_c>( + visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream); + } + after_kernel_launch(); +} + +template +void forward_proxy_nchw4( + const ctype* src, const float* mat, const int* mat_idx, ctype* dst, int N_SRC, + int N_MAT, int C, int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode, + megcore::AsyncErrorInfo* error_info, void* error_tracker, cudaStream_t stream) { + if (mat_idx) { + IndexedSrcVisitor visitor; + visitor.ptr = src; + visitor.idx = mat_idx; + visitor.N_SRC = N_SRC; + visitor.error_info = error_info; + visitor.error_tracker = error_tracker; + dispatch_with_visitor_nchw4( + visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream); + } else { + DirectSrcVisitor visitor; + visitor.ptr = src; + dispatch_with_visitor_nchw4( + visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream); + } + after_kernel_launch(); +} + +template +void forward_proxy_nchw64( + const ctype* src, const float* mat, const int* mat_idx, ctype* dst, int N_SRC, + int N_MAT, int C, int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode, + megcore::AsyncErrorInfo* error_info, void* error_tracker, cudaStream_t stream) { + if (mat_idx) { + IndexedSrcVisitor visitor; + visitor.ptr = src; + visitor.idx = mat_idx; + visitor.N_SRC = N_SRC; + visitor.error_info = error_info; + visitor.error_tracker = error_tracker; + dispatch_with_visitor_nchw64( + visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream); + } else { + DirectSrcVisitor visitor; + visitor.ptr = src; + dispatch_with_visitor_nchw64( + visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode, stream); + } + after_kernel_launch(); +} + +#define INST(ctype) \ + template void forward_proxy( \ + bool, const ctype*, const float*, const int*, ctype*, int, int, int, int, \ + int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*, \ + cudaStream_t); +INST(float) +INST(uint8_t) +#ifndef MEGDNN_DISABLE_FLOAT16 +INST(dt_float16) +#endif +INST(int8_t) +#undef INST + +#define INST(ctype) \ + template void forward_proxy_nchw4( \ + const ctype*, const float*, const int*, ctype*, int, int, int, int, int, \ + int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*, \ + cudaStream_t); + +INST(int8_t) +#undef INST + +#define INST(ctype) \ + template void forward_proxy_nchw64( \ + const ctype*, const float*, const int*, ctype*, int, int, int, int, int, \ + int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*, \ + cudaStream_t); + +INST(dt_qint4) +INST(dt_quint4) +#undef INST + +#define INST(ctype, pack_c) \ + template void forward_proxy_nhwc_bit4( \ + const ctype*, const float*, const int*, ctype*, int, int, int, int, int, \ + int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, void*, \ + cudaStream_t); + +INST(dt_qint4, 8) +INST(dt_quint4, 8) +INST(dt_qint4, 16) +INST(dt_quint4, 16) +#undef INST + +template +void forward_proxy_quint8_dimshuffle_typecvt_nchw4( + bool is_nhwc, const src_ctype* src, const float* mat, const int* mat_idx, + dst_ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW, + src_ctype bval, DTypeParamImpl param, BorderMode bmode, + megcore::AsyncErrorInfo* error_info, void* error_tracker, 
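
// forward_proxy selects one of two "source visitors": DirectSrcVisitor simply offsets
// the input pointer by the batch index, while IndexedSrcVisitor indirects through
// mat_idx so that several transformation matrices can sample the same source image.
// The essential shape of the two visitors, simplified (the real ones also validate the
// index and report failures through AsyncErrorInfo):
template <typename ctype>
struct DirectSrcVisitorSketch {
    const ctype* ptr;
    __device__ const ctype* get(int batch, int im_size) const {
        return ptr + (size_t)batch * im_size;
    }
};
template <typename ctype>
struct IndexedSrcVisitorSketch {
    const ctype* ptr;
    const int* idx;  // idx[batch] names the source image used by output `batch`
    __device__ const ctype* get(int batch, int im_size) const {
        return ptr + (size_t)idx[batch] * im_size;
    }
};
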
cudaStream_t stream) { + CudaDTypeParamImpl dtype_param(param); + if (mat_idx) { + IndexedSrcVisitor visitor; + visitor.ptr = src; + visitor.idx = mat_idx; + visitor.N_SRC = N_SRC; + visitor.error_info = error_info; + visitor.error_tracker = error_tracker; + dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4( + is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, dtype_param, + bmode, stream); + } else { + DirectSrcVisitor visitor; + visitor.ptr = src; + dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4( + is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, dtype_param, + bmode, stream); + } + after_kernel_launch(); +} + +#define INST(src_dtype, src_ctype, dst_ctype) \ + template void forward_proxy_quint8_dimshuffle_typecvt_nchw4( \ + bool is_nhwc, const src_ctype*, const float*, const int*, dst_ctype*, int, \ + int, int, int, int, int, int, src_ctype, DTypeParamImpl param, \ + BorderMode, megcore::AsyncErrorInfo*, void*, cudaStream_t); + +INST(dt_quint8, uint8_t, int8_t) +#undef INST + +template +void forward_proxy_quint8_dimshuffle_typecvt_nchw( + bool is_nhwc, const src_ctype* src, const float* mat, const int* mat_idx, + dst_ctype* dst, int N_SRC, int N_MAT, int C, int IH, int IW, int OH, int OW, + src_ctype bval, DTypeParamImpl param, BorderMode bmode, + megcore::AsyncErrorInfo* error_info, void* error_tracker, cudaStream_t stream) { + CudaDTypeParamImpl dtype_param(param); + if (mat_idx) { + IndexedSrcVisitor visitor; + visitor.ptr = src; + visitor.idx = mat_idx; + visitor.N_SRC = N_SRC; + visitor.error_info = error_info; + visitor.error_tracker = error_tracker; + dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw( + is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, dtype_param, + bmode, stream); + } else { + DirectSrcVisitor visitor; + visitor.ptr = src; + dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw( + is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, dtype_param, + bmode, stream); + } + after_kernel_launch(); +} + +#define INST(src_dtype, src_ctype, dst_ctype) \ + template void forward_proxy_quint8_dimshuffle_typecvt_nchw( \ + bool is_nhwc, const src_ctype*, const float*, const int*, dst_ctype*, int, \ + int, int, int, int, int, int, src_ctype, DTypeParamImpl param, \ + BorderMode, megcore::AsyncErrorInfo*, void*, cudaStream_t); + +INST(dt_quint8, uint8_t, float) +#undef INST + +} // namespace warp_perspective +} // namespace cuda +} // namespace megdnn + +// vim: syntax=cpp.doxygen diff --git a/cuda_code/forward_mapping_per_gpu_functor.cu b/cuda_code/forward_mapping_per_gpu_functor.cu new file mode 100644 index 0000000000000000000000000000000000000000..0fd93bc1003dbce0de8fd07b6b3067bd9b95b8b5 --- /dev/null +++ b/cuda_code/forward_mapping_per_gpu_functor.cu @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp" + +namespace HugeCTR { + +namespace { + +// for one-hot, the value_index mapping is linear (no need to use hashtable) +template +__global__ void hash_key_value_index_mapping_kernel(size_t nnz, int slot_num, + const uint32_t *mapping_offsets, + const TypeKey *hash_key, + size_t *hash_value_index) { + size_t gid = blockIdx.x * blockDim.x + threadIdx.x; + if (gid < nnz) { + int slot_id = gid % slot_num; + hash_value_index[gid] = hash_key[gid] - mapping_offsets[slot_id]; + } +} + +} // namespace + +/** + * forward propagation on each GPU for LocalizedSlotSparseEmbeddingOneHot. + * Because there is no hashtable in this class, so there must be a mapping table + * between input valud_index and local value_index. + * @param batch_size batch size for the current mini-batch computation. + * @param slot_num the number of slots for current GPU + * @param row_offset row_offset (CSR format of input sparse tensors) + * @param hash_key value (CSR format of input sparse tensors) + * @param nnz non-zero feature number per batch + * @param mapping_offsets the mapping between input value_index and local value_index + * @param hash_value_index hash table value_index(row index of embedding) + * @param stream cuda stream + */ +template +void SparseEmbeddingFunctors::forward_mapping_per_gpu(size_t batch_size, size_t slot_num, + const Tensor2 &hash_key, + size_t nnz, + const Tensor2 &mapping_offsets, + Tensor2 &hash_value_index, + cudaStream_t stream) { + // remove hashtable get_insert(), and do linear mapping between key and value_index + hash_key_value_index_mapping_kernel<<<(nnz + 255) / 256, 256, 0, stream>>>( + nnz, slot_num, mapping_offsets.get_ptr(), hash_key.get_ptr(), hash_value_index.get_ptr()); + + return; +} + +template void SparseEmbeddingFunctors::forward_mapping_per_gpu( + size_t batch_size, size_t slot_num, const Tensor2 &hash_key, size_t nnz, + const Tensor2 &mapping_offsets, Tensor2 &hash_value_index, + cudaStream_t stream); + +template void SparseEmbeddingFunctors::forward_mapping_per_gpu( + size_t batch_size, size_t slot_num, const Tensor2 &hash_key, size_t nnz, + const Tensor2 &mapping_offsets, Tensor2 &hash_value_index, + cudaStream_t stream); + +} // namespace HugeCTR \ No newline at end of file diff --git a/cuda_code/forward_per_gpu_functor_4.cu b/cuda_code/forward_per_gpu_functor_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..102f708a98e4b7387a824bbd049d5a2819ffdf3f --- /dev/null +++ b/cuda_code/forward_per_gpu_functor_4.cu @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp" +#include "HugeCTR/include/utils.cuh" + +namespace HugeCTR { + +namespace { +/** + * All the CUDA kernel functions used by embedding layer are defined in this file, including + * forward propagation, backward propagation. 
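
// For the one-hot embedding there is no hash table: every slot owns a contiguous key
// range, so the row index is simply the key minus that slot's mapping offset, exactly
// as hash_key_value_index_mapping_kernel does above. A host-side reference of the same
// mapping, useful for checking results (illustrative, not HugeCTR code):
template <typename TypeKey>
void forward_mapping_ref(size_t nnz, int slot_num, const uint32_t* mapping_offsets,
                         const TypeKey* hash_key, size_t* hash_value_index) {
    for (size_t gid = 0; gid < nnz; ++gid) {
        int slot_id = static_cast<int>(gid % slot_num);
        hash_value_index[gid] = hash_key[gid] - mapping_offsets[slot_id];
    }
}
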
The functions are defined by propagation type + * and combiner type(sum or mean) as below: + * 1) forward + * sum: calling forward_sum_kernel() + * mean: calling foward_sum_kernel() + forward_scale_kernel() + * 2) backward: + * calculating wgrad: + * sum: calling backward_sum_kernel() + * mean: calling backward_mean_kernel() + * update embedding table: including several steps as below, + * step1: expand sample IDs, calling sample_id_expand_kernel() + * step2: get value_index by key (will call hash_table->get_insert() in nv_hashtable lib) + * step3: sort by value_index (will call cub::DeviceRadixSort::SortPairs in cub lib) + * step4: count the number for each unduplicated value_index, calling value_count_kernel() + * step5: use optimizer method to compute deltaw, and record corresponding, including three + * types of optimizer: Adam: caling opt_adam_kernel() Momentum sgd: calling + * opt_momentum_sgd_kernel() Nesterov: calling opt_nesterov_kernel() step6: update embedding table + * by deltaw, calling update_kernel() + */ + +// forward kernel funcion: for both combiner=sum and combiner=mean +template +__global__ void forward_sum_kernel(int batch_size, int slot_num, int embedding_vec_size, + const TypeKey *row_offset, const size_t *hash_value_index, + const float *hash_table_value, + TypeEmbeddingComp *embedding_feature) { + int bid = blockIdx.x; // each block corresponding to one sample + int tid = threadIdx.x; // each thread corresponding to one element in the embedding vector + + if (bid < batch_size && tid < embedding_vec_size) { + for (int i = 0; i < slot_num; i++) { + int feature_row_index = bid * slot_num + i; + TypeKey value_offset = row_offset[feature_row_index]; + TypeKey feature_num = + row_offset[feature_row_index + 1] - value_offset; // number of hash values in one slot + + float sum = 0.0f; + + // reduce in a slot + for (int j = 0; j < feature_num; j++) { + size_t value_index = hash_value_index[value_offset + j]; + sum += hash_table_value[value_index * embedding_vec_size + tid]; + } + + // store the embedding vector + embedding_feature[feature_row_index * embedding_vec_size + tid] = + TypeConvertFunc::convert(sum); + } + } +} + +template +__global__ void forward_sum_align2_kernel(int batch_size, int slot_num, int embedding_vec_size, + const TypeKey *row_offset, const size_t *hash_value_index, + const float *hash_table_value, + __half *embedding_feature) { + int bid = blockIdx.x; // each block corresponding to one sample + int tid = threadIdx.x; // each thread corresponding to one element in the embedding vector + + if (bid < batch_size && tid < embedding_vec_size) { + const float2 *hash_table_value2 = reinterpret_cast(hash_table_value); + __half2 *embedding_feature2 = reinterpret_cast<__half2 *>(embedding_feature); + + for (int i = 0; i < slot_num; i++) { + int feature_row_index = bid * slot_num + i; + TypeKey value_offset = row_offset[feature_row_index]; + TypeKey feature_num = + row_offset[feature_row_index + 1] - value_offset; // number of hash values in one slot + + // use float type to do accumulation + float2 sum2 = {0.0f, 0.0f}; + for (int j = 0; j < feature_num; j++) { + size_t value_index = hash_value_index[value_offset + j]; + sum2.x += hash_table_value2[value_index * embedding_vec_size + tid].x; + sum2.y += hash_table_value2[value_index * embedding_vec_size + tid].y; + } + __half2 sum = __float22half2_rn(sum2); + + // store the embedding vector + embedding_feature2[feature_row_index * embedding_vec_size + tid] = sum; + } + } +} + +// forward kernel funcion: for 
combiner=mean in LocalizedEmbedding +template +__global__ void forward_mean_kernel(int batch_size, int slot_num, int embedding_vec_size, + const TypeKey *row_offset, const size_t *hash_value_index, + const float *hash_table_value, + TypeEmbeddingComp *embedding_feature) { + int bid = blockIdx.x; // each block corresponding to one sample + int tid = threadIdx.x; // each thread corresponding to one element in the embedding vector + + if (bid < batch_size && tid < embedding_vec_size) { + for (int i = 0; i < slot_num; i++) { + int feature_row_index = bid * slot_num + i; + TypeKey value_offset = row_offset[feature_row_index]; + int feature_num = + row_offset[feature_row_index + 1] - value_offset; // number of hash values in one slot + + float sum = 0.0f; + + // reduce in a slot + for (int j = 0; j < feature_num; j++) { + size_t value_index = hash_value_index[value_offset + j]; + sum += hash_table_value[value_index * embedding_vec_size + tid]; + } + + float scaler = 1.0f; + if (feature_num > 1) { + scaler = 1.0f / feature_num; + } + + // store the embedding vector + embedding_feature[feature_row_index * embedding_vec_size + tid] = + TypeConvertFunc::convert(sum * scaler); + } + } +} + +template +__global__ void forward_mean_align2_kernel(int batch_size, int slot_num, int embedding_vec_size, + const TypeKey *row_offset, + const size_t *hash_value_index, + const float *hash_table_value, + __half *embedding_feature) { + int bid = blockIdx.x; // each block corresponding to one sample + int tid = threadIdx.x; // each thread corresponding to one element in the embedding vector + + if (bid < batch_size && tid < embedding_vec_size) { + const float2 *hash_table_value2 = reinterpret_cast(hash_table_value); + __half2 *embedding_feature2 = reinterpret_cast<__half2 *>(embedding_feature); + + for (int i = 0; i < slot_num; i++) { + int feature_row_index = bid * slot_num + i; + TypeKey value_offset = row_offset[feature_row_index]; + int feature_num = + row_offset[feature_row_index + 1] - value_offset; // number of hash values in one slot + + // use float to do accumulation + float2 sum = {0.0f, 0.0f}; + for (int j = 0; j < feature_num; j++) { + size_t value_index = hash_value_index[value_offset + j]; + sum.x += hash_table_value2[value_index * embedding_vec_size + tid].x; + sum.y += hash_table_value2[value_index * embedding_vec_size + tid].y; + } + __half2 sum2 = __float22half2_rn(sum); + + float scaler = 1.0f; + if (feature_num > 1) { + scaler = 1.0f / feature_num; + } + __half2 scaler2 = __float2half2_rn(scaler); + + // store the embedding vector + embedding_feature2[feature_row_index * embedding_vec_size + tid] = __hmul2(sum2, scaler2); + } + } +} + +// do sum reduction +template +void forward_sum(size_t batch_size, size_t slot_num, size_t embedding_vec_size, + const TypeHashKey *row_offset, const size_t *hash_value_index, + const float *hash_table_value, TypeEmbeddingComp *embedding_feature, + cudaStream_t stream) { + const size_t grid_size = batch_size; // each block corresponds to a sample + const size_t block_size = + embedding_vec_size; // each thread corresponds to one element in an embedding vector + forward_sum_kernel<<>>(batch_size, slot_num, embedding_vec_size, + row_offset, hash_value_index, + hash_table_value, embedding_feature); +} + +// do sum reduction +template +void forward_sum(size_t batch_size, size_t slot_num, size_t embedding_vec_size, + const TypeHashKey *row_offset, const size_t *hash_value_index, + const float *hash_table_value, __half *embedding_feature, cudaStream_t stream) { + const 
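
// Reference semantics of the sum/mean combiners implemented by the kernels above: for
// each (sample, slot) pair, row_offset delimits a CSR range of value indices, each of
// which selects one row of hash_table_value; the rows are accumulated, and for the
// mean combiner the sum is divided by the number of features in the slot. Host-side
// sketch for a single embedding element (illustrative, not HugeCTR code):
template <typename TypeKey>
float combine_slot_ref(int combiner, const TypeKey* row_offset, int feature_row_index,
                       const size_t* hash_value_index, const float* hash_table_value,
                       int embedding_vec_size, int tid) {
    TypeKey value_offset = row_offset[feature_row_index];
    TypeKey feature_num = row_offset[feature_row_index + 1] - value_offset;
    float sum = 0.0f;
    for (TypeKey j = 0; j < feature_num; ++j) {
        size_t value_index = hash_value_index[value_offset + j];
        sum += hash_table_value[value_index * embedding_vec_size + tid];
    }
    if (combiner == 1 && feature_num > 1) {
        sum /= feature_num;  // mean: scale by 1 / feature_num
    }
    return sum;
}
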
size_t grid_size = batch_size; // each block corresponds to a sample + if (embedding_vec_size % 2 == 0) { + const size_t block_size = embedding_vec_size / 2; + forward_sum_align2_kernel<<>>( + batch_size, slot_num, embedding_vec_size / 2, row_offset, hash_value_index, + hash_table_value, embedding_feature); + } else { + const size_t block_size = + embedding_vec_size; // each thread corresponds to one element in an embedding vector + forward_sum_kernel<<>>( + batch_size, slot_num, embedding_vec_size, row_offset, hash_value_index, hash_table_value, + embedding_feature); + } +} + +template +void forward_mean(size_t batch_size, size_t slot_num, size_t embedding_vec_size, + const TypeHashKey *row_offset, const size_t *hash_value_index, + const float *hash_table_value, TypeEmbeddingComp *embedding_feature, + cudaStream_t stream) { + const size_t grid_size = batch_size; + const size_t block_size = embedding_vec_size; + forward_mean_kernel<<>>( + batch_size, slot_num, embedding_vec_size, row_offset, hash_value_index, hash_table_value, + embedding_feature); +} + +template +void forward_mean(size_t batch_size, size_t slot_num, size_t embedding_vec_size, + const TypeHashKey *row_offset, const size_t *hash_value_index, + const float *hash_table_value, __half *embedding_feature, cudaStream_t stream) { + const size_t grid_size = batch_size; + if (embedding_vec_size % 2 == 0) { + const size_t block_size = embedding_vec_size / 2; + forward_mean_align2_kernel<<>>( + batch_size, slot_num, embedding_vec_size / 2, row_offset, hash_value_index, + hash_table_value, embedding_feature); + } else { + const size_t block_size = embedding_vec_size; + forward_mean_kernel<<>>( + batch_size, slot_num, embedding_vec_size, row_offset, hash_value_index, hash_table_value, + embedding_feature); + } +} + +} // namespace + +/** + * forward propagation on each GPU for LocalizedSlotSparseEmbeddingHash + * @param batch_size batch size for the current mini-batch computation. + * @param slot_num the number of slots for current GPU + * @param embedding_vec_size embedding vector size. 
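
// The __half overloads above halve the thread count when embedding_vec_size is even:
// each thread loads float2 pairs, accumulates in fp32 and converts to __half2 once at
// the end, which preserves accuracy while letting every thread issue wider loads. The
// core of that pattern in isolation (illustrative; assumes cuda_fp16.h):
__device__ __forceinline__ __half2 accumulate_pairs_sketch(const float2* rows, int n) {
    float2 acc = {0.0f, 0.0f};
    for (int i = 0; i < n; ++i) {  // accumulate n float2 values in fp32
        acc.x += rows[i].x;
        acc.y += rows[i].y;
    }
    return __float22half2_rn(acc);  // one round-to-nearest-even conversion to half2
}
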
+ * @param combiner 0-sum; 1-mean + * @param row_offset row_offset (CSR format of input sparse tensors) + * @param hash_key value (CSR format of input sparse tensors) + * @param nnz non-zero feature number per batch + * @param hash_table hash table, pairs of + * @param hash_table_value hash table value, which represents embedding vector + * @param hash_value_index hash table value_index(row index of embedding) + * @param embedding_feature embedding feature (output) + * @param stream cuda stream + */ +template +void SparseEmbeddingFunctors::forward_per_gpu( + size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner, bool train, + const Tensor2 &row_offset, const Tensor2 &hash_key, size_t nnz, + HashTable &hash_table, const Tensor2 &hash_table_value, + Tensor2 &hash_value_index, Tensor2 &embedding_feature, + cudaStream_t stream) { + try { + if (train) { + hash_table.get_insert(hash_key.get_ptr(), hash_value_index.get_ptr(), nnz, stream); + } else { + hash_table.get(hash_key.get_ptr(), hash_value_index.get_ptr(), nnz, stream); + } + + // do sum reduction + if (combiner == 0) { + forward_sum(batch_size, slot_num, embedding_vec_size, row_offset.get_ptr(), + hash_value_index.get_ptr(), hash_table_value.get_ptr(), + embedding_feature.get_ptr(), stream); + } else if (combiner == 1) { + forward_mean(batch_size, slot_num, embedding_vec_size, row_offset.get_ptr(), + hash_value_index.get_ptr(), hash_table_value.get_ptr(), + embedding_feature.get_ptr(), stream); + } else { + CK_THROW_(Error_t::WrongInput, "Invalid combiner type "); + } + } catch (const std::runtime_error &rt_err) { + std::cerr << rt_err.what() << std::endl; + throw; + } + + return; +} + +template void SparseEmbeddingFunctors::forward_per_gpu( + size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner, bool train, + const Tensor2 &row_offset, const Tensor2 &hash_key, size_t nnz, + HashTable &hash_table, const Tensor2 &hash_table_value, + Tensor2 &hash_value_index, Tensor2 &embedding_feature, cudaStream_t stream); + +template void SparseEmbeddingFunctors::forward_per_gpu( + size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner, bool train, + const Tensor2 &row_offset, const Tensor2 &hash_key, size_t nnz, + HashTable &hash_table, const Tensor2 &hash_table_value, + Tensor2 &hash_value_index, Tensor2 &embedding_feature, cudaStream_t stream); + +template void SparseEmbeddingFunctors::forward_per_gpu( + size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner, bool train, + const Tensor2 &row_offset, const Tensor2 &hash_key, size_t nnz, + HashTable &hash_table, const Tensor2 &hash_table_value, + Tensor2 &hash_value_index, Tensor2<__half> &embedding_feature, cudaStream_t stream); + +template void SparseEmbeddingFunctors::forward_per_gpu( + size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner, bool train, + const Tensor2 &row_offset, const Tensor2 &hash_key, size_t nnz, + HashTable &hash_table, const Tensor2 &hash_table_value, + Tensor2 &hash_value_index, Tensor2<__half> &embedding_feature, cudaStream_t stream); + +template +void SparseEmbeddingFunctors::forward_sum_per_gpu( + size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner, bool train, + const Tensor2 &row_offset, const Tensor2 &hash_key, size_t nnz, + const Tensor2 &hash_table_value, Tensor2 &hash_value_index, + Tensor2 &embedding_feature, cudaStream_t stream) { + try { + // do sum reduction + if (combiner == 0) { + forward_sum(batch_size, slot_num, 
embedding_vec_size, row_offset.get_ptr(), + hash_value_index.get_ptr(), hash_table_value.get_ptr(), + embedding_feature.get_ptr(), stream); + } else if (combiner == 1) { + forward_mean(batch_size, slot_num, embedding_vec_size, row_offset.get_ptr(), + hash_value_index.get_ptr(), hash_table_value.get_ptr(), + embedding_feature.get_ptr(), stream); + } else { + CK_THROW_(Error_t::WrongInput, "Invalid combiner type "); + } + } catch (const std::runtime_error &rt_err) { + std::cerr << rt_err.what() << std::endl; + throw; + } + + return; +} + +template void SparseEmbeddingFunctors::forward_sum_per_gpu( + size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner, bool train, + const Tensor2 &row_offset, const Tensor2 &hash_key, size_t nnz, + const Tensor2 &hash_table_value, Tensor2 &hash_value_index, + Tensor2 &embedding_feature, cudaStream_t stream); + +template void SparseEmbeddingFunctors::forward_sum_per_gpu( + size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner, bool train, + const Tensor2 &row_offset, const Tensor2 &hash_key, size_t nnz, + const Tensor2 &hash_table_value, Tensor2 &hash_value_index, + Tensor2 &embedding_feature, cudaStream_t stream); + +template void SparseEmbeddingFunctors::forward_sum_per_gpu( + size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner, bool train, + const Tensor2 &row_offset, const Tensor2 &hash_key, size_t nnz, + const Tensor2 &hash_table_value, Tensor2 &hash_value_index, + Tensor2<__half> &embedding_feature, cudaStream_t stream); + +template void SparseEmbeddingFunctors::forward_sum_per_gpu( + size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner, bool train, + const Tensor2 &row_offset, const Tensor2 &hash_key, size_t nnz, + const Tensor2 &hash_table_value, Tensor2 &hash_value_index, + Tensor2<__half> &embedding_feature, cudaStream_t stream); + +} // namespace HugeCTR diff --git a/cuda_code/fp32_mul.cu b/cuda_code/fp32_mul.cu new file mode 100644 index 0000000000000000000000000000000000000000..925d5fda38d44db1bb05c4b12782d0340baf726c --- /dev/null +++ b/cuda_code/fp32_mul.cu @@ -0,0 +1,192 @@ +#include +#include +#include + +#define DATA_TYPE 0 // 0-SP, 1-INT, 2-DP + +#define VECTOR_SIZE 60000000 +#define TILE_DIM 1024 +#define COMP_ITERATIONS 4096 +#define KERNEL_CALLS 4 + +template __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4, int size, int inner_reps, int tile_dim) +{ + int xIndex = blockIdx.x * tile_dim + threadIdx.x; + T ra, rb, rc, rd, re, rf, rg, rh; + + if (xIndex < size) { + ra=A[xIndex]; + rb=A[size-xIndex]; + rc=A[xIndex]; + rd=A[size-xIndex]; + re=A[xIndex]; + rf=A[size-xIndex]; + rg=A[xIndex]; + rh=A[size-xIndex]; + + // rb=A[xIndex]; + for (int i=0;i 3 || argc == 2) { + printf("\nError: Wrong number of arguments.\n\n"); + printf("Usage:\n\t %s [inner_iterations] [kernel_calls]\n\t %s\n", argv[0], argv[0]); + + return -1; + } + + if (argc == 3) { + kernel_calls = atoi(argv[2]); + compute_iters = atoi(argv[1]); + } + + printf("Number of kernel launches: %d\n", kernel_calls); + printf("Number of compute iterations: %d\n", compute_iters); + + // execution configuration parameters + dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1); + + // CUDA events + cudaEvent_t start, stop; + + #if (DATA_TYPE == 0) + size_t mem_size = static_cast(sizeof(float) * vector_size); + // allocate host memory + float *h_iA = (float *) malloc(mem_size); + float *h_oC1 = (float *) malloc(mem_size); + float *h_oC2 = (float *) malloc(mem_size); + float *h_oC3 = 
(float *) malloc(mem_size); + float *h_oC4 = (float *) malloc(mem_size); + // initalize host data + for (int i = 0; i < vector_size; ++i) + { + h_iA[i] = (float) i+3; + // h_iB[i] = (float) i+3; + } + // allocate device memory + float *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4; + #elif (DATA_TYPE == 1) + size_t mem_size = static_cast(sizeof(int) * vector_size); + // allocate host memory + int *h_iA = (int *) malloc(mem_size); + int *h_oC1 = (int *) malloc(mem_size); + int *h_oC2 = (int *) malloc(mem_size); + int *h_oC3 = (int *) malloc(mem_size); + int *h_oC4 = (int *) malloc(mem_size); + // initalize host data + for (int i = 0; i < vector_size; ++i) + { + h_iA[i] = (int) i+3; + // h_iB[i] = (float) i+3; + } + // allocate device memory + int *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4; + #else + size_t mem_size = static_cast(sizeof(double) * vector_size); + // allocate host memory + double *h_iA = (double *) malloc(mem_size); + double *h_oC1 = (double *) malloc(mem_size); + double *h_oC2 = (double *) malloc(mem_size); + double *h_oC3 = (double *) malloc(mem_size); + double *h_oC4 = (double *) malloc(mem_size); + // initalize host data + for (int i = 0; i < vector_size; ++i) + { + h_iA[i] = (double) i+3; + // h_iB[i] = (float) i+3; + } + // allocate device memory + double *d_iA, *d_oC1, *d_oC2, *d_oC3, *d_oC4; + #endif + + cudaMalloc((void **) &d_iA, mem_size); + cudaMalloc((void **) &d_oC1, mem_size); + cudaMalloc((void **) &d_oC2, mem_size); + cudaMalloc((void **) &d_oC3, mem_size); + cudaMalloc((void **) &d_oC4, mem_size); + + // copy host data to device + cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice); + + // print out common data for all kernels + printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x); + + // initialize events + cudaEventCreate(&start); + cudaEventCreate(&stop); + + // take measurements for loop over kernel launches + cudaEventRecord(start, 0); + + for (int i=0; i < kernel_calls; i++) + { + #if (DATA_TYPE == 0) + simpleKernel<<>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim); + #elif (DATA_TYPE == 1) + simpleKernel<<>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim); + #else + simpleKernel<<>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4, vector_size, compute_iters, tile_dim); + #endif // Ensure no launch failure + } + + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + float kernelTime; + cudaEventElapsedTime(&kernelTime, start, stop); + + // take measurements for loop inside kernel + cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_oC2, d_oC2, mem_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_oC3, d_oC3, mem_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_oC4, d_oC4, mem_size, cudaMemcpyDeviceToHost); + + printf("teste: %f\n", h_oC1[0]); + + // report effective bandwidths + float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/kernel_calls); + printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", + kernelBandwidth, + kernelTime/kernel_calls, + vector_size, 1, tile_dim * 1); + + free(h_iA); + free(h_oC1); + free(h_oC2); + free(h_oC3); + free(h_oC4); + + cudaFree(d_iA); + cudaFree(d_oC1); + cudaFree(d_oC2); + cudaFree(d_oC3); + cudaFree(d_oC4); + + + cudaEventDestroy(start); + cudaEventDestroy(stop); + + cudaDeviceReset(); + + printf("Test passed\n"); + + exit(EXIT_SUCCESS); +} diff --git a/cuda_code/fps_kernel_4.cu b/cuda_code/fps_kernel_4.cu new file mode 100644 index 
0000000000000000000000000000000000000000..b4ae4e0410f22cc2e5a68abb9b90208b2b42e845 --- /dev/null +++ b/cuda_code/fps_kernel_4.cu @@ -0,0 +1,182 @@ +/* CUDA Implementation for farthest point sampling*/ +#ifndef _FPS_KERNEL +#define _FPS_KERNEL + +#include + +#include +#include + +// Note: AT_ASSERT has become AT_CHECK on master after 0.4. +// Note: AT_CHECK has become TORCH_CHECK on master after 1.2. +#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) +// Note: CHECK_EQ, CHECK_GT, etc. are marcos in Pytorch. +// #define CHECK_EQ(x, y) TORCH_CHECK(x == y, #x " does not equal to " #y) +// #define CHECK_GT(x, y) TORCH_CHECK(x > y, #x " is not greater than " #y) + +#define MAX_THREADS 512 + +inline int opt_n_threads(int work_size) { + const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); + return max(min(1 << pow_2, MAX_THREADS), 1); +} + +#define RUN(BLOCK_SIZE, DIM) \ + AT_DISPATCH_FLOATING_TYPES(points.scalar_type(), "FarthestPointSample", ([&] { \ + FarthestPointSampleKernel \ + <<>>( \ + index.data(), \ + points.data(), \ + temp.data(), \ + num_points, \ + num_centroids); \ + })); + +#define RUN_DIM(BLOCK_SIZE) \ + switch (dim) { \ + case 3: \ + RUN(BLOCK_SIZE, 3) \ + break; \ + case 2: \ + RUN(BLOCK_SIZE, 2) \ + break; \ + default: \ + TORCH_CHECK(false, "Only support dim=2 or 3."); \ + } + +#define RUN_BLOCK(BLOCK_SIZE) \ + case BLOCK_SIZE: \ + RUN_DIM(BLOCK_SIZE) \ + break; + +/* +Forward kernel +points: (B, N1, D) +temp: (B, N1) +index: (B, N2) +*/ +template +__global__ void FarthestPointSampleKernel( + index_t* __restrict__ index, + const scalar_t* __restrict__ points, + scalar_t* __restrict__ temp, + const int64_t num_points, + const int64_t num_centroids) { + // Allocate shared memory + __shared__ scalar_t smem_dist[BLOCK_SIZE]; + // Use int to save memory + __shared__ int smem_idx[BLOCK_SIZE]; + + const int batch_idx = blockIdx.x; + int cur_idx = 0; + int points_offset = batch_idx * num_points * DIM; + int temp_offset = batch_idx * num_points; + int index_offset = batch_idx * num_centroids; + + // Explicitly choose the first point as a centroid + if (threadIdx.x == 0) index[index_offset] = cur_idx; + + for (int i = 1; i < num_centroids; ++i) { + scalar_t max_dist = 0.0; + int max_idx = cur_idx; + + int offset1 = cur_idx * DIM; + scalar_t coords1[DIM] = {0.0}; + #pragma unroll + for (int ii = 0; ii < DIM; ++ii) { + coords1[ii] = points[points_offset + offset1 + ii]; + } + + for (int j = threadIdx.x; j < num_points; j += BLOCK_SIZE) { + int offset2 = j * DIM; + scalar_t dist = 0.0; + #pragma unroll + for (int jj = 0; jj < DIM; ++jj) { + scalar_t diff = points[points_offset + offset2 + jj] - coords1[jj]; + dist += diff * diff; + } + + scalar_t last_dist = temp[temp_offset + j]; + if (last_dist > dist || last_dist < 0.0) { + temp[temp_offset + j] = dist; + } else { + dist = last_dist; + } + if (dist > max_dist) { + max_dist = dist; + max_idx = j; + } + } + + smem_dist[threadIdx.x] = max_dist; + smem_idx[threadIdx.x] = max_idx; + + // assert block_size == blockDim.x + int offset = BLOCK_SIZE / 2; + while (offset > 0) { + __syncthreads(); + if (threadIdx.x < offset) { + scalar_t dist1 = smem_dist[threadIdx.x]; + scalar_t dist2 = smem_dist[threadIdx.x+offset]; + if (dist1 < dist2) { + smem_dist[threadIdx.x] = dist2; + smem_idx[threadIdx.x] = smem_idx[threadIdx.x+offset]; + } + } + offset /= 2; + } + 
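+    // Block-wide argmax reduction (tree reduction in shared memory): every thread
+    // contributed its local farthest candidate (max_dist, max_idx) for the points it
+    // visited in the strided loop above, and each halving of `offset` keeps the larger
+    // of the two distances, so once the loop finishes smem_dist[0] / smem_idx[0] hold
+    // the farthest remaining point for the whole block. This relies on BLOCK_SIZE
+    // being a power of two, which opt_n_threads() guarantees by construction.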
__syncthreads(); + + cur_idx = smem_idx[0]; + if (threadIdx.x == 0) index[index_offset + i] = (index_t)cur_idx; + } +} + +/* +Forward interface +Input: + points: (B, N1, D) +Output: + index: (B, N2) +*/ +at::Tensor FarthestPointSample( + const at::Tensor points, + const int64_t num_centroids) { + + const auto batch_size = points.size(0); + const auto num_points = points.size(1); + const auto dim = points.size(2); + + // Sanity check + CHECK_INPUT(points); + TORCH_CHECK(dim == 2 || dim == 3, "Only support dim=2 or dim=3") + CHECK_GT(num_centroids, 0); + CHECK_GE(num_points, num_centroids); + + auto index = at::zeros({batch_size, num_centroids}, points.type().toScalarType(at::kLong)); + // In original implementation, it only allocates memory with the size of grid instead of batch size. + auto temp = at::neg(at::ones({batch_size, num_points}, points.type())); + + // In order to make full use of shared memory and threads, + // it is recommended to set num_centroids to be power of 2. + const auto n_threads = opt_n_threads(num_points); + + switch (n_threads) { + RUN_BLOCK(512) + RUN_BLOCK(256) + RUN_BLOCK(128) + RUN_BLOCK(64) + RUN_BLOCK(32) + RUN_BLOCK(16) + default: + RUN_DIM(16) + } + + THCudaCheck(cudaGetLastError()); + + return index; +} + +#endif \ No newline at end of file diff --git a/cuda_code/from_arrow_1.cu b/cuda_code/from_arrow_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..28fc2ae9d4fdc532fbcd1b3e5b82496bbf847748 --- /dev/null +++ b/cuda_code/from_arrow_1.cu @@ -0,0 +1,458 @@ +/* + * Copyright (c) 2020-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace cudf { + +namespace detail { +data_type arrow_to_cudf_type(arrow::DataType const& arrow_type) +{ + switch (arrow_type.id()) { + case arrow::Type::NA: return data_type(type_id::EMPTY); + case arrow::Type::BOOL: return data_type(type_id::BOOL8); + case arrow::Type::INT8: return data_type(type_id::INT8); + case arrow::Type::INT16: return data_type(type_id::INT16); + case arrow::Type::INT32: return data_type(type_id::INT32); + case arrow::Type::INT64: return data_type(type_id::INT64); + case arrow::Type::UINT8: return data_type(type_id::UINT8); + case arrow::Type::UINT16: return data_type(type_id::UINT16); + case arrow::Type::UINT32: return data_type(type_id::UINT32); + case arrow::Type::UINT64: return data_type(type_id::UINT64); + case arrow::Type::FLOAT: return data_type(type_id::FLOAT32); + case arrow::Type::DOUBLE: return data_type(type_id::FLOAT64); + case arrow::Type::DATE32: return data_type(type_id::TIMESTAMP_DAYS); + case arrow::Type::TIMESTAMP: { + auto type = static_cast(&arrow_type); + switch (type->unit()) { + case arrow::TimeUnit::type::SECOND: return data_type(type_id::TIMESTAMP_SECONDS); + case arrow::TimeUnit::type::MILLI: return data_type(type_id::TIMESTAMP_MILLISECONDS); + case arrow::TimeUnit::type::MICRO: return data_type(type_id::TIMESTAMP_MICROSECONDS); + case arrow::TimeUnit::type::NANO: return data_type(type_id::TIMESTAMP_NANOSECONDS); + default: CUDF_FAIL("Unsupported timestamp unit in arrow"); + } + } + case arrow::Type::DURATION: { + auto type = static_cast(&arrow_type); + switch (type->unit()) { + case arrow::TimeUnit::type::SECOND: return data_type(type_id::DURATION_SECONDS); + case arrow::TimeUnit::type::MILLI: return data_type(type_id::DURATION_MILLISECONDS); + case arrow::TimeUnit::type::MICRO: return data_type(type_id::DURATION_MICROSECONDS); + case arrow::TimeUnit::type::NANO: return data_type(type_id::DURATION_NANOSECONDS); + default: CUDF_FAIL("Unsupported duration unit in arrow"); + } + } + case arrow::Type::STRING: return data_type(type_id::STRING); + case arrow::Type::DICTIONARY: return data_type(type_id::DICTIONARY32); + case arrow::Type::LIST: return data_type(type_id::LIST); + case arrow::Type::DECIMAL: { + auto const type = static_cast(&arrow_type); + return data_type{type_id::DECIMAL64, -type->scale()}; + } + case arrow::Type::STRUCT: return data_type(type_id::STRUCT); + default: CUDF_FAIL("Unsupported type_id conversion to cudf"); + } +} + +namespace { +/** + * @brief Functor to return column for a corresponding arrow array. column + * is formed from buffer underneath the arrow array along with any offset and + * change in length that array has. + */ +struct dispatch_to_cudf_column { + /** + * @brief Returns mask from an array withut any offsets. 
+ */ + std::unique_ptr get_mask_buffer(arrow::Array const& array, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + if (array.null_bitmap_data() == nullptr) { + return std::make_unique(0, stream, mr); + } + auto mask = std::make_unique( + bitmask_allocation_size_bytes(static_cast(array.null_bitmap()->size() * CHAR_BIT)), + stream, + mr); + auto mask_buffer = array.null_bitmap(); + CUDA_TRY(cudaMemcpyAsync(mask->data(), + reinterpret_cast(mask_buffer->address()), + array.null_bitmap()->size(), + cudaMemcpyDefault, + stream.value())); + return mask; + } + + template ())> + std::unique_ptr operator()( + arrow::Array const&, data_type, bool, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) + { + CUDF_FAIL("Unsupported type in from_arrow."); + } + + template ())> + std::unique_ptr operator()(arrow::Array const& array, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + auto data_buffer = array.data()->buffers[1]; + size_type const num_rows = array.length(); + auto const has_nulls = skip_mask ? false : array.null_bitmap_data() != nullptr; + auto col = make_fixed_width_column(type, num_rows, mask_state::UNALLOCATED, stream, mr); + auto mutable_column_view = col->mutable_view(); + CUDA_TRY(cudaMemcpyAsync( + mutable_column_view.data(), + reinterpret_cast(data_buffer->address()) + array.offset() * sizeof(T), + sizeof(T) * num_rows, + cudaMemcpyDefault, + stream.value())); + if (has_nulls) { + auto tmp_mask = get_mask_buffer(array, stream, mr); + + // If array is sliced, we have to copy whole mask and then take copy. + auto out_mask = (num_rows == static_cast(data_buffer->size() / sizeof(T))) + ? std::move(*tmp_mask) + : cudf::detail::copy_bitmask(static_cast(tmp_mask->data()), + array.offset(), + array.offset() + num_rows, + stream, + mr); + + col->set_null_mask(std::move(out_mask)); + } + + return col; + } +}; + +std::unique_ptr get_empty_type_column(size_type size) +{ + return std::make_unique(data_type(type_id::EMPTY), size, rmm::device_buffer{}); +} + +/** + * @brief Returns cudf column formed from given arrow array + * This has been introduced to take care of compiler error "error: explicit specialization of + * function must precede its first use" + */ +std::unique_ptr get_column(arrow::Array const& array, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr); + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + using DeviceType = int64_t; + + auto constexpr BIT_WIDTH_RATIO = 2; // Array::Type:type::DECIMAL (128) / int64_t + auto data_buffer = array.data()->buffers[1]; + auto const num_rows = static_cast(array.length()); + + rmm::device_uvector buf(num_rows * BIT_WIDTH_RATIO, stream); + rmm::device_uvector out_buf(num_rows, stream, mr); + + CUDA_TRY(cudaMemcpyAsync( + reinterpret_cast(buf.data()), + reinterpret_cast(data_buffer->address()) + array.offset() * sizeof(DeviceType), + buf.size() * sizeof(DeviceType), + cudaMemcpyDefault, + stream.value())); + + auto every_other = [] __device__(size_type i) { return 2 * i; }; + auto gather_map = cudf::detail::make_counting_transform_iterator(0, every_other); + + thrust::gather( + rmm::exec_policy(stream), gather_map, gather_map + num_rows, buf.data(), out_buf.data()); + + auto null_mask = [&] { + if (not skip_mask and array.null_bitmap_data()) { + 
auto temp_mask = get_mask_buffer(array, stream, mr); + // If array is sliced, we have to copy whole mask and then take copy. + return (num_rows == static_cast(data_buffer->size() / sizeof(DeviceType))) + ? std::move(*temp_mask.release()) + : cudf::detail::copy_bitmask(static_cast(temp_mask->data()), + array.offset(), + array.offset() + num_rows, + stream, + mr); + } + return rmm::device_buffer{}; + }(); + + return std::make_unique(type, num_rows, out_buf.release(), std::move(null_mask)); +} + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto data_buffer = array.data()->buffers[1]; + auto data = rmm::device_buffer(data_buffer->size(), stream, mr); + CUDA_TRY(cudaMemcpyAsync(data.data(), + reinterpret_cast(data_buffer->address()), + data_buffer->size(), + cudaMemcpyDefault, + stream.value())); + auto out_col = mask_to_bools(static_cast(data.data()), + array.offset(), + array.offset() + array.length(), + stream, + mr); + + auto const has_nulls = skip_mask ? false : array.null_bitmap_data() != nullptr; + if (has_nulls) { + auto out_mask = + detail::copy_bitmask(static_cast(get_mask_buffer(array, stream, mr)->data()), + array.offset(), + array.offset() + array.length(), + stream, + mr); + + out_col->set_null_mask(std::move(out_mask)); + } + + return out_col; +} + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type, + bool, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (array.length() == 0) { return make_empty_column(data_type{type_id::STRING}); } + auto str_array = static_cast(&array); + auto offset_array = std::make_unique( + str_array->value_offsets()->size() / sizeof(int32_t), str_array->value_offsets(), nullptr); + auto char_array = std::make_unique( + str_array->value_data()->size(), str_array->value_data(), nullptr); + + auto offsets_column = dispatch_to_cudf_column{}.operator()( + *offset_array, data_type(type_id::INT32), true, stream, mr); + auto chars_column = dispatch_to_cudf_column{}.operator()( + *char_array, data_type(type_id::INT8), true, stream, mr); + + auto const num_rows = offsets_column->size() - 1; + auto out_col = make_strings_column(num_rows, + std::move(offsets_column), + std::move(chars_column), + UNKNOWN_NULL_COUNT, + std::move(*get_mask_buffer(array, stream, mr)), + stream, + mr); + + return num_rows == array.length() ? 
std::move(out_col) + : std::make_unique(cudf::detail::slice( + out_col->view(), + static_cast(array.offset()), + static_cast(array.offset() + array.length()))); +} + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type, + bool, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto dict_array = static_cast(&array); + auto dict_type = arrow_to_cudf_type(*(dict_array->dictionary()->type())); + auto keys_column = get_column(*(dict_array->dictionary()), dict_type, true, stream, mr); + auto ind_type = arrow_to_cudf_type(*(dict_array->indices()->type())); + + auto indices_column = get_column(*(dict_array->indices()), ind_type, false, stream, mr); + // If index type is not of type uint32_t, then cast it to uint32_t + auto const dict_indices_type = data_type{type_id::UINT32}; + if (indices_column->type().id() != dict_indices_type.id()) + indices_column = cudf::detail::cast(indices_column->view(), dict_indices_type, stream, mr); + + // Child columns shouldn't have masks and we need the mask in main column + auto column_contents = indices_column->release(); + indices_column = std::make_unique( + dict_indices_type, static_cast(array.length()), std::move(*(column_contents.data))); + + return make_dictionary_column(std::move(keys_column), + std::move(indices_column), + std::move(*(column_contents.null_mask)), + UNKNOWN_NULL_COUNT); +} + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type, + bool, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto struct_array = static_cast(&array); + std::vector> child_columns; + // Offsets have already been applied to child + arrow::ArrayVector array_children = struct_array->fields(); + std::transform(array_children.cbegin(), + array_children.cend(), + std::back_inserter(child_columns), + [&mr, &stream](auto const& child_array) { + auto type = arrow_to_cudf_type(*(child_array->type())); + return get_column(*child_array, type, false, stream, mr); + }); + + auto out_mask = std::move(*(get_mask_buffer(array, stream, mr))); + if (struct_array->null_bitmap_data() != nullptr) { + out_mask = detail::copy_bitmask(static_cast(out_mask.data()), + array.offset(), + array.offset() + array.length(), + stream, + mr); + } + + return make_structs_column( + array.length(), move(child_columns), UNKNOWN_NULL_COUNT, std::move(out_mask), stream, mr); +} + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type, + bool, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto list_array = static_cast(&array); + auto offset_array = std::make_unique( + list_array->value_offsets()->size() / sizeof(int32_t), list_array->value_offsets(), nullptr); + auto offsets_column = dispatch_to_cudf_column{}.operator()( + *offset_array, data_type(type_id::INT32), true, stream, mr); + + auto child_type = arrow_to_cudf_type(*(list_array->values()->type())); + auto child_column = get_column(*(list_array->values()), child_type, false, stream, mr); + + auto const num_rows = offsets_column->size() - 1; + auto out_col = make_lists_column(num_rows, + std::move(offsets_column), + std::move(child_column), + UNKNOWN_NULL_COUNT, + std::move(*get_mask_buffer(array, stream, mr)), + stream, + mr); + + return num_rows == array.length() ? 
std::move(out_col) + : std::make_unique(cudf::detail::slice( + out_col->view(), + static_cast(array.offset()), + static_cast(array.offset() + array.length()))); +} + +std::unique_ptr get_column(arrow::Array const& array, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + return type.id() != type_id::EMPTY + ? type_dispatcher(type, dispatch_to_cudf_column{}, array, type, skip_mask, stream, mr) + : get_empty_type_column(array.length()); +} + +} // namespace + +std::unique_ptr from_arrow(arrow::Table const& input_table, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (input_table.num_columns() == 0) { return std::make_unique
(); } + std::vector> columns; + auto chunked_arrays = input_table.columns(); + std::transform(chunked_arrays.begin(), + chunked_arrays.end(), + std::back_inserter(columns), + [&mr, &stream](auto const& chunked_array) { + std::vector> concat_columns; + auto cudf_type = arrow_to_cudf_type(*(chunked_array->type())); + auto array_chunks = chunked_array->chunks(); + if (cudf_type.id() == type_id::EMPTY) { + return get_empty_type_column(chunked_array->length()); + } + std::transform(array_chunks.begin(), + array_chunks.end(), + std::back_inserter(concat_columns), + [&cudf_type, &mr, &stream](auto const& array_chunk) { + return get_column(*array_chunk, cudf_type, false, stream, mr); + }); + if (concat_columns.empty()) { + return std::make_unique(cudf_type, 0, rmm::device_buffer{}); + } else if (concat_columns.size() == 1) { + return std::move(concat_columns[0]); + } + + std::vector column_views; + std::transform(concat_columns.begin(), + concat_columns.end(), + std::back_inserter(column_views), + [](auto const& col) { return col->view(); }); + return cudf::detail::concatenate(column_views, stream, mr); + }); + + return std::make_unique
(std::move(columns)); +} + +} // namespace detail + +std::unique_ptr<table>
from_arrow(arrow::Table const& input_table, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + + return detail::from_arrow(input_table, rmm::cuda_stream_default, mr); +} + +} // namespace cudf diff --git a/cuda_code/from_arrow_4.cu b/cuda_code/from_arrow_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..99b657fb9d5ddc227f97a15f47c5ef22e805e491 --- /dev/null +++ b/cuda_code/from_arrow_4.cu @@ -0,0 +1,455 @@ +/* + * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace cudf { + +namespace detail { +data_type arrow_to_cudf_type(arrow::DataType const& arrow_type) +{ + switch (arrow_type.id()) { + case arrow::Type::NA: return data_type(type_id::EMPTY); + case arrow::Type::BOOL: return data_type(type_id::BOOL8); + case arrow::Type::INT8: return data_type(type_id::INT8); + case arrow::Type::INT16: return data_type(type_id::INT16); + case arrow::Type::INT32: return data_type(type_id::INT32); + case arrow::Type::INT64: return data_type(type_id::INT64); + case arrow::Type::UINT8: return data_type(type_id::UINT8); + case arrow::Type::UINT16: return data_type(type_id::UINT16); + case arrow::Type::UINT32: return data_type(type_id::UINT32); + case arrow::Type::UINT64: return data_type(type_id::UINT64); + case arrow::Type::FLOAT: return data_type(type_id::FLOAT32); + case arrow::Type::DOUBLE: return data_type(type_id::FLOAT64); + case arrow::Type::DATE32: return data_type(type_id::TIMESTAMP_DAYS); + case arrow::Type::TIMESTAMP: { + auto type = static_cast(&arrow_type); + switch (type->unit()) { + case arrow::TimeUnit::type::SECOND: return data_type(type_id::TIMESTAMP_SECONDS); + case arrow::TimeUnit::type::MILLI: return data_type(type_id::TIMESTAMP_MILLISECONDS); + case arrow::TimeUnit::type::MICRO: return data_type(type_id::TIMESTAMP_MICROSECONDS); + case arrow::TimeUnit::type::NANO: return data_type(type_id::TIMESTAMP_NANOSECONDS); + default: CUDF_FAIL("Unsupported timestamp unit in arrow"); + } + } + case arrow::Type::DURATION: { + auto type = static_cast(&arrow_type); + switch (type->unit()) { + case arrow::TimeUnit::type::SECOND: return data_type(type_id::DURATION_SECONDS); + case arrow::TimeUnit::type::MILLI: return data_type(type_id::DURATION_MILLISECONDS); + case arrow::TimeUnit::type::MICRO: return data_type(type_id::DURATION_MICROSECONDS); + case arrow::TimeUnit::type::NANO: return data_type(type_id::DURATION_NANOSECONDS); + default: CUDF_FAIL("Unsupported duration unit in arrow"); + } + } + case arrow::Type::STRING: return data_type(type_id::STRING); + case arrow::Type::DICTIONARY: return data_type(type_id::DICTIONARY32); + case arrow::Type::LIST: return data_type(type_id::LIST); + case arrow::Type::DECIMAL: { + auto const type = static_cast(&arrow_type); + return data_type{type_id::DECIMAL128, 
-type->scale()}; + } + case arrow::Type::STRUCT: return data_type(type_id::STRUCT); + default: CUDF_FAIL("Unsupported type_id conversion to cudf"); + } +} + +namespace { +/** + * @brief Functor to return column for a corresponding arrow array. column + * is formed from buffer underneath the arrow array along with any offset and + * change in length that array has. + */ +struct dispatch_to_cudf_column { + /** + * @brief Returns mask from an array without any offsets. + */ + std::unique_ptr get_mask_buffer(arrow::Array const& array, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + if (array.null_bitmap_data() == nullptr) { + return std::make_unique(0, stream, mr); + } + auto mask = std::make_unique( + bitmask_allocation_size_bytes(static_cast(array.null_bitmap()->size() * CHAR_BIT)), + stream, + mr); + auto mask_buffer = array.null_bitmap(); + CUDA_TRY(cudaMemcpyAsync(mask->data(), + reinterpret_cast(mask_buffer->address()), + array.null_bitmap()->size(), + cudaMemcpyDefault, + stream.value())); + return mask; + } + + template ())> + std::unique_ptr operator()( + arrow::Array const&, data_type, bool, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) + { + CUDF_FAIL("Unsupported type in from_arrow."); + } + + template ())> + std::unique_ptr operator()(arrow::Array const& array, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + auto data_buffer = array.data()->buffers[1]; + size_type const num_rows = array.length(); + auto const has_nulls = skip_mask ? false : array.null_bitmap_data() != nullptr; + auto col = make_fixed_width_column(type, num_rows, mask_state::UNALLOCATED, stream, mr); + auto mutable_column_view = col->mutable_view(); + CUDA_TRY(cudaMemcpyAsync( + mutable_column_view.data(), + reinterpret_cast(data_buffer->address()) + array.offset() * sizeof(T), + sizeof(T) * num_rows, + cudaMemcpyDefault, + stream.value())); + if (has_nulls) { + auto tmp_mask = get_mask_buffer(array, stream, mr); + + // If array is sliced, we have to copy whole mask and then take copy. + auto out_mask = (num_rows == static_cast(data_buffer->size() / sizeof(T))) + ? 
std::move(*tmp_mask) + : cudf::detail::copy_bitmask(static_cast(tmp_mask->data()), + array.offset(), + array.offset() + num_rows, + stream, + mr); + + col->set_null_mask(std::move(out_mask)); + } + + return col; + } +}; + +std::unique_ptr get_empty_type_column(size_type size) +{ + return std::make_unique(data_type(type_id::EMPTY), size, rmm::device_buffer{}); +} + +/** + * @brief Returns cudf column formed from given arrow array + * This has been introduced to take care of compiler error "error: explicit specialization of + * function must precede its first use" + */ +std::unique_ptr get_column(arrow::Array const& array, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr); + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + using DeviceType = __int128_t; + + auto data_buffer = array.data()->buffers[1]; + auto const num_rows = static_cast(array.length()); + auto col = make_fixed_width_column(type, num_rows, mask_state::UNALLOCATED, stream, mr); + auto mutable_column_view = col->mutable_view(); + + CUDA_TRY(cudaMemcpyAsync( + mutable_column_view.data(), + reinterpret_cast(data_buffer->address()) + array.offset() * sizeof(DeviceType), + sizeof(DeviceType) * num_rows, + cudaMemcpyDefault, + stream.value())); + + auto null_mask = [&] { + if (not skip_mask and array.null_bitmap_data()) { + auto temp_mask = get_mask_buffer(array, stream, mr); + // If array is sliced, we have to copy whole mask and then take copy. + return (num_rows == static_cast(data_buffer->size() / sizeof(DeviceType))) + ? std::move(*temp_mask.release()) + : cudf::detail::copy_bitmask(static_cast(temp_mask->data()), + array.offset(), + array.offset() + num_rows, + stream, + mr); + } + return rmm::device_buffer{}; + }(); + + col->set_null_mask(std::move(null_mask)); + return col; +} + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto data_buffer = array.data()->buffers[1]; + auto data = rmm::device_buffer(data_buffer->size(), stream, mr); + CUDA_TRY(cudaMemcpyAsync(data.data(), + reinterpret_cast(data_buffer->address()), + data_buffer->size(), + cudaMemcpyDefault, + stream.value())); + auto out_col = mask_to_bools(static_cast(data.data()), + array.offset(), + array.offset() + array.length(), + stream, + mr); + + auto const has_nulls = skip_mask ? 
false : array.null_bitmap_data() != nullptr; + if (has_nulls) { + auto out_mask = + detail::copy_bitmask(static_cast(get_mask_buffer(array, stream, mr)->data()), + array.offset(), + array.offset() + array.length(), + stream, + mr); + + out_col->set_null_mask(std::move(out_mask)); + } + + return out_col; +} + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type, + bool, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (array.length() == 0) { return make_empty_column(type_id::STRING); } + auto str_array = static_cast(&array); + auto offset_array = std::make_unique( + str_array->value_offsets()->size() / sizeof(int32_t), str_array->value_offsets(), nullptr); + auto char_array = std::make_unique( + str_array->value_data()->size(), str_array->value_data(), nullptr); + + auto offsets_column = dispatch_to_cudf_column{}.operator()( + *offset_array, data_type(type_id::INT32), true, stream, mr); + auto chars_column = dispatch_to_cudf_column{}.operator()( + *char_array, data_type(type_id::INT8), true, stream, mr); + + auto const num_rows = offsets_column->size() - 1; + auto out_col = make_strings_column(num_rows, + std::move(offsets_column), + std::move(chars_column), + UNKNOWN_NULL_COUNT, + std::move(*get_mask_buffer(array, stream, mr))); + + return num_rows == array.length() + ? std::move(out_col) + : std::make_unique( + cudf::detail::slice(out_col->view(), + static_cast(array.offset()), + static_cast(array.offset() + array.length())), + stream, + mr); +} + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type, + bool, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto dict_array = static_cast(&array); + auto dict_type = arrow_to_cudf_type(*(dict_array->dictionary()->type())); + auto keys_column = get_column(*(dict_array->dictionary()), dict_type, true, stream, mr); + auto ind_type = arrow_to_cudf_type(*(dict_array->indices()->type())); + + auto indices_column = get_column(*(dict_array->indices()), ind_type, false, stream, mr); + // If index type is not of type uint32_t, then cast it to uint32_t + auto const dict_indices_type = data_type{type_id::UINT32}; + if (indices_column->type().id() != dict_indices_type.id()) + indices_column = cudf::detail::cast(indices_column->view(), dict_indices_type, stream, mr); + + // Child columns shouldn't have masks and we need the mask in main column + auto column_contents = indices_column->release(); + indices_column = std::make_unique( + dict_indices_type, static_cast(array.length()), std::move(*(column_contents.data))); + + return make_dictionary_column(std::move(keys_column), + std::move(indices_column), + std::move(*(column_contents.null_mask)), + UNKNOWN_NULL_COUNT); +} + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type, + bool, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto struct_array = static_cast(&array); + std::vector> child_columns; + // Offsets have already been applied to child + arrow::ArrayVector array_children = struct_array->fields(); + std::transform(array_children.cbegin(), + array_children.cend(), + std::back_inserter(child_columns), + [&mr, &stream](auto const& child_array) { + auto type = arrow_to_cudf_type(*(child_array->type())); + return get_column(*child_array, type, false, stream, mr); + }); + + auto out_mask = std::move(*(get_mask_buffer(array, stream, mr))); + if 
(struct_array->null_bitmap_data() != nullptr) { + out_mask = detail::copy_bitmask(static_cast(out_mask.data()), + array.offset(), + array.offset() + array.length(), + stream, + mr); + } + + return make_structs_column( + array.length(), move(child_columns), UNKNOWN_NULL_COUNT, std::move(out_mask), stream, mr); +} + +template <> +std::unique_ptr dispatch_to_cudf_column::operator()( + arrow::Array const& array, + data_type, + bool, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto list_array = static_cast(&array); + auto offset_array = std::make_unique( + list_array->value_offsets()->size() / sizeof(int32_t), list_array->value_offsets(), nullptr); + auto offsets_column = dispatch_to_cudf_column{}.operator()( + *offset_array, data_type(type_id::INT32), true, stream, mr); + + auto child_type = arrow_to_cudf_type(*(list_array->values()->type())); + auto child_column = get_column(*(list_array->values()), child_type, false, stream, mr); + + auto const num_rows = offsets_column->size() - 1; + auto out_col = make_lists_column(num_rows, + std::move(offsets_column), + std::move(child_column), + UNKNOWN_NULL_COUNT, + std::move(*get_mask_buffer(array, stream, mr)), + stream, + mr); + + return num_rows == array.length() + ? std::move(out_col) + : std::make_unique( + cudf::detail::slice(out_col->view(), + static_cast(array.offset()), + static_cast(array.offset() + array.length())), + stream, + mr); +} + +std::unique_ptr get_column(arrow::Array const& array, + data_type type, + bool skip_mask, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + return type.id() != type_id::EMPTY + ? type_dispatcher(type, dispatch_to_cudf_column{}, array, type, skip_mask, stream, mr) + : get_empty_type_column(array.length()); +} + +} // namespace + +std::unique_ptr
from_arrow(arrow::Table const& input_table, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (input_table.num_columns() == 0) { return std::make_unique<table>
(); } + std::vector> columns; + auto chunked_arrays = input_table.columns(); + std::transform(chunked_arrays.begin(), + chunked_arrays.end(), + std::back_inserter(columns), + [&mr, &stream](auto const& chunked_array) { + std::vector> concat_columns; + auto cudf_type = arrow_to_cudf_type(*(chunked_array->type())); + auto array_chunks = chunked_array->chunks(); + if (cudf_type.id() == type_id::EMPTY) { + return get_empty_type_column(chunked_array->length()); + } + std::transform(array_chunks.begin(), + array_chunks.end(), + std::back_inserter(concat_columns), + [&cudf_type, &mr, &stream](auto const& array_chunk) { + return get_column(*array_chunk, cudf_type, false, stream, mr); + }); + if (concat_columns.empty()) { + return std::make_unique(cudf_type, 0, rmm::device_buffer{}); + } else if (concat_columns.size() == 1) { + return std::move(concat_columns[0]); + } + + std::vector column_views; + std::transform(concat_columns.begin(), + concat_columns.end(), + std::back_inserter(column_views), + [](auto const& col) { return col->view(); }); + return cudf::detail::concatenate(column_views, stream, mr); + }); + + return std::make_unique
(std::move(columns)); +} + +} // namespace detail + +std::unique_ptr<table>
from_arrow(arrow::Table const& input_table, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + + return detail::from_arrow(input_table, rmm::cuda_stream_default, mr); +} + +} // namespace cudf diff --git a/cuda_code/fsa_algo_17.cu b/cuda_code/fsa_algo_17.cu new file mode 100644 index 0000000000000000000000000000000000000000..c7106bc0ce0d5a2f887fc572ae92b7e584664d4d --- /dev/null +++ b/cuda_code/fsa_algo_17.cu @@ -0,0 +1,1961 @@ +/** + * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu, + * Wei Kang) + * Mobvoi Inc. (authors: Fangjun Kuang) + * + * See LICENSE for clarification regarding multiple authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include + +#include "k2/csrc/array_ops.h" +#include "k2/csrc/fsa_algo.h" +#include "k2/csrc/fsa_utils.h" +#include "k2/csrc/host/aux_labels.h" +#include "k2/csrc/host/connect.h" +#include "k2/csrc/host/determinize.h" +#include "k2/csrc/host/intersect.h" +#include "k2/csrc/host/rmepsilon.h" +#include "k2/csrc/host/topsort.h" +#include "k2/csrc/host_shim.h" +#include "k2/csrc/macros.h" +#include "k2/csrc/rm_epsilon.h" + + +// this contains a subset of the algorithms in fsa_algo.h; currently it just +// contains one that are wrappings of the corresponding algorithms in +// host/. +namespace k2 { + +bool RecursionWrapper(bool (*f)(Fsa &, Fsa *, Array1 *), Fsa &src, + Fsa *dest, Array1 *arc_map) { + NVTX_RANGE(K2_FUNC); + // src is actually an FsaVec. Just recurse for now. + int32_t num_fsas = src.shape.Dim0(); + std::vector srcs(num_fsas), dests(num_fsas); + std::vector> arc_maps(num_fsas); + int32_t tot_num_arcs = 0; + for (int32_t i = 0; i < num_fsas; ++i) { + srcs[i] = src.Index(0, i); + // Recurse. + if (!f(srcs[i], &(dests[i]), + (arc_map != nullptr ? 
&(arc_maps[i]) : nullptr))) + return false; + if (arc_map != nullptr) { + // convert arc indexes in arc_maps from idx2 to idx012 + arc_maps[i] = Plus(arc_maps[i], tot_num_arcs); + tot_num_arcs += srcs[i].NumElements(); + } + } + *dest = Stack(0, num_fsas, dests.data()); + if (arc_map != nullptr) + *arc_map = Cat(src.Context(), num_fsas, arc_maps.data()); + return true; +} + +bool ConnectHost(Fsa &src, Fsa *dest, Array1 *arc_map /*=nullptr*/) { + NVTX_RANGE(K2_FUNC); + int32_t num_axes = src.NumAxes(); + if (num_axes < 2 || num_axes > 3) { + K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; + } else if (num_axes == 3) { + return RecursionWrapper(ConnectHost, src, dest, arc_map); + } + + k2host::Fsa host_fsa = FsaToHostFsa(src); + k2host::Connection c(host_fsa); + k2host::Array2Size size; + c.GetSizes(&size); + FsaCreator creator(size); + k2host::Fsa host_dest_fsa = creator.GetHostFsa(); + int32_t *arc_map_data = nullptr; + if (arc_map != nullptr) { + *arc_map = Array1(src.Context(), size.size2); + arc_map_data = arc_map->Data(); + } + bool ans = c.GetOutput(&host_dest_fsa, arc_map_data); + *dest = creator.GetFsa(); + return ans; +} + +bool TopSortHost(Fsa &src, Fsa *dest, Array1 *arc_map /*=nullptr*/) { + NVTX_RANGE(K2_FUNC); + int32_t num_axes = src.NumAxes(); + if (num_axes < 2 || num_axes > 3) { + K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; + } else if (num_axes == 3) { + return RecursionWrapper(TopSortHost, src, dest, arc_map); + } + + k2host::Fsa host_fsa = FsaToHostFsa(src); + k2host::TopSorter sorter(host_fsa); + k2host::Array2Size size; + sorter.GetSizes(&size); + FsaCreator creator(size); + k2host::Fsa host_dest_fsa = creator.GetHostFsa(); + int32_t *arc_map_data = nullptr; + if (arc_map != nullptr) { + *arc_map = Array1(src.Context(), size.size2); + arc_map_data = arc_map->Data(); + } + bool ans = sorter.GetOutput(&host_dest_fsa, arc_map_data); + *dest = creator.GetFsa(); + return ans; +} + +bool Intersect(FsaOrVec &a_fsas, int32_t properties_a, FsaOrVec &b_fsas, + int32_t properties_b, bool treat_epsilons_specially, FsaVec *out, + Array1 *arc_map_a, Array1 *arc_map_b) { + NVTX_RANGE(K2_FUNC); + K2_CHECK(a_fsas.NumAxes() >= 2 && a_fsas.NumAxes() <= 3); + K2_CHECK(b_fsas.NumAxes() >= 2 && b_fsas.NumAxes() <= 3); + ContextPtr c = a_fsas.Context(); + K2_CHECK_EQ(c->GetDeviceType(), kCpu); + if (a_fsas.NumAxes() == 2) { + FsaVec a_fsas_vec = FsaToFsaVec(a_fsas); + return Intersect(a_fsas_vec, properties_a, b_fsas, properties_b, + treat_epsilons_specially, out, arc_map_a, arc_map_b); + } + if (b_fsas.NumAxes() == 2) { + FsaVec b_fsas_vec = FsaToFsaVec(b_fsas); + return Intersect(a_fsas, properties_a, b_fsas_vec, properties_b, + treat_epsilons_specially, out, arc_map_a, arc_map_b); + } + + int32_t num_fsas_a = a_fsas.Dim0(), num_fsas_b = b_fsas.Dim0(); + K2_CHECK_GT(num_fsas_a, 0); + K2_CHECK_GT(num_fsas_b, 0); + int32_t stride_a = 1, stride_b = 1; + if (num_fsas_a != num_fsas_b) { + if (num_fsas_a == 1) { + stride_a = 0; + } else if (num_fsas_b == 1) { + stride_b = 0; + } else { + K2_CHECK_EQ(num_fsas_a, num_fsas_b); + } + // the check on the previous line will fail. 
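+    // Note on the stride trick: a stride of zero lets a single FSA be broadcast
+    // against every FSA of the other input. The loop below reads the i-th pair as
+    // FsaVecToHostFsa(a_fsas, i * stride_a) and FsaVecToHostFsa(b_fsas, i * stride_b),
+    // so a zero stride simply re-reads FSA 0 for every i and the 1-vs-N case needs
+    // no copying. For example, intersecting one decoding graph with a batch of 4
+    // inputs gives stride_a = 0, stride_b = 1, i.e. pairs (a[0], b[0]), (a[0], b[1]),
+    // (a[0], b[2]), (a[0], b[3]).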
+ } + if (properties_a < 0) { + Array1 properties_a_out(c, num_fsas_a); + GetFsaVecBasicProperties(a_fsas, &properties_a_out, &properties_a); + } + if (properties_b < 0) { + Array1 properties_b_out(c, num_fsas_b); + GetFsaVecBasicProperties(b_fsas, &properties_b_out, &properties_b); + } + bool arc_sorted = (properties_a & kFsaPropertiesArcSorted) && + (properties_b & kFsaPropertiesArcSorted); + K2_CHECK(arc_sorted) << "Both a_fsas and b_fsas should be arc-sorted"; + int32_t num_fsas = std::max(num_fsas_a, num_fsas_b); + + std::vector> intersections(num_fsas); + std::vector> sizes(num_fsas); + for (int32_t i = 0; i < num_fsas; ++i) { + k2host::Fsa host_fsa_a = FsaVecToHostFsa(a_fsas, i * stride_a), + host_fsa_b = FsaVecToHostFsa(b_fsas, i * stride_b); + intersections[i] = std::make_unique( + host_fsa_a, host_fsa_b, treat_epsilons_specially, false); + intersections[i]->GetSizes(&(sizes[i])); + } + FsaVecCreator creator(sizes); + int32_t num_arcs = creator.NumArcs(); + + if (arc_map_a) *arc_map_a = Array1(c, num_arcs); + if (arc_map_b) *arc_map_b = Array1(c, num_arcs); + + // the following few lines will allow us to add suitable offsets to the + // `arc_map`. + Array1 a_fsas_row_splits12 = + a_fsas.RowSplits(2)[a_fsas.RowSplits(1)], + b_fsas_row_splits12 = + b_fsas.RowSplits(2)[b_fsas.RowSplits(1)]; + const int32_t *a_fsas_row_splits12_data = a_fsas_row_splits12.Data(), + *b_fsas_row_splits12_data = b_fsas_row_splits12.Data(); + + bool ok = true; + for (int32_t i = 0; i < num_fsas; ++i) { + k2host::Fsa host_fsa_out = creator.GetHostFsa(i); + int32_t arc_offset = creator.GetArcOffsetFor(i); + int32_t *this_arc_map_a = + (arc_map_a ? arc_map_a->Data() + arc_offset : nullptr), + *this_arc_map_b = + (arc_map_b ? arc_map_b->Data() + arc_offset : nullptr); + bool ans = intersections[i]->GetOutput(&host_fsa_out, this_arc_map_a, + this_arc_map_b); + ok = ok && ans; + int32_t this_num_arcs = creator.GetArcOffsetFor(i + 1) - arc_offset; + if (arc_map_a) { + int32_t arc_offset_a = a_fsas_row_splits12_data[i * stride_a]; + for (int32_t i = 0; i < this_num_arcs; i++) + if (this_arc_map_a[i] != -1) this_arc_map_a[i] += arc_offset_a; + } + if (arc_map_b) { + int32_t arc_offset_b = b_fsas_row_splits12_data[i * stride_b]; + for (int32_t i = 0; i < this_num_arcs; i++) + if (this_arc_map_b[i] != -1) this_arc_map_b[i] += arc_offset_b; + } + } + *out = creator.GetFsaVec(); + return ok; +} + +// Will be used in RemoveEpsilonHost and Determinize below to process FsaVec +// input recursively. +void RecursionWrapper(void (*f)(FsaOrVec &, FsaOrVec *, Ragged *), + FsaOrVec &src, FsaOrVec *dest, + Ragged *arc_deriv) { + NVTX_RANGE(K2_FUNC); + // src is actually an FsaVec. Just recurse for now. + K2_CHECK_EQ(src.NumAxes(), 3); + int32_t num_fsas = src.shape.Dim0(); + std::vector srcs(num_fsas), dests(num_fsas); + std::vector> arc_derivs(num_fsas); + int32_t tot_num_arcs = 0; + for (int32_t i = 0; i < num_fsas; ++i) { + srcs[i] = src.Index(0, i); + f(srcs[i], &(dests[i]), arc_deriv != nullptr ? 
&(arc_derivs[i]) : nullptr); + if (arc_deriv != nullptr) { + // convert arc indexes in arc_derivs from idx2 to idx012 + Array1 &values = arc_derivs[i].values; + values = Plus(values, tot_num_arcs); + tot_num_arcs += srcs[i].NumElements(); + } + } + *dest = Stack(0, num_fsas, dests.data()); + if (arc_deriv != nullptr) *arc_deriv = Cat(0, num_fsas, arc_derivs.data()); +} + +void RemoveEpsilonHost(FsaOrVec &src, FsaOrVec *dest, + Ragged *arc_derivs /*=nullptr*/) { + NVTX_RANGE(K2_FUNC); + int32_t num_axes = src.NumAxes(); + if (num_axes < 2 || num_axes > 3) { + K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; + } else if (num_axes == 3) { + return RecursionWrapper(RemoveEpsilonHost, src, dest, arc_derivs); + } + k2host::Fsa host_fsa = FsaToHostFsa(src); + int32_t num_states = host_fsa.NumStates(); + K2_CHECK_EQ(num_states, src.Dim0()); + std::vector max_forward_weights(num_states); + std::vector max_backward_weights(num_states); + k2host::WfsaWithFbWeights max_wfsa(host_fsa, k2host::kMaxWeight, + max_forward_weights.data(), + max_backward_weights.data()); + // pass infinity as beam since we don't do pruning here. + float beam = std::numeric_limits::infinity(); + k2host::EpsilonsRemoverPrunedMax eps_remover(max_wfsa, beam); + k2host::Array2Size fsa_size, arc_derivs_size; + eps_remover.GetSizes(&fsa_size, &arc_derivs_size); + FsaCreator fsa_creator(fsa_size); + k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa(); + K2_STATIC_ASSERT( + (std::is_same::value)); + Ragged2Creator ragged_creator(arc_derivs_size); + k2host::Array2 host_arc_derivs = + ragged_creator.GetHostArray2(); + eps_remover.GetOutput(&host_dest_fsa, &host_arc_derivs); + *dest = fsa_creator.GetFsa(); + if (arc_derivs != nullptr) *arc_derivs = ragged_creator.GetRagged2(); +} + + +void RemoveEpsilon(FsaOrVec &src, int32_t properties, + FsaOrVec *dest, + Ragged *arc_derivs) { + if ((properties & kFsaPropertiesTopSortedAndAcyclic) != 0 && + src.Context()->GetDeviceType() == kCpu) { + // Host version of the algorithm + RemoveEpsilonHost(src, dest, arc_derivs); + } else { + RemoveEpsilonDevice(src, dest, arc_derivs); + } +} + + +void RemoveEpsilonAndAddSelfLoops(FsaOrVec &src, int32_t properties, + FsaOrVec *dest, + Ragged *arc_derivs) { + NVTX_RANGE(K2_FUNC); + Ragged arc_derivs1; + + FsaOrVec temp; + RemoveEpsilon(src, properties, &temp, + (arc_derivs != nullptr ? &arc_derivs1 : nullptr)); + + Array1 arc_derivs2; + AddEpsilonSelfLoops(temp, dest, + (arc_derivs != nullptr ? &arc_derivs2 : nullptr)); + + if (arc_derivs != nullptr) { + *arc_derivs = Index(arc_derivs1, 0, arc_derivs2, nullptr); + } +} + + +void Determinize(FsaOrVec &src, + DeterminizeWeightPushingType weight_pushing_type, + FsaOrVec *dest, Ragged *arc_derivs /*=nullptr*/) { + NVTX_RANGE(K2_FUNC); + int32_t num_axes = src.NumAxes(); + if (num_axes < 2 || num_axes > 3) { + K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; + } else if (num_axes == 3) { + int32_t num_fsas = src.shape.Dim0(); + std::vector srcs(num_fsas), dests(num_fsas); + std::vector> derivs_vector(num_fsas); + int32_t tot_num_arcs = 0; + for (int32_t i = 0; i < num_fsas; ++i) { + srcs[i] = src.Index(0, i); + Determinize(srcs[i], weight_pushing_type, &(dests[i]), + arc_derivs != nullptr ? 
&(derivs_vector[i]) : nullptr); + if (arc_derivs != nullptr) { + // convert arc indexes in arc_derivs from idx2 to idx012 + Array1 &values = arc_derivs[i].values; + values = Plus(values, tot_num_arcs); + tot_num_arcs += srcs[i].NumElements(); + } + } + *dest = Stack(0, num_fsas, dests.data()); + if (arc_derivs != nullptr) *arc_derivs = Cat(0, num_fsas, + derivs_vector.data()); + return; + } + k2host::Fsa host_fsa = FsaToHostFsa(src); + int32_t num_states = host_fsa.NumStates(); + K2_CHECK_EQ(num_states, src.Dim0()); + int32_t max_step = -1; // no limit + k2host::FbWeightType host_weight_pushing_type = + static_cast(static_cast(weight_pushing_type)); + k2host::DeterminizerMax determinizer(host_fsa, max_step, + host_weight_pushing_type); + k2host::Array2Size fsa_size, arc_derivs_size; + determinizer.GetSizes(&fsa_size, &arc_derivs_size); + FsaCreator fsa_creator(fsa_size); + k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa(); + K2_STATIC_ASSERT( + (std::is_same::value)); + Ragged2Creator ragged_creator(arc_derivs_size); + k2host::Array2 host_arc_derivs = + ragged_creator.GetHostArray2(); + determinizer.GetOutput(&host_dest_fsa, &host_arc_derivs); + *dest = fsa_creator.GetFsa(); + if (arc_derivs != nullptr) *arc_derivs = ragged_creator.GetRagged2(); +} + +Fsa LinearFsa(const Array1 &symbols) { + NVTX_RANGE(K2_FUNC); + ContextPtr &c = symbols.Context(); + int32_t n = symbols.Dim(), num_states = n + 2, num_arcs = n + 1; + Array1 row_splits1 = Range(c, num_states + 1, 0), + row_ids1 = Range(c, num_arcs, 0); + int32_t *row_splits1_data = row_splits1.Data(); + Array1 arcs(c, num_arcs); + Arc *arcs_data = arcs.Data(); + const int32_t *symbols_data = symbols.Data(); + K2_EVAL( + c, num_arcs, lambda_set_arcs, (int32_t arc_idx01)->void { + int32_t src_state = arc_idx01, dest_state = arc_idx01 + 1, + // -1 == kFinalSymbol + symbol = (arc_idx01 < n ? symbols_data[arc_idx01] : -1); + if (arc_idx01 < n) K2_CHECK_NE(symbol, -1); + float score = 0.0; + arcs_data[arc_idx01] = Arc(src_state, dest_state, symbol, score); + // the final state has no leaving arcs. + if (arc_idx01 == 0) row_splits1_data[num_states] = num_arcs; + }); + return Ragged(RaggedShape2(&row_splits1, &row_ids1, num_arcs), arcs); +} + +FsaVec LinearFsas(const Ragged &symbols) { + NVTX_RANGE(K2_FUNC); + K2_CHECK_EQ(symbols.NumAxes(), 2); + ContextPtr &c = symbols.Context(); + + // if there are n symbols, there are n+2 states and n+1 arcs. + RaggedShape states_shape = ChangeSublistSize(symbols.shape, 2); + + int32_t num_states = states_shape.NumElements(), + num_arcs = symbols.NumElements() + symbols.Dim0(); + + // row_splits2 maps from state_idx01 to arc_idx012; row_ids2 does the reverse. + // We'll set them in the lambda below. + Array1 row_splits2(c, num_states + 1), row_ids2(c, num_arcs); + // If num_states equals to zero, the code below won't set the last value of + // row_splits2, we should initialize here, or it will be a random value. 
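+  // Illustrative sizing example (hypothetical input): symbols == [ [1 2 3], [5] ]
+  // gives num_states == 5 + 3 == 8 and num_arcs == 4 + 2 == 6, i.e. each FSA gets
+  // n + 2 states and n + 1 arcs, with the last arc labelled -1.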
+ if (num_states == 0) row_splits2 = 0; + + int32_t *row_ids2_data = row_ids2.Data(), + *row_splits2_data = row_splits2.Data(); + const int32_t *row_ids1_data = states_shape.RowIds(1).Data(), + *row_splits1_data = states_shape.RowSplits(1).Data(), + *symbols_data = symbols.values.Data(); + Array1 arcs(c, num_arcs); + Arc *arcs_data = arcs.Data(); + K2_EVAL( + c, num_states, lambda, (int32_t state_idx01)->void { + int32_t fsa_idx0 = row_ids1_data[state_idx01], + state_idx0x = row_splits1_data[fsa_idx0], + next_state_idx0x = row_splits1_data[fsa_idx0 + 1], + idx1 = state_idx01 - state_idx0x; + + // the following works because each FSA has one fewer arcs than states. + int32_t arc_idx0xx = state_idx0x - fsa_idx0, + next_arc_idx0xx = next_state_idx0x - (fsa_idx0 + 1), + // the following may look a bit wrong.. here, the idx1 is the + // same as the idx12 if the arc exists, because each state has + // one arc leaving it (except the last state). + arc_idx012 = arc_idx0xx + idx1; + // the following works because each FSA has one fewer symbols than arcs + // (however it doesn't work for the last arc of each FSA; we check + // below.) + int32_t symbol_idx01 = arc_idx012 - fsa_idx0; + if (arc_idx012 < next_arc_idx0xx) { + int32_t src_state = idx1, dest_state = idx1 + 1, + symbol = (arc_idx012 + 1 < next_arc_idx0xx + ? symbols_data[symbol_idx01] + : -1); // kFinalSymbol + float score = 0.0; + arcs_data[arc_idx012] = Arc(src_state, dest_state, symbol, score); + row_ids2_data[arc_idx012] = state_idx01; + } else { + // The following ensures that the last element of row_splits1_data + // (i.e. row_splits1[num_states]) is set to num_arcs. It also writes + // something unnecessary for the last state of each FSA but the last + // one, which will cause 2 threads to write the same item to the same + // location. Note that there is no arc with index `arc_idx01`, if you + // reach here. + row_splits2_data[state_idx01 + 1] = arc_idx012; + } + row_splits2_data[state_idx01] = arc_idx012; + }); + return Ragged( + RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1), + num_states, &row_splits2, &row_ids2, num_arcs), + arcs); +} + +FsaVec LevenshteinGraphs(const Ragged &symbols, + float ins_del_score /* = -0.501 */, + Array1 *aux_labels /*= nullptr*/, + Array1 *score_offsets /*= nullptr*/) { + NVTX_RANGE(K2_FUNC); + K2_CHECK_EQ(symbols.NumAxes(), 2); + ContextPtr &c = symbols.Context(); + + // For each fsa, the number of states will be number of symbols plus 2, we + // plus 2 because we need an extra super final arc for each fsa. + RaggedShape fsa_to_states = ChangeSublistSize(symbols.shape, 2); + + int32_t num_states = fsa_to_states.NumElements(); + Array1 num_arcs_for(c, num_states + 1); + int32_t *num_arcs_for_data = num_arcs_for.Data(); + // "fts" is short for fsa to states + const int32_t *fts_row_splits1_data = fsa_to_states.RowSplits(1).Data(), + *fts_row_ids1_data = fsa_to_states.RowIds(1).Data(); + // set the arcs number for each state + K2_EVAL( + c, num_states, lambda_set_num_arcs, (int32_t state_idx01)->void { + int32_t fsa_idx0 = fts_row_ids1_data[state_idx01], + final_state = fts_row_splits1_data[fsa_idx0 + 1] - 1, + current_num_arcs = 3; // normally there are three arcs, + // self-loop and two arcs pointing to + // the next state. 
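+        // Illustrative count (hypothetical input): for symbols [a b] the graph has
+        // states s0..s3; s0 and s1 get 3 arcs each, s2 (final_state - 1) gets 2,
+        // and the final state s3 gets 0, matching 3 * 2 + 2 arcs for that FSA.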
+ if (state_idx01 == final_state - 1) + current_num_arcs = 2; + else if (state_idx01 == final_state) + current_num_arcs = 0; + num_arcs_for_data[state_idx01] = current_num_arcs; + }); + ExclusiveSum(num_arcs_for, &num_arcs_for); + Array1 &states_to_arcs_row_splits = num_arcs_for; + int32_t num_arcs = symbols.NumElements() * 3 + symbols.Dim0() * 2; + RaggedShape states_to_arcs = + RaggedShape2(&states_to_arcs_row_splits, nullptr, num_arcs); + + // shape with a index of [fsa][state][arc] + RaggedShape shape = ComposeRaggedShapes(fsa_to_states, states_to_arcs); + Array1 arcs(c, num_arcs); + Arc *arcs_data = arcs.Data(); + const int32_t *row_splits1_data = shape.RowSplits(1).Data(), + *row_ids1_data = shape.RowIds(1).Data(), + *row_splits2_data = shape.RowSplits(2).Data(), + *row_ids2_data = shape.RowIds(2).Data(), + *symbols_data = symbols.values.Data(); + + int32_t *aux_labels_data = nullptr; + if (aux_labels != nullptr) { + *aux_labels = Array1(c, num_arcs); + aux_labels_data = aux_labels->Data(); + } + float *score_offsets_data = nullptr; + if (score_offsets != nullptr) { + *score_offsets = Array1(c, num_arcs); + score_offsets_data = score_offsets->Data(); + } + + K2_EVAL( + c, num_arcs, lambda_set_arcs, (int32_t arc_idx012)->void { + int32_t state_idx01 = row_ids2_data[arc_idx012], + fsa_idx0 = row_ids1_data[state_idx01], + state_idx0x = row_splits1_data[fsa_idx0], + final_state_idx01 = row_splits1_data[fsa_idx0 + 1] - 1, + state_idx1 = state_idx01 - state_idx0x, + arc_idx01x = row_splits2_data[state_idx01], + arc_idx2 = arc_idx012 - arc_idx01x, + sym_state_idx01 = state_idx01 - 2 * fsa_idx0, + current_symbol = 0, + aux_labels_value = 0; + + if (state_idx01 != final_state_idx01 - 1 && + state_idx01 != final_state_idx01) { + current_symbol = symbols_data[sym_state_idx01]; + K2_CHECK((current_symbol != 0) && (current_symbol != -1)) + << "0 and -1 are not expected to be a symbol."; + } + + float score_offset_value = 0; + Arc arc; + arc.src_state = state_idx1; + + switch (arc_idx2) { + case 0: // the self loop arc + arc.label = 0; + arc.dest_state = state_idx1; + arc.score = ins_del_score; + aux_labels_value = 0; + score_offset_value = ins_del_score - (-0.5); + break; + case 1: // the arc pointing to next state with blank + if (state_idx01 == final_state_idx01 - 1) { // the arc pointing to + // final state + arc.label = -1; + arc.score = 0; + aux_labels_value = -1; + } else { + arc.label = 0; + arc.score = -0.5; + aux_labels_value = current_symbol; + } + arc.dest_state = state_idx1 + 1; + break; + case 2: // the arc pointing to the next state with symbol + arc.label = current_symbol; + arc.dest_state = state_idx1 + 1; + arc.score = 0; + aux_labels_value = current_symbol; + break; + default: + K2_LOG(FATAL) << "Arc index must be less than 3"; + } + + arcs_data[arc_idx012] = arc; + if (aux_labels) aux_labels_data[arc_idx012] = aux_labels_value; + if (score_offsets) score_offsets_data[arc_idx012] = score_offset_value; + }); + return Ragged(shape, arcs); +} + +FsaVec CtcGraphs(const Ragged &symbols, bool modified /*= false*/, + Array1 *aux_labels /*= nullptr*/) { + NVTX_RANGE(K2_FUNC); + K2_CHECK_EQ(symbols.NumAxes(), 2); + ContextPtr &c = symbols.Context(); + + int32_t num_fsas = symbols.Dim0(); + Array1 num_states_for(c, num_fsas + 1); + int32_t *num_states_for_data = num_states_for.Data(); + const int32_t *symbol_row_split1_data = symbols.RowSplits(1).Data(); + // symbols indexed with [fsa][symbol] + // for each fsa we need `symbol_num * 2 + 1 + 1` states, `symbol_num * 2 + 1` + // means that 
we need a blank state on each side of a symbol state, `+ 1` is + // for final state in k2 + K2_EVAL( + c, num_fsas, lambda_set_num_states, (int32_t fsa_idx0)->void { + int32_t symbol_idx0x = symbol_row_split1_data[fsa_idx0], + symbol_idx0x_next = symbol_row_split1_data[fsa_idx0 + 1], + symbol_num = symbol_idx0x_next - symbol_idx0x; + num_states_for_data[fsa_idx0] = symbol_num * 2 + 2; + }); + + ExclusiveSum(num_states_for, &num_states_for); + Array1 &fsa_to_states_row_splits = num_states_for; + RaggedShape fsa_to_states = + RaggedShape2(&fsa_to_states_row_splits, nullptr, -1); + + int32_t num_states = fsa_to_states.NumElements(); + Array1 num_arcs_for(c, num_states + 1); + int32_t *num_arcs_for_data = num_arcs_for.Data(); + const int32_t *fts_row_splits1_data = fsa_to_states.RowSplits(1).Data(), + *fts_row_ids1_data = fsa_to_states.RowIds(1).Data(), + *symbol_data = symbols.values.Data(); + // set the arcs number for each state + K2_EVAL( + c, num_states, lambda_set_num_arcs, (int32_t state_idx01)->void { + int32_t fsa_idx0 = fts_row_ids1_data[state_idx01], + // we minus fsa_idx0 here, because we are adding one more state, + // the final state for each fsa + sym_state_idx01 = state_idx01 / 2 - fsa_idx0, + remainder = state_idx01 % 2, + current_num_arcs = 2; // normally there are two arcs, self-loop + // and arc pointing to the next state + // blank state always has two arcs + if (remainder) { // symbol state + int32_t sym_final_state = + symbol_row_split1_data[fsa_idx0 + 1]; + // There are no arcs for final states + if (sym_state_idx01 == sym_final_state) { + current_num_arcs = 0; + } else if (modified) { + current_num_arcs = 3; + } else { + int32_t current_symbol = symbol_data[sym_state_idx01], + // we set the next symbol of the last symbol to -1, so + // the following if clause will always be true, which means + // we will have 3 arcs for last symbol state + next_symbol = (sym_state_idx01 + 1) == sym_final_state ? + -1 : symbol_data[sym_state_idx01 + 1]; + // symbols must be not equal to -1, which is specially used in k2 + K2_CHECK_NE(current_symbol, -1); + // if current_symbol equals next_symbol, we need a blank state + // between them, so there are two arcs for this state + // otherwise, this state will point to blank state and next symbol + // state, so we need three arcs here. + // Note: for the simpilfied topology (standard equals false), there + // are always 3 arcs leaving symbol states. 
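+            // Illustrative example (hypothetical input): for symbols [a a b] the
+            // first 'a' keeps current_num_arcs == 2 (it must pass through the
+            // intervening blank), while the second 'a' and the 'b' get 3 arcs.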
+ if (current_symbol != next_symbol) + current_num_arcs = 3; + } + } + num_arcs_for_data[state_idx01] = current_num_arcs; + }); + + ExclusiveSum(num_arcs_for, &num_arcs_for); + Array1 &states_to_arcs_row_splits = num_arcs_for; + RaggedShape states_to_arcs = + RaggedShape2(&states_to_arcs_row_splits, nullptr, -1); + + // ctc_shape with a index of [fsa][state][arc] + RaggedShape ctc_shape = ComposeRaggedShapes(fsa_to_states, states_to_arcs); + int32_t num_arcs = ctc_shape.NumElements(); + Array1 arcs(c, num_arcs); + Arc *arcs_data = arcs.Data(); + const int32_t *ctc_row_splits1_data = ctc_shape.RowSplits(1).Data(), + *ctc_row_ids1_data = ctc_shape.RowIds(1).Data(), + *ctc_row_splits2_data = ctc_shape.RowSplits(2).Data(), + *ctc_row_ids2_data = ctc_shape.RowIds(2).Data(); + int32_t *aux_labels_data = nullptr; + if (aux_labels != nullptr) { + *aux_labels = Array1(c, num_arcs); + aux_labels_data = aux_labels->Data(); + } + + K2_EVAL( + c, num_arcs, lambda_set_arcs, (int32_t arc_idx012)->void { + int32_t state_idx01 = ctc_row_ids2_data[arc_idx012], + fsa_idx0 = ctc_row_ids1_data[state_idx01], + state_idx0x = ctc_row_splits1_data[fsa_idx0], + state_idx1 = state_idx01 - state_idx0x, + arc_idx01x = ctc_row_splits2_data[state_idx01], + arc_idx2 = arc_idx012 - arc_idx01x, + sym_state_idx01 = state_idx01 / 2 - fsa_idx0, + remainder = state_idx01 % 2, + sym_final_state = symbol_row_split1_data[fsa_idx0 + 1]; + bool final_state = sym_final_state == sym_state_idx01; + int32_t current_symbol = final_state ? + -1 : symbol_data[sym_state_idx01]; + Arc arc; + arc.score = 0; + arc.src_state = state_idx1; + int32_t aux_labels_value = 0; + if (remainder) { + if (final_state) return; + int32_t next_symbol = (sym_state_idx01 + 1) == sym_final_state ? + -1 : symbol_data[sym_state_idx01 + 1]; + // for standard topology, the symbol state can not point to next + // symbol state if the next symbol is identical to current symbol. + if (current_symbol == next_symbol && !modified) { + K2_CHECK_LT(arc_idx2, 2); + arc.label = arc_idx2 == 0 ? 0 : current_symbol; + arc.dest_state = arc_idx2 == 0 ? state_idx1 + 1 : state_idx1; + } else { + switch (arc_idx2) { + case 0: // the arc pointing to blank state + arc.label = 0; + arc.dest_state = state_idx1 + 1; + break; + case 1: // the self loop arc + arc.label = current_symbol; + arc.dest_state = state_idx1; + break; + case 2: // the arc pointing to the next symbol state + arc.label = next_symbol; + aux_labels_value = sym_state_idx01 + 1 == sym_final_state ? + -1 : next_symbol; + arc.dest_state = state_idx1 + 2; + break; + default: + K2_LOG(FATAL) << "Arc index must be less than 3"; + } + } + } else { + K2_CHECK_LT(arc_idx2, 2); + arc.label = arc_idx2 == 0 ? 0 : current_symbol; + arc.dest_state = arc_idx2 == 0 ? state_idx1 : state_idx1 + 1; + aux_labels_value = arc_idx2 == 0 ? 0 : current_symbol; + if (final_state && arc_idx2 != 0) aux_labels_value = -1; + } + arcs_data[arc_idx012] = arc; + if (aux_labels) aux_labels_data[arc_idx012] = aux_labels_value; + }); + return Ragged(ctc_shape, arcs); +} + +Fsa CtcTopo(const ContextPtr &c, int32_t max_token, bool modified, + Array1 *aux_labels) { + NVTX_RANGE(K2_FUNC); + K2_CHECK(aux_labels); + if (modified) { + // plusing 2 here to include 0(epsilon) and final state + int32_t states = max_token + 2; + // for modified topology, the number of self loops and leaving arcs for + // state 0 are all the number of states minus one. + // and there two arcs(one for self loop, the other points to state 0) for + // each of other states. 
see links belove for details : + // https://github.com/k2-fsa/k2/issues/746#issuecomment-856421616 + // https://github.com/k2-fsa/snowfall/pull/209 + int32_t num_arcs = (states - 1) * 2 + (states - 2) * 2; + *aux_labels = Array1(c, num_arcs); + Array1 row_ids(c, num_arcs); + Array1 arcs(c, num_arcs); + int32_t *row_ids_data = row_ids.Data(), + *aux_labels_data = aux_labels->Data(); + Arc *arcs_data = arcs.Data(); + K2_EVAL( + c, num_arcs, lambad_set_row_ids_and_arcs, (int32_t idx01) -> void { + Arc arc; + arc.score = 0; + if (idx01 < states - 1) { // state 0 self loop + arc.src_state = 0; + arc.dest_state = 0; + arc.label = idx01; + row_ids_data[idx01] = 0; + aux_labels_data[idx01] = idx01; + } else if (idx01 < (states - 1) * 2) { // arcs leaving state 0 + int32_t dest_state = idx01 - (states - 1) + 1; + arc.src_state = 0; + arc.dest_state = dest_state; + arc.label = dest_state == states - 1 ? -1 : dest_state; + row_ids_data[idx01] = 0; + aux_labels_data[idx01] = dest_state == states -1 ? -1 : dest_state; + } else { // arcs for other states + int32_t bias = idx01 - (states - 1) * 2; + int32_t state = bias / 2 + 1; + arc.src_state = state; + arc.label = state; + if (bias % 2) + arc.dest_state = 0; + else + arc.dest_state = state; + row_ids_data[idx01] = state; + aux_labels_data[idx01] = 0; + } + arcs_data[idx01] = arc; + }); + Array1 row_splits(c, states + 1); + RowIdsToRowSplits(row_ids, &row_splits); + return Ragged(RaggedShape2(&row_splits, &row_ids, num_arcs), arcs); + } else { + // plusing 2 here to include 0(epsilon) and final state + int32_t states = max_token + 2, + dim0 = states - 1, // minusing 1 here because there is not + // any leaving arcs for final state + dim1 = max_token + 2, // there are number of states arcs leaving + // each state for standard topolopy + num_arcs = dim0 * dim1; + *aux_labels = Array1(c, num_arcs); + Array1 row_ids(c, num_arcs); + Array1 arcs(c, num_arcs); + int32_t *row_ids_data = row_ids.Data(), + *aux_labels_data = aux_labels->Data(); + Arc *arcs_data = arcs.Data(); + K2_EVAL2( + c, dim0, dim1, lambda_set_row_ids_and_arcs, + (int32_t i, int32_t j)->void { + row_ids_data[i * dim1 + j] = i; + Arc arc; + arc.src_state = i; + arc.dest_state = j; + arc.label = j == (dim1 - 1) ? -1 : j; + arc.score = 0; + arcs_data[i * dim1 + j] = arc; + int32_t olabel = i == j ? 0 : (j == (dim1 - 1) ? -1 : j); + aux_labels_data[i * dim1 + j] = olabel; + }); + Array1 row_splits(c, states + 1); + RowIdsToRowSplits(row_ids, &row_splits); + return Ragged(RaggedShape2(&row_splits, &row_ids, dim0 * dim1), arcs); + } +} + +void ArcSort(Fsa *fsa) { + if (fsa->NumAxes() < 2) return; // it is empty + SortSublists(fsa); +} + +void ArcSort(Fsa &src, Fsa *dest, Array1 *arc_map /*= nullptr*/) { + NVTX_RANGE(K2_FUNC); + if (!src.values.IsValid()) return; + + if (arc_map != nullptr) + *arc_map = Array1(src.Context(), src.NumElements()); + + Fsa tmp(src.shape, src.values.Clone()); + SortSublists(&tmp, arc_map); + *dest = tmp; +} + +// TODO(fangjun): use the following method suggested by Dan +// +// ... incidentally, it's possible to further optimize this so the run +// time is less than linear, by using methods similar to what I use +// in GetStateBatches(); imagine computing a table that instead of +// the best traceback, is the best 2-step traceback; and then the 4-step +// traceback, and so on. There's no need for this right now, since the +// forward-pass algorithm is already at least linear-time in the length +// of this path. But we can consider it for the future. 
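+//
+// Illustrative traceback (hypothetical FSA): states 0..3 with final state 3 and
+// arcs a0: 0->1, a1: 0->2, a2: 1->3.  If entering_arcs == [-1, 0, 1, 2], the
+// traceback starts at state 3, follows arc 2 back to state 1, then arc 0 back to
+// state 0, and stops at -1; the best-path arc indexes for this FSA are [0, 2].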
+Ragged ShortestPath(FsaVec &fsas, + const Array1 &entering_arcs) { + NVTX_RANGE(K2_FUNC); + K2_CHECK_EQ(fsas.NumAxes(), 3); + const int32_t *entering_arcs_data = entering_arcs.Data(); + const Arc *arcs_data = fsas.values.Data(); + int32_t num_fsas = fsas.Dim0(); + int32_t num_states = fsas.TotSize(1); + ContextPtr &context = fsas.Context(); + + // allocate an extra element for ExclusiveSum + Array1 num_best_arcs_per_fsa(context, num_fsas + 1); + int32_t *num_best_arcs_per_fsa_data = num_best_arcs_per_fsa.Data(); + const int32_t *row_splits1_data = fsas.RowSplits(1).Data(); + + // -1 represents an invalid arc_index. + // This extra array avoids an extra iteration over `entering_arcs`. + Array1 state_best_arc_index_array(context, num_states, -1); + int32_t *state_best_arc_index_array_data = state_best_arc_index_array.Data(); + + K2_EVAL( + context, num_fsas, lambda_set_num_best_arcs, (int32_t fsas_idx0) { + int32_t state_idx01 = row_splits1_data[fsas_idx0]; + int32_t state_idx01_next = row_splits1_data[fsas_idx0 + 1]; + + if (state_idx01_next == state_idx01) { + // this fsa is empty, so there is no best path available + num_best_arcs_per_fsa_data[fsas_idx0] = 0; + return; + } + + int32_t final_state_idx01 = state_idx01_next - 1; + int32_t cur_state = final_state_idx01; + int32_t cur_index = entering_arcs_data[cur_state]; + int32_t num_arcs = 0; + int32_t *p = state_best_arc_index_array_data + final_state_idx01; + while (cur_index != -1) { + *p = cur_index; + --p; + + cur_state = arcs_data[cur_index].src_state + state_idx01; + cur_index = entering_arcs_data[cur_state]; + ++num_arcs; + } + num_best_arcs_per_fsa_data[fsas_idx0] = num_arcs; + }); + ExclusiveSum(num_best_arcs_per_fsa, &num_best_arcs_per_fsa); + + RaggedShape shape = RaggedShape2(&num_best_arcs_per_fsa, nullptr, -1); + const int32_t *shape_row_splits1_data = shape.RowSplits(1).Data(); + const int32_t *shape_row_ids1_data = shape.RowIds(1).Data(); + + const int32_t *ans_row_splits_data = shape.RowSplits(1).Data(); + Array1 best_path_arc_indexes(context, shape.NumElements()); + int32_t *best_path_arc_indexes_data = best_path_arc_indexes.Data(); + + K2_EVAL( + context, shape.NumElements(), lambda_set_best_arcs, (int32_t ans_idx01) { + int32_t fsa_idx0 = shape_row_ids1_data[ans_idx01]; + int32_t ans_idx0x = shape_row_splits1_data[fsa_idx0]; + int32_t ans_idx1 = ans_idx01 - ans_idx0x; + + int32_t num_arcs_this_fsa = num_best_arcs_per_fsa_data[fsa_idx0 + 1] - + num_best_arcs_per_fsa_data[fsa_idx0]; + if (num_arcs_this_fsa == 0) return; + + int32_t final_state_idx01_this_fsa = row_splits1_data[fsa_idx0 + 1] - 1; + + const int32_t *p_start = state_best_arc_index_array_data + + final_state_idx01_this_fsa - + num_arcs_this_fsa + 1; + + best_path_arc_indexes_data[ans_idx01] = p_start[ans_idx1]; + }); + + Ragged ans(shape, best_path_arc_indexes); + return ans; +} + +void AddEpsilonSelfLoops(FsaOrVec &src, FsaOrVec *dest, + Array1 *arc_map /*= nullptr*/) { + NVTX_RANGE(K2_FUNC); + ContextPtr &c = src.Context(); + const int32_t *old_row_splits1_data = src.RowSplits(1).Data(), + *old_row_ids1_data = src.RowIds(1).Data(); + const Arc *old_arcs_data = src.values.Data(); + if (src.NumAxes() == 2) { + int32_t num_states = src.Dim0(); + if (num_states < 2) { + K2_CHECK_EQ(num_states, 0); + *dest = src; + if (arc_map != nullptr) *arc_map = Array1(c, 0); + return; + } + + int32_t old_num_arcs = src.TotSize(1), + new_num_arcs = old_num_arcs + (num_states - 1); + Array1 new_row_splits(c, num_states + 1), + new_row_ids(c, new_num_arcs); + Array1 
new_arcs(c, new_num_arcs); + int32_t *new_row_splits1_data = new_row_splits.Data(), + *new_row_ids1_data = new_row_ids.Data(); + Arc *new_arcs_data = new_arcs.Data(); + int32_t *arc_map_data = nullptr; + if (arc_map) { + *arc_map = Array1(c, new_num_arcs); + arc_map_data = arc_map->Data(); + } + ParallelRunner pr(c); + { + With w(pr.NewStream()); + K2_EVAL( + c, old_num_arcs, lambda_copy_data, (int32_t arc_idx01)->void { + int32_t state_idx0 = old_row_ids1_data[arc_idx01], + new_arc_idx01 = arc_idx01 + 1 + state_idx0; + // the "+1" above is because we put the self-loop first. + new_row_ids1_data[new_arc_idx01] = state_idx0; + new_arcs_data[new_arc_idx01] = old_arcs_data[arc_idx01]; + if (arc_map_data) arc_map_data[new_arc_idx01] = arc_idx01; + }); + } + { + With w(pr.NewStream()); + K2_EVAL( + c, num_states, lambda_set_new_data, (int32_t state_idx0)->void { + int32_t old_arc_idx0x = old_row_splits1_data[state_idx0], + new_arc_idx0x = old_arc_idx0x + state_idx0; + new_row_splits1_data[state_idx0] = new_arc_idx0x; + if (state_idx0 + 1 < num_states) { // not final-state + int32_t new_arc_idx01 = new_arc_idx0x; // the 1st arc is the loop + new_row_ids1_data[new_arc_idx01] = state_idx0; + new_arcs_data[new_arc_idx01] = + Arc(state_idx0, state_idx0, 0, 0.0); + if (arc_map_data) arc_map_data[new_arc_idx01] = -1; + } else { + // Note: if num_states was zero we would have returned above, so + // we don't have to worry about empty FSAs. + new_row_splits1_data[num_states] = new_arc_idx0x; + } + }); + } + pr.Finish(); + *dest = Ragged( + RaggedShape2(&new_row_splits, &new_row_ids, new_num_arcs), new_arcs); + } else { + K2_CHECK_EQ(src.NumAxes(), 3); + // Get a vector saying, for each FSA, whether it's nonempty. + int32_t num_fsas = src.Dim0(), num_states = src.TotSize(1), + old_num_arcs = src.TotSize(2); + if (num_states == 0) { + *dest = src; + if (arc_map) *arc_map = Array1(c, 0); + return; + } + Array1 fsa_nonempty(c, num_fsas + 1); + int32_t *fsa_nonempty_data = fsa_nonempty.Data(); + K2_EVAL( + c, num_fsas, lambda_set_fsa_nonempty, (int32_t fsa_idx0)->void { + fsa_nonempty_data[fsa_idx0] = (old_row_splits1_data[fsa_idx0 + 1] > + old_row_splits1_data[fsa_idx0]); + }); + ExclusiveSum(fsa_nonempty, &fsa_nonempty); + const int32_t *old_row_splits2_data = src.RowSplits(2).Data(), + *old_row_ids2_data = src.RowIds(2).Data(); + int32_t num_nonempty_fsas = fsa_nonempty.Back(), + new_num_arcs = old_num_arcs + num_states - num_nonempty_fsas; + // we subtract `num_nonempty_fsas` because final-states don't get a + // self-loop. + + Array1 new_row_splits2(c, num_states + 1), + new_row_ids2(c, new_num_arcs); + Array1 new_arcs(c, new_num_arcs); + // fsa_idx0_mod_data maps from fsa_idx0 to a modified fsa_idx0 that + // "doesn't count" FSAs with zero states. + const int32_t *fsa_idx0_mod_data = fsa_nonempty_data; + int32_t *new_row_splits2_data = new_row_splits2.Data(), + *new_row_ids2_data = new_row_ids2.Data(); + Arc *new_arcs_data = new_arcs.Data(); + int32_t *arc_map_data = nullptr; + if (arc_map) { + *arc_map = Array1(c, new_num_arcs); + arc_map_data = arc_map->Data(); + } + ParallelRunner pr(c); + { + With w(pr.NewStream()); + K2_EVAL( + c, old_num_arcs, lambda_copy_data, (int32_t arc_idx012)->void { + int32_t state_idx01 = old_row_ids2_data[arc_idx012], + fsa_idx0 = old_row_ids1_data[state_idx01], + fsa_idx0_mod = fsa_idx0_mod_data[fsa_idx0], + new_arc_idx012 = + arc_idx012 + 1 + state_idx01 - fsa_idx0_mod; + // The "+1" above is because we put the self-loop first. 
The + // "-fsa_idx0_mod" is because final-states don't get a self-loop. + new_row_ids2_data[new_arc_idx012] = state_idx01; + new_arcs_data[new_arc_idx012] = old_arcs_data[arc_idx012]; + if (arc_map_data) arc_map_data[new_arc_idx012] = arc_idx012; + }); + } + { + With w(pr.NewStream()); + K2_EVAL( + c, num_states, lambda_set_new_data, (int32_t state_idx01)->void { + int32_t fsa_idx0 = old_row_ids1_data[state_idx01], + fsa_idx0_mod = fsa_idx0_mod_data[fsa_idx0], + state_idx0x = old_row_splits1_data[fsa_idx0], + next_state_idx0x = old_row_splits1_data[fsa_idx0 + 1], + old_arc_idx01x = old_row_splits2_data[state_idx01]; + // Below the "+ state_idx01" is because each state gets a self-loop, + // and the "- fsa_idx0_mod" is because final-states don't get a + // self-loop. + int32_t new_arc_idx01x = + old_arc_idx01x + state_idx01 - fsa_idx0_mod; + // The self-loop arc is the first arc: + int32_t new_arc_idx012 = new_arc_idx01x; + new_row_splits2_data[state_idx01] = new_arc_idx01x; + if (state_idx01 + 1 < next_state_idx0x) { // not final-state + new_row_ids2_data[new_arc_idx012] = state_idx01; + int32_t state_idx1 = state_idx01 - state_idx0x; + new_arcs_data[new_arc_idx012] = + Arc(state_idx1, state_idx1, 0, 0.0); + if (arc_map_data) arc_map_data[new_arc_idx012] = -1; + } else if (state_idx01 + 1 == num_states) { + // Note: if num_states was zero we would have returned above, so + // we dont have to worry about an empty FsaVec. + new_row_splits2_data[num_states] = new_arc_idx01x; + } + }); + } + pr.Finish(); + *dest = + Ragged(RaggedShape3(&src.RowSplits(1), &src.RowIds(1), num_states, + &new_row_splits2, &new_row_ids2, new_num_arcs), + new_arcs); + } +} + +Fsa Union(FsaVec &fsas, Array1 *arc_map /*= nullptr*/) { + NVTX_RANGE(K2_FUNC); + K2_CHECK_EQ(fsas.NumAxes(), 3); + + ContextPtr &context = fsas.Context(); + const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(); + const int32_t *fsas_row_splits2_data = fsas.RowSplits(2).Data(); + const int32_t *fsas_row_ids1_data = fsas.RowIds(1).Data(); + const int32_t *fsas_row_ids2_data = fsas.RowIds(2).Data(); + const Arc *arcs_data = fsas.values.Data(); + + int32_t num_fsas = fsas.Dim0(); + int32_t num_states = fsas.TotSize(1); + int32_t num_arcs = fsas.TotSize(2); + + // A new start state and a new final state are added (+2). 
+ // The final state of each fsa is removed (-num_fsas) + int32_t num_out_states = num_states + 2 - num_fsas; + int32_t out_final_state = num_out_states - 1; + + // For every fsa, a new arc is added from the new start state + // to its original start state (+num_fsas) + int32_t num_out_arcs = num_arcs + num_fsas; + + Array1 out_row_ids(context, num_out_arcs); + Array1 out_arcs(context, num_out_arcs); + Array1 tmp_arc_map(context, num_out_arcs, -1); + int32_t *tmp_arc_map_data = tmp_arc_map.Data(); + + int32_t *out_row_ids_data = out_row_ids.Data(); + Arc *out_arcs_data = out_arcs.Data(); + + K2_EVAL( + context, num_arcs, lambda_set_out, (int32_t fsas_arc_idx012) { + int32_t fsas_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012]; + int32_t fsas_idx0 = fsas_row_ids1_data[fsas_state_idx01]; + int32_t this_fsa_final_state_idx01 = + fsas_row_splits1_data[fsas_idx0 + 1] - 1; + + K2_DCHECK_GT(this_fsa_final_state_idx01, fsas_state_idx01) + << "We support only FSAs with at least two states at present"; + + int32_t fsas_state_idx0x = fsas_row_splits1_data[fsas_idx0]; + int32_t fsas_state_idx1 = fsas_state_idx01 - fsas_state_idx0x; + int32_t this_fsa_final_state_idx1 = + this_fsa_final_state_idx01 - fsas_state_idx0x; + + int32_t fsas_arc_idx0xx = fsas_row_splits2_data[fsas_state_idx0x]; + + // fsa0: +1 (a new start state) + // fsa1: +0 (the final state of fsa0 is removed) + // fsa2: -1 (the final state of fsa1 is removed) + // fsa3: -2 (the final state of fsa2 is removed) + int32_t state_offset = 1 - fsas_idx0; + int32_t out_state_idx0 = fsas_state_idx01 + state_offset; + + int32_t out_arc_idx01 = fsas_arc_idx012 + num_fsas; + out_row_ids_data[out_arc_idx01] = out_state_idx0; + Arc arc = arcs_data[fsas_arc_idx012]; + + K2_DCHECK_EQ(arc.src_state, fsas_state_idx1); + + if (arc.dest_state == this_fsa_final_state_idx1) + arc.dest_state = out_final_state; + else + arc.dest_state = arc.dest_state - arc.src_state + out_state_idx0; + + arc.src_state = out_state_idx0; + out_arcs_data[out_arc_idx01] = arc; + tmp_arc_map_data[out_arc_idx01] = fsas_arc_idx012; + + if (fsas_arc_idx0xx == fsas_arc_idx012) { + // add a new arc from the new start state to the start state + // of this fsa + // + // WARNING: we cannot use fsas_state_idx01 here + // since the start state may have no leaving arcs! + Arc arc(0, fsas_state_idx0x + state_offset, 0, 0); + out_arcs_data[fsas_idx0] = arc; + out_row_ids_data[fsas_idx0] = 0; + } + }); + + if (arc_map != nullptr) *arc_map = std::move(tmp_arc_map); + Array1 out_row_splits(context, num_out_states + 1); + RowIdsToRowSplits(out_row_ids, &out_row_splits); + RaggedShape shape = RaggedShape2(&out_row_splits, &out_row_ids, num_out_arcs); + Fsa ans = Ragged(shape, out_arcs); + return ans; +} + +Fsa Closure(Fsa &fsa, Array1 *arc_map /* = nullptr*/) { + NVTX_RANGE(K2_FUNC); + K2_CHECK_EQ(fsa.NumAxes(), 2) << "We support only a single FSA."; + ContextPtr &c = fsa.Context(); + + int32_t num_states = fsa.Dim0(); + if (num_states < 2) { + K2_CHECK_EQ(num_states, 0) + << "An empty fsa should contain no states at all"; + if (arc_map != nullptr) *arc_map = Array1(c, 0); + return fsa; // return itself if the input fsa is empty + } + + const int32_t *fsa_row_splits_data = fsa.RowSplits(1).Data(); + const int32_t *fsa_row_ids_data = fsa.RowIds(1).Data(); + const Arc *fsa_arcs_data = fsa.values.Data(); + int32_t fsa_final_state = num_states - 1; + + int32_t num_out_states = num_states; + + // An arc from the start state to the final state with label == -1 is added. 
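+  // Illustrative example (hypothetical input): for arcs {0->1/a, 1->2/-1} with
+  // final state 2, the output arcs are {0->1/a, 0->2/-1, 1->0/0}: the arc entering
+  // the old final state is redirected to state 0 with label 0 (epsilon), and the
+  // new start-to-final arc with label -1 is appended after the start state's arcs.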
+ int32_t num_out_arcs = fsa.values.Dim() + 1; + + Array1 out_row_ids(c, num_out_arcs); + int32_t *out_row_ids_data = out_row_ids.Data(); + + Array1 out_arcs(c, num_out_arcs); + Arc *out_arcs_data = out_arcs.Data(); + + Array1 tmp_arc_map(c, num_out_arcs); + int32_t *tmp_arc_map_data = tmp_arc_map.Data(); + + K2_EVAL( + c, fsa.values.Dim(), lambda_set_arcs, (int32_t fsa_arc_idx01) { + int32_t fsa_state_idx0 = fsa_row_ids_data[fsa_arc_idx01]; + int32_t fsa_arc_idx0x = fsa_row_splits_data[fsa_state_idx0]; + int32_t fsa_arc_idx1 = fsa_arc_idx01 - fsa_arc_idx0x; + int32_t this_state_num_arcs = + fsa_row_splits_data[fsa_state_idx0 + 1] - fsa_arc_idx0x; + + Arc arc = fsa_arcs_data[fsa_arc_idx01]; + if (arc.dest_state == fsa_final_state) { + // modify arcs entering the final state such that: + // - dest_state == 0 + // - label == 0 + arc.dest_state = 0; + K2_DCHECK_EQ(arc.label, -1); + arc.label = 0; + } + + int out_arc_idx01; + if (arc.src_state > 0) { + // this arc is not originated from the start state, so its index is + // incremented + out_arc_idx01 = fsa_arc_idx01 + 1; + } else { + out_arc_idx01 = fsa_arc_idx01; + if (fsa_arc_idx1 == this_state_num_arcs - 1) { + // This is the last arc of the original start state, + // so we add a new arc just after it. + Arc new_arc(0, fsa_final_state, -1, 0.0f); + out_arcs_data[out_arc_idx01 + 1] = new_arc; + out_row_ids_data[out_arc_idx01 + 1] = 0; + tmp_arc_map_data[out_arc_idx01 + 1] = -1; + } + } + + // it may happen that the start state has no leaving arcs + if (fsa_row_splits_data[1] == 0) { + Arc new_arc(0, fsa_final_state, -1, 0.0f); + out_arcs_data[0] = new_arc; + out_row_ids_data[0] = 0; + tmp_arc_map_data[0] = -1; + } + + tmp_arc_map_data[out_arc_idx01] = fsa_arc_idx01; + + out_arcs_data[out_arc_idx01] = arc; + out_row_ids_data[out_arc_idx01] = arc.src_state; + }); + + if (arc_map != nullptr) *arc_map = std::move(tmp_arc_map); + + Array1 out_row_splits(c, num_out_states + 1); + int32_t *out_row_splits_data = out_row_splits.Data(); + + K2_EVAL( + c, out_row_splits.Dim(), lambda_set_row_splits, (int32_t i) { + if (i == 0) + out_row_splits_data[i] = 0; + else + out_row_splits_data[i] = fsa_row_splits_data[i] + 1; + }); + + RaggedShape shape = RaggedShape2(&out_row_splits, &out_row_ids, num_out_arcs); + Fsa ans = Ragged(shape, out_arcs); + return ans; +} + +FsaOrVec ExpandArcs(FsaOrVec &fsas, RaggedShape &labels_shape, + Array1 *fsas_arc_map /*=nullptr*/, + Array1 *labels_arc_map /*=nullptr*/) { + NVTX_RANGE(K2_FUNC); + if (fsas.NumAxes() == 2) { + FsaVec fsas_temp = FsaToFsaVec(fsas); + return ExpandArcs(fsas_temp, labels_shape, fsas_arc_map, labels_arc_map) + .RemoveAxis(0); + } + K2_CHECK_EQ(fsas.NumAxes(), 3); + K2_CHECK_EQ(labels_shape.NumAxes(), 2); + K2_CHECK_EQ(fsas.NumElements(), labels_shape.Dim0()); + ContextPtr &c = fsas.Context(); + K2_CHECK(c->IsCompatible(*labels_shape.Context())); + + RaggedShape state_to_arcs = GetLayer(fsas.shape, 1); + + // `state_to_foo` is a RaggedShape that, for each state in `fsas`, has a list + // of length `num_arcs + 1`, where `num_arcs` is the number of arcs leaving + // this state in `fsas`. Interpret this as: one element for the state + // itself, then one for each arc leaving it. This `foo` is an index that + // corresponds to num-arcs plus one, but because it is really a placeholder + // and we want to keep it distinct from other things, we call it `foo`. 
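+  // Illustrative example: a state with two leaving arcs gets a `foo` list of
+  // length 3 (the state itself plus one entry per arc); if the label sequences on
+  // those arcs have lengths 3 and 1, the two arc entries will later expand into
+  // 2 and 0 extra states respectively.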
+ RaggedShape state_to_foo = ChangeSublistSize(state_to_arcs, 1); + + int32_t foo_size = state_to_foo.NumElements(); + + // For each element of `state_to_foo`, `num_ostates_for` says how many states + // there will be for this (state,foo) in the returned (output) FSA. Here, the + // idx0 is the state, the idx1 is foo. If idx1 == 0 (interpret this as "the + // state itself"), then `num_ostates_for[idx01] = 1`, meaning "keep the + // original state". Otherwise, idx1 - 1 represents an arc_idx2 [into `fsas`], + // and we set `num_ostates_for[idx01] = max(0, seq_len-1)`, where seq_len is + // the length of the sequence in `labels_shape` corresponding to this + // arc-index. + Array1 num_ostates_for(c, foo_size + 1); + int32_t *num_ostates_for_data = num_ostates_for.Data(); + + const int32_t *labels_row_splits1_data = labels_shape.RowSplits(1).Data(), + *fsas_row_splits2_data = fsas.RowSplits(2).Data(), + *state_to_foo_row_splits1_data = + state_to_foo.RowSplits(1).Data(), + *state_to_foo_row_ids1_data = state_to_foo.RowIds(1).Data(); + + K2_EVAL( + c, foo_size, lambda_set_num_ostates, (int32_t idx01)->void { + // note: the idx01, idx0, idx0x are into `state_to_foo`. + // This idx0 is a state-index into `fsas` (an idx01 w.r.t. `fsas`). + int32_t idx0 = state_to_foo_row_ids1_data[idx01], + idx0x = state_to_foo_row_splits1_data[idx0], + idx1 = idx01 - idx0x; // idx1 is `foo`. + int32_t num_ostates; + if (idx1 == 0) { + num_ostates = 1; // this is a copy of the original state. + } else { + int32_t fsas_arc_idx2 = idx1 - 1, fsas_state_idx01 = idx0, + fsas_arc_idx01x = fsas_row_splits2_data[fsas_state_idx01], + fsas_arc_idx012 = fsas_arc_idx01x + fsas_arc_idx2, + labels_shape_idx0 = fsas_arc_idx012, + labels_shape_idx0x = + labels_row_splits1_data[labels_shape_idx0], + labels_shape_idx0x_next = + labels_row_splits1_data[labels_shape_idx0 + 1], + labels_shape_len1 = + labels_shape_idx0x_next - labels_shape_idx0x; + // A sequence of n symbols will require n-1 extra states to represent + // it. + num_ostates = max(labels_shape_len1 - 1, (int32_t)0); + } + num_ostates_for_data[idx01] = num_ostates; + }); + ExclusiveSum(num_ostates_for, &num_ostates_for); + Array1 &foo_to_ostates_row_splits = num_ostates_for; + RaggedShape foo_to_ostates = + RaggedShape2(&foo_to_ostates_row_splits, nullptr, -1); + + // to_ostates_shape has 4 axes: [fsa_id][orig_state][foo][ostate] + // where foo is a general-purpose index that ranges over the (num_arcs + 1) of + // the original state. + RaggedShape to_ostates_shape = ComposeRaggedShapes3( + GetLayer(fsas.shape, 0), state_to_foo, foo_to_ostates); + + // Below, `tos` means `to_ostates_shape`. + const int32_t *tos_row_splits1_data = to_ostates_shape.RowSplits(1).Data(), + *tos_row_ids1_data = to_ostates_shape.RowIds(1).Data(), + *tos_row_splits2_data = to_ostates_shape.RowSplits(2).Data(), + *tos_row_ids2_data = to_ostates_shape.RowIds(2).Data(), + *tos_row_splits3_data = to_ostates_shape.RowSplits(3).Data(), + *tos_row_ids3_data = to_ostates_shape.RowIds(3).Data(); + + // `num_oarcs` gives the number of arcs in the returned (output) FSA for each + // `ostate` (i.e. leaving each state in the returned FSA). 
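+  // Illustrative example: an ostate that copies an original state keeps that
+  // state's out-degree, while each chain state created for a multi-label arc has
+  // exactly one leaving arc.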
+ int32_t tot_ostates = to_ostates_shape.NumElements(); + Array1 num_oarcs(c, tot_ostates + 1); + int32_t *num_oarcs_data = num_oarcs.Data(); + K2_EVAL( + c, tot_ostates, lambda_set_num_oarcs, (int32_t idx0123)->void { + // All these indexes are into `to_ostates_shape`, indexed + // `[fsa][state][foo][ostate].` + int32_t idx012 = tos_row_ids3_data[idx0123], + idx012x = tos_row_splits3_data[idx012], + idx01 = tos_row_ids2_data[idx012], + idx01x = tos_row_splits2_data[idx01], + idx01x_next = tos_row_splits2_data[idx01 + 1], + len2 = idx01x_next - idx01x, idx2 = idx012 - idx01x, + idx3 = idx0123 - idx012x; + int32_t num_arcs; + if (idx2 == 0) { + K2_CHECK_EQ(idx3, 0); + // This ostate corresponds to the original state; it is not one of the + // extra states added to support chains of arcs. + // The original state had `orig_num_arcs` leaving it, which is the + // number of `foo` indexes minus one. + int32_t orig_num_arcs = len2 - 1; + num_arcs = orig_num_arcs; + } else { + // All newly-created states have exactly one arc leaving them. + num_arcs = 1; + } + num_oarcs_data[idx0123] = num_arcs; + }); + ExclusiveSum(num_oarcs, &num_oarcs); + Array1 &ostate_to_oarcs_row_splits = num_oarcs; + RaggedShape ostate_to_oarcs = + RaggedShape2(&ostate_to_oarcs_row_splits, nullptr, -1); + + // `full_shape` has 5 axes: [fsa][orig_state][foo][ostate][oarc] + RaggedShape full_shape = + ComposeRaggedShapes(to_ostates_shape, ostate_to_oarcs); + // for the lower-order row-splits and row-ids, use tot_row_{splits,idx}n_data + const int32_t *full_row_splits4_data = full_shape.RowSplits(4).Data(), + *full_row_ids4_data = full_shape.RowIds(4).Data(); + int32_t tot_oarcs = full_shape.NumElements(); + K2_CHECK_GE(tot_oarcs, fsas.NumElements()); + + int32_t *fsas_arc_map_data = nullptr, *labels_arc_map_data = nullptr; + if (fsas_arc_map) { + *fsas_arc_map = Array1(c, tot_oarcs); + fsas_arc_map_data = fsas_arc_map->Data(); + } + if (labels_arc_map) { + *labels_arc_map = Array1(c, tot_oarcs); + labels_arc_map_data = labels_arc_map->Data(); + } + Array1 oarcs(c, tot_oarcs); + Arc *oarcs_data = oarcs.Data(); + const Arc *arcs_data = fsas.values.Data(); + + K2_EVAL( + c, tot_oarcs, lambda_set_arcs, (int32_t idx01234)->void { + // All these indexes are into `full_shape`, indexed + // `[fsa][state][foo][ostate][oarc].` + int32_t idx0123 = full_row_ids4_data[idx01234], + idx0123x = full_row_splits4_data[idx0123], + idx4 = idx01234 - idx0123x, idx012 = tos_row_ids3_data[idx0123], + idx012x = tos_row_splits3_data[idx012], + idx3 = idx0123 - idx012x, idx01 = tos_row_ids2_data[idx012], + idx01x = tos_row_splits2_data[idx01], idx2 = idx012 - idx01x, + idx0 = tos_row_ids1_data[idx01], + idx0x = tos_row_splits1_data[idx0], + idx0xxx = tos_row_splits3_data[tos_row_splits2_data[idx0x]]; + + int32_t fsa_idx01x = fsas_row_splits2_data[idx01]; + + int32_t fsa_idx2; // the idx2 (arc-index) into `fsas` of the input arc + // that's most relevant to us.. + int32_t seq_pos; // seq_pos is our index into the sequence of arcs that + // we produce for each original arc + if (idx2 == 0) { + K2_CHECK_EQ(idx3, 0); + fsa_idx2 = idx4; // corresponds to foo=0, so idx3 will be 0; the idx4 + // enumerates the arcs leaving it.. + seq_pos = 0; + } else { + // this is one of the extra `foo` indexes, one per arc in the input + // FSA that leaves this state; each of those `foo` indexes has + // (seq_len - 1) states in it (idx3=0,1..seq_len-1); and each state + // has one arc leaving it (idx4==0). 
+ K2_CHECK_EQ(idx4, 0); + fsa_idx2 = idx2 - 1; + seq_pos = idx3 + 1; + } + int32_t fsa_idx012 = fsa_idx01x + fsa_idx2; // index of the arc in + // source FSA FSA that + // we're expanding.. + Arc iarc = arcs_data[fsa_idx012]; + + int32_t labels_idx0x = labels_row_splits1_data[fsa_idx012], + labels_next_idx0x = labels_row_splits1_data[fsa_idx012 + 1], + labels_len1 = labels_next_idx0x - labels_idx0x; + // labels_len1 is length of label sequence for this arc + K2_CHECK_LT(seq_pos, max(int32_t(1), labels_len1)); + + int32_t dest_idx01 = idx0x + iarc.dest_state, // original destination + // state-index + orig_dest_idx0123 = + tos_row_splits3_data[tos_row_splits2_data[dest_idx01]]; + + Arc oarc; + oarc.src_state = idx0123 - idx0xxx; + // If this is the last arc in the sequence, the dest-state is the + // original dest-state of the arc. Otherwise the dest-state is one of + // the new states that we created. The idx123 will be an idx1 after + // removing axes. + int32_t dest_idx123; + if (seq_pos + 1 >= labels_len1) { // last arc in sequence.. + dest_idx123 = orig_dest_idx0123 - idx0xxx; + } else { + int32_t dest_state_idx2 = fsa_idx2 + 1, // index `foo` equals + // orig_arc_idx+1 + dest_state_idx3 = seq_pos, // ostate index.. + dest_idx012 = idx01x + dest_state_idx2, + dest_idx012x = tos_row_splits3_data[dest_idx012], + dest_idx0123 = dest_idx012x + dest_state_idx3; + dest_idx123 = dest_idx0123 - idx0xxx; + } + oarc.dest_state = dest_idx123; // indexes 1,2,3 will be combined; in + // the output FSA it will be an idx1. + + if (fsas_arc_map_data) + fsas_arc_map_data[idx01234] = (seq_pos == 0 ? fsa_idx012 : -1); + if (labels_arc_map_data) + labels_arc_map_data[idx01234] = + (seq_pos < labels_len1 ? labels_idx0x + seq_pos : -1); + if (iarc.label != -1) { + // normal case.. label goes on 1st arc in sequence + oarc.label = (seq_pos == 0 ? iarc.label : 0); + } else { + // If the arc was to the final-state, we need to keep the label on the + // last arc of the sequence to keep the output valid. The following + // would be "seq_pos + 1 == labels_len1 ? -1 : 0", but we make it ">=" + // not "=" to account for the case seq_pos=0, labels_len1 = 0. + oarc.label = (seq_pos + 1 >= labels_len1 ? -1 : 0); + } + oarc.score = (seq_pos == 0 ? iarc.score : 0.0); + oarcs_data[idx01234] = oarc; + }); + + // remove current axes 1 and 2... [after removing axis 1, old axis 2 becomes + // axis 1, so remove axis 1 twice]. 
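+  // Illustrative note: the 5-axis shape [fsa][orig_state][foo][ostate][oarc]
+  // becomes [fsa][ostate][oarc] after the two RemoveAxis calls, so the combined
+  // (orig_state, foo, ostate) index becomes the state index of the output FsaVec.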
+ RaggedShape temp = RemoveAxis(full_shape, 1); + return FsaVec(RemoveAxis(temp, 1), oarcs); +} + + +void Invert(FsaOrVec &src, Ragged &src_aux_labels, FsaOrVec *dest, + Ragged *dest_aux_labels, + Array1 *arc_map /*= nullptr*/) { + NVTX_RANGE(K2_FUNC); + K2_CHECK_EQ(src_aux_labels.NumAxes(), 2); + K2_CHECK_EQ(src_aux_labels.Dim0(), src.NumElements()); + K2_CHECK(dest != nullptr && dest_aux_labels != nullptr); + ContextPtr c = GetContext(src, src_aux_labels); + if (src.NumAxes() == 2) { + Fsa *srcs = &src; + FsaVec src_vec = CreateFsaVec(1, &srcs), dest_vec; + Invert(src_vec, src_aux_labels, &dest_vec, dest_aux_labels, arc_map); + *dest = GetFsaVecElement(dest_vec, 0); + return; + } + Array1 src_arc_map, labels_arc_map; + *dest = ExpandArcs(src, src_aux_labels.shape, &src_arc_map, &labels_arc_map); + // swap labels and aux_labels + int32_t dest_num_arcs = dest->NumElements(); + Arc *dest_arcs_data = dest->values.Data(); + const int32_t *labels_arc_map_data = labels_arc_map.Data(), + *src_aux_labels_data = src_aux_labels.values.Data(); + Array1 dest_aux_labels_row_splits(c, dest_num_arcs + 1); + int32_t *dest_aux_labels_row_splits_data = dest_aux_labels_row_splits.Data(); + K2_EVAL( + c, dest_num_arcs, lambda_set_dest_aux_labels_num, + (int32_t dest_idx012)->void { + Arc &dest_arc = dest_arcs_data[dest_idx012]; + // we'll remove epsilons in dest_aux_labels + dest_aux_labels_row_splits_data[dest_idx012] = + dest_arc.label == 0 ? 0 : 1; + }); + ExclusiveSum(dest_aux_labels_row_splits.Arange(0, dest_num_arcs), + &dest_aux_labels_row_splits); + RaggedShape dest_aux_labels_shape = + RaggedShape2(&dest_aux_labels_row_splits, nullptr, -1); + Array1 dest_aux_labels_values(c, + dest_aux_labels_shape.NumElements()); + int32_t *dest_aux_labels_values_data = dest_aux_labels_values.Data(); + K2_EVAL( + c, dest_num_arcs, lambda_set_dest_labels_and_aux_labels, + (int32_t dest_idx012)->void { + Arc &dest_arc = dest_arcs_data[dest_idx012]; + // swap label and aux_label + if (dest_arc.label != 0) { + int32_t dest_aux_labels_idx0x = + dest_aux_labels_row_splits_data[dest_idx012]; + // every arc in dest has at most one aux_label (as the aux_label is + // the label of src on this arc) + dest_aux_labels_values_data[dest_aux_labels_idx0x] = dest_arc.label; + } + int32_t src_aux_labels_idx01 = labels_arc_map_data[dest_idx012]; + dest_arc.label = src_aux_labels_idx01 == -1 + ? 0 + : src_aux_labels_data[src_aux_labels_idx01]; + }); + *dest_aux_labels = + Ragged(dest_aux_labels_shape, dest_aux_labels_values); + if (arc_map != nullptr) *arc_map = src_arc_map; +} + +// Will be used in InvertHost to process FsaVec input recursively. +void RecursionWrapperAuxLabels(void (*f)(FsaOrVec &, Ragged &, + FsaOrVec *, Ragged *), + FsaOrVec &src, Ragged &src_aux_labels, + FsaOrVec *dest, + Ragged *dest_aux_labels) { + NVTX_RANGE(K2_FUNC); + // src is actually an FsaVec. Just recurse for now. 
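+  // Summary of the recursion (illustrative): aux_labels are sliced per FSA by arc
+  // range [tot_num_arcs, tot_num_arcs + cur_num_arcs) before each recursive call,
+  // and the per-FSA outputs are stacked / concatenated again at the end.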
+ K2_CHECK_EQ(src.NumAxes(), 3); + int32_t num_fsas = src.shape.Dim0(); + std::vector srcs(num_fsas), dests(num_fsas); + std::vector> src_aux_labels_vec(num_fsas), + dest_aux_labels_vec(num_fsas); + int32_t tot_num_arcs = 0; + Array1 src_aux_labels_row_splits = src_aux_labels.RowSplits(1), + src_aux_labels_values = src_aux_labels.values; + for (int32_t i = 0; i < num_fsas; ++i) { + srcs[i] = src.Index(0, i); + int32_t cur_num_arcs = srcs[i].NumElements(); + // below block get aux_labels for srcs[i] + // TODO(haowen): replace with Range op for ragged + { + Array1 row_splits = src_aux_labels_row_splits.Arange( + tot_num_arcs, tot_num_arcs + cur_num_arcs + 1); + Array1 values = + src_aux_labels_values.Arange(row_splits[0], row_splits.Back()); + row_splits = Minus(row_splits, row_splits[0]); + RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); + src_aux_labels_vec[i] = Ragged(shape, values); + } + f(srcs[i], src_aux_labels_vec[i], &(dests[i]), &(dest_aux_labels_vec[i])); + tot_num_arcs += cur_num_arcs; + } + *dest = Stack(0, num_fsas, dests.data()); + *dest_aux_labels = Cat(0, num_fsas, dest_aux_labels_vec.data()); +} + +void InvertHost(FsaOrVec &src, Ragged &src_aux_labels, FsaOrVec *dest, + Ragged *dest_aux_labels) { + NVTX_RANGE(K2_FUNC); + K2_CHECK_EQ(src_aux_labels.NumAxes(), 2); + K2_CHECK_EQ(src_aux_labels.Dim0(), src.NumElements()); + K2_CHECK(dest != nullptr && dest_aux_labels != nullptr); + int32_t num_axes = src.NumAxes(); + if (num_axes < 2 || num_axes > 3) { + K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; + } else if (num_axes == 3) { + return RecursionWrapperAuxLabels(InvertHost, src, src_aux_labels, dest, + dest_aux_labels); + } + + k2host::Fsa host_fsa = FsaToHostFsa(src); + // k2host::AuxLabels is a k2host::Array2 + k2host::AuxLabels host_aux_labels( + src_aux_labels.Dim0(), src_aux_labels.NumElements(), + src_aux_labels.RowSplits(1).Data(), src_aux_labels.values.Data()); + k2host::FstInverter inverter(host_fsa, host_aux_labels); + k2host::Array2Size fsa_size, aux_size; + inverter.GetSizes(&fsa_size, &aux_size); + FsaCreator fsa_creator(fsa_size); + k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa(); + Ragged2Creator ragged_creator(aux_size); + k2host::AuxLabels host_dest_aux_labels = ragged_creator.GetHostArray2(); + inverter.GetOutput(&host_dest_fsa, &host_dest_aux_labels); + *dest = fsa_creator.GetFsa(); + *dest_aux_labels = ragged_creator.GetRagged2(); +} + +FsaOrVec ReplaceFsa(FsaVec &src, FsaOrVec &index, int32_t symbol_range_begin, + Array1 *arc_map_src /* = nullptr */, + Array1 *arc_map_index /* = nullptr */) { + NVTX_RANGE(K2_FUNC); + if (index.NumAxes() == 2) { + FsaVec index_temp = FsaToFsaVec(index); + return ReplaceFsa(src, index_temp, symbol_range_begin, arc_map_src, + arc_map_index).RemoveAxis(0); + } + K2_CHECK_EQ(index.NumAxes(), 3); + ContextPtr &c = index.Context(); + K2_CHECK(c->IsCompatible(*src.Context())); + + RaggedShape state_to_arcs = GetLayer(index.shape, 1); + + // `state_to_foo` is a RaggedShape that, for each state in `index`, has a list + // of length `tot_arcs + 1`. Interpret this as: one element for the state + // itself, then one for each arc leaving it. This `foo` is an index that + // corresponds to num-arcs plus one, but because it is really a placeholder + // and we want to keep it distinct from other things, we call it `foo`. 
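+  // Illustrative example (hypothetical values): with symbol_range_begin == 100 and
+  // src.Dim0() == 3, arcs of `index` labelled 100, 101 and 102 are expanded with
+  // src[0], src[1] and src[2] respectively; arcs with any other label are kept as
+  // ordinary arcs and are not replaced.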
+ RaggedShape state_to_foo = ChangeSublistSize(state_to_arcs, 1); + + int32_t foo_size = state_to_foo.NumElements(), + num_src_fsas = src.Dim0(); + // For each element of `state_to_foo`, `num_ostates_for` says how many states + // there will be for this (state,foo) in the returned (output) FSA. Here, the + // idx0 is the state, the idx1 is foo. If idx1 == 0 (interpret this as "the + // state itself"), then `num_ostates_for[idx01] = 1`, meaning "keep the + // original state". Otherwise, idx1 - 1 represents an arc_idx2 [into `index`] + // and we set `num_ostates_for[idx01] = max(0, state_num-1)`, where state_num + // is the states number of the fsa in `src` that would repalce into this arc, + // the final state of this fsa will identify with the dest-state of this arc, + // so we minus 1. + Array1 num_ostates_for(c, foo_size + 1); + int32_t *num_ostates_for_data = num_ostates_for.Data(); + const Arc *index_arcs_data = index.values.Data(); + + const int32_t *src_row_splits1_data = src.RowSplits(1).Data(), + *index_row_splits2_data = index.RowSplits(2).Data(), + *state_to_foo_row_splits1_data = + state_to_foo.RowSplits(1).Data(), + *state_to_foo_row_ids1_data = state_to_foo.RowIds(1).Data(); + + K2_EVAL( + c, foo_size, lambda_set_num_ostates, (int32_t idx01)->void { + // note: the idx01, idx0, idx0x are into `state_to_foo`. + // This idx0 is a state-index into `index` (an idx01 w.r.t. `index`). + int32_t idx0 = state_to_foo_row_ids1_data[idx01], + idx0x = state_to_foo_row_splits1_data[idx0], + idx1 = idx01 - idx0x; // idx1 is `foo`. + int32_t num_ostates; + if (idx1 == 0) { + num_ostates = 1; // this is a copy of the original state. + } else { + int32_t index_arc_idx2 = idx1 - 1, index_state_idx01 = idx0, + index_arc_idx01x = index_row_splits2_data[index_state_idx01], + index_arc_idx012 = index_arc_idx01x + index_arc_idx2, + index_label = index_arcs_data[index_arc_idx012].label, + src_idx0 = index_label - symbol_range_begin; + // will not replace for this arc + if (src_idx0 < 0 || src_idx0 >= num_src_fsas) { + num_ostates = 0; + } else { + int32_t src_idx0x = src_row_splits1_data[src_idx0], + src_idx0x_next = src_row_splits1_data[src_idx0 + 1], + src_len1 = src_idx0x_next - src_idx0x; + num_ostates = max(src_len1 - 1, (int32_t)0); + } + } + num_ostates_for_data[idx01] = num_ostates; + }); + ExclusiveSum(num_ostates_for, &num_ostates_for); + Array1 &foo_to_ostates_row_splits = num_ostates_for; + RaggedShape foo_to_ostates = + RaggedShape2(&foo_to_ostates_row_splits, nullptr, -1); + + // to_ostates_shape has 4 axes: [fsa_id][orig_state][foo][ostate] + // where foo is a general-purpose index that ranges over the (num_arcs + 1) of + // the original state. + RaggedShape to_ostates_shape = ComposeRaggedShapes3( + GetLayer(index.shape, 0), state_to_foo, foo_to_ostates); + + // Below, `tos` means `to_ostates_shape`. + const int32_t *tos_row_splits1_data = to_ostates_shape.RowSplits(1).Data(), + *tos_row_ids1_data = to_ostates_shape.RowIds(1).Data(), + *tos_row_splits2_data = to_ostates_shape.RowSplits(2).Data(), + *tos_row_ids2_data = to_ostates_shape.RowIds(2).Data(), + *tos_row_splits3_data = to_ostates_shape.RowSplits(3).Data(), + *tos_row_ids3_data = to_ostates_shape.RowIds(3).Data(), + *src_row_splits2_data = src.RowSplits(2).Data(); + + // `num_oarcs` gives the number of arcs in the returned (output) FSA for each + // `ostate` (i.e. leaving each state in the returned FSA). 
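+  // Illustrative example: an ostate copying an original `index` state keeps that
+  // state's out-degree, while an inserted ostate gets the out-degree of the
+  // corresponding non-final state of the replacing FSA in `src`.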
+ int32_t tot_ostates = to_ostates_shape.NumElements(); + Array1 num_oarcs(c, tot_ostates + 1); + int32_t *num_oarcs_data = num_oarcs.Data(); + K2_EVAL( + c, tot_ostates, lambda_set_num_oarcs, (int32_t idx0123)->void { + // All these indexes are into `to_ostates_shape`, indexed + // `[fsa][state][foo][ostate].` + int32_t idx012 = tos_row_ids3_data[idx0123], + idx012x = tos_row_splits3_data[idx012], + idx01 = tos_row_ids2_data[idx012], + idx01x = tos_row_splits2_data[idx01], + idx01x_next = tos_row_splits2_data[idx01 + 1], + len2 = idx01x_next - idx01x, idx2 = idx012 - idx01x, + idx3 = idx0123 - idx012x; + int32_t num_arcs; + if (idx2 == 0) { + K2_CHECK_EQ(idx3, 0); + // This ostate corresponds to the original state; + // The original state had `orig_num_arcs` leaving it, which is the + // number of `foo` indexes minus one. + int32_t orig_num_arcs = len2 - 1; + num_arcs = orig_num_arcs; + } else { + // All inserted states have the same num of arcs as in the src. + // note: the prefix `index_` means it is an idxXXX w.r.t. `index`. + // the prefix `src_` means the variable is an idxXXX w.r.t. `src`. + int32_t index_arc_idx2 = idx2 - 1, + index_arc_idx01x = index_row_splits2_data[idx01], + index_arc_idx012 = index_arc_idx01x + index_arc_idx2, + index_label = index_arcs_data[index_arc_idx012].label, + src_fsa_idx0 = index_label - symbol_range_begin; + K2_CHECK_GE(src_fsa_idx0, 0); + K2_CHECK_LT(src_fsa_idx0, num_src_fsas); + int32_t src_state_idx1 = idx3, + src_state_idx0x = src_row_splits1_data[src_fsa_idx0], + src_state_idx01 = src_state_idx0x + src_state_idx1, + src_arc_idx01x = src_row_splits2_data[src_state_idx01], + src_arc_idx01x_next = + src_row_splits2_data[src_state_idx01 + 1], + src_num_arcs = src_arc_idx01x_next - src_arc_idx01x; + num_arcs = src_num_arcs; + } + num_oarcs_data[idx0123] = num_arcs; + }); + ExclusiveSum(num_oarcs, &num_oarcs); + Array1 &ostate_to_oarcs_row_splits = num_oarcs; + RaggedShape ostate_to_oarcs = + RaggedShape2(&ostate_to_oarcs_row_splits, nullptr, -1); + + // `full_shape` has 5 axes: [fsa][orig_state][foo][ostate][oarc] + RaggedShape full_shape = + ComposeRaggedShapes(to_ostates_shape, ostate_to_oarcs); + + // for the lower-order row-splits and row-ids, use tot_row_{splits,ids}n_data + const int32_t *full_row_splits4_data = full_shape.RowSplits(4).Data(), + *full_row_ids4_data = full_shape.RowIds(4).Data(); + int32_t tot_oarcs = full_shape.NumElements(); + K2_CHECK_GE(tot_oarcs, index.NumElements()); + + int32_t *arc_map_src_data = nullptr, *arc_map_index_data = nullptr; + if (arc_map_src) { + *arc_map_src = Array1(c, tot_oarcs); + arc_map_src_data = arc_map_src->Data(); + } + if (arc_map_index) { + *arc_map_index = Array1(c, tot_oarcs); + arc_map_index_data = arc_map_index->Data(); + } + Array1 oarcs(c, tot_oarcs); + Arc *oarcs_data = oarcs.Data(); + const Arc *src_arcs_data = src.values.Data(); + + K2_EVAL( + c, tot_oarcs, lambda_set_arcs, (int32_t idx01234)->void { + // All these indexes are into `full_shape`, indexed + // `[fsa][state][foo][ostate][oarc].` + // The prefix `index_` means it is an idxXXX w.r.t. `index`. + // the prefix `src_` means the variable is an idxXXX w.r.t. `src`. 
+ int32_t idx0123 = full_row_ids4_data[idx01234], + idx0123x = full_row_splits4_data[idx0123], + idx4 = idx01234 - idx0123x, + idx012 = tos_row_ids3_data[idx0123], + idx012x = tos_row_splits3_data[idx012], + idx3 = idx0123 - idx012x, + idx01 = tos_row_ids2_data[idx012], + idx01x = tos_row_splits2_data[idx01], + idx2 = idx012 - idx01x, + idx0 = tos_row_ids1_data[idx01], + idx0x = tos_row_splits1_data[idx0], + idx0xxx = tos_row_splits3_data[tos_row_splits2_data[idx0x]]; + + int32_t index_arc_idx2; // the idx2 (arc-index) into `index` + if (idx2 == 0) { + K2_CHECK_EQ(idx3, 0); + index_arc_idx2 = idx4; // corresponds to foo=0, so idx3 will be 0; + // the idx4 enumerates the arcs leaving it.. + } else { + // this is one of the extra `foo` indexes, it's conrespoding index + // into `index` is `foo` index minus 1 + index_arc_idx2 = idx2 - 1; + } + + int32_t index_arc_idx01x = index_row_splits2_data[idx01]; + // index of the arc in source FSA, FSA that we're replaceing.. + int32_t index_arc_idx012 = index_arc_idx01x + index_arc_idx2; + + Arc index_arc = index_arcs_data[index_arc_idx012]; + // original destination state-index + int32_t dest_state_idx01 = idx0x + index_arc.dest_state, + orig_dest_state_idx0123 = + tos_row_splits3_data[tos_row_splits2_data[dest_state_idx01]]; + + Arc src_arc; + Arc oarc; + oarc.src_state = idx0123 - idx0xxx; + // initialize mapping index + int32_t arc_src_map_idx = -1, + arc_index_map_idx = -1; + int32_t src_fsa_idx0 = index_arc.label - symbol_range_begin; + // will not replace for this arc + // dest state is the dest state of index arc + if (src_fsa_idx0 < 0 || src_fsa_idx0 >= num_src_fsas) { + K2_CHECK_EQ(idx2, 0); + oarc.dest_state = orig_dest_state_idx0123 - idx0xxx; + oarc.label = index_arc.label; + oarc.score = index_arc.score; + arc_index_map_idx = index_arc_idx012; + } else { + int32_t src_state_idx0x = src_row_splits1_data[src_fsa_idx0], + src_state_idx0x_next = src_row_splits1_data[src_fsa_idx0 + 1], + num_states = src_state_idx0x_next - src_state_idx0x, + src_state_idx1 = idx3, + src_state_idx01 = src_state_idx0x + src_state_idx1, + src_arc_idx01x = src_row_splits2_data[src_state_idx01], + src_arc_idx2 = idx4, + src_arc_idx012 = src_arc_idx01x + src_arc_idx2; + src_arc = src_arcs_data[src_arc_idx012]; + // handle the arcs belongs to index + if (idx2 == 0) { + // if the fsa to be replaced in is empty, this arc would point to + // its original dest-state + if (0 == num_states) { + oarc.dest_state = orig_dest_state_idx0123 - idx0xxx; + } else { + // this arc would point to the initial state of the fsa in src, + // the state id bias to current state(the src-state) is the count + // of all the ostates coresponding to the original state util now, + // the idx4 enumerates foo index + int32_t idx012_t = idx01x + 0, + idx2_t = idx4, + idx012x_t = tos_row_splits3_data[idx012_t], + idx012x_next_t = + tos_row_splits3_data[idx012_t + idx2_t + 1], + bias = idx012x_next_t - idx012x_t; + oarc.dest_state = idx0123 + bias - idx0xxx; + } + // set the label of the arc we are replacing to be 0(epsilon) + oarc.label = 0; + oarc.score = index_arc.score; + arc_index_map_idx = index_arc_idx012; + } else { // handle the arcs belongs to src + // the arc point to the final state of the fsa in src would point to + // the dest state of the arc we're replaceing + if (src_arc.label == -1) { + oarc.dest_state = orig_dest_state_idx0123 - idx0xxx; + } else { + // this is the inner arc of the fsa in src + int32_t dest_state_idx012x = idx0123 - idx3, + dest_state_idx0123 = dest_state_idx012x 
+ src_arc.dest_state; + oarc.dest_state = dest_state_idx0123 - idx0xxx; + } + // arcs in src fsas that point to final state would set to epsilon + // arc (label from -1 to 0) + oarc.label = src_arc.label == -1 ? 0 : src_arc.label; + oarc.score = src_arc.score; + arc_src_map_idx = src_arc_idx012; + } + } + if (arc_map_src_data) + arc_map_src_data[idx01234] = arc_src_map_idx; + if (arc_map_index_data) + arc_map_index_data[idx01234] = arc_index_map_idx; + oarcs_data[idx01234] = oarc; + }); + // remove current axes 1 and 2... [after removing axis 1, old axis 2 becomes + // axis 1, so remove axis 1 twice]. + RaggedShape temp = RemoveAxis(full_shape, 1); + return FsaVec(RemoveAxis(temp, 1), oarcs); +} + +FsaOrVec RemoveEpsilonSelfLoops(FsaOrVec &src, + Array1 *arc_map /* = nullptr */) { + NVTX_RANGE(K2_FUNC); + if (src.NumAxes() == 2) { + FsaVec temp = FsaToFsaVec(src); + return RemoveEpsilonSelfLoops(temp, arc_map).RemoveAxis(0); + } + K2_CHECK_EQ(src.NumAxes(), 3); + + ContextPtr &c = src.Context(); + int32_t num_arcs = src.NumElements(); + Renumbering renumber_lists(c, num_arcs); + char *keep_list_data = renumber_lists.Keep().Data(); + + const Arc *arcs_data = src.values.Data(); + K2_EVAL( + c, num_arcs, lambda_set_keep, (int32_t i)->void { + Arc arc = arcs_data[i]; + char keep; + if (arc.label == 0 && arc.src_state == arc.dest_state) { + // This arc is an epsilon self-loop, so it should be removed + keep = 0; + } else { + keep = 1; + } + keep_list_data[i] = keep; + }); + FsaVec ans = Index(src, 2, renumber_lists.New2Old(), arc_map); + return ans; +} + +} // namespace k2 diff --git a/cuda_code/functions_5.cu b/cuda_code/functions_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..96cca058474016f889416f42bce7eb8367d4e90f --- /dev/null +++ b/cuda_code/functions_5.cu @@ -0,0 +1,119 @@ +#include "functions.cuh" + +#include +#include + +using namespace std; + +__global__ +void substract_matrix(data_t * A, data_t * B, data_t * C, lenght_t n) +{ + index_t id = blockIdx.x * blockDim.x + threadIdx.x; + + if(id < n) + C[id] = A[id] - B[id]; +} + +__global__ +void divide_matrix(data_t * A, int * B, lenght_t n) +{ + index_t id = blockIdx.x * blockDim.x + threadIdx.x; + + if(id < n) + A[id] = A[id] / B[id]; +} + +__global__ +void divide_image(data_t * I, data_t * X, lenght_t rows, lenght_t colums, lenght_t n, lenght_t p) +{ + index_t x = blockDim.x * blockIdx.x + threadIdx.x; + index_t y = blockDim.y * blockIdx.y + threadIdx.y; + + if(x < rows - p + 1 && y < colums - p + 1) + { + index_t i = x + y * (rows - p + 1); + index_t k = 0; + X = & X[i * n]; + + for (index_t b = y; b < y + p; ++b) + { + for (index_t a = x; a < x + p; ++a) + { + X[k] = I[a + b * rows]; + k++; + } + } + } +} + +__global__ +void sum_image_patches(data_t * I, data_t * X, index_t * C, lenght_t rows, lenght_t colums, lenght_t n, lenght_t p) +{ + index_t x = blockDim.x * blockIdx.x + threadIdx.x; + index_t y = blockDim.y * blockIdx.y + threadIdx.y; + + if(x < rows && y < colums) + { + index_t idx = x + y * rows; + index_t a = x - p + 1; + index_t b = y - p + 1; + + index_t px, py, pp, pi; + + px = p; + for(index_t i = a; i < a + p; i++) + { + px = px - 1; + py = p; + + for(index_t j = b; j < b + p; j++) + { + py = py - 1; + if(i >= 0 && j >= 0 && i < (rows - p + 1) && j < (colums - p + 1)) + { + pp = i + j * (rows - p + 1); + pi = px + py * p; + I[idx] += X[pi + pp * n]; + C[idx] ++; + } + } + } + } +} + +void gpu_blas_mmul(cublasHandle_t & handle, data_t * A, data_t * B, data_t * C, int m, int k, int n, bool 
transpose) +{ + data_t alf = 1; + data_t bet = 0; + data_t * alpha = &alf; + data_t * beta = &bet; + + // Do the actual multiplication + if(transpose) + cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, alpha, A, k, B, k, beta, C, m); + else + cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, m, B, k, beta, C, m); +} + +void fill_matrix(data_t * A, lenght_t n) +{ + srand (time(NULL)); + + for(index_t i = 0; i < n; i++) + { + A[i] = rand() % int(1E6); + A[i] /= 1E6; + } +} + +void read_image(data_t * A, const char * file, lenght_t nr_rows_A, lenght_t nr_cols_A) +{ + ifstream lee(file); + + for(index_t i = 0; i < nr_rows_A; ++i) + for(index_t j = 0; j < nr_cols_A; ++j) + lee>>A[i + j * nr_rows_A]; + + lee.close(); +} + diff --git a/cuda_code/fused_l2_nn_6.cu b/cuda_code/fused_l2_nn_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..07b7c109762fdff67a249b23e1fa2a4c1495c324 --- /dev/null +++ b/cuda_code/fused_l2_nn_6.cu @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2019-2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include "../common/ml_benchmark.hpp" + +namespace MLCommon { +namespace Bench { +namespace Distance { + +struct FLNParams { + int m, n, k; +}; // struct FLNParams + +template +struct FusedL2NN : public Fixture { + FusedL2NN(const std::string& name, const FLNParams& p) + : Fixture(name, std::shared_ptr( + new raft::mr::device::default_allocator)), + params(p) {} + + protected: + void allocateBuffers(const ::benchmark::State& state) override { + alloc(x, params.m * params.k); + alloc(y, params.n * params.k); + alloc(xn, params.m); + alloc(yn, params.n); + alloc(out, params.m); + alloc(workspace, params.m); + MLCommon::Random::Rng r(123456ULL); + r.uniform(x, params.m * params.k, T(-1.0), T(1.0), stream); + r.uniform(y, params.n * params.k, T(-1.0), T(1.0), stream); + MLCommon::LinAlg::rowNorm(xn, x, params.k, params.m, + MLCommon::LinAlg::L2Norm, true, stream); + MLCommon::LinAlg::rowNorm(yn, y, params.k, params.n, + MLCommon::LinAlg::L2Norm, true, stream); + auto blks = ceildiv(params.m, 256); + MLCommon::Distance::initKernel, int> + <<>>(out, params.m, std::numeric_limits::max(), + op); + } + + void deallocateBuffers(const ::benchmark::State& state) override { + dealloc(x, params.m * params.k); + dealloc(y, params.n * params.k); + dealloc(xn, params.m); + dealloc(yn, params.n); + dealloc(out, params.m); + dealloc(workspace, params.m); + } + + void runBenchmark(::benchmark::State& state) override { + loopOnState(state, [this]() { + // it is enough to only benchmark the L2-squared metric + MLCommon::Distance::fusedL2NN, int>( + out, x, y, xn, yn, params.m, params.n, params.k, (void*)workspace, op, + false, false, stream); + }); + } + + private: + FLNParams params; + T *x, *y, *xn, *yn; + cub::KeyValuePair* out; + int* workspace; + MLCommon::Distance::MinAndDistanceReduceOp op; +}; // struct FusedL2NN + +static std::vector getInputs() { + return { + {32, 
16384, 16384}, {64, 16384, 16384}, {128, 16384, 16384}, + {256, 16384, 16384}, {512, 16384, 16384}, {1024, 16384, 16384}, + {16384, 32, 16384}, {16384, 64, 16384}, {16384, 128, 16384}, + {16384, 256, 16384}, {16384, 512, 16384}, {16384, 1024, 16384}, + {16384, 16384, 32}, {16384, 16384, 64}, {16384, 16384, 128}, + {16384, 16384, 256}, {16384, 16384, 512}, {16384, 16384, 1024}, + {16384, 16384, 16384}, + }; +} + +ML_BENCH_REGISTER(FLNParams, FusedL2NN, "", getInputs()); +ML_BENCH_REGISTER(FLNParams, FusedL2NN, "", getInputs()); + +} // namespace Distance +} // namespace Bench +} // namespace MLCommon diff --git a/cuda_code/fused_op_7.cu b/cuda_code/fused_op_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..3d7caab2fb31784e658159ecb7ff3bb5cb8d2e4d --- /dev/null +++ b/cuda_code/fused_op_7.cu @@ -0,0 +1,844 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +// Additional use of MXNET_USE_CUDA is not needed to guard a '.cu' file. +#if MXNET_ENABLE_CUDA_RTC + +#include +#include +#include +#include +#include +#include +#include "./fused_op.h" +#include "./fused_op-inl.h" +#include "../operator_common.h" +#include "../elemwise_op_common.h" +#include "../../executor/exec_pass.h" +#include "../../common/cuda_utils.h" + +namespace mxnet { + +namespace { + +inline std::string mshadowTypeToString(int type) { + switch (type) { + case mshadow::kFloat32: + return "float"; + case mshadow::kFloat64: + return "double"; + case mshadow::kFloat16: + return "half"; + case mshadow::kUint8: + return "unsigned char"; + case mshadow::kInt8: + return "char"; + case mshadow::kInt32: + return "int"; + case mshadow::kInt64: + return "long long"; + case mshadow::kBool: + return "bool"; + default: + LOG(FATAL) << "Unknown type enum " << type; + } + return ""; +} + +inline int mshadowTypeToVectorLength(int type) { + switch (type) { + case mshadow::kFloat32: + return 1; + case mshadow::kFloat64: + return 1; + case mshadow::kFloat16: + return 2; + case mshadow::kUint8: + return 4; + case mshadow::kInt8: + return 4; + case mshadow::kInt32: + return 1; + case mshadow::kInt64: + return 1; + case mshadow::kBool: + return 4 / sizeof(bool); + default: + LOG(FATAL) << "Unknown type enum " << type; + } + return 0; +} + +inline void replaceString(std::string *input, const std::string old, const std::string repl) { + size_t pos = 0; + while ((pos = input->find(old, pos)) != std::string::npos) { + input->replace(pos, old.size(), repl); + pos += repl.size(); + } +} + +inline std::vector splitStringToVector(const std::string& input, const std::string def) { + size_t pos_start = 0, pos_end; + const std::string& s = input.substr(1, input.length()-2); + std::vector res; + + auto convert_token = [def](std::string token){ + if (token == def) { + return 0; + 
} + return std::stoi(token); + }; + + while ((pos_end = s.find(",", pos_start)) != std::string::npos) { + std::string token = s.substr(pos_start, pos_end - pos_start); + pos_start = pos_end + 1; + if (token.length() > 0) { + res.push_back(convert_token(token)); + } + } + + if (pos_start < s.length()) { + res.push_back(convert_token(s.substr(pos_start))); + } + return res; +} + +std::string ParseOpDescription(const std::vector& op_desc, + const std::map, std::string>& variables, + const nnvm::IndexedGraph::Node& node) { + const auto* source = node.source; + std::string fmt = op_desc[0]; + for (size_t j = 1; j < op_desc.size(); ++j) { + const std::string& desc = op_desc[j]; + std::string sub; + if (desc[0] == '_') { + // Argument + const int arg_id = std::stoi(desc.substr(1)); + sub = variables.at({node.inputs[arg_id].node_id, node.inputs[arg_id].index}); + } else { + sub = source->attrs.dict.at(desc); + } + size_t pos = fmt.find("%"); + CHECK_NE(pos, std::string::npos); + fmt.replace(pos, 1, sub); + } + return fmt; +} + +void AddShape(const mxnet::TShape& shape, + std::vector>* shapes) { + // We need alignment to 8 bytes for size_t in the Shape struct + // so if ndim is odd, there will be 4B of padding + int ndim = shape.ndim(); + const int offset = ndim % 2 == 0 ? 2 : 3; + shapes->push_back(std::vector(ndim + offset)); + std::vector& tensor_shapes = shapes->back(); + size_t total_size = 1; + for (int i = ndim-1; i >= 0; i--) { + tensor_shapes[i] = shape[i]; + total_size *= shape[i]; + } + size_t * shape_size_ptr = reinterpret_cast(&tensor_shapes[ndim + offset - 2]); + *shape_size_ptr = total_size; +} + +void AddPointerAndShape(const TBlob& data, + std::vector *ptrs, + std::vector>* shapes, + mshadow::Stream * s) { + using namespace mshadow; + MSHADOW_TYPE_SWITCH_WITH_BOOL(data.type_flag_, DType, { + Tensor tensor = data.FlatTo1D(s); + ptrs->push_back(tensor.dptr_); + AddShape(data.shape_, shapes); + }); +} + +// Obtain compilation log from the program. +std::string GetCompileLog(nvrtcProgram program) { + size_t log_size_including_null; + NVRTC_CALL(nvrtcGetProgramLogSize(program, &log_size_including_null)); + // For most std::string implementations, this is probably 1 char bigger than needed. OK though. + std::string log(log_size_including_null, '\0'); + NVRTC_CALL(nvrtcGetProgramLog(program, &log[0])); + // Make sure the string reflects the true size (so minus the null terminator). + log.resize(log_size_including_null - 1); + return log; +} + +// Obtain compilation result (ptx assembly) from the program. +std::string GetPtx(nvrtcProgram program) { + size_t ptx_size_including_null; + NVRTC_CALL(nvrtcGetPTXSize(program, &ptx_size_including_null)); + // For most std::string implementations, this is probably 1 char bigger than needed. OK though. + std::string ptx(ptx_size_including_null, '\0'); + NVRTC_CALL(nvrtcGetPTX(program, &ptx[0])); + // Make sure the string reflects the true size (so minus the null terminator). 
+ ptx.resize(ptx_size_including_null - 1); + return ptx; +} + +} // namespace + +std::string FusedOp::GenerateCode(const std::vector &req, + const std::vector &in_dtypes, + const std::vector &out_dtypes, + const std::vector &in_ndims, + const std::vector &out_ndims, + const mxnet::ShapeVector &node_shapes, + const std::vector &node_dtypes, + const int nvec, + const std::string &kernel_name, + std::vector* check_shapes) { + const auto& g = subgraph_.indexed_graph(); + std::string code = ""; + int temp_name_counter = 0; + using NodeEntry = nnvm::IndexedGraph::NodeEntry; + std::map, std::string> variables; + std::map load_index; + bool check_shapes_compile = true; + + std::vector outputs(g.num_nodes()); + + for (size_t i = 0; i < g.num_nodes(); ++i) { + const auto& node = g[i]; + if (node.source != nullptr) { + outputs[i] = node.source->num_outputs(); + } else { + outputs[i] = 0; + } + } + + for (size_t i = 0; i < g.num_nodes(); ++i) { + const auto& node = g[i]; + const auto* source = node.source; + if (source != nullptr) { + if (source->is_variable()) { + load_index[i] = 1; + } else { + std::string op_name = source->op()->name; + if (fusion::slice_ops.find(op_name) != fusion::slice_ops.end()) { + load_index[node.inputs[0].node_id] = 0; + } + } + } + } + for (size_t i = 0; i < g.num_nodes(); ++i) { + const auto& node = g[i]; + const auto* source = node.source; + if (source != nullptr) { + if (source->is_variable()) { + if (load_index[i]) { + const auto& var_name = source->attrs.name; + code += "const auto vec_" + var_name + " = op::load_index(" + + var_name + ", offset, " + var_name + "_shape);\n"; + variables[{i, 0}] = var_name; + } + CHECK_EQ(outputs[i], 1); + } else { + std::string op_name = source->op()->name; + if (fusion::slice_ops.find(op_name) != fusion::slice_ops.end()) { + int node_id = node.inputs[0].node_id; + const uint32_t input_entry_id = g.entry_id(node.inputs[0]); + const auto& shape = node_shapes[input_entry_id]; + const int ndim = shape.ndim(); + const auto& var_name = g[node_id].source->attrs.name; + const auto vec_name = "vec_" + var_name + "_" + std::to_string(i); + load_index[node_id] = 0; + auto parse_tuple = [](const std::string& input, const std::string def) { + std::string out = input; + replaceString(&out, "(", "{"); + replaceString(&out, ")", "}"); + replaceString(&out, "None", def); + replaceString(&out, " ", ""); + return out; + }; + auto build_tuple = [ndim](int axis, const std::string str, const std::string def) { + if (axis < 0 && + axis >= -ndim) { + axis += ndim; + } + if (axis < 0 || axis >= ndim) { + LOG(FATAL) << "Axis " << axis << " is out of bounds for array of dimension " << ndim; + } + std::string tuple = "{"; + for (int i = 0; i < axis; i++) { + tuple = tuple + def + ","; + } + tuple += str; + for (int i = axis + 1; i < ndim; i++) { + tuple = tuple + "," + def; + } + tuple += "}"; + return tuple; + }; + auto check_tuple = [ndim, nvec](const std::string str) { + std::vector tuple = splitStringToVector(str, "INT_MAX"); + if (tuple[ndim-1] % nvec == 0) { + return true; + } + return false; + }; + auto build_string_axis = [ndim](int axis) { + if (axis < 0) { + axis = ndim + axis; + } + return std::to_string(axis); + }; + auto build_string_end = [i, ndim, var_name](std::string* code) { + std::string end_var_name = var_name + "_" + std::to_string(i) + "_end"; + *code += "op::Shape<" + std::to_string(ndim) + "> "+ end_var_name + ";\n"; + *code += end_var_name + ".set(INT_MAX);\n"; + return end_var_name; + }; + std::string begin; + std::string end; + if 
(op_name == "broadcast_like" || op_name == "slice_like") { + uint32_t like_id = g.entry_id(i, 0); + begin = build_tuple(0, "0", "0"); + std::string extra_var_name = "extra_" + std::to_string(like_id) + "_shape"; + if (std::find(extra_shape_args_.begin(), extra_shape_args_.end(), like_id) == + extra_shape_args_.end()) { + extra_shape_args_.push_back(like_id); + } + if (check_shapes) { + check_shapes->push_back(like_id); + check_shapes->push_back(input_entry_id); + } + end = extra_var_name; + } else { + begin = parse_tuple(source->attrs.dict.at("begin"), "0"); + end = parse_tuple(source->attrs.dict.at("end"), "INT_MAX"); + if (op_name == "slice_axis") { + int axis = std::stoi(source->attrs.dict.at("axis")); + begin = build_tuple(axis, begin, "0"); + end = build_tuple(axis, end, "INT_MAX"); + } + if (check_shapes) { + if (check_tuple(begin) && check_tuple(end)) { + check_shapes->push_back(input_entry_id); + } else { + check_shapes_compile = false; + } + } + } + std::string slice_func = "load_slice"; + if (!check_shapes) { + slice_func = "fast_" + slice_func; + } + code += "const auto " + vec_name + " = op::" + slice_func + "(" + + var_name + ", " + var_name + "_shape," + begin + + "," + end + ", offset);\n"; + CHECK_EQ(outputs[i], 1); + variables[{i, 0}] = vec_name; + continue; + } + } + } + } + + if (!check_shapes_compile) { + check_shapes->clear(); + } + + size_t counter = 0; + for (const auto& entry : g.outputs()) { + std::string var_name = "output" + std::to_string(counter); + code += "op::VectorType vec_" + var_name + ";\n"; + ++counter; + } + + code += "for (int j = 0; j < nvec; j++ ) {\n"; + + + for (size_t i = 0; i < g.num_nodes(); ++i) { + const auto& node = g[i]; + const auto* source = node.source; + if (source != nullptr) { + std::string var_name = "temp" + std::to_string(temp_name_counter++); + if (source->is_variable()) { + if (load_index[i]) { + code += "const auto " + var_name + " = op::load(vec_" + + variables[{i, 0}] + ".x[j]);\n"; + CHECK_EQ(outputs[i], 1); + variables[{i, 0}] = var_name; + } + } else { + std::string op_name = source->op()->name; + if (fusion::ops_desc.find(op_name) != fusion::ops_desc.end()) { + const std::vector>& op_descs = + fusion::ops_desc.at(op_name); + CHECK_EQ(outputs[i], op_descs.size()); + size_t count = 0; + for (const auto& op_desc : op_descs) { + var_name = "temp" + std::to_string(temp_name_counter++); + const std::string& fmt = ParseOpDescription(op_desc, variables, node); + code += "const auto " + var_name + " = " + fmt + ";\n"; + variables[{i, count}] = var_name; + ++count; + } + continue; + } + + if (fusion::slice_ops.find(op_name) != fusion::slice_ops.end()) { + code += "const auto " + var_name + " = op::load(" + variables[{i, 0}] + ".x[j]);\n"; + variables[{i, 0}] = var_name; + continue; + } + + + // Special cases with variable number + // of inputs/outputs, listed in + // fusion::variable_io_ops + if (op_name == "add_n") { + CHECK_EQ(outputs[i], 1); + const auto& arg = variables[{node.inputs[0].node_id, node.inputs[0].index}]; + code += "auto " + var_name + " = " + arg + ";\n"; + for (size_t inp = 1; inp < node.inputs.size(); ++inp) { + const auto& temp_arg = variables[{node.inputs[inp].node_id, node.inputs[inp].index}]; + code += var_name + " = op::add(" + var_name + ", " + temp_arg + ");\n"; + } + variables[{i, 0}] = var_name; + continue; + } + + if (op_name == "_backward_Activation") { + CHECK_EQ(outputs[i], 1); + std::string act_type = node.source->attrs.dict.at("act_type"); + std::string rhs, lhs; + rhs = 
variables[{node.inputs[0].node_id, node.inputs[0].index}]; + if (act_type == "relu" || + act_type == "sigmoid" || + act_type == "tanh") { + lhs = variables[{node.inputs[1].node_id, node.inputs[1].index}]; + } else { + lhs = variables[{node.inputs[2].node_id, node.inputs[2].index}]; + } + code += "const auto " + var_name + " = op::backward_" + act_type + + "(" + lhs + ", " + rhs + ");\n"; + + variables[{i, 0}] = var_name; + continue; + } + + if (op_name == "amp_multicast" || op_name == "_backward_amp_multicast") { + CHECK_EQ(outputs[i], node.inputs.size()); + for (size_t counter = 0; counter < outputs[i]; ++counter) { + const auto& input = node.inputs[counter]; + var_name = "temp" + std::to_string(temp_name_counter++); + const auto& arg = variables[{input.node_id, input.index}]; + code += "const auto " + var_name + " = " + arg + ";\n"; + variables[{i, counter}] = var_name; + } + continue; + } + + if (op_name == "_backward_cast") { + CHECK_EQ(outputs[i], 1); + const int output_type = node_dtypes[g.entry_id(i, 0)]; + const auto& arg = variables[{node.inputs[0].node_id, node.inputs[0].index}]; + code += "const auto " + var_name + " = op::cast<" + mshadowTypeToString(output_type) + + ">(" + arg + ");\n"; + variables[{i, 0}] = var_name; + continue; + } + + // LeakyReLU, look for act_type + if (op_name == "LeakyReLU") { + std::string act_type = node.source->attrs.dict.at("act_type"); + const std::vector>& op_descs = + fusion::LeakyReLU_ops.at(act_type); + if (fusion::LeakyReLU_ops.find(act_type) != fusion::LeakyReLU_ops.end()) { + CHECK_EQ(outputs[i], op_descs.size()); + size_t count = 0; + for (const auto& op_desc : op_descs) { + var_name = "temp" + std::to_string(temp_name_counter++); + const std::string& fmt = ParseOpDescription(op_desc, variables, node); + code += "const auto " + var_name + " = " + fmt + ";\n"; + variables[{i, count}] = var_name; + ++count; + } + continue; + } + } + if (op_name == "_backward_LeakyReLU") { + std::string act_type = node.source->attrs.dict.at("act_type"); + const std::vector>& op_descs = + fusion::LeakyReLU_bwd_ops.at(act_type); + if (fusion::LeakyReLU_ops.find(act_type) != fusion::LeakyReLU_bwd_ops.end()) { + CHECK_EQ(outputs[i], op_descs.size()); + size_t count = 0; + for (const auto& op_desc : op_descs) { + var_name = "temp" + std::to_string(temp_name_counter++); + const std::string& fmt = ParseOpDescription(op_desc, variables, node); + code += "const auto " + var_name + " = " + fmt + ";\n"; + variables[{i, count}] = var_name; + ++count; + } + continue; + } + } + + LOG(FATAL) << "Unrecognized op " + op_name; + } + } else { + LOG(FATAL) << "Encountered node with NULL source."; + } + } + + counter = 0; + for (const auto& entry : g.outputs()) { + const std::string& var = variables[{entry.node_id, entry.index}]; + const auto var_name = "output" + std::to_string(counter); + code += "vec_" + var_name + ".x[j] = op::store("+ var +", " + var_name + ");\n"; + ++counter; + } + + code += "}\n"; + + counter = 0; + + for (const auto& entry : g.outputs()) { + const std::string& var = variables[{entry.node_id, entry.index}]; + if (req[counter] == kWriteTo || req[counter] == kWriteInplace) { + const auto var_name = "output" + std::to_string(counter); + code += "op::store_index(vec_" + var_name + ", i, " + var_name + ", " + + var_name + "_shape);\n"; + } else if (req[counter] == kAddTo) { + const auto var_name = "output" + std::to_string(counter); + code += "op::store_add_index(vec_" + var_name + ", i, " + var_name + ", " + + var_name + "_shape);\n"; + } else if (req[counter] 
== kNullOp) { + // nullptr req, do not do anything + } else { + LOG(FATAL) << "Encountered unexpected req."; + } + ++counter; + } + + // Add boilerplate and type information + std::string kernel_params = ""; + std::string tensor_params = ""; + nnvm::Symbol sym; + sym.outputs = subgraph_.outputs; + const std::vector input_names = sym.ListInputNames(nnvm::Symbol::kAll); + size_t num_params = in_dtypes.size() + out_dtypes.size(); + size_t i = 0; + std::string aux_code = "static const int nvec = " + std::to_string(nvec) + ";\n"; + + for (const auto &shape_id : extra_shape_args_) { + std::string shape_name = "extra_" + std::to_string(shape_id) + "_shape"; + int ndim = node_shapes[shape_id].ndim(); + kernel_params += " const op::Shape<" + std::to_string(ndim) + "> " + shape_name; + kernel_params += ", "; + } + for (const auto &type : in_dtypes) { + std::string type_name = mshadowTypeToString(type); + std::string dtype_var = "DType_" + input_names[i]; + std::string dim_var = "ndim_" + input_names[i]; + std::string dim_val = std::to_string(in_ndims[i]); + aux_code = "using " + dtype_var + " = " + type_name + ";\n" + aux_code; + aux_code = "static const int " + dim_var + " = " + dim_val + ";\n" + aux_code; + tensor_params += dtype_var + "* " +input_names[i]; + kernel_params += " const op::Shape<" + dim_val + "> " + input_names[i]+"_shape"; + ++i; + if (i < num_params) { + tensor_params += ", "; + } + kernel_params += ", "; + } + for (const auto &type : out_dtypes) { + std::string type_name = mshadowTypeToString(type); + std::string out_name = "output" + std::to_string(i - in_dtypes.size()); + std::string dtype_var = "DType_" + out_name; + std::string dim_var = "ndim_" + out_name; + std::string dim_val = std::to_string(out_ndims[i - in_dtypes.size()]); + aux_code = "static const int " + dim_var + " = " + dim_val + ";\n" + aux_code; + aux_code = "using " + dtype_var + " = " + type_name + ";\n" + aux_code; + tensor_params += dtype_var + "* " + out_name; + kernel_params += " const op::Shape<" + dim_val + "> " + out_name+"_shape"; + ++i; + if (i < num_params) { + tensor_params += ", "; + } + kernel_params += ", "; + } + kernel_params += tensor_params; + + // Create kernel source (minus the common header) + return aux_code + "\n" + + "__launch_bounds__(" + std::to_string(FusedOp::NTHREADS) + ")\n" + + "__global__ void FusedKernel_" + kernel_name + + "(size_t N, " + kernel_params + ") {\n" + + fusion::kernel_begin + "\n" + + code + "\n" + + fusion::kernel_end; +} + +CUfunction FusedOp::CompileCode(const std::string &code, + const std::string &kernel_name, + int dev_id) { + // Guard NVRTC calls + std::lock_guard lock_nvrtc(mutex_); + // Local class for value type of compile cache + struct KernelInfo { + std::string mangled_name; + std::string ptx; + std::vector functions; + }; + // Maps from the cuda source code (minus header) to the ptx and jit-compiled CUfunctions. + using KernelCache = std::map; + // Per-gpu-architecture compiled kernel cache with jit-compiled function for each device context + static std::map compiled_kernels; + int sm_arch = SMArch(dev_id); + KernelCache& compiled_kernels_this_arch = compiled_kernels[sm_arch]; // make null map as needed + KernelInfo& kinfo = compiled_kernels_this_arch[code]; // make KernelInfo as needed + if (kinfo.ptx.size() == 0) { + // It's the first time we've seen this kernel, so we need to generate the ptx and mangled_name. 
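+ // Steps below: prepend the common header to the generated source, create an
+ // NVRTC program, register the kernel name via nvrtcAddNameExpression() so the
+ // mangled name can be queried after compilation, compile for this device's SM
+ // architecture, then extract the PTX and lowered name; both are cached in
+ // `kinfo`, keyed by architecture and source code.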
+ static std::string common_header = + std::string(fusion::fp16_support_string) + "\n" + + fusion::type_support_string + "\n" + + fusion::function_definitions + "\n" + + fusion::backward_function_definitions + "\n"; + std::string code_with_header = common_header + code; + // If verbose mode, output kernel source, though not including the common header + if (dmlc::GetEnv("MXNET_FUSION_VERBOSE", false)) { + LOG(INFO) << "\n" << std::string(80, '-') << "\n" << code; + } + if (compiled_kernels_this_arch.size() == CACHESIZE_WARN_THRESHOLD + 1 && + dmlc::GetEnv("MXNET_FUSION_SIZE_WARNING", true)) { + LOG(WARNING) << "The number of different fused ops exceeds " << CACHESIZE_WARN_THRESHOLD + << ". Set MXNET_FUSION_SIZE_WARNING=0 to quiet this warning."; + } + nvrtcProgram program; + NVRTC_CALL(nvrtcCreateProgram(&program, // prog + &code_with_header[0], // buffer + (kernel_name + "_kernel.cu").c_str(), // name + 0, // num headers + nullptr, // headers + nullptr)); // include names + + std::string gpu_arch_arg = "--gpu-architecture=compute_" + std::to_string(sm_arch); + const char *opts[] = {gpu_arch_arg.c_str(), + "--std=c++14"}; + const std::string kernel_name_demangled = "FusedKernel_" + kernel_name; + NVRTC_CALL(nvrtcAddNameExpression(program, (kernel_name_demangled).c_str())); + + nvrtcResult compileResult = nvrtcCompileProgram(program, // prog + 2, // num options + opts); // options + CHECK_EQ(compileResult, NVRTC_SUCCESS) + << "NVRTC Compilation failed. Please set environment variable MXNET_USE_FUSION to 0.\n" + << GetCompileLog(program); + + kinfo.ptx = GetPtx(program); + const char *mangled_name; + NVRTC_CALL(nvrtcGetLoweredName(program, + kernel_name_demangled.c_str(), + &mangled_name)); + kinfo.mangled_name = mangled_name; + // Destroy the program. 
+ NVRTC_CALL(nvrtcDestroyProgram(&program)); + } + // Ensure function array is deep enough to index by dev_id + while (kinfo.functions.size() <= static_cast(dev_id)) + kinfo.functions.push_back(static_cast(nullptr)); + // Jit-compile ptx for the device as needed + if (kinfo.functions[dev_id] == static_cast(nullptr)) { + // Make sure driver context is set to the proper device + CUdevice cu_device; + CUcontext context; + CUDA_DRIVER_CALL(cuDeviceGet(&cu_device, dev_id)); + CUDA_DRIVER_CALL(cuDevicePrimaryCtxRetain(&context, cu_device)); + // Jit-compile ptx for the driver's current context + CUmodule module; + CUDA_DRIVER_CALL(cuModuleLoadData(&module, kinfo.ptx.c_str())); + CUDA_DRIVER_CALL(cuModuleGetFunction(&kinfo.functions[dev_id], + module, + kinfo.mangled_name.c_str())); + } + return kinfo.functions[dev_id]; +} + + +void FusedOp::CheckShapesAndTypes(const std::vector &inputs, + const std::vector &outputs, + std::vector *in_dtypes, + std::vector *in_ndims, + std::vector *out_dtypes, + std::vector *out_ndims, + int *nvec) { + std::vector in_shapes; + std::vector out_shapes; + CHECK_EQ(inputs.size(), inputs_.size()); + CHECK_EQ(outputs.size(), outputs_.size()); + + for (size_t counter = 0; counter < inputs.size(); ++counter) { + const auto& blob = inputs[counter]; + in_dtypes->push_back(blob.type_flag_); + in_ndims->push_back(blob.ndim()); + in_shapes.push_back(blob.shape_); + initialized_ = initialized_ && blob.type_flag_ == inputs_[counter].dtype; + initialized_ = initialized_ && blob.ndim() == inputs_[counter].ndim; + inputs_[counter].dtype = blob.type_flag_; + inputs_[counter].ndim = blob.ndim(); + *nvec = max(*nvec, mshadowTypeToVectorLength(blob.type_flag_)); + } + + for (size_t counter = 0; counter < outputs.size(); ++counter) { + const auto& blob = outputs[counter]; + out_dtypes->push_back(blob.type_flag_); + out_ndims->push_back(blob.ndim()); + out_shapes.push_back(blob.shape_); + initialized_ = initialized_ && blob.type_flag_ == outputs_[counter].dtype; + initialized_ = initialized_ && blob.ndim() == outputs_[counter].ndim; + outputs_[counter].dtype = blob.type_flag_; + outputs_[counter].ndim = blob.ndim(); + *nvec = max(*nvec, mshadowTypeToVectorLength(blob.type_flag_)); + } + + for (auto it = intermediate_shapes_.begin(); + it != intermediate_shapes_.end(); + ++it) { + if (it->input_attr == in_shapes && it->output_attr == out_shapes) { + intermediate_shapes_.erase(intermediate_shapes_.begin(), it); + break; + } + } + for (auto it = intermediate_dtypes_.begin(); + it != intermediate_dtypes_.end(); + ++it) { + if (it->input_attr == *in_dtypes && it->output_attr == *out_dtypes) { + intermediate_dtypes_.erase(intermediate_dtypes_.begin(), it); + break; + } + } +} + +template <> +void FusedOp::Forward(const nnvm::NodeAttrs& attrs, + const OpContext &ctx, + const std::vector &inputs, + const std::vector &req, + const std::vector &outputs) { + using namespace mshadow; + std::lock_guard lock(my_mutex_); + CHECK_GE(outputs.size(), 1) << "There needs to be at least 1 output."; + + std::vector in_dtypes; + std::vector in_ndims; + std::vector out_dtypes; + std::vector out_ndims; + int nvec = 1; + + CheckShapesAndTypes(inputs, outputs, &in_dtypes, &in_ndims, + &out_dtypes, &out_ndims, &nvec); + + const auto& node_shapes = intermediate_shapes_[0].internal_attr; + const auto& node_dtypes = intermediate_dtypes_[0].internal_attr; + + int dev_id = ctx.run_ctx.ctx.dev_id; + + // A change between training and inference modes may require different kernel functions + initialized_ = initialized_ && 
(req == saved_reqs_); + saved_reqs_ = req; + + if (!initialized_) { + const auto& code = GenerateCode(req, in_dtypes, out_dtypes, in_ndims, out_ndims, + node_shapes, node_dtypes, nvec, attrs.name, &check_shape_args_); + kernel_functions_[fusion::kGeneral] = CompileCode(code, attrs.name, dev_id); + if (check_shape_args_.size() > 0) { + const auto& code = GenerateCode(req, in_dtypes, out_dtypes, in_ndims, out_ndims, + node_shapes, node_dtypes, nvec, attrs.name, nullptr); + kernel_functions_[fusion::kShapeOptimized] = CompileCode(code, attrs.name, dev_id); + } + initialized_ = true; + kernel_function_dev_id_ = dev_id; + } + + // A change in device would force recompiling, but this is unexpected so signal as an error + if (dev_id != kernel_function_dev_id_) + LOG(FATAL) << "Fused op compiled for device " << kernel_function_dev_id_ + << ", not expecting switch to device " << dev_id; + + Stream* s = ctx.get_stream(); + auto stream = Stream::GetStream(s); + std::vector args; + size_t N = 0; + for (const auto& output : outputs) { + N = std::max(N, output.shape_.Size()); + } + N = (N + nvec - 1)/nvec; + args.push_back(&N); + + unsigned int num_blocks = (N + FusedOp::NTHREADS - 1) / FusedOp::NTHREADS; + + std::vector ptrs; + std::vector> shapes; + + for (const auto &shape_id : extra_shape_args_) { + AddShape(node_shapes[shape_id], &shapes); + } + for (const auto &data : inputs) { + AddPointerAndShape(data, &ptrs, &shapes, s); + } + for (const auto &data : outputs) { + AddPointerAndShape(data, &ptrs, &shapes, s); + } + + for (auto &tensor_shapes : shapes) { + args.push_back(tensor_shapes.data()); + } + for (auto &ptr : ptrs) { + args.push_back(reinterpret_cast(&ptr)); + } + int kernel_variant = fusion::kGeneral; + if (check_shape_args_.size() > 0) { + kernel_variant = fusion::kShapeOptimized; + for (const auto &shape_id : check_shape_args_) { + const auto& shape = node_shapes[shape_id]; + if (shape[shape.ndim()-1] % nvec != 0) { + kernel_variant = fusion::kGeneral; + } + } + } + CUDA_DRIVER_CALL( + cuLaunchKernel(kernel_functions_[kernel_variant], + num_blocks, 1, 1, // grid dim + FusedOp::NTHREADS, 1, 1, // block dim + 0, stream, // shared mem and stream + &(args[0]), 0)); // arguments +} + +void FusedOpForwardGPU(const nnvm::NodeAttrs& attrs, + const OpContext &ctx, + const std::vector &inputs, + const std::vector &req, + const std::vector &outputs) { + const FusedOpPtr& op = nnvm::get(attrs.parsed); + op->Forward(attrs, ctx, inputs, req, outputs); +} + +NNVM_REGISTER_OP(_FusedOp) +.set_attr("FCompute", FusedOpForwardGPU); + +} // namespace mxnet + +#endif // MXNET_ENABLE_CUDA_RTC diff --git a/cuda_code/fused_reshape_concat_general_layer_1.cu b/cuda_code/fused_reshape_concat_general_layer_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..39229f18be90beb35752a3b0f0c76460f0c33666 --- /dev/null +++ b/cuda_code/fused_reshape_concat_general_layer_1.cu @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include + +#ifndef NDEBUG +#include +#endif + +namespace HugeCTR { + +namespace { + +#define BLOCK_DIM_SIZE 32 +template +__global__ void fused_reshape_concat_general_kernel(bool forward, T** inputs, T* output, + int batch_size, int slot_num, size_t* vecs_size, + int output_width, int num) { + int tid = blockIdx.x * blockDim.x + threadIdx.x; + int threads_num = blockDim.x * gridDim.x; + int out_size = batch_size * slot_num * output_width; + + for (int index = tid; index < out_size; index += threads_num) { + int row = index / output_width; + int out_col = index % output_width; + + int in_no = 0; + int in_col = out_col; + int accum_width = 0; + for (int k = 0; k < num; k++) { + if (out_col < accum_width + vecs_size[k]) { + in_no = k; + in_col -= accum_width; + break; + } + accum_width += vecs_size[k]; + } + T* in = inputs[in_no]; + int in_idx = row * vecs_size[in_no] + in_col; + if (forward) { + output[index] = in[in_idx]; + } else { + in[in_idx] = output[index]; + } + } +} + +} // end of namespace + +template +FusedReshapeConcatGeneralLayer::FusedReshapeConcatGeneralLayer( + const Tensors2& in_tensors, Tensor2& out_tensor, + const std::shared_ptr>& blobs_buff, + const std::shared_ptr& gpu_resource) + : Layer(gpu_resource) { + try { + if (in_tensors.empty()) { + HCTR_OWN_THROW(Error_t::WrongInput, "Empty input tensors"); + } + + num_ = in_tensors.size(); + for (size_t i = 0; i < num_; i++) { + auto cur_in_dims = in_tensors[i].get_dimensions(); + if (i != 0) { + auto first_in_dims = in_tensors[0].get_dimensions(); + if (cur_in_dims[0] != first_in_dims[0]) { + HCTR_OWN_THROW(Error_t::WrongInput, + "All the input tensors must have the same batch_size"); + } + if (cur_in_dims[1] != first_in_dims[1]) { + HCTR_OWN_THROW(Error_t::WrongInput, "All the input tensors must have the same slot_num"); + } + } + if (cur_in_dims.size() != 3) { + HCTR_OWN_THROW(Error_t::WrongInput, "All the input tensors must be 3D"); + } + if (i == 0) { + batch_size_ = cur_in_dims[0]; + slot_num_ = cur_in_dims[1]; + } + new_width_ += cur_in_dims[2]; + h_vecs_size_.push_back(cur_in_dims[2]); + } + + std::vector out_dims = {batch_size_ * slot_num_, new_width_}; + blobs_buff->reserve(out_dims, &out_tensor); + + for (const Tensor2& in_tensor : in_tensors) { + in_tensors_.push_back(in_tensor); + } + out_tensor_ = out_tensor; + blobs_buff->reserve({num_}, &d_inputs_); + blobs_buff->reserve({num_}, &vecs_size_); + + } catch (const std::runtime_error& rt_err) { + HCTR_LOG_S(ERROR, WORLD) << rt_err.what() << std::endl; + throw; + } +} + +template +void FusedReshapeConcatGeneralLayer::initialize() { + std::shared_ptr> pinned_host_buf = + GeneralBuffer2::create(); + pinned_host_buf->reserve({num_}, &h_inputs_); + pinned_host_buf->allocate(); + + for (size_t i = 0; i < num_; i++) { + h_inputs_.get_ptr()[i] = in_tensors_[i].get_ptr(); + } + HCTR_LIB_THROW(cudaMemcpyAsync((void*)vecs_size_.get_ptr(), (void*)h_vecs_size_.data(), + num_ * sizeof(size_t), cudaMemcpyHostToDevice, + get_gpu().get_stream())); + + HCTR_LIB_THROW(cudaMemcpyAsync((void*)d_inputs_.get_ptr(), (void*)h_inputs_.get_ptr(), + num_ * sizeof(T*), cudaMemcpyHostToDevice, + get_gpu().get_stream())); +} + +template +void FusedReshapeConcatGeneralLayer::fprop(bool is_train) { + CudaDeviceContext context(get_device_id()); + Tensor2& out_tensor = out_tensor_; + T* output = out_tensor.get_ptr(); + dim3 block_size(256, 1, 1); + size_t n_sms = get_gpu().get_sm_count(); + dim3 grid_size(n_sms * 8, 1, 1); + 
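+ // Note: the kernel above uses a grid-stride loop (index += blockDim.x *
+ // gridDim.x), so this launch configuration (256-thread blocks, 8 blocks per
+ // SM) only tunes occupancy; correctness does not depend on the grid covering
+ // every output element in a single pass.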
fused_reshape_concat_general_kernel<<>>( + true, d_inputs_.get_ptr(), output, batch_size_, slot_num_, vecs_size_.get_ptr(), new_width_, + num_); +#ifndef NDEBUG + cudaDeviceSynchronize(); + HCTR_LIB_THROW(cudaGetLastError()); +#endif +} + +template +void FusedReshapeConcatGeneralLayer::bprop() { + CudaDeviceContext context(get_device_id()); + Tensor2& out_tensor = out_tensor_; + T* output = out_tensor.get_ptr(); + dim3 block_size(256, 1, 1); + size_t n_sms = get_gpu().get_sm_count(); + dim3 grid_size(n_sms * 8, 1, 1); + fused_reshape_concat_general_kernel<<>>( + false, d_inputs_.get_ptr(), output, batch_size_, slot_num_, vecs_size_.get_ptr(), new_width_, + num_); +#ifndef NDEBUG + cudaDeviceSynchronize(); + HCTR_LIB_THROW(cudaGetLastError()); +#endif +} + +template class FusedReshapeConcatGeneralLayer; + +} // namespace HugeCTR diff --git a/cuda_code/fusion_bart_decoding_op_3.cu b/cuda_code/fusion_bart_decoding_op_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..3fd710c2933b5d5459a8f670ea52a2a038b86b8b --- /dev/null +++ b/cuda_code/fusion_bart_decoding_op_3.cu @@ -0,0 +1,521 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fastertransformer/cuda/cub/cub.cuh" +#include "fusion_bart_decoding_op.h" +#include "pd_traits.h" + + +template +std::vector bart_decoding_kernel( + const paddle::Tensor& input, + const paddle::Tensor& memory_sequence_length, + const paddle::Tensor& word_emb, + const std::vector& self_layernorm_weight, + const std::vector& self_layernorm_bias, + const std::vector& self_attn_query_weight, + const std::vector& self_attn_query_bias, + const std::vector& self_attn_key_weight, + const std::vector& self_attn_key_bias, + const std::vector& self_attn_value_weight, + const std::vector& self_attn_value_bias, + const std::vector& self_attn_output_weight, + const std::vector& self_attn_output_bias, + const std::vector& cross_layernorm_weight, + const std::vector& cross_layernorm_bias, + const std::vector& cross_attn_query_weight, + const std::vector& cross_attn_query_bias, + const std::vector& cross_attn_key_weight, + const std::vector& cross_attn_key_bias, + const std::vector& cross_attn_value_weight, + const std::vector& cross_attn_value_bias, + const std::vector& cross_attn_output_weight, + const std::vector& cross_attn_output_bias, + const std::vector& ffn_layernorm_weight, + const std::vector& ffn_layernorm_bias, + const std::vector& ffn_intermediate_weight, + const std::vector& ffn_intermediate_bias, + const std::vector& ffn_output_weight, + const std::vector& ffn_output_bias, + const paddle::Tensor& decoder_layernorm_weight, + const paddle::Tensor& decoder_layernorm_bias, + const paddle::Tensor& embedding_weight, + const paddle::Tensor& embedding_bias, + const paddle::Tensor& position_encoding_table, + paddle::Tensor& output_ids, + paddle::Tensor& parent_ids, + paddle::Tensor& sequence_length, + 
std::string decoding_strategy, + int beam_size, + int topk, + float topp, + int head_num_, + int size_per_head_, + int num_layer_, + int start_id_, + int end_id_, + int64_t max_seq_len_, + float beam_search_diversity_rate_, + float alpha, + cublasHandle_t cublas_handle_, + cublasLtHandle_t cublaslt_handle_, + cudaStream_t stream) { + int beam_width_ = (decoding_strategy == "beam_search" || + decoding_strategy == "beam_search_v2") + ? beam_size + : 1; + int candidate_num_ = (decoding_strategy == "topk_sampling" || + decoding_strategy == "topp_sampling") + ? topk + : 1; + float probability_threshold_ = (decoding_strategy == "topk_sampling" || + decoding_strategy == "topp_sampling") + ? topp + : 0.0; + + auto input_dims = input.shape(); + int batch_size_ = (decoding_strategy == "beam_search" || + decoding_strategy == "beam_search_v2") + ? input_dims[0] / beam_width_ + : input_dims[0]; + const int memory_max_seq_len = input_dims[1]; + const int memory_hidden_dim = input_dims[2]; + const int vocab_size = word_emb.shape()[0]; + + typedef PDTraits traits_; + typedef typename traits_::DataType DataType_; + typedef typename traits_::data_t data_t_; + + DecodingInitParam decoding_params; + decoding_params.cublas_handle = cublas_handle_; + decoding_params.cublaslt_handle = cublaslt_handle_; + + decoding_params.output_ids = output_ids.mutable_data(input.place()); + decoding_params.parent_ids = parent_ids.mutable_data(input.place()); + decoding_params.sequence_length = + sequence_length.mutable_data(input.place()); + + typedef DecoderTransformerTraits DecodingTraits_; + decoding_params.stream = stream; + fastertransformer::Allocator allocator_(stream); + + decoding_params.memory_tensor = + reinterpret_cast(input.data()); + decoding_params.memory_sequence_length = memory_sequence_length.data(); + + DecoderInitParam* params = + new DecoderInitParam[num_layer_]; + + for (int i = 0; i < num_layer_; i++) { + params[i].stream = stream; + params[i].cublas_handle = cublas_handle_; + params[i].cublaslt_handle = cublaslt_handle_; + + if (decoding_strategy == "beam_search" || + decoding_strategy == "beam_search_v2") { + params[i].request_batch_size = batch_size_ * beam_width_; + params[i].request_max_mem_seq_len = memory_max_seq_len; + } else if (decoding_strategy == "sampling" || + decoding_strategy == "topk_sampling" || + decoding_strategy == "topp_sampling") { + params[i].request_batch_size = batch_size_; + params[i].request_max_mem_seq_len = memory_max_seq_len; + } + + // self attn + params[i].self_layernorm.gamma = reinterpret_cast( + self_layernorm_weight[i].data()); + params[i].self_layernorm.beta = reinterpret_cast( + self_layernorm_bias[i].data()); + // query + params[i].self_attention.query_weight.kernel = + reinterpret_cast( + self_attn_query_weight[i].data()); + params[i].self_attention.query_weight.bias = + reinterpret_cast( + self_attn_query_bias[i].data()); + // key + params[i].self_attention.key_weight.kernel = + reinterpret_cast( + self_attn_key_weight[i].data()); + params[i].self_attention.key_weight.bias = + reinterpret_cast( + self_attn_key_bias[i].data()); + // value + params[i].self_attention.value_weight.kernel = + reinterpret_cast( + self_attn_value_weight[i].data()); + params[i].self_attention.value_weight.bias = + reinterpret_cast( + self_attn_value_bias[i].data()); + // out proj + params[i].self_attention.attention_output_weight.kernel = + reinterpret_cast( + self_attn_output_weight[i].data()); + params[i].self_attention.attention_output_weight.bias = + reinterpret_cast( + 
self_attn_output_bias[i].data()); + + // cross + params[i].cross_layernorm.gamma = reinterpret_cast( + cross_layernorm_weight[i].data()); + params[i].cross_layernorm.beta = reinterpret_cast( + cross_layernorm_bias[i].data()); + // query + params[i].cross_attention.query_weight.kernel = + reinterpret_cast( + cross_attn_query_weight[i].data()); + params[i].cross_attention.query_weight.bias = + reinterpret_cast( + cross_attn_query_bias[i].data()); + // key + params[i].cross_attention.key_weight.kernel = + reinterpret_cast( + cross_attn_key_weight[i].data()); + params[i].cross_attention.key_weight.bias = + reinterpret_cast( + cross_attn_key_bias[i].data()); + // value + params[i].cross_attention.value_weight.kernel = + reinterpret_cast( + cross_attn_value_weight[i].data()); + params[i].cross_attention.value_weight.bias = + reinterpret_cast( + cross_attn_value_bias[i].data()); + // out proj + params[i].cross_attention.attention_output_weight.kernel = + reinterpret_cast( + cross_attn_output_weight[i].data()); + params[i].cross_attention.attention_output_weight.bias = + reinterpret_cast( + cross_attn_output_bias[i].data()); + + // ffn + params[i].ffn_layernorm.gamma = reinterpret_cast( + ffn_layernorm_weight[i].data()); + params[i].ffn_layernorm.beta = reinterpret_cast( + ffn_layernorm_bias[i].data()); + // intermediate proj + params[i].ffn.intermediate_weight.kernel = + reinterpret_cast( + ffn_intermediate_weight[i].data()); + params[i].ffn.intermediate_weight.bias = reinterpret_cast( + ffn_intermediate_bias[i].data()); + // out proj + params[i].ffn.output_weight.kernel = reinterpret_cast( + ffn_output_weight[i].data()); + params[i].ffn.output_weight.bias = + reinterpret_cast(ffn_output_bias[i].data()); + } + + decoding_params.layernorm.gamma = reinterpret_cast( + decoder_layernorm_weight.data()); + decoding_params.layernorm.beta = reinterpret_cast( + decoder_layernorm_bias.data()); + // for embedding + decoding_params.embedding_table = + reinterpret_cast(word_emb.data()); + + // for weight sharing matmul + decoding_params.embedding_kernel = + reinterpret_cast(embedding_weight.data()); + // for matmul bias + decoding_params.embedding_bias = + reinterpret_cast(embedding_bias.data()); + decoding_params.position_encoding_table = reinterpret_cast( + position_encoding_table.data()); + + if ("beam_search" == decoding_strategy) { + DecodingBeamsearch* decoding_beamsearch_; + decoding_beamsearch_ = new DecodingBeamsearch( + allocator_, + batch_size_, + beam_width_, + max_seq_len_, + head_num_, + size_per_head_, + vocab_size, + num_layer_, + memory_hidden_dim, + memory_max_seq_len, + start_id_, + end_id_, + beam_search_diversity_rate_, + true, /*is_fuse_topk_softMax*/ + false, /*is_fuse_qkv*/ + false, /*keep_alive_beam*/ + alpha, + false, /*normalization_before*/ + 2, + ActivationType::GELU); + + decoding_beamsearch_->forward(params, decoding_params); + + delete decoding_beamsearch_; + } else if ("beam_search_v2" == decoding_strategy) { + DecodingBeamsearch* decoding_beamsearch_; + decoding_beamsearch_ = new DecodingBeamsearch( + allocator_, + batch_size_, + beam_width_, + max_seq_len_, + head_num_, + size_per_head_, + vocab_size, + num_layer_, + memory_hidden_dim, + memory_max_seq_len, + start_id_, + end_id_, + beam_search_diversity_rate_, + true, /*is_fuse_topk_softMax*/ + false, /*is_fuse_qkv*/ + true, /*keep_alive_beam*/ + alpha, + false, /*normalization_before*/ + 2, + ActivationType::GELU); + + decoding_beamsearch_->forward(params, decoding_params); + + delete decoding_beamsearch_; + } else if 
("topk_sampling" == decoding_strategy || + "topp_sampling" == decoding_strategy || + "sampling" == decoding_strategy) { + DecodingSampling* decoding_sampling_; + decoding_sampling_ = + new DecodingSampling(allocator_, + batch_size_, + max_seq_len_, + head_num_, + size_per_head_, + vocab_size, + num_layer_, + memory_hidden_dim, + memory_max_seq_len, + start_id_, + end_id_, + candidate_num_, + probability_threshold_, + false, + false, + 2, + ActivationType::GELU); + + decoding_sampling_->forward(params, decoding_params); + + delete decoding_sampling_; + } else { + PD_THROW( + "Only beam_search, topk_sampling and topp_sampling are supported for " + "FasterTransformer. "); + } + delete[] params; + + return {output_ids, parent_ids, sequence_length}; +} + +std::vector BartDecodingCUDAForward( + const paddle::Tensor& input, + const paddle::Tensor& mem_seq_len, + const paddle::Tensor& word_embedding, + const std::vector& self_ln_weight, + const std::vector& self_ln_bias, + const std::vector& self_q_weight, + const std::vector& self_q_bias, + const std::vector& self_k_weight, + const std::vector& self_k_bias, + const std::vector& self_v_weight, + const std::vector& self_v_bias, + const std::vector& self_out_weight, + const std::vector& self_out_bias, + const std::vector& cross_ln_weight, + const std::vector& cross_ln_bias, + const std::vector& cross_q_weight, + const std::vector& cross_q_bias, + const std::vector& cross_k_weight, + const std::vector& cross_k_bias, + const std::vector& cross_v_weight, + const std::vector& cross_v_bias, + const std::vector& cross_out_weight, + const std::vector& cross_out_bias, + const std::vector& ffn_ln_weight, + const std::vector& ffn_ln_bias, + const std::vector& ffn_inter_weight, + const std::vector& ffn_inter_bias, + const std::vector& ffn_out_weight, + const std::vector& ffn_out_bias, + const paddle::Tensor& decoder_ln_weight, + const paddle::Tensor& decoder_ln_bias, + const paddle::Tensor& embedding_weight, + const paddle::Tensor& embedding_bias, + const paddle::Tensor& positional_embedding_weight, + paddle::Tensor& output_ids, + paddle::Tensor& parent_ids, + paddle::Tensor& sequence_length, + std::string decoding_strategy, + int beam_size, + int topk, + float topp, + int n_head, + int size_per_head, + int num_layer, + int bos_id, + int eos_id, + int64_t max_len, + float beam_search_diversity_rate, + float alpha) { + auto stream = input.stream(); + cublasHandle_t cublas_handle_; + cublasCreate(&cublas_handle_); + cublasLtHandle_t cublaslt_handle_; + cublasLtCreate(&cublaslt_handle_); + cublasSetStream(cublas_handle_, stream); + + std::vector ret; + + switch (input.type()) { + case paddle::DataType::FLOAT16: { + ret = bart_decoding_kernel( + input, + mem_seq_len, + word_embedding, + self_ln_weight, + self_ln_bias, + self_q_weight, + self_q_bias, + self_k_weight, + self_k_bias, + self_v_weight, + self_v_bias, + self_out_weight, + self_out_bias, + cross_ln_weight, + cross_ln_bias, + cross_q_weight, + cross_q_bias, + cross_k_weight, + cross_k_bias, + cross_v_weight, + cross_v_bias, + cross_out_weight, + cross_out_bias, + ffn_ln_weight, + ffn_ln_bias, + ffn_inter_weight, + ffn_inter_bias, + ffn_out_weight, + ffn_out_bias, + decoder_ln_weight, + decoder_ln_bias, + embedding_weight, + embedding_bias, + positional_embedding_weight, + output_ids, + parent_ids, + sequence_length, + decoding_strategy, + beam_size, + topk, + topp, + n_head, + size_per_head, + num_layer, + bos_id, + eos_id, + max_len, + beam_search_diversity_rate, + alpha, + cublas_handle_, + 
cublaslt_handle_, + stream); + break; + } + case paddle::DataType::FLOAT32: { + ret = bart_decoding_kernel( + input, + mem_seq_len, + word_embedding, + self_ln_weight, + self_ln_bias, + self_q_weight, + self_q_bias, + self_k_weight, + self_k_bias, + self_v_weight, + self_v_bias, + self_out_weight, + self_out_bias, + cross_ln_weight, + cross_ln_bias, + cross_q_weight, + cross_q_bias, + cross_k_weight, + cross_k_bias, + cross_v_weight, + cross_v_bias, + cross_out_weight, + cross_out_bias, + ffn_ln_weight, + ffn_ln_bias, + ffn_inter_weight, + ffn_inter_bias, + ffn_out_weight, + ffn_out_bias, + decoder_ln_weight, + decoder_ln_bias, + embedding_weight, + embedding_bias, + positional_embedding_weight, + output_ids, + parent_ids, + sequence_length, + decoding_strategy, + beam_size, + topk, + topp, + n_head, + size_per_head, + num_layer, + bos_id, + eos_id, + max_len, + beam_search_diversity_rate, + alpha, + cublas_handle_, + cublaslt_handle_, + stream); + break; + } + default: { + PD_THROW( + "NOT supported data type. " + "Only float16 and float32 are supported. "); + break; + } + } + + cublasDestroy(cublas_handle_); + cublasLtDestroy(cublaslt_handle_); + return ret; +} diff --git a/cuda_code/gather_layer_3.cu b/cuda_code/gather_layer_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..de5d77bd466602a2b5882ded6701b0fea83a89f3 --- /dev/null +++ b/cuda_code/gather_layer_3.cu @@ -0,0 +1,63 @@ +#include + +#include "caffe/layers/gather_layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +__global__ void gather(const int nthreads, const int K, const Dtype* bottom_data, + const Dtype* idx, Dtype* top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + top_data[index] = bottom_data[index * K + (int)idx[index]]; + } +} + +template +__global__ void gather_inverse(const int nthreads, const int K, const Dtype* bottom_data, + const Dtype* idx, Dtype* top_data) { + // note: K is the second dimension of the OUTPUT, not the input. + CUDA_KERNEL_LOOP(index, nthreads) { + const int i = index / K; + const int j = index % K; + if (j < (int)idx[i]) + top_data[index] = bottom_data[i * (K+1) + j]; + else + top_data[index] = bottom_data[i * (K+1) + j + 1]; + } +} + +template +void GatherLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const GatherParameter& param = this->layer_param_.gather_param(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + const Dtype* idx = bottom[1]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + const bool inverse = param.inverse(); + const int M = bottom[0]->count(0, 1); + const int K = bottom[0]->count(1, 2); + CHECK_EQ(M * K, bottom[0]->count()) << "M,K=" << M << ","<< K << ", only support 2-d bottom[0], e.g. the output of fc layer"; + const int nthreads = inverse? 
M*(K-1): M; + if (!inverse){ + gather // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, K, bottom_data, idx, top_data); + } + else { + gather_inverse // NOLINT_NEXT_LINE(whitespace/operators) + <<>>( + nthreads, K-1, bottom_data, idx, top_data); + } + +} + +template +void GatherLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + +} + +INSTANTIATE_LAYER_GPU_FUNCS(GatherLayer); + +} // namespace caffe diff --git a/cuda_code/gauss.cu b/cuda_code/gauss.cu new file mode 100644 index 0000000000000000000000000000000000000000..6c61c273b086a83b390071b23dfa05312dfd076f --- /dev/null +++ b/cuda_code/gauss.cu @@ -0,0 +1,27 @@ +#include "examples/kernels/gauss.h" + +extern "C" __global__ void gaussFilterKernel(Pixel *image, + float weight[5][5], + int width, int height) { + int row = blockIdx.y * blockDim.y + threadIdx.y; + int col = blockIdx.x * blockDim.x + threadIdx.x; + Pixel new_value; + new_value.r = 0; + new_value.g = 0; + new_value.b = 0; + for (int xl = -2; xl <= 2; ++xl) { + for (int yl = -2; yl <= 2; ++yl) { + if (((col + xl) + (row + yl) * width) < 0 || + ((col + xl) + (row + yl) * width) >= width * height) { + continue; + } + new_value.r += + image[(col + xl) + (row + yl) * width].r * weight[xl + 2][yl + 2]; + new_value.g += + image[(col + xl) + (row + yl) * width].g * weight[xl + 2][yl + 2]; + new_value.b += + image[(col + xl) + (row + yl) * width].b * weight[xl + 2][yl + 2]; + } + } + image[col + row * width] = new_value; +} \ No newline at end of file diff --git a/cuda_code/gaussian_16.cu b/cuda_code/gaussian_16.cu new file mode 100644 index 0000000000000000000000000000000000000000..0543fa96f8693c1885bedb7b5c2da0b6da8402d8 --- /dev/null +++ b/cuda_code/gaussian_16.cu @@ -0,0 +1,514 @@ +/*----------------------------------------------------------- + ** gaussian.cu -- The program is to solve a linear system Ax = b + ** by using Gaussian Elimination. The algorithm on page 101 + ** ("Foundations of Parallel Programming") is used. + ** The sequential version is gaussian.c. This parallel + ** implementation converts three independent for() loops + ** into three Fans. Use the data file ge_3.dat to verify + ** the correction of the output. + ** + ** Written by Andreas Kura, 02/15/95 + ** Modified by Chong-wei Xu, 04/20/95 + ** Modified by Chris Gregg for CUDA, 07/20/2009 + **----------------------------------------------------------- + */ +#include +#include +#include +#include "cuda.h" +#include +#include +#include +#include "half_operator_overload.cuh" +#include "newhalf.hpp" +#ifdef RD_WG_SIZE_0_0 + #define MAXBLOCKSIZE RD_WG_SIZE_0_0 +#elif defined(RD_WG_SIZE_0) + #define MAXBLOCKSIZE RD_WG_SIZE_0 +#elif defined(RD_WG_SIZE) + #define MAXBLOCKSIZE RD_WG_SIZE +#else + #define MAXBLOCKSIZE 512 +#endif + +//2D defines. 
Go from specific to general +#ifdef RD_WG_SIZE_1_0 + #define BLOCK_SIZE_XY RD_WG_SIZE_1_0 +#elif defined(RD_WG_SIZE_1) + #define BLOCK_SIZE_XY RD_WG_SIZE_1 +#elif defined(RD_WG_SIZE) + #define BLOCK_SIZE_XY RD_WG_SIZE +#else + #define BLOCK_SIZE_XY 4 +#endif + +int Size; +float *a, *b, *finalVec; +float *m; + +FILE *fp; + +void InitProblemOnce(char *filename); +void InitPerRun(); +void ForwardSub(); +void BackSub(); +__global__ void Fan1(float *m, float *a, int Size, int t); +__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t); +void InitMat(float *ary, int nrow, int ncol); +void InitAry(float *ary, int ary_size); +void PrintMat(float *ary, int nrow, int ncolumn); +void PrintAry(float *ary, int ary_size); +void PrintDeviceProperties(); +void checkCUDAError(const char *msg); + +unsigned int totalKernelTime = 0; + +// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06 +void +create_matrix(float *m, int size){ + int i,j; + float lamda = -0.01; + float coe[2*size-1]; + float coe_i =0.0; + + for (i=0; i < size; i++) + { + coe_i = 10*exp(lamda*i); + j=size-1+i; + coe[j]=coe_i; + j=size-1-i; + coe[j]=coe_i; + } + + + for (i=0; i < size; i++) { + for (j=0; j < size; j++) { + m[i*size+j]=coe[size-1-i+j]; + } + } + + +} + + +int main(int argc, char *argv[]) +{ + printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY); + int verbose = 1; + int i, j; + char flag; + if (argc < 2) { + printf("Usage: gaussian -f filename / -s size [-q]\n\n"); + printf("-q (quiet) suppresses printing the matrix and result values.\n"); + printf("-f (filename) path of input file\n"); + printf("-s (size) size of matrix. Create matrix and rhs in this program \n"); + printf("The first line of the file contains the dimension of the matrix, n."); + printf("The second line of the file is a newline.\n"); + printf("The next n lines contain n tab separated values for the matrix."); + printf("The next line of the file is a newline.\n"); + printf("The next line of the file is a 1xn vector with tab separated values.\n"); + printf("The next line of the file is a newline. (optional)\n"); + printf("The final line of the file is the pre-computed solution. 
(optional)\n"); + printf("Example: matrix4.txt:\n"); + printf("4\n"); + printf("\n"); + printf("-0.6 -0.5 0.7 0.3\n"); + printf("-0.3 -0.9 0.3 0.7\n"); + printf("-0.4 -0.5 -0.3 -0.8\n"); + printf("0.0 -0.1 0.2 0.9\n"); + printf("\n"); + printf("-0.85 -0.68 0.24 -0.53\n"); + printf("\n"); + printf("0.7 0.0 -0.4 -0.5\n"); + exit(0); + } + + //PrintDeviceProperties(); + //char filename[100]; + //sprintf(filename,"matrices/matrix%d.txt",size); + + for(i=1;i= Size-1-t) printf("."); + //printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t); + + if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return; + int xidx = blockIdx.x * blockDim.x + threadIdx.x; + + //~ *(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / *(a_cuda+Size*t+t); + m_cuda[Size*(xidx+t+1)+t] = a_cuda[Size*(xidx+t+1)+t] / a_cuda[Size*t+t]; +} + +/*------------------------------------------------------- + ** Fan2() -- Modify the matrix A into LUD + **------------------------------------------------------- + */ + +__global__ void Fan2(half *m_cuda, half *a_cuda, half *b_cuda,int Size, int j1, int t) +{ + if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return; + if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return; + + int xidx = blockIdx.x * blockDim.x + threadIdx.x; + int yidx = blockIdx.y * blockDim.y + threadIdx.y; + //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y); + + a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)]; + //a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t]; + if(yidx == 0){ + //printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y); + //printf("xidx:%d,yidx:%d\n",xidx,yidx); + b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t]; + } +} + +/*------------------------------------------------------ + ** ForwardSub() -- Forward substitution of Gaussian + ** elimination. 
+ **------------------------------------------------------ + */ +void ForwardSub() +{ + int t; + half *m_cuda,*a_cuda,*b_cuda; + half_float::half *m_half, *a_half,*b_half; + + m_half = (half_float::half*)malloc( Size * Size * sizeof(half_float::half)); + + a_half = (half_float::half*)malloc( Size * Size * sizeof(half_float::half)); + + b_half = (half_float::half*)malloc( Size * sizeof(half_float::half)); + + for(int i =0;i>>(m_cuda,a_cuda,Size,t); + cudaThreadSynchronize(); + Fan2<<>>(m_cuda,a_cuda,b_cuda,Size,Size-t,t); + cudaThreadSynchronize(); + checkCUDAError("Fan2"); + } + // end timing kernels + struct timeval time_end; + gettimeofday(&time_end, NULL); + totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec); + + // copy memory back to CPU + cudaMemcpy(m_half, m_cuda, Size * Size * sizeof(half),cudaMemcpyDeviceToHost ); + cudaMemcpy(a_half, a_cuda, Size * Size * sizeof(half),cudaMemcpyDeviceToHost ); + cudaMemcpy(b_half, b_cuda, Size * sizeof(half),cudaMemcpyDeviceToHost ); + + + for(int i =0;i +#include +#include +#include "fixed_point.hpp" +#include "gbsa.hpp" +#include "gbsa_jvp.cuh" +#include "gpu_utils.cuh" +#include "math_utils.cuh" +#include "k_gbsa.cuh" +#include "k_gbsa_jvp.cuh" + +namespace timemachine { + +template +GBSA::GBSA( + const std::vector &charge_param_idxs, // [N] + const std::vector &atomic_radii_idxs, // [N] + const std::vector &scale_factor_idxs, // [E,2] + double alpha, + double beta, + double gamma, + double dielectric_offset, + double surface_tension, + double solute_dielectric, + double solvent_dielectric, + double probe_radius, + double cutoff_radii, + double cutoff_force +) : + + N_(charge_param_idxs.size()), + alpha_(alpha), + beta_(beta), + gamma_(gamma), + dielectric_offset_(dielectric_offset), + surface_tension_(surface_tension), + solute_dielectric_(solute_dielectric), + solvent_dielectric_(solvent_dielectric), + probe_radius_(probe_radius), + cutoff_radii_(cutoff_radii), + cutoff_force_(cutoff_force), + nblist_(charge_param_idxs.size(), D) { + + if(cutoff_radii != cutoff_force) { + throw std::runtime_error("GB currently requires that cutoff_radii be equal to cutoff_force!"); + } + + gpuErrchk(cudaMalloc(&d_charge_param_idxs_, N_*sizeof(*d_charge_param_idxs_))); + gpuErrchk(cudaMalloc(&d_scale_factor_idxs_, N_*sizeof(*d_scale_factor_idxs_))); + gpuErrchk(cudaMalloc(&d_atomic_radii_idxs_, N_*sizeof(*d_atomic_radii_idxs_))); + gpuErrchk(cudaMemcpy(d_charge_param_idxs_, &charge_param_idxs[0], N_*sizeof(*d_charge_param_idxs_), cudaMemcpyHostToDevice)); + gpuErrchk(cudaMemcpy(d_scale_factor_idxs_, &scale_factor_idxs[0], N_*sizeof(*d_scale_factor_idxs_), cudaMemcpyHostToDevice)); + gpuErrchk(cudaMemcpy(d_atomic_radii_idxs_, &atomic_radii_idxs[0], N_*sizeof(*d_atomic_radii_idxs_), cudaMemcpyHostToDevice)); + + // we probaly don't need *all* these buffers if we do just one pass, but they take up only + // O(N) ram so we don't really care and just pre-allocate everything to keep things simple. + // it also ensures that we can RAII properly. 
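+ // Editor note on the buffers allocated below: the first four hold the per-atom psi
+ // accumulators, Born radii, the OBC intermediate and the Born-force accumulators used
+ // by the inference path (d_coords_tangents == nullptr in execute_device); the *_jvp_
+ // buffers are the analogous scratch space for the forward-mode (JVP) tangent path.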
+ + const int N = charge_param_idxs.size(); + + gpuErrchk(cudaMalloc(&d_born_psi_buffer_, N*sizeof(*d_born_psi_buffer_))); + gpuErrchk(cudaMalloc(&d_born_radii_buffer_, N*sizeof(*d_born_radii_buffer_))); + gpuErrchk(cudaMalloc(&d_obc_buffer_, N*sizeof(*d_obc_buffer_))); + gpuErrchk(cudaMalloc(&d_born_forces_buffer_, N*sizeof(*d_born_forces_buffer_))); + + gpuErrchk(cudaMalloc(&d_born_radii_buffer_jvp_, N*sizeof(*d_born_radii_buffer_jvp_))); + gpuErrchk(cudaMalloc(&d_obc_buffer_jvp_, N*sizeof(*d_obc_buffer_jvp_))); + gpuErrchk(cudaMalloc(&d_obc_ri_buffer_jvp_, N*sizeof(*d_obc_ri_buffer_jvp_))); + gpuErrchk(cudaMalloc(&d_born_forces_buffer_jvp_, N*sizeof(*d_born_forces_buffer_jvp_))); + + +} + +template +GBSA::~GBSA() { + + gpuErrchk(cudaFree(d_charge_param_idxs_)); + gpuErrchk(cudaFree(d_atomic_radii_idxs_)); + gpuErrchk(cudaFree(d_scale_factor_idxs_)); + + gpuErrchk(cudaFree(d_born_psi_buffer_)); + gpuErrchk(cudaFree(d_born_radii_buffer_)); + gpuErrchk(cudaFree(d_obc_buffer_)); + gpuErrchk(cudaFree(d_born_forces_buffer_)); + + gpuErrchk(cudaFree(d_born_radii_buffer_jvp_)); + gpuErrchk(cudaFree(d_obc_buffer_jvp_)); + gpuErrchk(cudaFree(d_obc_ri_buffer_jvp_)); + gpuErrchk(cudaFree(d_born_forces_buffer_jvp_)); + + +}; + + +template +void GBSA::execute_device( + const int N, + const int P, + const double *d_coords, + const double *d_coords_tangents, + const double *d_params, + unsigned long long *d_out_coords, + double *d_out_coords_tangents, + double *d_out_params_tangents, + cudaStream_t stream +) { + + // std::cout << "exec GB" << std::endl; + + int tpb = 32; + int B = (N_+tpb-1)/tpb; + + dim3 dimGrid(B, B, 1); // x, y, z dims + + double prefactor; + if (solute_dielectric_ != 0.0 && solvent_dielectric_ != 0.0) { + // prefactor = -screening_*((1.0/solute_dielectric_) - (1.0/solvent_dielectric_)); + prefactor = -((1.0/solute_dielectric_) - (1.0/solvent_dielectric_)); + } else { + prefactor = 0.0; + } + + + // std::cout << "cutoff 12: " << cutoff_radii_ << " " << cutoff_force_ << std::endl; + + // cudaDeviceSynchronize(); + nblist_.compute_block_bounds(N, D, d_coords, stream); + + auto start = std::chrono::high_resolution_clock::now(); + if(d_coords_tangents == nullptr) { + + // inference mode + gpuErrchk(cudaMemsetAsync(d_born_psi_buffer_, 0, N*sizeof(*d_born_psi_buffer_), stream)); + gpuErrchk(cudaMemsetAsync(d_born_radii_buffer_, 0, N*sizeof(*d_born_radii_buffer_), stream)); + gpuErrchk(cudaMemsetAsync(d_obc_buffer_, 0, N*sizeof(*d_obc_buffer_), stream)); + gpuErrchk(cudaMemsetAsync(d_born_forces_buffer_, 0, N*sizeof(*d_born_forces_buffer_), stream)); + + k_compute_born_radii_gpu<<>>( + N_, + d_coords, + d_params, + d_atomic_radii_idxs_, + d_scale_factor_idxs_, + dielectric_offset_, + cutoff_radii_, + nblist_.get_block_bounds_ctr(), + nblist_.get_block_bounds_ext(), + d_born_psi_buffer_ + ); + + + // cudaDeviceSynchronize(); + gpuErrchk(cudaPeekAtLastError()); + + k_reduce_born_radii<<>>( + N_, + d_params, + d_atomic_radii_idxs_, + dielectric_offset_, + alpha_, + beta_, + gamma_, + d_born_psi_buffer_, + d_born_radii_buffer_, + d_obc_buffer_ + ); + + // cudaDeviceSynchronize(); + gpuErrchk(cudaPeekAtLastError()); + + + k_compute_born_first_loop_gpu<<>>( + N_, + d_coords, + d_params, + d_charge_param_idxs_, + d_born_radii_buffer_, + prefactor, + cutoff_force_, + nblist_.get_block_bounds_ctr(), + nblist_.get_block_bounds_ext(), + d_born_forces_buffer_, // output + d_out_coords // ouput + ); + + // cudaDeviceSynchronize(); + gpuErrchk(cudaPeekAtLastError()); + + k_reduce_born_forces<<>>( + N_, 
+ d_params, + d_atomic_radii_idxs_, + d_born_radii_buffer_, + d_obc_buffer_, + surface_tension_, + probe_radius_, + d_born_forces_buffer_ + ); + + // cudaDeviceSynchronize(); + gpuErrchk(cudaPeekAtLastError()); + + k_compute_born_energy_and_forces<<>>( + N_, + d_coords, + d_params, + d_atomic_radii_idxs_, + d_scale_factor_idxs_, + d_born_radii_buffer_, + d_obc_buffer_, + dielectric_offset_, + cutoff_force_, + nblist_.get_block_bounds_ctr(), + nblist_.get_block_bounds_ext(), + d_born_forces_buffer_, + d_out_coords + ); + + // cudaDeviceSynchronize(); + gpuErrchk(cudaPeekAtLastError()); + + // auto finish = std::chrono::high_resolution_clock::now(); + // std::chrono::duration elapsed = finish - start; + // std::cout << "Nonbonded Elapsed time: " << elapsed.count() << " s\n"; + + } else { + + // std::cout << "exec GB JVP" << std::endl; + + gpuErrchk(cudaMemsetAsync(d_born_radii_buffer_jvp_, 0, N*sizeof(*d_born_radii_buffer_jvp_), stream)); + gpuErrchk(cudaMemsetAsync(d_obc_buffer_jvp_, 0, N*sizeof(*d_obc_buffer_jvp_), stream)); + gpuErrchk(cudaMemsetAsync(d_obc_ri_buffer_jvp_, 0, N*sizeof(*d_obc_ri_buffer_jvp_), stream)); + gpuErrchk(cudaMemsetAsync(d_born_forces_buffer_jvp_, 0, N*sizeof(*d_born_forces_buffer_jvp_), stream)); + + k_compute_born_radii_gpu_jvp<<>>( + N_, + d_coords, + d_coords_tangents, + d_params, + d_atomic_radii_idxs_, + d_scale_factor_idxs_, + dielectric_offset_, + cutoff_radii_, + nblist_.get_block_bounds_ctr(), + nblist_.get_block_bounds_ext(), + d_born_radii_buffer_jvp_ + ); + + // cudaDeviceSynchronize(); + gpuErrchk(cudaPeekAtLastError()); + + k_reduce_born_radii_jvp<<>>( + N_, + d_params, + d_atomic_radii_idxs_, + dielectric_offset_, + alpha_, + beta_, + gamma_, + d_born_radii_buffer_jvp_, + d_obc_buffer_jvp_, + d_obc_ri_buffer_jvp_ + ); + + // cudaDeviceSynchronize(); + gpuErrchk(cudaPeekAtLastError()); + + k_compute_born_first_loop_gpu_jvp<<>>( + N_, + d_coords, + d_coords_tangents, + d_params, + d_charge_param_idxs_, + d_born_radii_buffer_jvp_, + prefactor, + cutoff_force_, + nblist_.get_block_bounds_ctr(), + nblist_.get_block_bounds_ext(), + d_born_forces_buffer_jvp_, // output + d_out_coords_tangents, // ouput + d_out_params_tangents // ouput + ); + + // cudaDeviceSynchronize(); + gpuErrchk(cudaPeekAtLastError()); + + k_reduce_born_forces_jvp<<>>( + N_, + d_params, + d_atomic_radii_idxs_, + d_born_radii_buffer_jvp_, + d_obc_buffer_jvp_, + d_obc_ri_buffer_jvp_, + surface_tension_, + probe_radius_, + d_born_forces_buffer_jvp_, + d_out_params_tangents + ); + + // cudaDeviceSynchronize(); + gpuErrchk(cudaPeekAtLastError()); + + + // auto start = std::chrono::high_resolution_clock::now(); + k_compute_born_energy_and_forces_jvp<<>>( + N_, + d_coords, + d_coords_tangents, + d_params, + d_atomic_radii_idxs_, + d_scale_factor_idxs_, + d_born_radii_buffer_jvp_, + d_obc_buffer_jvp_, + d_obc_ri_buffer_jvp_, + dielectric_offset_, + cutoff_force_, + nblist_.get_block_bounds_ctr(), + nblist_.get_block_bounds_ext(), + d_born_forces_buffer_jvp_, + d_out_coords_tangents, + d_out_params_tangents + ); + + // cudaDeviceSynchronize(); + gpuErrchk(cudaPeekAtLastError()); + + // auto finish = std::chrono::high_resolution_clock::now(); + // std::chrono::duration elapsed = finish - start; + // std::cout << "Nonbonded JVP Elapsed time: " << elapsed.count() << " s\n"; + + + } + + // cudaDeviceSynchronize(); + // auto finish = std::chrono::high_resolution_clock::now(); + // std::chrono::duration elapsed = finish - start; + // std::cout << "GBSA Elapsed time: " << elapsed.count() << " s\n"; + 
+ +} + +template class GBSA; +template class GBSA; + +template class GBSA; +template class GBSA; + + +} + diff --git a/cuda_code/gebrd.cu b/cuda_code/gebrd.cu new file mode 100644 index 0000000000000000000000000000000000000000..7958be4313944710d8e604e7ae895fb8c96bc43b --- /dev/null +++ b/cuda_code/gebrd.cu @@ -0,0 +1,73 @@ +#include // cudaMalloc, cudaMemcpy, etc. +#include // cusolverDn +#include "../../cusolver_utils.h" +#include // printf +#include // EXIT_FAILURE + +int main(void) { + + int m = 3; + int n = 3; + int lda = 3; + float hA[] = {1, 2, 3, 2, 5, 5, 3, 5, 12}; + + float hD_result[] = {-3.741657, -1.573688, -0.339662}; + float hE_result[] = {14.952305, 2.415928, 0.000000}; + float hTAUQ_result[] = {1.267261, 1.654486, 0.000000}; + float hTAUP_result[] = {1.482605, 0.000000, 0.000000}; + + float *dA; + CUDA_CHECK( cudaMalloc((void**) &dA, n * lda * sizeof(float))); + CUDA_CHECK( cudaMemcpy(dA, hA, n * lda * sizeof(float), cudaMemcpyHostToDevice) ); + + cusolverDnHandle_t handle = NULL; + CUSOLVER_CHECK(cusolverDnCreate(&handle)); + + int Lwork; + CUSOLVER_CHECK(cusolverDnSgebrd_bufferSize(handle, m, n, &Lwork)); + + float *Workspace; + cudaMalloc((void**)&Workspace, Lwork); + + int *devInfo; + float *dD, *dE, *dTAUQ, *dTAUP; + CUDA_CHECK( cudaMalloc((void**) &devInfo, sizeof(int))); + CUDA_CHECK( cudaMalloc((void**) &dD, n * sizeof(float))); + CUDA_CHECK( cudaMalloc((void**) &dE, n * sizeof(float))); + CUDA_CHECK( cudaMalloc((void**) &dTAUQ, n * sizeof(float))); + CUDA_CHECK( cudaMalloc((void**) &dTAUP, n * sizeof(float))); + CUSOLVER_CHECK(cusolverDnSgebrd(handle, m, n, dA, lda, dD, dE, dTAUQ, dTAUP, Workspace, Lwork, devInfo)); + int hdevInfo; + CUDA_CHECK( cudaMemcpy(&hdevInfo, devInfo, sizeof(int), cudaMemcpyDeviceToHost) ); + float valuesD[n]; + float valuesE[n]; + float valuesTAUQ[n]; + float valuesTAUP[n]; + CUDA_CHECK( cudaMemcpy(valuesD, dD, n * sizeof(float), cudaMemcpyDeviceToHost) ); + CUDA_CHECK( cudaMemcpy(valuesE, dE, n * sizeof(float), cudaMemcpyDeviceToHost) ); + CUDA_CHECK( cudaMemcpy(valuesTAUQ, dTAUQ, n * sizeof(float), cudaMemcpyDeviceToHost) ); + CUDA_CHECK( cudaMemcpy(valuesTAUP, dTAUP, n * sizeof(float), cudaMemcpyDeviceToHost) ); + + int correct = (hdevInfo == 0); + for (int i = 0; i < n ; i++) { + //printf("%f \t %f \t %f \t %f\n", valuesD[i], valuesE[i], valuesTAUQ[i], valuesTAUP[i]); + if (fabsf(valuesD[i] - hD_result[i]) > 0.001 + || fabsf(valuesE[i] - hE_result[i]) > 0.001 + || fabsf(valuesTAUQ[i] - hTAUQ_result[i]) > 0.001 + || fabsf(valuesTAUP[i] - hTAUP_result[i]) > 0.001) { + correct = 0; + break; + } + } + + if (correct == 1) { + printf("gebrd test PASSED\n"); + } else { + printf("gebrd test FAILED\n"); + } + + CUSOLVER_CHECK(cusolverDnDestroy(handle)); + + return EXIT_SUCCESS; + +} \ No newline at end of file diff --git a/cuda_code/geluKernel_3.cu b/cuda_code/geluKernel_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..88c00a291ecd4b109a291bc0a15b596436c32d0c --- /dev/null +++ b/cuda_code/geluKernel_3.cu @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#if CUDA_VERSION >= 10010 + +#include +#include +#include + +#include "NvInfer.h" +#include "geluPlugin.h" +#include "bertCommon.h" +#include "common.cuh" +#include "serialize.hpp" + +using namespace nvinfer1; + +namespace bert +{ + +// constants for approximating the normal cdf +constexpr float A = 0.5f; +constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI) +constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI) + +template +__global__ void geluKernel(const T a, const T b, const T c, int n, const T* input, T* output) +{ + const int idx = blockIdx.x * TPB + threadIdx.x; + + if (idx < n) + { + const T in = input[idx]; + const T cdf = a + a * tanh(in * (c * in * in + b)); + output[idx] = in * cdf; + } +} + +int computeGelu(cudaStream_t stream, int n, const float* input, float* output) +{ + constexpr int blockSize = 256; + const int gridSize = (n + blockSize - 1) / blockSize; + geluKernel<<>>(A, B, C, n, input, output); + + CHECK(cudaPeekAtLastError()); + return 0; +} + +int computeGelu(cudaStream_t stream, int n, const half* input, half* output) +{ + constexpr int blockSize = 256; + + if (0 == (n & 1)) + { + const int n2 = n / 2; + + const int gridSize = (n2 + blockSize - 1) / blockSize; + const half2 A2 = __floats2half2_rn(A, A); + const half2 B2 = __floats2half2_rn(B, B); + const half2 C2 = __floats2half2_rn(C, C); + const half2* input2 = reinterpret_cast(input); + half2* output2 = reinterpret_cast(output); + geluKernel<<>>(A2, B2, C2, n2, input2, output2); + } + else + { + const int gridSize = (n + blockSize - 1) / blockSize; + geluKernel<<>>(A, B, C, n, input, output); + } + + CHECK(cudaPeekAtLastError()); + return 0; +} + +template +__global__ void geluBiasKernel(const T a, const T b, const T c, T* output, const T* input, const T* bias, const int ld) +{ + + const int offset = blockIdx.x * ld; + + for (int it = threadIdx.x; it < ld; it += TPB) + { + const int idx = it + offset; + const T in = input[idx] + bias[it]; + const T cdf = a + a * tanh(in * (c * in * in + b)); + output[idx] = in * cdf; + } +} + +void computeGeluBias( + float* output, const float* input, const float* bias, const int ld, const int cols, cudaStream_t stream) +{ + geluBiasKernel<<>>(A, B, C, output, input, bias, ld); + CHECK(cudaPeekAtLastError()); +} + +void computeGeluBias( + half* output, const half* input, const half* bias, const int ld, const int cols, cudaStream_t stream) +{ + if (ld & 1) + { + geluBiasKernel<<>>(A, B, C, output, input, bias, ld); + } + else + { + + const half2 A2 = __floats2half2_rn(A, A); + const half2 B2 = __floats2half2_rn(B, B); + const half2 C2 = __floats2half2_rn(C, C); + const int ld2 = ld / 2; + const half2* input2 = reinterpret_cast(input); + const half2* bias2 = reinterpret_cast(bias); + half2* output2 = reinterpret_cast(output); + geluBiasKernel<<>>(A2, B2, C2, output2, input2, bias2, ld2); + } + + CHECK(cudaPeekAtLastError()); +} + +} // namespace bert + +#endif // CUDA_VERSION >= 10010 diff --git a/cuda_code/gemm_28.cu b/cuda_code/gemm_28.cu new file mode 100644 index 
0000000000000000000000000000000000000000..32d4af4f16b78dc369833f19a029e403bbf56c11 --- /dev/null +++ b/cuda_code/gemm_28.cu @@ -0,0 +1,104 @@ +// This program performs general matrix multiplication on row-major layout +// using tf::cublasFlowCapturer::c_gemm. + +#include +#include +#include + +int main() { + + const int M = 2, N = 4, K = 3; + + const std::vector hA = { + 11, 12, 13, + 14, 15, 16 + }; // M x K + + const std::vector hB = { + 11, 12, 13, 14, + 15, 16, 17, 18, + 19, 20, 21, 22 + }; // K x N + + const std::vector golden = { + 548, 584, 620, 656, + 683, 728, 773, 818 + }; // M x N + + std::vector hC(M*N); + + //auto dA = tf::cuda_malloc_device(hA.size()); + //auto dB = tf::cuda_malloc_device(hB.size()); + //auto dC = tf::cuda_malloc_device(hC.size()); + //auto dAlpha = tf::cuda_malloc_device(1); + //auto dBeta = tf::cuda_malloc_device(1); + float *dA, *dB, *dC, *dAlpha, *dBeta; + + tf::Taskflow taskflow("Matrix Multiplication"); + tf::Executor executor; + + auto malloc_dA = taskflow.emplace( + [&](){ dA = tf::cuda_malloc_device(hA.size()); } + ).name("malloc_dA"); + + auto malloc_dB = taskflow.emplace( + [&](){ dB = tf::cuda_malloc_device(hB.size()); } + ).name("malloc_dB"); + + auto malloc_dC = taskflow.emplace( + [&](){ dC = tf::cuda_malloc_device(hC.size()); } + ).name("malloc_dC"); + + auto malloc_dAlpha = taskflow.emplace( + [&](){ dAlpha = tf::cuda_malloc_device(1); } + ).name("malloc_dAlpha"); + + auto malloc_dBeta = taskflow.emplace( + [&](){ dBeta = tf::cuda_malloc_device(1); } + ).name("malloc_dBeta"); + + auto cublasFlow = taskflow.emplace([&](tf::cudaFlowCapturer& capturer) { + auto blas = capturer.make_capturer(); + + auto alpha = blas->single_task([=] __device__ () { *dAlpha = 1; }) + .name("alpha=1"); + auto beta = blas->single_task([=] __device__ () { *dBeta = 0; }) + .name("beta=0"); + auto copyA = blas->copy(dA, hA.data(), hA.size()).name("copyA"); + auto copyB = blas->copy(dB, hB.data(), hB.size()).name("copyB"); + auto gemm = blas->c_gemm(CUBLAS_OP_N, CUBLAS_OP_N, + M, N, K, dAlpha, dA, K, dB, N, dBeta, dC, N + ).name("C = alpha * A * B + beta * C"); + auto copyC = blas->copy(hC.data(), dC, hC.size()).name("copyC"); + + gemm.succeed(alpha, beta, copyA, copyB) + .precede(copyC); + + blas->dump(std::cout); // dump the graph constructed so far. 
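+ // Editor note: the captured dependency graph is {alpha, beta, copyA, copyB} -> gemm -> copyC,
+ // so once the executor runs the taskflow, hC holds the row-major M x N product, which should
+ // match the precomputed 'golden' values declared above (alpha = 1, beta = 0).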
+ }).name("cublasFlow"); + + cublasFlow.succeed( + malloc_dA, malloc_dB, malloc_dC, malloc_dAlpha, malloc_dBeta + ); + + executor.run(taskflow).wait(); + + taskflow.dump(std::cout); + + std::cout << "Matrix C:\n"; + for(int m=0; m +#include +#include +#include + +#include "cutlass/core_io.h" + +#include "cublas_helpers.h" +#include "gemm_operation_profiler.h" +#include "gpu_timer.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Ctor +GemmOperationProfiler::GemmOperationProfiler(Options const &options): + OperationProfiler( + options, + library::OperationKind::kGemm, + { + {ArgumentTypeID::kEnumerated, {"gemm_kind"}, "Variant of GEMM (gemm, batched, array, universal, planar_complex, planar_complex_array)"}, + {ArgumentTypeID::kInteger, {"m", "problem-size::m"}, "M dimension of the GEMM problem space"}, + {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the GEMM problem space"}, + {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the GEMM problem space"}, + {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, + {ArgumentTypeID::kTensor, {"B"}, "Tensor storing the B operand"}, + {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, + {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, + {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, + {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, + {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of GEMMs computed in one batch"}, + }, + { library::Provider::kCUBLAS} + ) { + + description_ = " General matrix-matrix product. D = alpha * A*B + beta * C"; +} + +/// Destructor +GemmOperationProfiler::~GemmOperationProfiler() { + +} + +/// Prints usage statement for the math function +void GemmOperationProfiler::print_usage(std::ostream &out) const { + out << "GEMM" << "\n\n"; + + OperationProfiler::print_usage(out); +} + +/// Prints examples +void GemmOperationProfiler::print_examples(std::ostream &out) const { + + out << "\nExamples:\n\n" + << "Profile a particular problem size:\n" + << " $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128\n\n" + + << "Schmoo over problem size and beta:\n" + << " $ cutlass_profiler --operation=Gemm --m=1024:4096:256 --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" + + << "Schmoo over accumulator types:\n" + << " $ cutlass_profiler --operation=Gemm --accumulator-type=f16,f32\n\n" + + << "Run when A is f16 with column-major and B is any datatype with row-major (For column major, use column, col, or n. 
For row major use, row or t):\n" + << " $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row\n\n" + + << "Using various input value distribution:\n" + << " $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3\n" + << " $ cutlass_profiler --operation=Gemm --dist=gaussian,mean:0,stddev:3\n" + << " $ cutlass_profiler --operation=Gemm --dist=sequential,start:0,delta:1\n\n" + + << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" + << " $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" + + << "Test your changes to gemm kernels with a quick functional test and save results in functional-test.csv:\n" + << " $ cutlass_profiler --operation=Gemm \\ \n" + << " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" + << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" + << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" + << " --beta=0,1,2 --profiling-iterations=1 \\ \n" + << " --providers=cutlass --output=functional-test.csv\n\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +#if 0 +// used this for debugging +static std::string byte_string(std::vector const &bytes) { + std::stringstream ss; + + ss << "0x"; + + for (size_t idx = bytes.size(); idx > 0; --idx) { + ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); + } + + return ss.str(); +} +#endif + +Status GemmOperationProfiler::GemmProblem::parse( + library::GemmDescription const &operation_desc, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!arg_as_int(this->m, "m", problem_space, problem)) { + // default value + this->m = 1024; + } + + if (!arg_as_int(this->n, "n", problem_space, problem)) { + // default value + this->n = 1024; + } + + if (!arg_as_int(this->k, "k", problem_space, problem)) { + // default value + this->k = 1024; + } + + if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { + // default value + this->split_k_slices = 1; + } + + if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) { + // default value + this->batch_count = 1; + } + + if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.B, "B", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { + return Status::kErrorInvalidProblem; + } + + if (!arg_as_scalar( + this->alpha, + operation_desc.element_epilogue, + "alpha", + problem_space, + problem)) { + + if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { + return Status::kErrorInternal; + } + } + + if (!arg_as_scalar( + this->beta, + operation_desc.element_epilogue, + "beta", + problem_space, + problem)) { + + if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { + return Status::kErrorInternal; + } + } + + this->lda = DeviceAllocation::get_packed_layout( + operation_desc.A.layout, {int(this->m), int(this->k)}).front(); + + this->ldb = DeviceAllocation::get_packed_layout( + operation_desc.B.layout, {int(this->k), int(this->n)}).front(); + + this->ldc = DeviceAllocation::get_packed_layout( + operation_desc.C.layout, {int(this->m), int(this->n)}).front(); + + return 
Status::kSuccess; +} + +/// Initializes a performance result +void GemmOperationProfiler::GemmProblem::initialize_result( + PerformanceResult &result, + library::GemmDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.arguments.resize(problem_space.rank()); + + set_argument(result, "gemm_kind", problem_space, library::to_string(operation_desc.gemm_kind)); + + set_argument(result, "A", problem_space, + std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); + + set_argument(result, "B", problem_space, + std::string(library::to_string(operation_desc.B.element)) + ":" + library::to_string(operation_desc.B.layout)); + + set_argument(result, "C", problem_space, + std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); + + set_argument(result, "m", problem_space, m); + set_argument(result, "n", problem_space, n); + set_argument(result, "k", problem_space, k); + + set_argument(result, "split_k_slices", problem_space, split_k_slices); + set_argument(result, "batch_count", problem_space, batch_count); + + set_argument(result, "alpha", problem_space, + library::lexical_cast(alpha, operation_desc.element_epilogue)); + + set_argument(result, "beta", problem_space, + library::lexical_cast(beta, operation_desc.element_epilogue)); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Extracts the problem dimensions +Status GemmOperationProfiler::initialize_configuration( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::GemmDescription const &operation_desc = + static_cast(operation->description()); + + if (operation_desc.gemm_kind != library::GemmKind::kUniversal) { + return Status::kErrorInvalidProblem; + } + + Status status = problem_.parse(operation_desc, problem_space, problem); + + if (status != Status::kSuccess) { + return status; + } + + gemm_workspace_.configuration.problem_size.m() = int(problem_.m); + gemm_workspace_.configuration.problem_size.n() = int(problem_.n); + gemm_workspace_.configuration.problem_size.k() = int(problem_.k); + gemm_workspace_.configuration.lda = problem_.lda; + gemm_workspace_.configuration.ldb = problem_.ldb; + gemm_workspace_.configuration.ldc = problem_.ldc; + gemm_workspace_.configuration.ldd = problem_.ldc; + //gemm_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); + gemm_workspace_.configuration.batch_count = int(problem_.split_k_slices); + + gemm_workspace_.arguments.A = nullptr; + gemm_workspace_.arguments.B = nullptr; + gemm_workspace_.arguments.C = nullptr; + gemm_workspace_.arguments.D = nullptr; + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + initialize_result_(this->model_result_, options, operation_desc, problem_space); + + return operation->can_implement(&gemm_workspace_.configuration, &gemm_workspace_.arguments); +} + +/// Initializes the performance result +void GemmOperationProfiler::initialize_result_( + PerformanceResult &result, + Options const &options, + library::GemmDescription const &operation_desc, + ProblemSpace const &problem_space) { + + result.provider = library::Provider::kCUTLASS; + result.disposition = 
Disposition::kNotRun; + result.status = Status::kSuccess; + result.operation_name = operation_desc.name; + + problem_.initialize_result(result, operation_desc, problem_space); + + OperationProfiler::initialize_result_(result, operation_desc, problem_space); + + // Input bytes read and Output bytes written for the gemm problem + result.bytes = + int64_t(library::sizeof_bits(operation_desc.A.element) * problem_.m / 8) * problem_.k + + int64_t(library::sizeof_bits(operation_desc.B.element) * problem_.n / 8) * problem_.k + + int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n; + + // Set is_beta_zero true if beta is zero + bool is_beta_zero = std::all_of(problem_.beta.begin(), problem_.beta.end(), [](uint8_t i) { return i==0; }); + + // Output bytes read for the gemm problem for non-zero beta values + if (!is_beta_zero) { + result.bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * problem_.m / 8) * problem_.n; + } + + result.flops = 2 * (problem_.m * problem_.n * problem_.k + problem_.m * problem_.n); + result.runtime = 0; + + // complex-valued support + switch (operation_desc.tile_description.math_instruction.math_operation) { + case library::MathOperationID::kMultiplyAddComplex: + result.flops *= 4; + break; + + default: break; + } + +} + +/// Initializes workspace +Status GemmOperationProfiler::initialize_workspace( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + library::GemmDescription const &operation_desc = + static_cast(operation->description()); + + if (options.execution_mode != ExecutionMode::kDryRun) { + + gemm_workspace_.A = device_context.allocate_tensor( + options, + "A", + operation_desc.A.element, + operation_desc.A.layout, + {int(problem_.m), int(problem_.k)}, + {int(problem_.lda)} + ); + + gemm_workspace_.B = device_context.allocate_tensor( + options, + "B", + operation_desc.B.element, + operation_desc.B.layout, + {int(problem_.k), int(problem_.n)}, + {int(problem_.ldb)} + ); + + gemm_workspace_.C = device_context.allocate_tensor( + options, + "C", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)} + ); + + gemm_workspace_.Computed = device_context.allocate_tensor( + "D", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)} + ); + + gemm_workspace_.Reference = device_context.allocate_tensor( + "Reference", + operation_desc.C.element, + operation_desc.C.layout, + {int(problem_.m), int(problem_.n)}, + {int(problem_.ldc)} + ); + + gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data()); + } + + + // + // Initialize the CUTLASS operation + // + Status status = Status::kSuccess; + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + if (options.execution_mode != ExecutionMode::kDryRun) { + + uint64_t workspace_size = operation->get_host_workspace_size(&gemm_workspace_.configuration); + gemm_workspace_.host_workspace.resize(workspace_size, 0); + + workspace_size = operation->get_device_workspace_size(&gemm_workspace_.configuration); + gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); + + status = operation->initialize( + &gemm_workspace_.configuration, + gemm_workspace_.host_workspace.data(), + gemm_workspace_.device_workspace.data()); + } + + // + // If CUTLASS is enabled, 
generate a result for it + // + results_.push_back(model_result_); + results_.back().provider = library::Provider::kCUTLASS; + results_.back().op_kind = library::OperationKind::kGemm; + results_.back().disposition = Disposition::kNotRun; + + for(auto provider : verification_providers_) { + results_.back().verification_map[provider] = Disposition::kNotRun; + } + } + + return status; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies CUTLASS against references +bool GemmOperationProfiler::verify_cutlass( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + return true; + } + + if (options.execution_mode == ExecutionMode::kDryRun) { + return true; + } + + // Initialize structure containing GEMM arguments + gemm_workspace_.arguments.A = gemm_workspace_.A->data(); + gemm_workspace_.arguments.B = gemm_workspace_.B->data(); + gemm_workspace_.arguments.C = gemm_workspace_.C->data(); + gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + // + // Run the CUTLASS operation + // + + results_.back().status = operation->run( + &gemm_workspace_.arguments, + gemm_workspace_.host_workspace.data(), + gemm_workspace_.device_workspace.data()); + + if (results_.back().status != Status::kSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + cudaError_t result = cudaDeviceSynchronize(); + if (result != cudaSuccess) { + results_.back().disposition = Disposition::kFailed; + return false; + } + + // CUTLASS op ran the but not yet verified against any verification provider + results_.back().disposition = Disposition::kNotVerified; + + // + // Run verification providers + // + + if (options.verification.enabled) { + +#if CUTLASS_ENABLE_CUBLAS + if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { + + // Guard against unsupported cases + auto const & gemm_desc = static_cast(operation->description()); + + if (cublas_satisfies(gemm_desc) == Status::kSuccess) { + + // call cublas verification if supported + verify_with_cublas_( + options, + report, + device_context, + operation, + problem_space, + problem); + } + + else { + // set verification map for cublas to not supported + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; + } + } +#endif // #if CUTLASS_ENABLE_CUBLAS + + // Update disposition to worst case verification outcome among all + // verification providers which are supported + bool is_any_verification_run_passed = false; + for(auto &m : results_.back().verification_map) { + if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { + results_.back().disposition = m.second; + return true; + } + if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { + is_any_verification_run_passed = true; + } + } + + if(is_any_verification_run_passed) { + results_.back().disposition = Disposition::kPassed; + } + } + + // Return true means continue profiling + return true; +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// + +/// Verifies 
CUTLASS against references +bool GemmOperationProfiler::verify_with_cublas_( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + +#if CUTLASS_ENABLE_CUBLAS + + library::GemmDescription const &gemm_desc = + static_cast(operation->description()); + + // + // Construct cuBLAS operators + // + + CublasCreate handle; + cublasStatus_t status = handle.get_cublas_create_status(); + + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + return true; + } + + std::vector algorithms; + + detail::select_cublas_algorithms( + algorithms, + options, + gemm_desc); + + if (algorithms.empty()) { + // no algorithm selected + return true; + } + + // + // Initialize state + // + + try { + + // + // Construct dispatcher to cublasGemmEx() + // + + // Initialize structure containing GEMM arguments + gemm_workspace_.arguments.A = gemm_workspace_.A->data(); + gemm_workspace_.arguments.B = gemm_workspace_.B->data(); + gemm_workspace_.arguments.C = gemm_workspace_.Reference->data(); + gemm_workspace_.arguments.D = gemm_workspace_.Reference->data(); + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + detail::cublasGemmExDispatcher gemm_op( + gemm_desc, + gemm_workspace_.configuration, + gemm_workspace_.arguments, + algorithms.front() + ); + + if (gemm_op.status != Status::kSuccess) { + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; + return true; + } + + results_.back().status = Status::kSuccess; + + status = gemm_op(handle); + + // Handle errors + if (status != CUBLAS_STATUS_SUCCESS) { + + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + return true; + } + + // + // Verify results + // + + results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( + options, + *gemm_workspace_.Computed, + *gemm_workspace_.Reference + ); + + // Save workspace if incorrect + if (options.verification.save_workspace == SaveWorkspace::kIncorrect && + results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { + + save_workspace( + device_context, + options, + gemm_desc, + library::Provider::kCUTLASS, + library::Provider::kCUBLAS); + } + } + catch (...) 
{ + results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; + } + +#endif + + // Return true means continue profiling + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Measures performance results +bool GemmOperationProfiler::profile( + Options const &options, + PerformanceReport &report, + DeviceContext &device_context, + library::Operation const *operation, + ProblemSpace const &problem_space, + ProblemSpace::Problem const &problem) { + + if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { + + // Initialize structure containing GEMM arguments + gemm_workspace_.arguments.A = gemm_workspace_.A->data(); + gemm_workspace_.arguments.B = gemm_workspace_.B->data(); + gemm_workspace_.arguments.C = gemm_workspace_.C->data(); + gemm_workspace_.arguments.D = gemm_workspace_.Computed->data(); + gemm_workspace_.arguments.alpha = problem_.alpha.data(); + gemm_workspace_.arguments.beta = problem_.beta.data(); + gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; + + results_.back().status = profile_cutlass_( + results_.back().runtime, + options, + operation, + &gemm_workspace_.arguments, + gemm_workspace_.host_workspace.data(), + gemm_workspace_.device_workspace.data() + ); + } + return true; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/cuda_code/gen_gpu.cu b/cuda_code/gen_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..93838608571099602a8752333355725c9c35d3e8 --- /dev/null +++ b/cuda_code/gen_gpu.cu @@ -0,0 +1,67 @@ +#include "gen_gpu.h" + +// ******************** General Mat-Mat Functions ****************** + +__global__ void gen_matvec(float *A, float *x, float *y, const int m, const int n) +{ + unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; + if ( xIndex < m ){ + float c = 0.0f; + for(int i=0; i>>((float*)A, (float*)in, (float*)out, m, n); + cudaThreadSynchronize(); + + return; +} + + +/* +***************************************** +** The matrix Transpose multiplication ** +***************************************** +*/ + +void AT_gen(float * out, float * in, float * A, const int m, const int n, dim3 numBlocks, dim3 threadsPerBlock) +{ + +// perform the multiplication + gen_matvecT <<< numBlocks, threadsPerBlock >>>((float*)A, (float*)out, (float*)in, m, n); + cudaThreadSynchronize(); + + return; +} + + + + + + + + diff --git a/cuda_code/genz_1abs_5d_1.cu b/cuda_code/genz_1abs_5d_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..63bd3e1ca183bccf42daf7ef61959c246d4126c3 --- /dev/null +++ b/cuda_code/genz_1abs_5d_1.cu @@ -0,0 +1,31 @@ +#include "demo_utils.cuh" +#include "genz_1abs_5d.cuh" +#include +#include +#include +#include + +int +main() +{ + Genz_1abs_5d integrand; + double epsrel = 1.0e-3; + double const epsrel_min = 1.0240000000000002e-10; + double true_value = 6.371054e-01; // this value is an approximation + constexpr int ndim = 5; + Config configuration; + configuration.outfileVerbosity = 0; + configuration.heuristicID = 0; + + PrintHeader(); + while (cu_time_and_call("Genz_1abs_5d", + integrand, + epsrel, + true_value, + "gpucuhre", + std::cout, + configuration) == true && + epsrel >= epsrel_min) { + epsrel /= 5.0; + } +} diff --git 
a/cuda_code/get_hausdorff_dis_gpu.cu b/cuda_code/get_hausdorff_dis_gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..f1b22d6530d0f98fd47b1bb1470ad9b9e5213809 --- /dev/null +++ b/cuda_code/get_hausdorff_dis_gpu.cu @@ -0,0 +1,111 @@ +#include +#include +#include + +#include "get_hausdorff_dis_gpu.h" +#include "cuda_utils.h" + +#define gt_num 42 +#define voxel_dim 31 +#define dict_grid_num (voxel_dim*voxel_dim*voxel_dim) +#define prior_point_num 9 + +__global__ void get_hausdorff_dis_kernel_fast(const float *__restrict__ neighbor_points, + float *__restrict__ features, float radius, + int batch_size, int whole_point_num, + int keypoint_num, int neighbor_point_num, + const float* __restrict__ prior_points, + const float* __restrict__ dis_dicts, + float voxel_len, cudaStream_t stream){ + // whole_points: B N C + // keypoints: B M C + // neighbor_points: B M nsample C + // prior_points: Nshapes Npoints_per_shape Cxyz + // dis_dicts: Nshapes Ngrid Cxyz + // output: + // features: batch_size Nshapes point_num + + // dim3 blocks(DIVUP(point_num, THREADS_PER_BLOCK), batch_size); // blockIdx.x(col), blockIdx.y(row) + // dim3 threads(THREADS_PER_BLOCK); + int batch_idx = blockIdx.y; + int point_idx = blockIdx.x * blockDim.x + threadIdx.x; + + neighbor_points += batch_idx * keypoint_num * neighbor_point_num * 3 + point_idx * neighbor_point_num * 3; + features += batch_idx * keypoint_num * gt_num + point_idx * gt_num; + + float to_prior_dis = 0; + float tmp_dis; + int xth, yth, zth; + int i, j; + int prior_hash_idx; + float prior_to_dis = 0; + float min_point_pair_dis = radius; + float hsdf_dis = radius; + for(int gt_idx = 0; gt_idx < gt_num; gt_idx++ ){ + to_prior_dis = 0; + for( i = 0; i < neighbor_point_num; i++ ){ + xth = floor(abs(neighbor_points[i*3 + 0] + radius) / voxel_len); + yth = floor(abs(neighbor_points[i*3 + 1] + radius) / voxel_len); + zth = floor(abs(neighbor_points[i*3 + 2] + radius) / voxel_len); + prior_hash_idx = xth + yth * voxel_dim + zth * voxel_dim * voxel_dim; + tmp_dis = dis_dicts[gt_idx*dict_grid_num + prior_hash_idx]; + if( to_prior_dis < tmp_dis ){ + to_prior_dis = tmp_dis; + } + } + + prior_to_dis = 0; + for( i = 0; i < prior_point_num; i++ ){ + min_point_pair_dis = 99.9; + for( j = 0; j < neighbor_point_num; j++ ){ + tmp_dis = ( pow(prior_points[gt_idx*prior_point_num*3 + i*3 + 0] + - neighbor_points[j*3 + 0], 2) + + pow(prior_points[gt_idx*prior_point_num*3 + i*3 + 1] + - neighbor_points[j*3 + 1], 2) + + pow(prior_points[gt_idx*prior_point_num*3 + i*3 + 2] + - neighbor_points[j*3 + 2], 2) ); + if( min_point_pair_dis > tmp_dis ){ + min_point_pair_dis = tmp_dis; + } + } + if( min_point_pair_dis > prior_to_dis ){ + prior_to_dis = min_point_pair_dis; + } + } + prior_to_dis = sqrt(prior_to_dis); + + hsdf_dis = (prior_to_dis > to_prior_dis? prior_to_dis : to_prior_dis) / radius; + features[gt_idx] = (hsdf_dis > 1? 1: hsdf_dis) < 0.1? 
0: hsdf_dis; + } +} + +void get_hausdorff_dis_kernel_launcher_fast(const float* neighbor_points, + float* features, float radius, + int batch_size, int whole_point_num, int keypoint_num, + int neighbor_point_num, + const float* prior_points, const float* dis_dicts, + float voxel_len, cudaStream_t stream){ + // whole_points: B N C + // keypoints: B N C + // neighbor_points: B N nsample C + // prior_points: Nshapes Npoints_per_shape Cxyz + // dis_dicts: Nshapes N_hash_grid_per_shape Cxyz + // output: + // features: batch_size point_num Nshapes + + cudaError_t err; + + dim3 blocks(DIVUP(keypoint_num, THREADS_PER_BLOCK), batch_size); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + get_hausdorff_dis_kernel_fast<<>>( + neighbor_points, features, radius, batch_size, whole_point_num, + keypoint_num, neighbor_point_num, prior_points, dis_dicts, voxel_len, stream); + + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/cuda_code/gl_interop.cu b/cuda_code/gl_interop.cu new file mode 100644 index 0000000000000000000000000000000000000000..3c3d2fcbb549beb299c078dcccbb9447d14d9a2c --- /dev/null +++ b/cuda_code/gl_interop.cu @@ -0,0 +1,185 @@ +/*****************************************************************************/ +/* Copyright (c) 2016, Karl Pauwels, Alessandro Pieropan */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or without */ +/* modification, are permitted provided that the following conditions */ +/* are met: */ +/* */ +/* 1. Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* */ +/* 2. Redistributions in binary form must reproduce the above copyright */ +/* notice, this list of conditions and the following disclaimer in the */ +/* documentation and/or other materials provided with the distribution. */ +/* */ +/* 3. Neither the name of the copyright holder nor the names of its */ +/* contributors may be used to endorse or promote products derived from */ +/* this software without specific prior written permission. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ +/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ +/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR */ +/* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT */ +/* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */ +/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */ +/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */ +/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY */ +/* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ +/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE */ +/* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +/*****************************************************************************/ + +//#define UNROLL_INNER +//#define IMUL(a, b) __mul24(a, b) +#include "gl_interop.h" +#include +#include + +namespace fato{ +namespace gpu { + +texture d_float_texture; +texture d_rgba_texture; + +int iDivUp(int a, int b) { return (a % b != 0) ? 
(a / b + 1) : (a / b); } + +__device__ static float rgbaToGray(uchar4 rgba) { + return (0.299f * (float)rgba.x + 0.587f * (float)rgba.y + + 0.114f * (float)rgba.z); +} + +/*****************************************************************************/ +/* KERNELS */ +/*****************************************************************************/ +__global__ void copyTextureToFloat(float *out_image, int width, int height) { + const int x = blockIdx.x * blockDim.x + threadIdx.x; + const int y = blockIdx.y * blockDim.y + threadIdx.y; + // opengl and cuda have inverted image coordinate systems + const int inv_y = height - 1; + + if (x < width && y < height) { + float val = tex2D(d_float_texture, (float)x + 0.5f, (float)(y) + 0.5f); + out_image[y * width + x] = val; + } +} + +__global__ void copyTextureToRGBA(uchar4 *out_image, int width, int height) { + const int x = blockIdx.x * blockDim.x + threadIdx.x; + const int y = blockIdx.y * blockDim.y + threadIdx.y; + // opengl and cuda have inverted image coordinate systems + const int inv_y = height - 1; + + if (x < width && y < height) { + out_image[y * width + x] = + tex2D(d_rgba_texture, (float)x + 0.5f, (float)(inv_y - y) + 0.5f); + } +} + +__global__ void convertRGBAArrayToGrayVX_kernel(uchar *out_image, int width, + int height, int step) { + const int x = blockIdx.x * blockDim.x + threadIdx.x; + const int y = blockIdx.y * blockDim.y + threadIdx.y; + + if (x < width && y < height) { + const int inv_y = height - 1; + uchar4 rgba = tex2D(d_rgba_texture, (float)x + 0.5f, (float)(inv_y - y) + 0.5f); + + float val = 0.299f * (float)rgba.x + 0.587f * (float)rgba.y + + 0.114f * (float)rgba.y; + + uchar *dst_row = (uchar *)(out_image + y * step); + + dst_row[x] = (uchar)val; + } +} + +/*****************************************************************************/ +/* CALLING FUNCTIONS */ +/*****************************************************************************/ + +std::runtime_error cudaException(const char *file, int line, + cudaError_t error) { + std::stringstream message; + message << file << "," << line << ": " + << std::string(cudaGetErrorString(error)); + return (std::runtime_error(message.str())); +} + +void downloadTextureToRGBA(uchar4 *d_out_image, cudaArray *in_array, int width, + int height) { + // Bind textures to arrays + cudaChannelFormatDesc channelDesc = + cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned); + cudaError_t err = + cudaBindTextureToArray(d_rgba_texture, in_array, channelDesc); + + if (err != cudaSuccess) { + cudaException(__FILE__, __LINE__, err); + std::cout << "downloadTextureToRGBA(102) :" + + std::string(cudaGetErrorString(err)) << std::endl; + // exit(0); + } + + dim3 dimBlock(16, 8, 1); + dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1); + // std::cout << "calling the kernel " << std::endl; + copyTextureToRGBA << >> (d_out_image, width, height); + + err = cudaGetLastError(); + if (err != cudaSuccess) { + cudaException(__FILE__, __LINE__, err); + std::cout << "downloadTextureToRGBA(114) :" + + std::string(cudaGetErrorString(err)) << std::endl; + // exit(0); + } + + cudaUnbindTexture(d_rgba_texture); +} + +void downloadDepthTexture(float *d_out_image, cudaArray *in_array, int width, + int height) { + // Bind textures to arrays + cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(); + //cudaChannelFormatDesc channelDesc = + //cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat); + cudaError_t err = + cudaBindTextureToArray(d_float_texture, in_array, channelDesc); + 
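+  // Note: unlike downloadTextureToRGBA() above, a failed bind here is fatal --
+  // the check below throws the std::runtime_error built by cudaException(),
+  // whereas the RGBA path only constructs it, prints the message and continues.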
if (err != cudaSuccess) { + throw cudaException(__FILE__, __LINE__, err); + } + + dim3 dimBlock(16, 8, 1); + dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1); + + copyTextureToFloat << >> (d_out_image, width, height); + + cudaUnbindTexture(d_float_texture); +} + +void convertRGBArrayToGrayVX(uchar *d_out_image, cudaArray *in_array, + int width, int height, int step) +{ + cudaChannelFormatDesc channelDesc = + cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned); + cudaError_t err = cudaBindTextureToArray(d_rgba_texture, in_array, channelDesc); + if (err != cudaSuccess) { + throw cudaException(__FILE__, __LINE__, err); + } + + dim3 dimBlock(16, 8, 1); + dim3 dimGrid(iDivUp(width, dimBlock.x), iDivUp(height, dimBlock.y), 1); + + convertRGBAArrayToGrayVX_kernel << >> + (d_out_image, width, height, step); + + err = cudaGetLastError(); + if (err != cudaSuccess) { + throw cudaException(__FILE__, __LINE__, err); + } + + cudaUnbindTexture(d_rgba_texture); +} + +} +} // end namespace gpu diff --git a/cuda_code/global_6.cu b/cuda_code/global_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..9f2faf1f40f47fd69e0df48ca46f7eae9fe0ddcf --- /dev/null +++ b/cuda_code/global_6.cu @@ -0,0 +1,156 @@ +// +// global.cu +// Kernels of generation polynomials. +// +// Copyright (c) 2021 Tatsuki Ono +// +// This software is released under the MIT License. +// https://opensource.org/licenses/mit-license.php +// + +#include "device.cuh" +#include "global.cuh" +#include "kernel_params.cuh" + +namespace atpqc_cuda::kyber::genpoly_warp::global { + +template +__global__ void genmatrix(short2* poly, const std::uint8_t* seed, + std::size_t seed_pitch, unsigned npolys) { + constexpr bool transposed = Transposed; + constexpr unsigned k = K; + using kp = kernel_params::genmatrix; + + extern __shared__ std::uint8_t shared_buf[]; + const unsigned polyid = blockIdx.x * blockDim.y + threadIdx.y; + + if (polyid < npolys) { + std::int16_t* poly_buf = reinterpret_cast( + shared_buf + kp::smem_byte_per_warp * threadIdx.y); + std::uint8_t* bytes_buf = + reinterpret_cast(poly_buf) + kp::poly_bytes; + + poly += (params::n / 2) * polyid; + seed += seed_pitch * (polyid / (k * k)); + unsigned x = polyid / k % k; + unsigned y = polyid % k; + + symmetric_ws::device::state_type state; + symmetric_ws::device::keccak_type keccak; + symmetric_ws::device::xof xof; + device::rej rej; + + bytes_buf[threadIdx.x] = seed[threadIdx.x]; + + if (threadIdx.x == 0) { + if constexpr (transposed) { + bytes_buf[params::symbytes] = x; + bytes_buf[params::symbytes + 1] = y; + } else { + bytes_buf[params::symbytes] = y; + bytes_buf[params::symbytes + 1] = x; + } + } + + __syncwarp(); + + state = xof.absorb(bytes_buf, keccak); + state = xof.squeezeblocks(bytes_buf, kp::xof_nblocks, state, keccak); + + __syncwarp(); + + unsigned ctr = rej(poly_buf, params::n, bytes_buf, kp::rej_bytes); + unsigned buflen = kp::rej_bytes; + while (ctr < params::n) { + unsigned off = buflen % 3; + if (threadIdx.x < off) + bytes_buf[threadIdx.x] = bytes_buf[buflen - off + threadIdx.x]; + + __syncwarp(); + + state = xof.squeezeblocks(bytes_buf + off, 1, state, keccak); + buflen = off + kp::xof_blockbytes; + + __syncwarp(); + + ctr += rej(poly_buf + ctr, params::n - ctr, bytes_buf, buflen); + } + + __syncwarp(); + + poly[threadIdx.x + 0] = make_short2(poly_buf[2 * threadIdx.x + 0], + poly_buf[2 * threadIdx.x + 1]); + poly[threadIdx.x + 32] = make_short2(poly_buf[2 * threadIdx.x + 64], + poly_buf[2 * threadIdx.x + 65]); + 
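+    // Note: each of the 32 lanes packs two int16 coefficients per short2 and
+    // writes four strided slots (threadIdx.x + 0/32/64/96); the two stores
+    // below cover the upper half of the n/2 short2 entries (assuming
+    // params::n == 256 as in Kyber).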
poly[threadIdx.x + 64] = make_short2(poly_buf[2 * threadIdx.x + 128], + poly_buf[2 * threadIdx.x + 129]); + poly[threadIdx.x + 96] = make_short2(poly_buf[2 * threadIdx.x + 192], + poly_buf[2 * threadIdx.x + 193]); + } +} + +template +__global__ void gennoise(short2* poly, const std::uint8_t* seed, + std::size_t seed_pitch, unsigned npolys, + std::uint8_t nonce_begin) { + constexpr unsigned k = K; + constexpr unsigned eta = Eta; + using kp = kernel_params::gennoise; + + extern __shared__ std::uint8_t shared_buf[]; + const unsigned polyid = blockIdx.x * blockDim.y + threadIdx.y; + + std::uint8_t* bytes_buf = shared_buf + kp::smem_byte_per_warp * threadIdx.y; + + if (polyid < npolys) { + poly += (params::n / 2) * polyid; + seed += seed_pitch * (polyid / k); + + symmetric_ws::device::keccak_type keccak; + symmetric_ws::device::prf prf; + device::cbd cbd; + + bytes_buf[threadIdx.x] = seed[threadIdx.x]; + if (threadIdx.x == 0) + bytes_buf[params::symbytes] = nonce_begin + polyid % k; + + __syncwarp(); + + prf(bytes_buf, kp::prf_nblocks, bytes_buf, kp::extseed_bytes, keccak); + + __syncwarp(); + + cbd(poly, bytes_buf); + } +} + +template __global__ void genmatrix<2, true>(short2*, const std::uint8_t*, + std::size_t, unsigned); +template __global__ void genmatrix<2, false>(short2*, const std::uint8_t*, + std::size_t, unsigned); +template __global__ void genmatrix<3, true>(short2*, const std::uint8_t*, + std::size_t, unsigned); +template __global__ void genmatrix<3, false>(short2*, const std::uint8_t*, + std::size_t, unsigned); +template __global__ void genmatrix<4, true>(short2*, const std::uint8_t*, + std::size_t, unsigned); +template __global__ void genmatrix<4, false>(short2*, const std::uint8_t*, + std::size_t, unsigned); +template __global__ void gennoise<1, 2>(short2*, const std::uint8_t*, + std::size_t, unsigned, std::uint8_t); +template __global__ void gennoise<2, 2>(short2*, const std::uint8_t*, + std::size_t, unsigned, std::uint8_t); +template __global__ void gennoise<3, 2>(short2*, const std::uint8_t*, + std::size_t, unsigned, std::uint8_t); +template __global__ void gennoise<4, 2>(short2*, const std::uint8_t*, + std::size_t, unsigned, std::uint8_t); +template __global__ void gennoise<1, 3>(short2*, const std::uint8_t*, + std::size_t, unsigned, std::uint8_t); +template __global__ void gennoise<2, 3>(short2*, const std::uint8_t*, + std::size_t, unsigned, std::uint8_t); +template __global__ void gennoise<3, 3>(short2*, const std::uint8_t*, + std::size_t, unsigned, std::uint8_t); +template __global__ void gennoise<4, 3>(short2*, const std::uint8_t*, + std::size_t, unsigned, std::uint8_t); + +} // namespace atpqc_cuda::kyber::genpoly_warp::global diff --git a/cuda_code/gorpho_mex_flatBallApprox.cu b/cuda_code/gorpho_mex_flatBallApprox.cu new file mode 100644 index 0000000000000000000000000000000000000000..dfbe9465b126986da042254e37d1c9d66a3f16c3 --- /dev/null +++ b/cuda_code/gorpho_mex_flatBallApprox.cu @@ -0,0 +1,67 @@ +#include + +#include "mex.h" +#include "matrix.h" + +#include "flat_linear_morph.cuh" +#include "strel.cuh" + +#include "mex_common.cuh" + +inline gpho::ApproxType toApproxType(int approxType) +{ + if (approxType == 0) { + return gpho::APPROX_INSIDE; + } else if (approxType == 1) { + return gpho::APPROX_BEST; + } else { + return gpho::APPROX_OUTSIDE; + } +} + +/** Line segment approximation to flat ball structuring element + * + * Parameters + * ---------- + * radius : numeric scalar + * Radius of ball. 
+ * approxType : numeric scalar + * Type of approximation: 0 = constrained inside, 1 = best, 2 = constrained outside. + * + * Returns + * ------- + * lineSteps : int32 matrix + * N x 3 matrix with step vectors for line segments. + * lineLens : int32 vector + * N x 1 vector with length sof line segments (in steps). + */ +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) +{ + // Validate inputs + ensureOrError(nrhs == 2, "Must supply 2 inputs"); + ensureOrError(nlhs == 2, "Must have 2 outputs"); + + int radius = getValidatedScalar(prhs[0], "radius"); + ensureOrError(radius > 0, "radius mus the positive"); + int approxType = getValidatedScalar(prhs[1], "approxType"); + ensureValue(approxType, { 0, 1, 2 }, "approxType"); + + std::vector lines = gpho::flatBallApprox(radius, toApproxType(approxType)); + + // Allocate and fill outputs + mxArray *mxLineSteps = mxCreateUninitNumericMatrix(lines.size(), 3, mxINT32_CLASS, mxREAL); + mxArray *mxLineLens = mxCreateUninitNumericMatrix(lines.size(), 1, mxINT32_CLASS, mxREAL); + int *lineStepsData = static_cast(mxGetData(mxLineSteps)); + int *lineLensData = static_cast(mxGetData(mxLineLens)); + + for (size_t i = 0; i < lines.size(); ++i) { + const auto& ls = lines[i]; + lineStepsData[i + 0 * lines.size()] = ls.step.x; + lineStepsData[i + 1 * lines.size()] = ls.step.y; + lineStepsData[i + 2 * lines.size()] = ls.step.z; + lineLensData[i] = ls.length; + } + + plhs[0] = mxLineSteps; + plhs[1] = mxLineLens; +} \ No newline at end of file diff --git a/cuda_code/gpu-kmeans2D.cu b/cuda_code/gpu-kmeans2D.cu new file mode 100644 index 0000000000000000000000000000000000000000..e8324b9ce860e86a432bd7d0ab9eb7f512b37f4d --- /dev/null +++ b/cuda_code/gpu-kmeans2D.cu @@ -0,0 +1,415 @@ +/*********************************************************************** + hadoop-gpu + Authors: Koichi Shirahata, Hitoshi Sato, Satoshi Matsuoka + +This software is licensed under Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +------------------------------------------------------------------------- +File: gpu-kmeans1D.cc + - Kmeans with 2D input data on GPU. 
+Version: 0.20.1 +***********************************************************************/ + +#include "stdint.h" + +#include "hadoop/Pipes.hh" +#include "hadoop/TemplateFactory.hh" +#include "hadoop/StringUtils.hh" + +#include +#include + +#include +#include +#include + +#include +#include + +//#define DEBUG + +int deviceID = 0; + +// datum of a plot +// x,y : coordinate +// cent : id of nearest cluster +class data { +public: + float x; + float y; + int cent; +}; + +__device__ float mysqrt(data a, data b) { + float x = abs(a.x - b.x); + float y = abs(a.y - b.y); + return std::sqrt(x*x + y*y); +} + +//data object assignment +__global__ void assign_data(data *centroids, + data *data, + int num_of_data, + int num_of_cluster) +{ + int i; + int tid = threadIdx.x + blockIdx.x * blockDim.x; + // int tid = threadIdx.x; + // int nthreads = blockDim.x; + int nthreads = blockDim.x * gridDim.x; + // int part = num_of_data / nthreads; /* 65535*512 */ + // for(i = part*tid; i < part*(tid+1); i++) { + for (i = tid; i < num_of_data; i += nthreads) { + int center = 0; + float dmin = mysqrt(centroids[0], data[i]); + for(int j = 1; j < num_of_cluster; j++) { + float dist = mysqrt(centroids[j], data[i]); + if(dist < dmin) { + dmin = dist; + center = j; + } + } + data[i].cent = center; + } +} + +//K centroids recalculation +//tid has to be less than the num of newcent +__global__ void centroids_recalc( + data *newcent, + data *d, + int *ndata) { + int j; + int tid = blockIdx.x; + __shared__ float sx[64]; + __shared__ float sy[64]; + float x = 0.0f; + float y = 0.0f; + for(j = ndata[tid] + threadIdx.x; j < ndata[tid+1]; j += blockDim.x) { + x += d[j].x; + y += d[j].y; + } + sx[threadIdx.x] = x; + sy[threadIdx.x] = y; + __syncthreads(); + float n = static_cast(ndata[tid+1]-ndata[tid]); + + if (threadIdx.x == 0) { +#pragma unroll + for (j = 1; j < 64; j++) { + x += sx[j]; + y += sy[j]; + } + newcent[tid].x = x / n; + newcent[tid].y = y / n; + } + /* + int j; + int tid = threadIdx.x; + newcent[tid].x = 0.0; + newcent[tid].y = 0.0; + for(j = ndata[tid]; j < ndata[tid+1]; j++) { + newcent[tid].x += d[j].x; + newcent[tid].y += d[j].y; + } + float n = static_cast(ndata[tid+1]-ndata[tid]); + newcent[tid].x /= n; + newcent[tid].y /= n; + */ +} + + +class KmeansMap: public HadoopPipes::Mapper { +public: + KmeansMap(HadoopPipes::TaskContext& context){} + + double gettime() { + struct timeval tv; + gettimeofday(&tv,NULL); + return tv.tv_sec+tv.tv_usec * 1e-6; + } + + //zero init + void init_int(int *data, int num) { + for(int i = 0; i < num; i++) { + data[i] = 0; + } + } + void init_float(float *data, int num) { + for(int i = 0; i < num; i++) { + data[i] = 0.0; + } + } + + void sort_by_cent(data *d, int start, int end) + { + int i = start; + int j = end; + int base = (d[start].cent + d[end].cent) / 2; + while(1) { + while (d[i].cent < base) i++; + while (d[j].cent > base) j--; + if (i >= j) break; + data temp = d[i]; + d[i] = d[j]; + d[j] = temp; + i++; + j--; + } + if (start < i-1) sort_by_cent(d, start, i-1); + if (end > j+1) sort_by_cent(d, j+1, end); + } + + //counts the nunber of data objects contained by each cluster + void count_data_in_cluster(data *d, int *ndata, + int num_of_data, int num_of_cluster) { + int i; + for(i = 0; i < num_of_data; i++) { + ndata[d[i].cent + 1]++; + } + for(i = 1; i < num_of_cluster + 1; i++) { + ndata[i] += ndata[i-1]; + } + } + + /* + bool floatcmp(data *a, data *b, int num) { + for(int i = 0; i < num; i++) { + if(a[i].x != b[i].x || a[i].y != b[i].y) { + return false; + } + } + 
return true; + } + */ + float mysqrt(data a, data b) { + float x = a.x - b.x; + float y = a.y - b.y; + return std::sqrt(x*x + y*y); + } + + bool datacmp(data *a, data *b, int num) { + for(int i = 0; i < num; i++) { + if( mysqrt(a[i], b[i]) > 1 ) { + return false; + } + } + return true; + } + + void map(HadoopPipes::MapContext& context) { + // input format + // --num of clusters ( == k) + // --num of data( == n) + // --initial centers for all clusters; + // --input rows; + + // fprintf(stderr, "start\n"); + + int mp; + + double t[10]; + t[0] = gettime(); + + std::vector elements + = HadoopUtils::splitString(context.getInputValue(), " "); + + t[1] = gettime(); + + const int k = HadoopUtils::toInt(elements[0]); + const int n = HadoopUtils::toInt(elements[1]); + // c[] : pos of cluster + // d[] : data + // ndata[] : num of data for each cluster + data c[2][k]; + data d[n]; + int ndata[k+1]; + int i, cur, next, iter; + + //for Device + data *dc; + data *dd; + int *dndata; + + //initialize + for(i = 0; i < k; i++) { + c[0][i].x = HadoopUtils::toFloat(elements[2*i+2]); + c[0][i].y = HadoopUtils::toFloat(elements[2*i+3]); + } + for(i = 0; i < n; i++) { + d[i].x = HadoopUtils::toFloat(elements[2*i+2*k+2]); + d[i].y = HadoopUtils::toFloat(elements[2*i+2*k+3]); + } + + t[2] = gettime(); + +#ifdef DEBUG + for(i = 0; i < k; i++) { + std::cout << c[0][i].x << " " << c[0][i].y; + } + std::cout << '\n'; + for(i = 0; i < n; i++) + std::cout << d[i].x << " " << d[i].y << " "; + std::cout << '\n'; +#endif + + //cuda init + cudaDeviceProp prop; + cudaGetDeviceProperties(&prop, 0); + mp = prop.multiProcessorCount; + cudaError_t err = cudaMalloc((void **)&dc, sizeof(data)*2*k); + if(err != cudaSuccess) { + std::cerr << "Malloc_1 failed: " << cudaGetErrorString(err) << ".\n"; + } + err = cudaMalloc((void **)&dd, sizeof(data)*n); + if(err != cudaSuccess) { + std::cerr << "Malloc_2 failed: " << cudaGetErrorString(err) << ".\n"; + } + err = cudaMalloc((void **)&dndata, sizeof(int)*(k+1)); + if(err != cudaSuccess) { + std::cerr << "Malloc_3 failed: " << cudaGetErrorString(err) << ".\n"; + } + + t[3] = gettime(); + + // fprintf(stderr, "mid\n"); + + + // buffer id + cur = 0; + next = 1; + iter = 0; + do { + iter++; + // for(int j = 0; j < 10; j++) { + init_int(ndata, k+1); + + //data object assignment + cudaMemcpy(dc + cur*k, c[cur], sizeof(data)*k, cudaMemcpyHostToDevice); + cudaMemcpy(dd, d, sizeof(data)*n, cudaMemcpyHostToDevice); + assign_data<<>>(dc + cur*k, dd, n, k); + err = cudaMemcpy(d, dd, sizeof(data)*n, cudaMemcpyDeviceToHost); + if(err != cudaSuccess) { + std::cerr << "Memcpy_1 failed: " << cudaGetErrorString(err) << ".\n"; + } + + t[4] = gettime(); + +#ifdef DEBUG + for(i = 0; i < n; i++) + std::cout << d[i].cent << " "; + std::cout << '\n'; +#endif + + //rearranges all data objects + //and counts the nunber of data objects contained by each cluster + sort_by_cent(d, 0, n-1); + count_data_in_cluster(d, ndata, n, k); + + t[5] = gettime(); + +#ifdef DEBUG + for(i = 0; i < k+1; i++) + std::cout << ndata[i] << " "; + std::cout << '\n'; +#endif + + //K centroids recalculation + err = cudaMemcpy(dndata, ndata, sizeof(int)*(k+1), cudaMemcpyHostToDevice); + if(err != cudaSuccess) { + std::cerr << "Memcpy_2_1 failed: " << cudaGetErrorString(err) << ".\n"; + } else { + std::cerr << "Memcpy_2_1 success: " << cudaGetErrorString(err) << ".\n"; + } + err = cudaMemcpy(dc + next*k, c[next], sizeof(data)*k, cudaMemcpyHostToDevice); + if(err != cudaSuccess) { + std::cerr << "Memcpy_2_2 failed: " << 
cudaGetErrorString(err) << ".\n"; + } else { + std::cerr << "Memcpy_2_2 success: " << cudaGetErrorString(err) << ".\n"; + } + err = cudaMemcpy(dd, d, sizeof(data)*n, cudaMemcpyHostToDevice); + if(err != cudaSuccess) { + std::cerr << "Memcpy_2_3 failed: " << cudaGetErrorString(err) << ".\n"; + } else { + std::cerr << "Memcpy_2_3 success: " << cudaGetErrorString(err) << ".\n"; + } + // centroids_recalc<<<1,k>>>(dc + next*k, dd, dndata); + centroids_recalc<<>>(dc + next*k, dd, dndata); + err = cudaMemcpy(c[next], dc + next*k, sizeof(data)*k, cudaMemcpyDeviceToHost); + if(err != cudaSuccess) { + std::cerr << "Memcpy_2_4 failed: " << cudaGetErrorString(err) << ".\n"; + } else { + std::cerr << "Memcpy_2_4 success: " << cudaGetErrorString(err) << ".\n"; + } + + + t[6] = gettime(); + + +#ifdef DEBUG + for(i = 0; i < k; i++) + std::cout << c[next][i].x << " " << c[next][i].y << " "; + std::cout << "\n\n"; +#endif + + cur = 1 - cur; + next = 1 - next; + } while( datacmp(c[cur], c[next], k) == false && iter < 100); + // } + + // fprintf(stderr, "finish\n"); + + + //emit + //key : cluster id + //value : cluster centroid position + for(i = 0; i < k; i++) { + context.emit(context.getInputKey() + '\t' + HadoopUtils::toString(i), + HadoopUtils::toString((int)c[cur][i].x) + '\t' + + HadoopUtils::toString((int)c[cur][i].y)); + } + + + t[7] = gettime(); + + std::cout << "Run on GPU" << '\n'; + std::cout << "iter : " << iter << '\n'; + for(i = 0; i < 7; i++) { + std::cout << t[i+1] - t[i] << '\n'; + } + std::cout << t[7] - t[0] << '\n'; + std::cout << '\n'; + + cudaFree(dc); + cudaFree(dd); + cudaFree(dndata); + } +}; + +class KmeansReduce: public HadoopPipes::Reducer { +public: + KmeansReduce(HadoopPipes::TaskContext& context){} + void reduce(HadoopPipes::ReduceContext& context) { + while(context.nextValue()) { + context.emit(context.getInputKey(), context.getInputValue()); + } + } +}; + +int main(int argc, char *argv[]) { + if(argc > 1) { + deviceID = atoi(argv[1]); + std::cout << "deviceID: " << deviceID << ".\n"; + } + return HadoopPipes::runTask(HadoopPipes::TemplateFactory()); +} diff --git a/cuda_code/gpuBilinearInterpolator.cu b/cuda_code/gpuBilinearInterpolator.cu new file mode 100644 index 0000000000000000000000000000000000000000..63f43b7fab2cb1375f5415d99e7332889bfe0d65 --- /dev/null +++ b/cuda_code/gpuBilinearInterpolator.cu @@ -0,0 +1,121 @@ +#include +#include +#include +#include + +#include +#include + +#include "gpuInterpolator.h" + +using isce3::cuda::core::gpuBilinearInterpolator; +using isce3::cuda::core::gpuInterpolator; + +template +__global__ void gpuInterpolator_g(gpuBilinearInterpolator interp, double* x, + double* y, const T* z, T* value, size_t nx, size_t ny = 0) +{ + /* + * GPU kernel to test interpolate() on the device for consistency. 
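+     * Each thread handles one (x, y) query point: interpolate_h launches this
+     * kernel as gpuInterpolator_g<<<1, truth.length()>>>, so thread i writes
+     * value[i] = interp.interpolate(x[i], y[i], z, nx, ny).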
+ */ + int i = threadIdx.x; + value[i] = interp.interpolate(x[i], y[i], z, nx, ny); +} + +template +__host__ void isce3::cuda::core::gpuBilinearInterpolator::interpolate_h( + const Matrix& truth, Matrix& m, double start, double delta, + T* h_z) +{ + /* + * CPU-side function to call the corresponding GPU function on a single + * thread for consistency checking + */ + + // allocate host side memory + size_t size_input_pts = truth.length() * sizeof(double); + size_t size_output_pts = truth.length() * sizeof(T); + size_t nx = m.width(); + + double* h_x = (double*) malloc(size_input_pts); + double* h_y = (double*) malloc(size_input_pts); + + // assign host side inputs + for (size_t i = 0; i < truth.length(); ++i) { + h_x[i] = (truth(i, 0) - start) / delta; + h_y[i] = (truth(i, 1) - start) / delta; + } + + // allocate devie side memory + double* d_x; + checkCudaErrors(cudaMalloc((void**) &d_x, size_input_pts)); + double* d_y; + checkCudaErrors(cudaMalloc((void**) &d_y, size_input_pts)); + T* d_z; + checkCudaErrors(cudaMalloc((void**) &d_z, size_output_pts)); + T* d_m; + checkCudaErrors(cudaMalloc((T**) &d_m, m.length() * m.width() * sizeof(T))); + + // copy input data + checkCudaErrors( + cudaMemcpy(d_x, h_x, size_input_pts, cudaMemcpyHostToDevice)); + checkCudaErrors( + cudaMemcpy(d_y, h_y, size_input_pts, cudaMemcpyHostToDevice)); + checkCudaErrors(cudaMemcpy(d_m, m.data(), + m.length() * m.width() * sizeof(T), cudaMemcpyHostToDevice)); + + // launch! + int n_threads = truth.length(); + gpuInterpolator_g<<<1, n_threads>>>(*this, d_x, d_y, d_m, d_z, nx); + + // copy device results to host + checkCudaErrors( + cudaMemcpy(h_z, d_z, size_output_pts, cudaMemcpyDeviceToHost)); + + // free memory + checkCudaErrors(cudaFree(d_x)); + checkCudaErrors(cudaFree(d_y)); + checkCudaErrors(cudaFree(d_z)); + checkCudaErrors(cudaFree(d_m)); +} + +template +__device__ T isce3::cuda::core::gpuBilinearInterpolator::interpolate( + double x, double y, const T* z, size_t nx, size_t ny = 0) +{ + size_t x1 = floor(x); + size_t x2 = ceil(x); + size_t y1 = floor(y); + size_t y2 = ceil(y); + + T q11 = z[y1 * nx + x1]; + T q12 = z[y2 * nx + x1]; + T q21 = z[y1 * nx + x2]; + T q22 = z[y2 * nx + x2]; + + if ((y1 == y2) && (x1 == x2)) { + return q11; + } else if (y1 == y2) { + return ((T)((x2 - x) / (x2 - x1)) * q11) + + ((T)((x - x1) / (x2 - x1)) * q21); + } else if (x1 == x2) { + return ((T)((y2 - y) / (y2 - y1)) * q11) + + ((T)((y - y1) / (y2 - y1)) * q12); + } else { + return ((q11 * (T)((x2 - x) * (y2 - y))) / (T)((x2 - x1) * (y2 - y1))) + + ((q21 * (T)((x - x1) * (y2 - y))) / (T)((x2 - x1) * (y2 - y1))) + + ((q12 * (T)((x2 - x) * (y - y1))) / (T)((x2 - x1) * (y2 - y1))) + + ((q22 * (T)((x - x1) * (y - y1))) / (T)((x2 - x1) * (y2 - y1))); + } +} + +// Explicit instantiations +template class gpuBilinearInterpolator; +template class gpuBilinearInterpolator>; +template class gpuBilinearInterpolator; +template class gpuBilinearInterpolator>; +template class gpuBilinearInterpolator; + +template __global__ void gpuInterpolator_g( + gpuBilinearInterpolator interp, double* x, double* y, + const double* z, double* value, size_t nx, size_t ny); diff --git a/cuda_code/gpuSolver_1.cu b/cuda_code/gpuSolver_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..c4f1d75ea598ebc425889c06ede793841289c422 --- /dev/null +++ b/cuda_code/gpuSolver_1.cu @@ -0,0 +1,79 @@ +#include "gaussian_multi_gpu_rdma.h" + +void gaussianEliminationOnGPU(stSolverState *solverState, int numProcs, int myRank, int myNodeLocalRank) +{ + 
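+    // Overview (annotation): for each column i of the packed system the loop
+    // below
+    //   1. finds the rank owning column i's chunk (pivotProc),
+    //   2. on that rank runs findPivotRowAndMultipliers to select the pivot,
+    //   3. starts the broadcast of the pivot row index and multipliers
+    //      (bcastPivotRow),
+    //   4. launches extractPivotRow and rowElimination on every rank, and only
+    //      then waits on the broadcast request (waitForPivotBcast) before
+    //      synchronising the device.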
double bcastTime = 0.0; + long int packedSize; + int i; + void **req, **status; +#ifdef PIVOTPACK + int packedRowCount; +#endif + + req = (void **)malloc(sizeof(void *)); + status = (void **)malloc(sizeof(void *)); + dim3 findPivotThreads(128, 1, 1); + dim3 findPivotBlocks(intCeilDiv(solverState->rowCount, 128), 1, 1); + + dim3 extractPivotRowThreads(1024, 1, 1); + dim3 extractPivotRowBlocks; + + extractPivotRowBlocks.x = intCeilDiv(solverState->myPackedColumnCount, extractPivotRowThreads.x); + extractPivotRowBlocks.y = 1; + extractPivotRowBlocks.z = 1; + + dim3 rowEliminationThreads(512, 1, 1); + dim3 rowEliminationBlocks; + + rowEliminationBlocks.x = intCeilDiv(solverState->rowCount, (2*rowEliminationThreads.x)); + rowEliminationBlocks.y = intCeilDiv(solverState->myPackedColumnCount, rowEliminationThreads.y); + rowEliminationBlocks.z = 1; + + + checkCudaErrors(cudaSetDevice(myNodeLocalRank)); + packedSize = (long int)(solverState->rowCount * solverState->myNumChunks * CHUNK_SIZE * sizeof(elemtype)); + checkCudaErrors(cudaMalloc((void**)&(solverState->d_packedTransposeAB), packedSize)); + checkCudaErrors(cudaMemset(solverState->d_packedTransposeAB, 0, packedSize)); + checkCudaErrors(cudaMemcpy(solverState->d_packedTransposeAB, solverState->h_myPartOfPackedTransposeAB, packedSize, cudaMemcpyHostToDevice)); + + +#ifdef PIVOTPACK + packedRowCount = intCeilDiv(solverState->rowCount, PIVOT_PACK_SIZE); +#endif + for(i = 0; i < solverState->columnCount; i++) + { + int pivotProc, chunkNumber; //Process that holds data corresponding to ith column + + chunkNumber = (i / PACK_SIZE)/CHUNK_SIZE; + pivotProc = (chunkNumber % numProcs); + //printf("Pivot Proc %d\n", pivotProc); +#ifdef PIVOTPACK + checkCudaErrors(cudaMemset(solverState->multipliers, 0, sizeof(unsigned int) * packedRowCount)); +#endif + if (myRank == pivotProc) // Only the process that holds i, does the following + { + int localChunkIndex; + + localChunkIndex = (chunkNumber/numProcs); + solverState->columnPack = (localChunkIndex * CHUNK_SIZE) + ((i /PACK_SIZE) % CHUNK_SIZE); + solverState->columnBit = i % PACK_SIZE; + //printf("columnpack %d, column bit %d\n", solverState->columnPack, solverState->columnBit); + findPivotRowAndMultipliers<<>>((*solverState), solverState->d_packedTransposeAB); + checkCudaErrors(cudaDeviceSynchronize()); + } + /* Rank pivotProc needs to broadcast the pivotRowIndex and multipliers */ + bcastPivotRow(pivotProc, myRank, numProcs, solverState, req, status); + solverState->columnPack = (i /PACK_SIZE); + solverState->columnBit = i % PACK_SIZE; + extractPivotRow<<>>(*solverState, solverState->d_packedTransposeAB); + rowElimination<<>>(solverState->d_packedTransposeAB, *solverState); + waitForPivotBcast(pivotProc, *req, *status, numProcs, myRank); + checkCudaErrors(cudaDeviceSynchronize()); + } + free(req); + free(status); + + checkCudaErrors(cudaMemcpy(solverState->h_myPartOfPackedTransposeAB, solverState->d_packedTransposeAB, packedSize, cudaMemcpyDeviceToHost)); + checkCudaErrors(cudaDeviceSynchronize()); + return; +} diff --git a/cuda_code/gpuSolver_2.cu b/cuda_code/gpuSolver_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..5b1e0673e2fb4308d4445184abd73f823e20b1ca --- /dev/null +++ b/cuda_code/gpuSolver_2.cu @@ -0,0 +1,53 @@ +#include "gpuSolverFunctions.cu" + +void gaussianElimination(unsigned char* A, unsigned char* B, int rowCount, int columnCount) +{ + + // --- variable used for single gpu code + int packedColumnCount; + + unsigned int* packedAB; + unsigned int* packedTransposeAB; + 
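+    // Packing note (illustrative, assuming PACK_SIZE is the bit width of an
+    // unsigned int, i.e. 32): the augmented system [A|B] is stored one bit per
+    // element, so each row of columnCount + 1 bits needs
+    // packedColumnCount = ceil((columnCount + 1) / PACK_SIZE) words,
+    // e.g. columnCount = 100 -> ceil(101 / 32) = 4 words per row.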
+ packedColumnCount = intCeilDiv(columnCount + 1, PACK_SIZE); + + // --- Allocate memory for input matrix A on cpu + long int packedSize = ((long int)sizeof(unsigned int)) * rowCount * packedColumnCount; + packedAB = (unsigned int*) malloc(packedSize); + packedTransposeAB = (unsigned int*) malloc(packedSize); + + if(packedAB == NULL || packedTransposeAB == NULL) + { + printf("Unable to allocate space for packed linear system.\n"); + return; + } + packLinearSystem(packedAB, A, B, rowCount, columnCount); + transposeMatrixCPU(packedTransposeAB, packedAB, rowCount, packedColumnCount); + gaussianEliminationSingleGPU(packedTransposeAB, rowCount, columnCount); + transposeMatrixCPU(packedAB, packedTransposeAB, packedColumnCount, rowCount); + unpackLinearSystem(A, B, packedAB, rowCount, columnCount); + return; +} + +void gaussianEliminationSingleGPU(unsigned int* packedTransposeAB, int rowCount, int columnCount) +{ + int i; + + stSolverState solverState; + initializeSolver(&solverState, rowCount, columnCount, packedTransposeAB); + + + for(i = 0; i < columnCount; i++) + { + solverState.columnPack = i / PACK_SIZE; + solverState.columnBit = i % PACK_SIZE; + launchFindPivotRowKernel(&solverState); + launchExtractPivotRowKernel(&solverState); + launchRowEliminationKernel(&solverState); + } + + gatherResult(packedTransposeAB, &solverState); + freeSolver(&solverState); +} + + diff --git a/cuda_code/gpuTdfunc.cu b/cuda_code/gpuTdfunc.cu new file mode 100644 index 0000000000000000000000000000000000000000..81bcbc5fd63e3c886f3eb5813013d59c934a3ebd --- /dev/null +++ b/cuda_code/gpuTdfunc.cu @@ -0,0 +1,57 @@ +template __global__ void kernelgpuTdfunc(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uinf, T *param, T time, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw) +{ + int i = threadIdx.x + blockIdx.x * blockDim.x; + while (i void gpuTdfunc(T *f, T *xdg, T *udg, T *odg, T *wdg, T *uinf, T *param, T time, int ng, int nc, int ncu, int nd, int ncx, int nco, int ncw) +{ + int blockDim = 256; + int gridDim = (ng + blockDim - 1) / blockDim; + gridDim = (gridDim>1024)? 1024 : gridDim; + kernelgpuTdfunc<<>>(f, xdg, udg, odg, wdg, uinf, param, time, ng, nc, ncu, nd, ncx, nco, ncw); +} + +template void gpuTdfunc(double *, double *, double *, double *, double *, double *, double *, double, int, int, int, int, int, int, int); +template void gpuTdfunc(float *, float *, float *, float *, float *, float *, float *, float, int, int, int, int, int, int, int); diff --git a/cuda_code/gpuVariables.cu b/cuda_code/gpuVariables.cu new file mode 100644 index 0000000000000000000000000000000000000000..75b033dbea8c8e292d164994e8fde14af38858d4 --- /dev/null +++ b/cuda_code/gpuVariables.cu @@ -0,0 +1,380 @@ +// Filename: gpuVariables.cu +// +// Copyright (c) 2010-2012, Florencio Balboa Usabiaga +// +// This file is part of Fluam +// +// Fluam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Fluam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Fluam. If not, see . 
+ + + +/*********************************************************/ +/* CELL VARIABLES FOR GPU */ +/*********************************************************/ +//DATA FOR RANDOM: 90.000.000 INT = 343 MB +//DATA FOR EACH CELL: 26 INT + 10 DOUBLE = 144 B +//DATA FOR EACH BOUNDARY: 1 INT + 34 DOUBLE = 140 B + +typedef struct{ + int* vecino0GPU; + int* vecino1GPU; + int* vecino2GPU; + int* vecino3GPU; + int* vecino4GPU; + int* vecino5GPU; + int* vecinopxpyGPU; + int* vecinopxmyGPU; + int* vecinopxpzGPU; + int* vecinopxmzGPU; + int* vecinomxpyGPU; + int* vecinomxmyGPU; + int* vecinomxpzGPU; + int* vecinomxmzGPU; + int* vecinopypzGPU; + int* vecinopymzGPU; + int* vecinomypzGPU; + int* vecinomymzGPU; + int* vecinopxpypzGPU; + int* vecinopxpymzGPU; + int* vecinopxmypzGPU; + int* vecinopxmymzGPU; + int* vecinomxpypzGPU; + int* vecinomxpymzGPU; + int* vecinomxmypzGPU; + int* vecinomxmymzGPU; +} vecinos; + +typedef struct{ + double* fcell; + double* fvec0; + double* fvec1; + double* fvec2; + double* fvec3; + double* fvec4; + double* fvec5; + double* fvecpxpy; + double* fvecpxmy; + double* fvecpxpz; + double* fvecpxmz; + double* fvecmxpy; + double* fvecmxmy; + double* fvecmxpz; + double* fvecmxmz; + double* fvecpypz; + double* fvecpymz; + double* fvecmypz; + double* fvecmymz; + double* fvecpxpypz; + double* fvecpxpymz; + double* fvecpxmypz; + double* fvecpxmymz; + double* fvecmxpypz; + double* fvecmxpymz; + double* fvecmxmypz; + double* fvecmxmymz; + int* position; +} fvec; + +typedef struct{ + int* countparticlesincellX; + int* countparticlesincellY; + int* countparticlesincellZ; + int* partincellX; + int* partincellY; + int* partincellZ; + int* countPartInCellNonBonded; + int* partInCellNonBonded; + //freeEnergyCompressibleParticles + int* countparticlesincell; + int* partincell; +} particlesincell; + +typedef struct{ + cufftDoubleComplex* gradKx; + cufftDoubleComplex* gradKy; + cufftDoubleComplex* gradKz; + cufftDoubleComplex* expKx; + cufftDoubleComplex* expKy; + cufftDoubleComplex* expKz; +} prefactorsFourier; + + +__constant__ int mxGPU, myGPU, mzGPU; +__constant__ int mxtGPU, mytGPU, mztGPU, mxmytGPU; +__constant__ int ncellsGPU, ncellstGPU; +__constant__ bool thermostatGPU; +__constant__ double lxGPU, lyGPU, lzGPU; +__constant__ double velocityboundaryGPU; +__constant__ double dtGPU; +__constant__ int numberneighboursGPU; +//int *cellneighbourGPU;// cellneighbourGPU[i*numberneighbours+j] neighbour j cell i +__constant__ double volumeGPU; +__constant__ double exGPU[6], eyGPU[6], ezGPU[6]; +__constant__ double dxGPU, dyGPU, dzGPU; +__constant__ double invdxGPU, invdyGPU, invdzGPU; +__constant__ double invlxGPU, invlyGPU, invlzGPU; +__constant__ double invdtGPU; + +//__device__ double *massGPU; +__device__ double *densityGPU; +__device__ double *densityPredictionGPU; +__device__ double *vxGPU, *vyGPU, *vzGPU; +__device__ double *vxPredictionGPU, *vyPredictionGPU, *vzPredictionGPU; +__device__ double *fxGPU, *fyGPU, *fzGPU; +__device__ double *dmGPU; +__device__ double *dpxGPU, *dpyGPU, *dpzGPU; +__device__ double *rxcellGPU, *rycellGPU, *rzcellGPU; +__device__ double *advXGPU, *advYGPU, *advZGPU; +__device__ double *omegaGPU; + +//IMEXRK +__device__ double *vx2GPU, *vy2GPU, *vz2GPU; +__device__ double *vx3GPU, *vy3GPU, *vz3GPU; +__device__ double *rxboundary2GPU, *ryboundary2GPU, *rzboundary2GPU; +__device__ double *rxboundary3GPU, *ryboundary3GPU, *rzboundary3GPU; +__device__ double *vxboundary2GPU, *vyboundary2GPU, *vzboundary2GPU; +__device__ double *vxboundary3GPU, *vyboundary3GPU, 
*vzboundary3GPU; +__device__ double *fx2GPU, *fy2GPU, *fz2GPU; +__device__ double *fx3GPU, *fy3GPU, *fz3GPU; + +//__constant__ double omega1, omega2, omega3, omega4, omega5; + + +//Binary Mixture +__device__ double *cGPU, *cPredictionGPU, *dcGPU; + +__constant__ double cWall0GPU, cWall1GPU, densityWall0GPU, densityWall1GPU; +__constant__ double vxWall0GPU, vxWall1GPU; +__constant__ double vyWall0GPU, vyWall1GPU; +__constant__ double vzWall0GPU, vzWall1GPU; +__constant__ double diffusionGPU, massSpecies0GPU, massSpecies1GPU; +__constant__ double shearviscosityGPU; +__constant__ double bulkviscosityGPU; +__constant__ double temperatureGPU; +__constant__ double pressurea0GPU; +__constant__ double pressurea1GPU; +__constant__ double pressurea2GPU; +__constant__ double densfluidGPU; + +__constant__ double fact1GPU, fact2GPU, fact3GPU, fact4GPU; +__constant__ double fact5GPU, fact6GPU, fact7GPU; +__constant__ double volumeboundaryconstGPU; + +__constant__ double soretCoefficientGPU, gradTemperatureGPU; + +__constant__ double extraMobilityGPU; +__constant__ bool setExtraMobilityGPU; + +__device__ double *rxboundaryGPU, *ryboundaryGPU, *rzboundaryGPU; +__device__ double *vxboundaryGPU, *vyboundaryGPU, *vzboundaryGPU; +__device__ double *fxboundaryGPU, *fyboundaryGPU, *fzboundaryGPU; +__device__ double *fboundaryOmega; +__device__ double *volumeboundaryGPU; +__device__ double *fbcell; + +//__device__ double *rxParticleGPU, *ryParticleGPU, *rzParticleGPU; +//__device__ double *vxParticleGPU, *vyParticleGPU, *vzParticleGPU; +__constant__ double massParticleGPU, volumeParticleGPU; +__constant__ int npGPU; +__constant__ bool setparticlesGPU, setboundaryGPU; +__constant__ double omega0GPU; +/*__device__ double *fb0, *fb1, *fb2, *fb3, *fb4, *fb5; +__device__ double *fbpxpy, *fbpxmy, *fbpxpz, *fbpxmz; +__device__ double *fbmxpy, *fbmxmy, *fbmxpz, *fbmxmz; +__device__ double *fbpypz, *fbpymz, *fbmypz, *fbmymz; +__device__ double *fbpxpypz, *fbpxpymz, *fbpxmypz, *fbpxmymz; +__device__ double *fbmxpypz, *fbmxpymz, *fbmxmypz, *fbmxmymz; +__device__ int *bposition;*/ + +__device__ int *ghostIndexGPU, *realIndexGPU; +__device__ int *ghostToPIGPU, *ghostToGhostGPU; +__device__ int *vecino0GPU, *vecino1GPU, *vecino2GPU; +__device__ int *vecino3GPU, *vecino4GPU, *vecino5GPU; +__device__ int *vecinopxpyGPU, *vecinopxmyGPU, *vecinopxpzGPU, *vecinopxmzGPU; +__device__ int *vecinomxpyGPU, *vecinomxmyGPU, *vecinomxpzGPU, *vecinomxmzGPU; +__device__ int *vecinopypzGPU, *vecinopymzGPU, *vecinomypzGPU, *vecinomymzGPU; +__device__ int *vecinopxpypzGPU, *vecinopxpymzGPU, *vecinopxmypzGPU, *vecinopxmymzGPU; +__device__ int *vecinomxpypzGPU, *vecinomxpymzGPU, *vecinomxmypzGPU, *vecinomxmymzGPU; +__constant__ int nboundaryGPU; +__constant__ double vboundaryGPU; + +__device__ int *neighbor0GPU, *neighbor1GPU, *neighbor2GPU; +__device__ int *neighbor3GPU, *neighbor4GPU, *neighbor5GPU; +__device__ int *neighborpxpyGPU, *neighborpxmyGPU, *neighborpxpzGPU, *neighborpxmzGPU; +__device__ int *neighbormxpyGPU, *neighbormxmyGPU, *neighbormxpzGPU, *neighbormxmzGPU; +__device__ int *neighborpypzGPU, *neighborpymzGPU, *neighbormypzGPU, *neighbormymzGPU; +__device__ int *neighborpxpypzGPU, *neighborpxpymzGPU, *neighborpxmypzGPU, *neighborpxmymzGPU; +__device__ int *neighbormxpypzGPU, *neighbormxpymzGPU, *neighbormxmypzGPU, *neighbormxmymzGPU; +__constant__ int mxNeighborsGPU, myNeighborsGPU, mzNeighborsGPU, mNeighborsGPU; + + +__device__ int *partincellX, *partincellY, *partincellZ; +__device__ int *countparticlesincellX, 
*countparticlesincellY, *countparticlesincellZ; +__device__ int *partInCellNonBonded, *countPartInCellNonBonded; +__device__ int *countparticlesincell, *partincell; +__constant__ int maxNumberPartInCellGPU, maxNumberPartInCellNonBondedGPU; +__device__ int *errorKernel; +__constant__ double cutoffGPU, invcutoffGPU, invcutoff2GPU; + +__constant__ double *saveForceX, *saveForceY, *saveForceZ; + +//WAVE SOURCE +__device__ long long *stepGPU; +__constant__ double densityConstGPU, dDensityGPU; + +__device__ vecinos *vec; +__device__ fvec *fb; +__device__ particlesincell *pc; + +__device__ double *rxCheckGPU, *ryCheckGPU, *rzCheckGPU; +__device__ double *vxCheckGPU, *vyCheckGPU, *vzCheckGPU; + +cudaArray *cuArrayDelta; +cudaArray *cuArrayDeltaDerived; +cudaArray *forceNonBonded1; + +texture texvecino0GPU; +texture texvecino1GPU; +texture texvecino2GPU; +texture texvecino3GPU; +texture texvecino4GPU; +texture texvecino5GPU; +texture texvecinopxpyGPU; +texture texvecinopxmyGPU; +texture texvecinopxpzGPU; +texture texvecinopxmzGPU; +texture texvecinomxpyGPU; +texture texvecinomxmyGPU; +texture texvecinomxpzGPU; +texture texvecinomxmzGPU; +texture texvecinopypzGPU; +texture texvecinopymzGPU; +texture texvecinomypzGPU; +texture texvecinomymzGPU; +texture texvecinopxpypzGPU; +texture texvecinopxpymzGPU; +texture texvecinopxmypzGPU; +texture texvecinopxmymzGPU; +texture texvecinomxpypzGPU; +texture texvecinomxpymzGPU; +texture texvecinomxmypzGPU; +texture texvecinomxmymzGPU; + +texture texrxboundaryGPU; +texture texryboundaryGPU; +texture texrzboundaryGPU; +texture texfxboundaryGPU; +texture texfyboundaryGPU; +texture texfzboundaryGPU; + +cudaArray *cuArrayDeltaPBC; + +texture texCountParticlesInCellX; +texture texCountParticlesInCellY; +texture texCountParticlesInCellZ; +texture texPartInCellX; +texture texPartInCellY; +texture texPartInCellZ; + +texture texVxGPU; +texture texVyGPU; +texture texVzGPU; + +texture texCountParticlesInCellNonBonded; +texture texPartInCellNonBonded; + +texture texforceNonBonded1; + +texture texneighbor0GPU; +texture texneighbor1GPU; +texture texneighbor2GPU; +texture texneighbor3GPU; +texture texneighbor4GPU; +texture texneighbor5GPU; +texture texneighborpxpyGPU; +texture texneighborpxmyGPU; +texture texneighborpxpzGPU; +texture texneighborpxmzGPU; +texture texneighbormxpyGPU; +texture texneighbormxmyGPU; +texture texneighbormxpzGPU; +texture texneighbormxmzGPU; +texture texneighborpypzGPU; +texture texneighborpymzGPU; +texture texneighbormypzGPU; +texture texneighbormymzGPU; +texture texneighborpxpypzGPU; +texture texneighborpxpymzGPU; +texture texneighborpxmypzGPU; +texture texneighborpxmymzGPU; +texture texneighbormxpypzGPU; +texture texneighbormxpymzGPU; +texture texneighbormxmypzGPU; +texture texneighbormxmymzGPU; + + +//Incompressible +__device__ cufftDoubleComplex *WxZ, *WyZ, *WzZ; +__device__ cufftDoubleComplex *vxZ, *vyZ, *vzZ; +__device__ cufftDoubleComplex *cZ; +__device__ cufftDoubleComplex *gradKx, *gradKy, *gradKz; +__device__ cufftDoubleComplex *expKx, *expKy, *expKz; +__device__ prefactorsFourier *pF; + + +//IncompressibleBoundaryRK2 +__device__ double *rxboundaryPredictionGPU, *ryboundaryPredictionGPU, *rzboundaryPredictionGPU; +__device__ double *vxboundaryPredictionGPU, *vyboundaryPredictionGPU, *vzboundaryPredictionGPU; + + + +//NEW Bonded forces +typedef struct{ + int *bondsParticleParticleGPU; + int *bondsParticleParticleOffsetGPU; + int *bondsIndexParticleParticleGPU; + double *r0ParticleParticleGPU; + double *kSpringParticleParticleGPU; + + + int 
*bondsParticleFixedPointGPU; + int *bondsParticleFixedPointOffsetGPU; + //int *bondsIndexParticleFixedPointGPU; + double *r0ParticleFixedPointGPU; + double *kSpringParticleFixedPointGPU; + double *rxFixedPointGPU; + double *ryFixedPointGPU; + double *rzFixedPointGPU; +} bondedForcesVariables; + +__constant__ bool bondedForcesGPU; +__device__ bondedForcesVariables *bFV; +__device__ int *bondsParticleParticleGPU; +__device__ int *bondsParticleParticleOffsetGPU; +__device__ int *bondsIndexParticleParticleGPU; +__device__ double *r0ParticleParticleGPU; +__device__ double *kSpringParticleParticleGPU; + + +__device__ int *bondsParticleFixedPointGPU; +__device__ int *bondsParticleFixedPointOffsetGPU; +//__device__ int *bondsIndexParticleFixedPointGPU; +__device__ double *r0ParticleFixedPointGPU; +__device__ double *kSpringParticleFixedPointGPU; +__device__ double *rxFixedPointGPU; +__device__ double *ryFixedPointGPU; +__device__ double *rzFixedPointGPU; +__constant__ bool particlesWallGPU; +__constant__ bool computeNonBondedForcesGPU; + diff --git a/cuda_code/gpu_asum_1.cu b/cuda_code/gpu_asum_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..2f369fad14eb7b6e49c96d4f2bbdffec9e0c1851 --- /dev/null +++ b/cuda_code/gpu_asum_1.cu @@ -0,0 +1,178 @@ +#include + +#include "caffe/common.hpp" +#include "caffe/util/gpu_math_functions.cuh" +#include "caffe/util/math_functions.hpp" +#include "caffe/type.hpp" + +namespace caffe { + +SHMEM(asum); +CAFFE_GPU_SHMEM(asum); + +///////////////////////////////////// ASUM REDUCTION /////////////////////////////////// + +template +__device__ void asum_reduce_block(volatile TR *sdata, TR my_sum, unsigned int tid) { + volatile TR* st = sdata + tid; + tassign(st, my_sum); + __syncthreads(); + + // do reduction in shared mem + if (BlockSize >= 512) { + if (tid < 256) { + tsum_replace(st, sdata[tid + 256]); + } + __syncthreads(); + } + if (BlockSize >= 256) { + if (tid < 128) { + tsum_replace(st, sdata[tid + 128]); + } + __syncthreads(); + } + if (BlockSize >= 128) { + if (tid < 64) { + tsum_replace(st, sdata[tid + 64]); + } + __syncthreads(); + } + if (tid < 32) { + for (int i = 32; i > 0; i >>= 1) { + tsum_replace(st, sdata[tid + i]); + } + } +} + +// Global variable used by asum_reduce_kernel to count how many blocks have finished +__device__ unsigned int asum_blocks_count[REGRESSION_GROUPS_MAX]; + +void set_asum_blocks_count(unsigned int cnt, int group, cudaStream_t stream) { + CUDA_CHECK_ARG(cudaMemcpyToSymbolAsync(asum_blocks_count, &cnt, sizeof(unsigned int), + group * sizeof(unsigned int), cudaMemcpyHostToDevice, stream), Caffe::current_device()); +} + +template +__device__ void asum_reduce_blocks(const T *in, TR *out, unsigned int n) { + struct __dyn_shmem_asum__> asum_blocks_shmem; + TR* partial_asum = reinterpret_cast(asum_blocks_shmem.getPtr()); + + // first level of reduction: + // reading from global memory, writing to shared memory + unsigned int tid = threadIdx.x; + unsigned int i = blockIdx.x * BlockSize * 2 + threadIdx.x; + unsigned int gridSize = BlockSize * 2 * gridDim.x; + TR my_sum = tzero(); + // We reduce multiple elements per thread. The number is determined by the + // number of active thread blocks (via gridDim). More blocks will result + // in a larger gridSize and therefore fewer elements per thread. 
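+  // Annotation: each grid-stride iteration folds two inputs, in[i] and
+  // in[i + BlockSize], into my_sum; the IsPow2 = true instantiation (selected
+  // by gpu_asum_t only when is_pow2(n)) skips the bounds check, relying on the
+  // launch configuration to keep i + BlockSize in range.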
+ while (i < n) { + if (IsPow2 || i + BlockSize < n) { + tsum_replace(&my_sum, tsum(tabs(in[i]), tabs(in[i + BlockSize]))); + } else { + tsum_replace(&my_sum, tabs(in[i])); + } + i += gridSize; + } + + // do reduction in shared mem + asum_reduce_block(partial_asum, my_sum, tid); + // write result for this block to global mem + if (tid == 0) { + out[blockIdx.x] = partial_asum[0]; + } +} + +template +__global__ void asum_reduce_kernel(unsigned int n, const T *in, TR *out, int group) { + asum_reduce_blocks(in, out, n); + if (gridDim.x > 1) { + const unsigned int tid = threadIdx.x; + struct __dyn_shmem_asum__> asum_reduce_shmem; + TR* partial_asum = reinterpret_cast(asum_reduce_shmem.getPtr()); + __shared__ bool last_asum_reduce_block; + + // wait until all outstanding memory instructions in this thread are finished + __threadfence(); + + // Thread 0 takes a ticket + if (tid == 0) { + unsigned int ticket = atomicInc(asum_blocks_count + group, gridDim.x); + last_asum_reduce_block = (ticket == gridDim.x - 1); + } + __syncthreads(); + + // The last block sums the results of all other blocks + if (last_asum_reduce_block) { + int i = tid; + TR my_sum = tzero(); + + while (i < gridDim.x) { + tsum_replace(&my_sum, out[i]); + i += BlockSize; + } + asum_reduce_block(partial_asum, my_sum, tid); + if (tid == 0) { + out[0] = partial_asum[0]; + // reset blocks count so that next run succeeds + asum_blocks_count[group] = 0U; + } + } + } +} + +template +void gpu_asum_t(const int n, const T* x, TR* sum, int group) { + CHECK_LT(group, REGRESSION_GROUPS_MAX); + cudaStream_t stream = Caffe::thread_stream(); + const bool po2 = is_pow2(n); + // See kernel for details + CHECK_LE(CAFFE_CUDA_NUM_THREADS_HALF, 512); + CHECK_GE(CAFFE_CUDA_NUM_THREADS_HALF, 128); + const int threadsPerCta = CAFFE_CUDA_NUM_THREADS_HALF; + const int nbrCtas = CAFFE_GET_BLOCKS_HALF(n); + const int reduction_size = (nbrCtas + 1) * sizeof(TR); + GPUMemory::Workspace ws(reduction_size); + TR* dev_ptr_sum = reinterpret_cast(ws.data()); + set_asum_blocks_count(0U, group, stream); + if (po2 && n > CAFFE_CUDA_NUM_THREADS_HALF) { + // NOLINT_NEXT_LINE(whitespace/operators) + asum_reduce_kernel<<>> + ((unsigned int)n, x, dev_ptr_sum, group); + } else { + // NOLINT_NEXT_LINE(whitespace/operators) + asum_reduce_kernel<<>> + ((unsigned int)n, x, dev_ptr_sum, group); + } + CUDA_POST_KERNEL_CHECK; + CUDA_CHECK(cudaMemcpyAsync(sum, dev_ptr_sum, sizeof(TR), cudaMemcpyDeviceToHost, stream)); + CUDA_CHECK(cudaStreamSynchronize(stream)); +} + +template<> +void caffe_gpu_asum(const int n, const float16* x, float* sum, int group) { + // For odd counts we allocate extra element to speed up kernels. + // We have to keep it clean. 
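+  // Annotation: n is rounded up to an even count and the padded element zeroed
+  // (clean_last_element) so the float16 buffer can be reduced as packed pairs
+  // (presumably half2), two values per element, with n2 = even(n) / 2 below.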
+ cudaStream_t stream = Caffe::thread_stream(); + if (n & 1) { + clean_last_element(const_cast(x) + n, stream); + } + const int n2 = even(n) / 2; + gpu_asum_t(n2, reinterpret_cast(x), sum, group); +} +template<> +void caffe_gpu_asum(const int n, const float16* x, double* sum, int group) { + float sf; + caffe_gpu_asum(n, x, &sf, group); + *sum = sf; +} +template<> +void caffe_gpu_asum(const int n, const float16* x, float16* sum, int group) { + float sf; + caffe_gpu_asum(n, x, &sf, group); + *sum = sf; +} + +} // namespace caffe diff --git a/cuda_code/gpu_fish_template.cu b/cuda_code/gpu_fish_template.cu new file mode 100644 index 0000000000000000000000000000000000000000..455006a837e317a5e54fd452b44648a38471160a --- /dev/null +++ b/cuda_code/gpu_fish_template.cu @@ -0,0 +1,209 @@ +/* +The MIT License (MIT) + +Copyright (c) 2016 Charles Hubbard and Chinmay Hegde + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+*/ + +/* Test GPUFish with Movielens dataset */ + +#include +#include +#include +#include +#include +#include +#include +#include "gpu_fish_headers.h" + +std::string training_data = "../Data_sets-Resutls/ml_1m_train1"; +std::string testing_data = "../Data_sets-Resutls/ml_1m_test1"; +#define RANK 30 +#define CORES 200 + +//========== define derivative and regulizer for gradient updates ============// +__global__ +void GradientUpdate(float* L, float* R, const int rank, float* dev_ratings, int* dev_offsets, int* dev_chunk_size, int round, float alpha, float average) { + + + __shared__ float sh_L[RANK]; + __shared__ float sh_R[RANK]; + __shared__ float ijr[3]; + + int idx = blockIdx.x; + int tidx = threadIdx.x; + int offset = dev_offsets[idx]; + int N = dev_chunk_size[idx]/3; + + float B = 4.95; + float m_hat = 0; + int i = 0; + int j = 0; + float m = 0; + float deriv; + + for (int p=0;paverage) { + deriv = -1/(exp(m_hat) + 1); + } + else { + deriv = exp(m_hat)/(exp(m_hat)+1); + } + + /* + + Perform movielens analysis with squared error function + //========== calculate derivative: squared error ============// + + deriv = 2*(m_hat-(m-average)); + + */ + + for (int k=(threadIdx.x); kB){ + for (int k=(threadIdx.x); kB){ + + for (int k=(threadIdx.x); k> host_all_ratings; + + //========== read training data ============// + dataSet* data = readFile(training_data, &host_all_ratings); + + + const int rows = data->rows; + const int columns = data->columns; + float* cpu_nums_R = new float[columns*rank]; + float* cpu_nums_L = new float[rows*rank]; + + gpu_fish(&host_all_ratings, cpu_nums_L, cpu_nums_R, rows, columns, rank, cores, data->numRatings, data->average); + + + //========== testing ============// + std::fstream infile; + infile.open(testing_data); + if (!infile.is_open()) { + throw std::runtime_error("File not found!"); + } + + std::string line; + + int user; + int movie; + float rating; + float m_hat; + + while (getline(infile,line,'\n')) { + + std::stringstream stream(line); + + m_hat = 0; + stream >> user >> movie >> rating; + + //========== get estimate from L[i,:]*R[j,:] ============// + for (int k=0;k +#include +#include +#include +#include +#include +#include + +#include "eddl/hardware/gpu/gpu_kernels.h" + + +/* this GPU kernel function is used to initialize the random states */ +__global__ void init(unsigned int seed, curandState_t* states) { + + /* we have to initialize the state */ + curand_init((unsigned int)clock64(), /* the seed can be the same for each core, here we pass the time in from the CPU */ + blockIdx.x, /* the sequence number should be different for each core (unless you want all + cores to get the same sequence of numbers for some reason - use thread id! 
*/ + 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ + &states[blockIdx.x]); +} + +/* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */ +__global__ void random_uniform(curandState_t* states, float* numbers) { + /* curand works like rand - except that it takes a state as a parameter */ + numbers[blockIdx.x] = curand_uniform(&states[blockIdx.x]); +} diff --git a/cuda_code/gpu_ovps.cu b/cuda_code/gpu_ovps.cu new file mode 100644 index 0000000000000000000000000000000000000000..076a9092b627d439755a12992883d5548a3fd19d --- /dev/null +++ b/cuda_code/gpu_ovps.cu @@ -0,0 +1,411 @@ +#include +#include +#include +#include +#include + +#include +#include "cublas_v2.h" + +#include +#include + +#include "cublasStatus_t_getErrorString.h" +#include "qc_ovps.h" + +void OVPs::init(const int dimm, const int mc_pair_num_, const Basis& basis) { + throw std::runtime_error("OVPs for GPU not implemented"); +} + +void OVPs::init_02(int p1, int p2, int p3, int p4, const Basis &basis) { + mc_pair_num = p1; + numBand = p2; + offBand = p3; + numDiff = p4; + + iocc1 = basis.iocc1; + iocc2 = basis.iocc2; + ivir1 = basis.ivir1; + ivir2 = basis.ivir2; + + cudaError_t_Assert(cudaMallocHost((void**)&ovps.rv, sizeof(double) * mc_pair_num), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMallocHost((void**)&ovps.occ1, sizeof(double) * mc_pair_num * (iocc2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMallocHost((void**)&ovps.occ2, sizeof(double) * mc_pair_num * (iocc2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMallocHost((void**)&ovps.vir1, sizeof(double) * mc_pair_num * (ivir2 - ivir1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMallocHost((void**)&ovps.vir2, sizeof(double) * mc_pair_num * (ivir2 - ivir1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMallocHost((void**)&ovps.occTau1, sizeof(double) * mc_pair_num * (iocc2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMallocHost((void**)&ovps.occTau2, sizeof(double) * mc_pair_num * (iocc2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMallocHost((void**)&ovps.virTau1, sizeof(double) * mc_pair_num * (ivir2 - ivir1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMallocHost((void**)&ovps.virTau2, sizeof(double) * mc_pair_num * (ivir2 - ivir1)), __FILE__, __LINE__); +} +void OVPs::alloc_02() { + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.rv, sizeof(double) * mc_pair_num), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.occ1, sizeof(double) * mc_pair_num * (iocc2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.occ2, sizeof(double) * mc_pair_num * (iocc2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vir1, sizeof(double) * mc_pair_num * (ivir2 - ivir1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vir2, sizeof(double) * mc_pair_num * (ivir2 - ivir1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.psi1, sizeof(double) * mc_pair_num * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.psi2, sizeof(double) * mc_pair_num * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.occTau1, sizeof(double) * mc_pair_num * (iocc2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.occTau2, sizeof(double) * mc_pair_num * (iocc2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.virTau1, 
sizeof(double) * mc_pair_num * (ivir2 - ivir1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.virTau2, sizeof(double) * mc_pair_num * (ivir2 - ivir1)), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_13, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_14, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_23, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_24, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_13, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_14, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_23, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_24, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.ps_24, sizeof(double) * mc_pair_num * mc_pair_num * numBand), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en2mCore, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en2pCore, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en2m, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en2p, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.ent, sizeof(double) * (ivir2 - iocc1) * mc_pair_num), __FILE__, __LINE__); + + d_ovps.en2 = std::vector>(numBand, std::vector(numDiff)); + + d_ovps.en2Ex1 = std::vector>(numBand, std::vector(numDiff)); + d_ovps.en2Ex2 = std::vector>(numBand, std::vector(numDiff)); + for (auto i = 0; i < d_ovps.en2Ex1.size(); i++) { + for (auto j = 0; j < d_ovps.en2Ex1[i].size(); j++) { + cudaError_t_Assert(cudaMalloc((void**)&(d_ovps.en2[i][j]), sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&(d_ovps.en2Ex1[i][j]), sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&(d_ovps.en2Ex2[i][j]), sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMemset(d_ovps.en2Ex1[i][j], 0, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMemset(d_ovps.en2Ex2[i][j], 0, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + } + } +} +void OVPs::free_tau_02() { + cudaError_t_Assert(cudaFreeHost(ovps.rv), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFreeHost(ovps.occ1), __FILE__, __LINE__); + cudaError_t_Assert(cudaFreeHost(ovps.occ2), __FILE__, __LINE__); + cudaError_t_Assert(cudaFreeHost(ovps.vir1), __FILE__, __LINE__); + cudaError_t_Assert(cudaFreeHost(ovps.vir2), __FILE__, __LINE__); + cudaError_t_Assert(cudaFreeHost(ovps.occTau1), __FILE__, __LINE__); + cudaError_t_Assert(cudaFreeHost(ovps.occTau2), __FILE__, __LINE__); + cudaError_t_Assert(cudaFreeHost(ovps.virTau1), __FILE__, __LINE__); + cudaError_t_Assert(cudaFreeHost(ovps.virTau2), __FILE__, 
__LINE__); +} +void OVPs::free_02() { + cudaError_t_Assert(cudaFree(d_ovps.rv), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.psi1), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.psi2), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.occ1), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.occ2), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vir1), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vir2), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.occTau1), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.occTau2), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.virTau1), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.virTau2), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.os_13), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.os_14), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.os_23), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.os_24), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.vs_13), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vs_14), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vs_23), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vs_24), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.ps_24), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.ent), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en2mCore), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en2pCore), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en2m), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en2p), __FILE__, __LINE__); + + for (auto i = 0; i < d_ovps.en2Ex1.size(); i++) { + for (auto j = 0; j < d_ovps.en2Ex1[i].size(); j++) { + cudaError_t_Assert(cudaFree(d_ovps.en2[i][j]), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en2Ex1[i][j]), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en2Ex2[i][j]), __FILE__, __LINE__); + } + } +} + +void OVPs::init_03(int p1, int p2, int p3, int p4, const Basis &basis) { + init_02(p1, p2, p3, p4, basis); +} +void OVPs::alloc_03() { + alloc_02(); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_15, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_16, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_25, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_26, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_35, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_36, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_45, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.os_46, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_15, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_16, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_25, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + 
cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_26, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_35, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_36, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_45, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.vs_46, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.ps_12c, sizeof(double) * mc_pair_num * numBand), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.ps_22c, sizeof(double) * mc_pair_num * numBand), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_1pCore, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_2pCore, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_12pCore, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_1mCore, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_2mCore, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_12mCore, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_12cCore, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_22cCore, sizeof(double) * mc_pair_num * mc_pair_num), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_1p, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_2p, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_12p, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_1m, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_2m, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_12m, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3_c, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.one, sizeof(double) * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3c12, sizeof(double) * mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3c22, sizeof(double) * mc_pair_num), __FILE__, __LINE__); + + d_ovps.en3 = std::vector>(numBand, std::vector(numDiff)); + d_ovps.en3Ex1 = std::vector>(numBand, std::vector(numDiff)); + d_ovps.en3Ex2 = std::vector>(numBand, std::vector(numDiff)); + for (auto i = 0; i < d_ovps.en3Ex1.size(); i++) { + for (auto j = 0; j < d_ovps.en3Ex1[i].size(); j++) { + cudaError_t_Assert(cudaMalloc((void**)&d_ovps.en3[i][j], sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + + 
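+      // one (ivir2 - iocc1) x (ivir2 - iocc1) matrix per band/differential pair;
+      // the en3Ex1/en3Ex2 buffers allocated below are zeroed once here, while the
+      // en3 accumulators are cleared every step by zero_energy_arrays_03()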
cudaError_t_Assert(cudaMalloc((void**)&(d_ovps.en3Ex1[i][j]), sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMalloc((void**)&(d_ovps.en3Ex2[i][j]), sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMemset(d_ovps.en3Ex1[i][j], 0, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + cudaError_t_Assert(cudaMemset(d_ovps.en3Ex2[i][j], 0, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)), __FILE__, __LINE__); + } + } + + std::vector one(mc_pair_num); + std::fill(one.begin(), one.end(), 1.0); + cudaError_t_Assert(cudaMemcpy(d_ovps.one, one.data(), sizeof(double) * one.size(), cudaMemcpyHostToDevice), __FILE__, __LINE__); +} +void OVPs::free_tau_03() { + free_tau_02(); +} +void OVPs::free_03() { + free_02(); + + cudaError_t_Assert(cudaFree(d_ovps.os_15), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.os_16), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.os_25), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.os_26), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.os_35), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.os_36), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.os_45), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.os_46), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.vs_15), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vs_16), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vs_25), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vs_26), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.vs_35), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vs_36), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vs_45), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.vs_46), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.ps_12c), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.ps_22c), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.en3_1pCore), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_2pCore), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_12pCore), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_1mCore), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_2mCore), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_12mCore), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_12cCore), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_22cCore), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.en3_1p), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_2p), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_12p), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_1m), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_2m), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_12m), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3_c), __FILE__, __LINE__); + + cudaError_t_Assert(cudaFree(d_ovps.one), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3c12), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3c22), __FILE__, __LINE__); + + for (auto i = 0; i < d_ovps.en3Ex1.size(); i++) { + for (auto j = 0; j < d_ovps.en3Ex1[i].size(); j++) { + cudaError_t_Assert(cudaFree(d_ovps.en3[i][j]), __FILE__, __LINE__); + cudaError_t_Assert(cudaFree(d_ovps.en3Ex1[i][j]), __FILE__, __LINE__); + 
cudaError_t_Assert(cudaFree(d_ovps.en3Ex2[i][j]), __FILE__, __LINE__); + } + } +} + +void OVPs::zero_energy_arrays_02() { + for (auto& it : d_ovps.en2) { + for (auto& jt : it) { + cudaMemset(jt, 0, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)); + } + } +} +void OVPs::zero_energy_arrays_03() { + for (auto& it : d_ovps.en2) { + for (auto& jt : it) { + cudaMemset(jt, 0, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)); + } + } + for (auto& it : d_ovps.en3) { + for (auto& jt : it) { + cudaMemset(jt, 0, sizeof(double) * (ivir2 - iocc1) * (ivir2 - iocc1)); + } + } +} + +__global__ void freq_indp_gf(OVPS_ARRAY ovps, int mc_pair_num, int iocc2, int offBand, int numBand) { + int tidx = blockIdx.x * blockDim.x + threadIdx.x; + int tidy = blockIdx.y * blockDim.y + threadIdx.y; + if (tidx < mc_pair_num) { + int index = tidy * mc_pair_num + tidx; + if (tidy - offBand < 0) { + int lookup = (iocc2 - offBand + tidy) * mc_pair_num + tidx; + ovps.ps_12c[index] = ovps.occ1[lookup] * ovps.occ2[lookup]; + ovps.ps_22c[index] = ovps.occ2[lookup] * ovps.occ2[lookup]; + } else { + int lookup = (tidy - offBand) * mc_pair_num + tidx; + ovps.ps_12c[index] = ovps.vir1[lookup] * ovps.vir2[lookup]; + ovps.ps_22c[index] = ovps.vir2[lookup] * ovps.vir2[lookup]; + } + } +} + +__global__ void print_out(double* A, int m, int n) { + for (int i = 0; i < m; i++) { + for (int j = 0; j < n; j++) { + printf("%12.8f", A[i * n + j]); + } + printf("\n"); + } +} +void OVPs::update_ovps_02(el_pair_typ* el_pair_list, Stochastic_Tau& tau) { + int ip, am; + double alpha = 1.00; + double beta = 0.00; + cublasHandle_t handle; + cublasStatusAssert(cublasCreate(&handle), __FILE__, __LINE__); + + //copy weights from el_pair_list to host arrays + for (ip = 0; ip < mc_pair_num; ip++) { //do i = 1, el_pair_num - 1 + ovps.rv[ip] = el_pair_list[ip].rv; + } + cudaError_t_Assert(cudaMemcpy(d_ovps.rv, ovps.rv, sizeof(double) * mc_pair_num, cudaMemcpyHostToDevice), __FILE__, __LINE__); + + auto t_val1 = tau.get_exp_tau_device(0, 0); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, iocc2 - iocc1, d_ovps.occ1, mc_pair_num, &t_val1[iocc1], 1, d_ovps.occTau1, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau1, mc_pair_num, d_ovps.occ1, mc_pair_num, &beta, d_ovps.os_13, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau1, mc_pair_num, d_ovps.occ2, mc_pair_num, &beta, d_ovps.os_23, mc_pair_num), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, iocc2 - iocc1, d_ovps.occ2, mc_pair_num, &t_val1[iocc1], 1, d_ovps.occTau2, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau2, mc_pair_num, d_ovps.occ1, mc_pair_num, &beta, d_ovps.os_14, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau2, mc_pair_num, d_ovps.occ2, mc_pair_num, &beta, d_ovps.os_24, mc_pair_num), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, ivir2 - ivir1, d_ovps.vir1, mc_pair_num, &t_val1[ivir1], 1, d_ovps.virTau1, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, 
mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau1, mc_pair_num, d_ovps.vir1, mc_pair_num, &beta, d_ovps.vs_13, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau1, mc_pair_num, d_ovps.vir2, mc_pair_num, &beta, d_ovps.vs_23, mc_pair_num), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, ivir2 - ivir1, d_ovps.vir2, mc_pair_num, &t_val1[ivir1], 1, d_ovps.virTau2, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau2, mc_pair_num, d_ovps.vir1, mc_pair_num, &beta, d_ovps.vs_14, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau2, mc_pair_num, d_ovps.vir2, mc_pair_num, &beta, d_ovps.vs_24, mc_pair_num), __FILE__, __LINE__); + + cudaError_t_Assert(cudaMemset(d_ovps.ps_24, 0, sizeof(double) * mc_pair_num * mc_pair_num * numBand), __FILE__, __LINE__); + for (am = 0; am < numBand; am++) { + alpha = 1.00; + if (am - offBand < 0) { //construct ps_?? and ps_??c for occupied orbitals + cublasStatusAssert(cublasDger(handle, mc_pair_num, mc_pair_num, &alpha, d_ovps.occ2 + (am + iocc2 - iocc1 - offBand) * mc_pair_num, 1, d_ovps.occ2 + (am + iocc2 - iocc1 - offBand) * mc_pair_num, 1, d_ovps.ps_24 + am * mc_pair_num * mc_pair_num, mc_pair_num), __FILE__, __LINE__); + } else { //construct ps_?? and ps_??c for virtualorbitals + cublasStatusAssert(cublasDger(handle, mc_pair_num, mc_pair_num, &alpha, d_ovps.vir2 + (am - offBand) * mc_pair_num, 1, d_ovps.vir2 + (am - offBand) * mc_pair_num, 1, d_ovps.ps_24 + am * mc_pair_num * mc_pair_num, mc_pair_num), __FILE__, __LINE__); + } + } + + cudaError_t_Assert(cudaThreadSynchronize(), __FILE__, __LINE__); + cublasStatusAssert(cublasDestroy(handle), __FILE__, __LINE__); +} +void OVPs::update_ovps_03(el_pair_typ* el_pair_list, Stochastic_Tau& tau) { + double alpha = 1.00; + double beta = 0.00; + cublasHandle_t handle; + cublasStatusAssert(cublasCreate(&handle), __FILE__, __LINE__); + + update_ovps_02(el_pair_list, tau); + + //copy wave functions from host to device; + auto t_val2 = tau.get_exp_tau_device(1, 1); + auto t_val12 = tau.get_exp_tau_device(1, 0); + + dim3 blockSize(128, 1, 1); + dim3 gridSize((mc_pair_num + 127) / 128, numBand, 1); + freq_indp_gf<<>>(d_ovps, mc_pair_num, iocc2 - iocc1, offBand, numBand); + cudaError_t_Assert(cudaPeekAtLastError(), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, iocc2 - iocc1, d_ovps.occ1, mc_pair_num, &t_val2[iocc1], 1, d_ovps.occTau1, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau1, mc_pair_num, d_ovps.occ1, mc_pair_num, &beta, d_ovps.os_35, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau1, mc_pair_num, d_ovps.occ2, mc_pair_num, &beta, d_ovps.os_45, mc_pair_num), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, iocc2 - iocc1, d_ovps.occ2, mc_pair_num, &t_val2[iocc1], 1, d_ovps.occTau2, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, 
mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau2, mc_pair_num, d_ovps.occ1, mc_pair_num, &beta, d_ovps.os_36, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau2, mc_pair_num, d_ovps.occ2, mc_pair_num, &beta, d_ovps.os_46, mc_pair_num), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, ivir2 - ivir1, d_ovps.vir1, mc_pair_num, &t_val2[ivir1], 1, d_ovps.virTau1, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau1, mc_pair_num, d_ovps.vir1, mc_pair_num, &beta, d_ovps.vs_35, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau1, mc_pair_num, d_ovps.vir2, mc_pair_num, &beta, d_ovps.vs_45, mc_pair_num), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, ivir2 - ivir1, d_ovps.vir2, mc_pair_num, &t_val2[ivir1], 1, d_ovps.virTau2, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau2, mc_pair_num, d_ovps.vir1, mc_pair_num, &beta, d_ovps.vs_36, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau2, mc_pair_num, d_ovps.vir2, mc_pair_num, &beta, d_ovps.vs_46, mc_pair_num), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, iocc2 - iocc1, d_ovps.occ1, mc_pair_num, &t_val12[iocc1], 1, d_ovps.occTau1, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau1, mc_pair_num, d_ovps.occ1, mc_pair_num, &beta, d_ovps.os_15, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau1, mc_pair_num, d_ovps.occ2, mc_pair_num, &beta, d_ovps.os_25, mc_pair_num), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, iocc2 - iocc1, d_ovps.occ2, mc_pair_num, &t_val12[iocc1], 1, d_ovps.occTau2, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau2, mc_pair_num, d_ovps.occ1, mc_pair_num, &beta, d_ovps.os_16, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, iocc2 - iocc1, &alpha, d_ovps.occTau2, mc_pair_num, d_ovps.occ2, mc_pair_num, &beta, d_ovps.os_26, mc_pair_num), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, ivir2 - ivir1, d_ovps.vir1, mc_pair_num, &t_val12[ivir1], 1, d_ovps.virTau1, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau1, mc_pair_num, d_ovps.vir1, mc_pair_num, &beta, d_ovps.vs_15, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau1, mc_pair_num, d_ovps.vir2, mc_pair_num, &beta, d_ovps.vs_25, 
mc_pair_num), __FILE__, __LINE__); + + cublasStatusAssert(cublasDdgmm(handle, CUBLAS_SIDE_RIGHT, mc_pair_num, ivir2 - ivir1, d_ovps.vir2, mc_pair_num, &t_val12[ivir1], 1, d_ovps.virTau2, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau2, mc_pair_num, d_ovps.vir1, mc_pair_num, &beta, d_ovps.vs_16, mc_pair_num), __FILE__, __LINE__); + cublasStatusAssert(cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, mc_pair_num, mc_pair_num, ivir2 - ivir1, &alpha, d_ovps.virTau2, mc_pair_num, d_ovps.vir2, mc_pair_num, &beta, d_ovps.vs_26, mc_pair_num), __FILE__, __LINE__); + cudaError_t_Assert(cudaThreadSynchronize(), __FILE__, __LINE__); + cublasStatusAssert(cublasDestroy(handle), __FILE__, __LINE__); +} diff --git a/cuda_code/gpu_softmax_kernel_1.cu b/cuda_code/gpu_softmax_kernel_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..44052fbccda6aafb6366b960fae30d52e77a0fd3 --- /dev/null +++ b/cuda_code/gpu_softmax_kernel_1.cu @@ -0,0 +1,203 @@ +// Copyright (C) 2020 THL A29 Limited, a Tencent company. +// All rights reserved. +// Licensed under the BSD 3-Clause License (the "License"); you may +// not use this file except in compliance with the License. You may +// obtain a copy of the License at +// https://opensource.org/licenses/BSD-3-Clause +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" basis, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. +// See the AUTHORS file for names of contributors. + +#include + +#include +#include + +#include "turbo_transformers/layers/kernels/gpu_softmax_kernel.h" + +namespace turbo_transformers { +namespace layers { +namespace kernels { + +namespace { +template +struct Array { + __device__ __forceinline__ Array() {} + __device__ __forceinline__ Array(T* inputs) { + for (int i = 0; i < Len; ++i) { + data[i] = inputs[i]; + } + } + T data[Len]; +}; + +template +struct ArrayAddFunc { + __device__ __forceinline__ Array operator()(const Array& p1, + const Array& p2) { + Array result; + for (int i = 0; i < Len; ++i) { + result.data[i] = p1.data[i] + p2.data[i]; + } + return result; + } +}; + +template +struct ArrayMaxFunc { + __device__ __forceinline__ Array operator()(const Array& p1, + const Array& p2) { + Array result; + for (int i = 0; i < Len; ++i) { + result.data[i] = p1.data[i] > p2.data[i] ? p1.data[i] : p2.data[i]; + } + return result; + } +}; + +template +__global__ void cub_softmax_kernel_k(float* qk_buf_, const float* attr_mask, + const int batch_size, const int head_num, + const int from_seq_len, + const int to_seq_len, const float scaler, + bool is_2D) { + __shared__ typename cub::BlockReduce, BlockDim>::TempStorage + temp_storage; + __shared__ float s_sum[K], s_max[K]; + float tmp[K]; + int qk_offset = blockIdx.x * K * to_seq_len; + + float mask_val = 0.; + for (int i = 0; i < K; ++i) { + float qk = threadIdx.x < to_seq_len + ? qk_buf_[threadIdx.x + qk_offset + to_seq_len * i] + : 0.0f; + if (attr_mask != nullptr) { + int batch_id = (blockIdx.x * K + i) / (head_num * from_seq_len); + int from_seq_id = (blockIdx.x * K + i) % from_seq_len; + mask_val = attr_mask[threadIdx.x + + (is_2D ? 
(batch_id * to_seq_len) + : (batch_id * from_seq_len + from_seq_id) * + to_seq_len)]; + } else { + mask_val = 0.0f; + } + // mask_val = (1.0f - mask_val) * -10000.0f; + tmp[i] = threadIdx.x < to_seq_len ? (qk * scaler + mask_val) : -1e20f; + } + + Array max_val = + cub::BlockReduce, BlockDim>(temp_storage) + .Reduce(Array(tmp), ArrayMaxFunc()); + + if (threadIdx.x == 0) { + for (int i = 0; i < K; ++i) { + s_max[i] = max_val.data[i]; + } + } + __syncthreads(); + + float qk_tmp[K]; + for (int i = 0; i < K; ++i) { + qk_tmp[i] = threadIdx.x < to_seq_len ? __expf((tmp[i] - s_max[i])) : 0.0f; + } + + Array sum_val = + cub::BlockReduce, BlockDim>(temp_storage) + .Reduce(Array(qk_tmp), ArrayAddFunc()); + + if (threadIdx.x == 0) { + for (int i = 0; i < K; ++i) { + s_sum[i] = sum_val.data[i] + 1e-6f; + } + } + __syncthreads(); + + if (threadIdx.x < to_seq_len) { + for (int i = 0; i < K; ++i) { + qk_buf_[threadIdx.x + qk_offset + to_seq_len * i] = + (qk_tmp[i] / s_sum[i]); + } + } +} +} // namespace + +#define SOFTMAX_KERNEL_CASE(BlockDim, ...) \ + case (BlockDim): \ + if (row_per_thread_block == RowsPerThreadBlock) { \ + cub_softmax_kernel_k \ + <<>>(__VA_ARGS__); \ + } else { \ + cub_softmax_kernel_k \ + <<>>(__VA_ARGS__); \ + } \ + break + +#define RUN_KERNEL(...) \ + do { \ + switch (block.x) { \ + SOFTMAX_KERNEL_CASE(32, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(64, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(96, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(128, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(160, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(192, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(224, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(256, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(288, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(320, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(352, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(384, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(416, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(448, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(480, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(512, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(544, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(576, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(608, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(640, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(672, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(704, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(736, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(768, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(800, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(832, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(864, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(896, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(928, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(960, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(992, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(1024, __VA_ARGS__); \ + default: \ + throw std::runtime_error("The block.x should be 32x."); \ + } \ + } while (0) + +template <> +void GPUSoftmaxMask(float* qk_buf, const float* attr_mask, int64_t batch_size, + int64_t head_num, int64_t from_seq_len, int64_t to_seq_len, + float scale, bool is_2D, cudaStream_t stream) { + dim3 block, grid; + int high_dim_size = batch_size * head_num * from_seq_len; + const int OneRowPerThreadBlock = 1; + const int RowsPerThreadBlock = 2; + int row_per_thread_block = OneRowPerThreadBlock; + if ((head_num * from_seq_len) % RowsPerThreadBlock == 0) { + row_per_thread_block = RowsPerThreadBlock; + } + // block size must be 32x, so warp reduce can work + block.x = (to_seq_len + 31) / 32 * 32; + grid.x = high_dim_size / row_per_thread_block; + // Because there are many function templates, the compilation speed may be + // slow. 
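+  // RUN_KERNEL expands to a switch on block.x so that cub_softmax_kernel_k is
+  // instantiated with a compile-time BlockDim equal to the runtime block size,
+  // and with K = 1 or 2 rows of attention scores per thread block. Threads with
+  // threadIdx.x >= to_seq_len contribute -1e20f to the max reduction and 0.0f
+  // to the sum, so the padding does not change the softmax result.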
+ RUN_KERNEL(qk_buf, attr_mask, batch_size, head_num, from_seq_len, to_seq_len, + scale, is_2D); +} +#undef RUN_KERNEL +#undef SOFTMAX_KERNEL_CASE +} // namespace kernels +} // namespace layers +} // namespace turbo_transformers diff --git a/cuda_code/gpu_softmax_kernel_3.cu b/cuda_code/gpu_softmax_kernel_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..3bb078cf1e8e08a24c136516df5e23e94d1e0a1b --- /dev/null +++ b/cuda_code/gpu_softmax_kernel_3.cu @@ -0,0 +1,207 @@ +// Copyright (C) 2020 THL A29 Limited, a Tencent company. +// All rights reserved. +// Licensed under the BSD 3-Clause License (the "License"); you may +// not use this file except in compliance with the License. You may +// obtain a copy of the License at +// https://opensource.org/licenses/BSD-3-Clause +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" basis, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. +// See the AUTHORS file for names of contributors. + +#include + +#include +#include + +#include "turbo_transformers/layers/kernels/gpu_softmax_kernel.h" + +namespace turbo_transformers { +namespace layers { +namespace kernels { + +namespace { +template +struct Array { + __device__ __forceinline__ Array() {} + __device__ __forceinline__ Array(T* inputs) { + for (int i = 0; i < Len; ++i) { + data[i] = inputs[i]; + } + } + T data[Len]; +}; + +template +struct ArrayAddFunc { + __device__ __forceinline__ Array operator()(const Array& p1, + const Array& p2) { + Array result; + for (int i = 0; i < Len; ++i) { + result.data[i] = p1.data[i] + p2.data[i]; + } + return result; + } +}; + +template +struct ArrayMaxFunc { + __device__ __forceinline__ Array operator()(const Array& p1, + const Array& p2) { + Array result; + for (int i = 0; i < Len; ++i) { + result.data[i] = p1.data[i] > p2.data[i] ? p1.data[i] : p2.data[i]; + } + return result; + } +}; + +template +__global__ void cub_softmax_kernel_k(float* qk_buf_, const float* attr_mask, + const int batch_size, const int head_num, + const int from_seq_len, + const int to_seq_len, const float scaler, + bool is_2D) { + __shared__ typename cub::BlockReduce, BlockDim>::TempStorage + temp_storage; + __shared__ float s_sum[K], s_max[K]; + float tmp[K]; + int qk_offset = blockIdx.x * K * to_seq_len; + + if (threadIdx.x < to_seq_len) { + float mask_val = 0.; + for (int i = 0; i < K; ++i) { + float qk = qk_buf_[threadIdx.x + qk_offset + to_seq_len * i]; + if (attr_mask != nullptr) { + int batch_id = (blockIdx.x * K + i) / (head_num * from_seq_len); + int from_seq_id = (blockIdx.x * K + i) % from_seq_len; + mask_val = attr_mask[threadIdx.x + + (is_2D ? (batch_id * to_seq_len) + : (batch_id * from_seq_len + from_seq_id) * + to_seq_len)]; + } else { + mask_val = 0.0f; + } + // mask_val = (1.0f - mask_val) * -10000.0f; + tmp[i] = qk * scaler + mask_val; + } + } else { + for (int i = 0; i < K; ++i) { + tmp[i] = -1e20f; + } + } + + Array max_val = + cub::BlockReduce, BlockDim>(temp_storage) + .Reduce(Array(tmp), ArrayMaxFunc()); + + if (threadIdx.x == 0) { + for (int i = 0; i < K; ++i) { + s_max[i] = max_val.data[i]; + } + } + __syncthreads(); + + float qk_tmp[K]; + for (int i = 0; i < K; ++i) { + qk_tmp[i] = threadIdx.x < to_seq_len ? 
__expf((tmp[i] - s_max[i])) : 0.0f; + } + + Array sum_val = + cub::BlockReduce, BlockDim>(temp_storage) + .Reduce(Array(qk_tmp), ArrayAddFunc()); + + if (threadIdx.x == 0) { + for (int i = 0; i < K; ++i) { + s_sum[i] = sum_val.data[i] + 1e-6f; + } + } + __syncthreads(); + + if (threadIdx.x < to_seq_len) { + for (int i = 0; i < K; ++i) { + qk_buf_[threadIdx.x + qk_offset + to_seq_len * i] = + (qk_tmp[i] / s_sum[i]); + } + } +} +} // namespace + +#define SOFTMAX_KERNEL_CASE(BlockDim, ...) \ + case (BlockDim): \ + if (row_per_thread_block == RowsPerThreadBlock) { \ + cub_softmax_kernel_k \ + <<>>(__VA_ARGS__); \ + } else { \ + cub_softmax_kernel_k \ + <<>>(__VA_ARGS__); \ + } \ + break + +#define RUN_KERNEL(...) \ + do { \ + switch (block.x) { \ + SOFTMAX_KERNEL_CASE(32, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(64, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(96, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(128, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(160, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(192, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(224, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(256, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(288, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(320, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(352, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(384, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(416, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(448, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(480, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(512, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(544, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(576, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(608, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(640, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(672, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(704, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(736, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(768, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(800, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(832, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(864, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(896, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(928, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(960, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(992, __VA_ARGS__); \ + SOFTMAX_KERNEL_CASE(1024, __VA_ARGS__); \ + default: \ + throw std::runtime_error("The block.x should be 32x."); \ + } \ + } while (0) + +template <> +void GPUSoftmaxMask(float* qk_buf, const float* attr_mask, int64_t batch_size, + int64_t head_num, int64_t from_seq_len, int64_t to_seq_len, + float scale, bool is_2D, cudaStream_t stream) { + dim3 block, grid; + int high_dim_size = batch_size * head_num * from_seq_len; + const int OneRowPerThreadBlock = 1; + const int RowsPerThreadBlock = 2; + int row_per_thread_block = OneRowPerThreadBlock; + if ((head_num * from_seq_len) % RowsPerThreadBlock == 0) { + row_per_thread_block = RowsPerThreadBlock; + } + // block size must be 32x, so warp reduce can work + block.x = (to_seq_len + 31) / 32 * 32; + grid.x = high_dim_size / row_per_thread_block; + // Because there are many function templates, the compilation speed may be + // slow. 
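+  // Rough sketch of what the dispatch below resolves to for a 100-token
+  // sequence (the kernel's exact template parameter list is elided in this
+  // diff, so the order shown here is an assumption):
+  //
+  //   block.x = 128;   // 100 rounded up to a warp multiple
+  //   grid.x  = batch_size * head_num * from_seq_len / row_per_thread_block;
+  //   cub_softmax_kernel_k<128, 2><<<grid, block, 0, stream>>>(
+  //       qk_buf, attr_mask, batch_size, head_num, from_seq_len, to_seq_len,
+  //       scale, is_2D);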
+ RUN_KERNEL(qk_buf, attr_mask, batch_size, head_num, from_seq_len, to_seq_len, + scale, is_2D); +} +#undef RUN_KERNEL +#undef SOFTMAX_KERNEL_CASE +} // namespace kernels +} // namespace layers +} // namespace turbo_transformers diff --git a/cuda_code/gpu_time_onlyfft.cu b/cuda_code/gpu_time_onlyfft.cu new file mode 100644 index 0000000000000000000000000000000000000000..6f8c7aa3476d9eca09f5750a1273af1df0e1778c --- /dev/null +++ b/cuda_code/gpu_time_onlyfft.cu @@ -0,0 +1,194 @@ +/* ENEL428 Distributed Computing Assignment 2012 + * Author: Campbell Sinclair + * Email: cls76@uclive.ac.nz + * Date: 4 October 2012 + * + * Uses Nvidia Cuda API to perform FFTs in parallel using the GPU. + * Outputs the time taken for FFTs (varying the array length) to a text file. + * + * Compile using: + * nvcc gpu_time_onlyfft.cu -arch=sm_61 -lcufft -o gpu_time_onlyfft + * Run using: + * ./gpu_time + * + * Based on: + * Dave van Leeuwen's notes. + */ + +#include +#include +#include +#include +#include + +#define MAX_THREADS_PER_BLOCK 1024 +#define PI 3.14159265359 + +// First kernel to run on device. +// This calculates the magnitude of the real&imaginary components, then overwrites the result into the x component memory (y memory no longer needed). +__global__ void CalcMagnitude(cuDoubleComplex *data) { + int myIndex = threadIdx.x + MAX_THREADS_PER_BLOCK*blockIdx.x; + data[myIndex].x = sqrt(data[myIndex].x * data[myIndex].x + data[myIndex].y * data[myIndex].y); +} + +// Second kernel to run on device. +// Single thread, performed sequentially. +// Finds the maximum magnitude (out of x array), and stores result in y[0]. +__global__ void FindMaximum(cuDoubleComplex *data, long signalLength) { + // use data[0].y as storage for maximum value + data[0].y = 0; + + int i; + for (i = 0; i < signalLength; i++) { + if (data[i].x > data[0].y) { + data[0].y = data[i].x; + } + } +} + +// Third kernel to run on device. +// Normalise data by dividing values by maximum value. +__global__ void Normalise(cuDoubleComplex *data) { + int myIndex = threadIdx.x + MAX_THREADS_PER_BLOCK*blockIdx.x; + data[myIndex].x = data[myIndex].x / data[0].y; +} + +// Function called from main to do an FFT on a signal of length "signalLength". +// Returns time taken in microseconds +// (1) Generate set of data to be transformed +// (2) Copy data from host to GPU +// (3) Do an FFT on the data +// (4) Normalise data +// (5) Copy data from GPU to host +long transform (long signalLength) { + cuDoubleComplex *d_data, *h_data; + cufftHandle plan; + struct timeval start, end; + + // Multiple blocks and threads for parallel computation + dim3 blocksParallel(ceil(((double)signalLength)/((double)MAX_THREADS_PER_BLOCK)), 1, 1); + dim3 threadsParallel(MAX_THREADS_PER_BLOCK, 1, 1); + // Single block and thread for sequential computation + dim3 blocksSequential(1, 1, 1); + dim3 threadsSequential(1, 1, 1); + + // Allocate host side matrix + h_data = (cuDoubleComplex *) malloc(sizeof(cuDoubleComplex) * signalLength); + // Allocate GPU side matrix + cudaMalloc((void**) &d_data, sizeof(cuDoubleComplex) * signalLength); + + // (1) Generate set of data: + // 3 sinusoids at 25, 50, 75 Hz + random noise. 
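+	// The loop below samples x[n] = cos(2*PI*25*n*T) + cos(2*PI*50*n*T)
+	// + cos(2*PI*75*n*T) + u[n], with T = 1/200 s and u[n] uniform noise in
+	// roughly [-0.5, 0.5); all three tones sit below the 100 Hz Nyquist limit,
+	// so they appear as distinct peaks in the normalised FFT magnitude.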
+ double samplingFrequency = 200.0; + double samplingPeriod = 1.0/samplingFrequency; + double signalFrequency = 25.0; + double signalFrequency2 = 50.0; + double signalFrequency3 = 75.0; + double signalTime = 0.0; + int i; + for (i = 0; i < signalLength; i++) { + h_data[i].x = cos(2.0 * PI * signalFrequency * signalTime) + + cos(2.0 * PI * signalFrequency2 * signalTime) + + cos(2.0 * PI * signalFrequency3 * signalTime) + + (((double) (rand() % 2000000) - 1000000.0) / 2000000.0); + h_data[i].y = 0; + + signalTime += samplingPeriod; + } + + // Plan for running the FFT + cufftPlan3d(&plan, signalLength, 1, 1, CUFFT_Z2Z); + + // (2) Copy data from host to GPU + cudaMemcpy(d_data, h_data, sizeof(cuDoubleComplex)*signalLength, cudaMemcpyHostToDevice); + + // ---- START PERFORMANCE COMPARISON ---- + gettimeofday(&start, NULL); + + // (3) Do an FFT on the data + cufftExecZ2Z(plan, d_data, d_data, CUFFT_FORWARD); + // Wait for all threads to finish + cudaDeviceSynchronize(); + + // ---- END PERFORMANCE COMPARISON ---- + gettimeofday(&end, NULL); + + // (4a) Normalise data: calculate magnitude + CalcMagnitude<<>>(d_data); + // Wait for all threads to finish + cudaDeviceSynchronize(); + // (4b) Normalise data: find maximum + FindMaximum<<>>(d_data, signalLength); + // Wait for all threads to finish (only one thread but can't hurt) + cudaDeviceSynchronize(); + // (4c) Normalise data: divide all by maximum + Normalise<<>>(d_data); + // Wait for all threads to finish + cudaDeviceSynchronize(); + + // (5) Copy data from GPU to host + cudaMemcpy(h_data, d_data, sizeof(cuDoubleComplex) * signalLength, cudaMemcpyDeviceToHost); + + // Clean up memory no longer needed + cufftDestroy(plan); + cudaFree(d_data); + free(h_data); + + return ((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec)); +} + +// Calculate the average for a set of data +// Input the set of data "data[]", and length of this data "dataLength" +// Outputs the average +double average(long data[], long dataLength) { + double sum = 0; + int i; + for (i = 0; i < dataLength; i++) { + //printf("iteration i - %li\n", data[i]); + sum += data[i]; + } + + return (sum / dataLength); +} + + +// ----------------- +// Main program loop +// ----------------- +int main(int argc, char** argv) { + FILE *fp = fopen("GpuTimeData_onlyfft.txt", "w"); + long numIterations_small = 50; + long numIterations_large = 50; + long sig, signalLength, iteration, iterationResult[numIterations_large]; + + // Perform FFT for a range of signal lengths + for (sig = 3; sig <= 11; sig++) { + + signalLength=pow(2,sig); + printf("%li\n", signalLength); + // Do more iterations for tests that take less time + if (signalLength <= 64) { + for (iteration = 0; iteration < numIterations_large; iteration++) { + // Perform transform, and store result in array element + iterationResult[iteration] = transform(signalLength); + } + // Average the results and output to a text file + fprintf(fp, "%li\t%f\n", signalLength, average(iterationResult, numIterations_large)); + } + // Do less iterations for tests that take longer + else { + for (iteration = 0; iteration < numIterations_small; iteration++) { + // Perform transform, and store result in array element + iterationResult[iteration] = transform(signalLength); + } + // Average the results and output to a text file + fprintf(fp, "%li\t%f\n", signalLength, average(iterationResult, numIterations_small)); + } + + + } + + fclose(fp); + + return 0; +} diff --git a/cuda_code/gpulib2_3.cu b/cuda_code/gpulib2_3.cu new file mode 100644 index 
0000000000000000000000000000000000000000..fbb54b95cbbfd31384f31e72ec1ef2aeb31c8505 --- /dev/null +++ b/cuda_code/gpulib2_3.cu @@ -0,0 +1,850 @@ +/*--------------------------------------------------------------------*/ +/* CUDA Library for GPU Tutorial */ +/* written by Viktor K. Decyk, UCLA */ + +#include +#include +#include "cuda.h" + +int nblock_size = 64; +int ngrid_size = 1; +int maxgsx = 65535; +int mmcc = 0; +static int devid; + +static cudaError_t crc; + +__global__ void emptyKernel() {} + +/*--------------------------------------------------------------------*/ +extern "C" void setgbsize(int nblock) { +/* set blocksize */ + nblock_size = nblock; + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" int getmmcc() { +/* get major and minor computer capability */ + return mmcc; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_fallocate(float **g_f, int nsize, int *irc) { +/* allocate global float memory on GPU, return pointer to C */ + void *gptr; + crc = cudaMalloc(&gptr,sizeof(float)*nsize); + if (crc) { + printf("cudaMalloc float Error=%d:%s,l=%d\n",crc, + cudaGetErrorString(crc),nsize); + *irc = 1; + } + *g_f = (float *)gptr; + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_iallocate(int **g_i, int nsize, int *irc) { +/* allocate global integer memory on GPU, return pointer to C */ + void *gptr; + crc = cudaMalloc(&gptr,sizeof(int)*nsize); + if (crc) { + printf("cudaMalloc int Error=%d:%s,l=%d\n",crc, + cudaGetErrorString(crc),nsize); + *irc = 1; + } + *g_i = (int *)gptr; + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_deallocate(void *g_d, int *irc) { +/* deallocate global memory on GPU */ + crc = cudaFree(g_d); + if (crc) { + printf("cudaFree Error=%d:%s\n",crc,cudaGetErrorString(crc)); + *irc = 1; + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_fcopyin(float *f, float *g_f, int nsize) { +/* copy float array from host memory to global GPU memory */ + crc = cudaMemcpy((void *)g_f,f,sizeof(float)*nsize, + cudaMemcpyHostToDevice); + if (crc) { + printf("cudaMemcpyHostToDevice float Error=%d:%s\n",crc, + cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_fcopyout(float *f, float *g_f, int nsize) { +/* copy float array from global GPU memory to host memory */ + crc = cudaMemcpy(f,(void *)g_f,sizeof(float)*nsize, + cudaMemcpyDeviceToHost); + if (crc) { + printf("cudaMemcpyDeviceToHost float Error=%d:%s\n",crc, + cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void emptykernel() { + int ngx, ngy; + ngx = nblock_size < 32768 ? 
nblock_size : 32768; + ngy = (ngrid_size - 1)/ngx + 1; + dim3 dimBlock(nblock_size,1); + dim3 dimGrid(ngx,ngy); + crc = cudaGetLastError(); + emptyKernel<<>>(); + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("emptyKernel error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void init_cu(int dev, int *irc) { +/* initialize CUDA with device dev or selects best GPU available */ +/* searches throughs devices, selects the device with the most compute */ +/* units, and saves the device id devid */ +/* if dev is a valid device, it is used, otherwise the GPU with the */ +/* most multi-processors is selected */ +/* error code is modified only if there is an error */ + int maxcpus = 0, jm = -1; + int j, ndevs, maxunits; + unsigned long msize; + double z; + struct cudaDeviceProp prop; +/* returns number of device */ + crc = cudaGetDeviceCount(&ndevs); + if (crc) { + printf("cudaGetDeviceCount Error=%i:%s\n",crc, + cudaGetErrorString(crc)); + *irc = 1; + return; + } +/* get information about devices */ + for (j = 0; j < ndevs; j++) { + crc = cudaGetDeviceProperties(&prop,j); + if (crc) { + printf("cudaGetDeviceProperties Error=%i:%s\n",crc, + cudaGetErrorString(crc)); + prop.name[0] = 0; + } + maxunits = prop.multiProcessorCount; + if (dev <= 0) { + printf("j=%i:CUDA_DEVICE_NAME=%s,CUDA_MULTIPROCESSOR_COUNT=%i\n", + j,prop.name,maxunits); + msize = prop.totalGlobalMem; + z = ((double) msize)/1073741824.0; + mmcc = 10*prop.major + prop.minor; + printf(" CUDA_GLOBAL_MEM_SIZE=%lu(%f GB),Capability=%d\n", + msize,(float) z,mmcc); + printf(" Capability=%d\n",mmcc); + if (maxunits > maxcpus) { + maxcpus = maxunits; + jm = j; + } + } + } + devid = jm; + if (dev >= 0) + devid = dev % ndevs; + printf("using device j=%i\n",devid); +/* get properties for this device */ + crc = cudaGetDeviceProperties(&prop,devid); + maxgsx = prop.maxGridSize[0]; + mmcc = 10*prop.major + prop.minor; +/* set device */ + crc = cudaSetDevice(devid); + if (crc) { + printf("cudaSetDevice Error=%i:%s\n",crc, + cudaGetErrorString(crc)); + *irc = 1; + return; + } +/* run empty kernel */ + emptykernel(); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void end_cu() { +/* terminate CUDA */ + crc = cudaThreadExit(); + if (crc) { + printf("cudaThreadExit Error=%d:%s\n",crc,cudaGetErrorString(crc)); + } + return; +} + +/*--------------------------------------------------------------------*/ +__global__ void gcopy1(float a[], float b[], int nx) { +/* 1d copy a = b */ +/* one block of mx threads copies mx values */ +/* ((nx-1)/mx+1) independent blocks */ +/* nx = size of arrays in x */ +/* local data */ + int j, js, id, mx; + mx = blockDim.x; + j = threadIdx.x; + id = blockIdx.x; + + js = j + mx*id; + if (js < nx) a[js] = b[js]; + + return; +} + +/*--------------------------------------------------------------------*/ +__global__ void gcopy2a(float a[], float b[], int nx, int ny) { +/* 2d copy a = b */ +/* one block of mx threads copies mx values */ +/* nbx*ny independent blocks */ +/* local data */ + int j, k, js, id, mx; + mx = blockDim.x; + j = threadIdx.x; + id = blockIdx.x; + k = blockIdx.y; + + js = j + mx*id; + if ((js < nx) && (k < ny)) { + a[js+nx*k] = b[js+nx*k]; + } + + return; +} + +/*--------------------------------------------------------------------*/ +__global__ void gcopy2b(float a[], float b[], int nx, int ny) { +/* 2d copy a = b */ +/* one 
block of mx threads copies nx values */ +/* ny independent blocks */ +/* local data */ + int j, k, mx; + mx = blockDim.x; + k = blockIdx.x; + + j = threadIdx.x; + while (j < nx) { + if (k < ny) + a[j+nx*k] = b[j+nx*k]; + j += mx; + } + + return; +} + +/*--------------------------------------------------------------------*/ +__global__ void gsaxpy2(float a[], float b[], float s, int nx, int ny) { +/* 2d vector multiplye a = s*b + a */ +/* one block of mx threads copies nx values */ +/* ny independent blocks */ +/* local data */ + int j, k, mx; + mx = blockDim.x; + k = blockIdx.x; + + j = threadIdx.x; + while (j < nx) { + if (k < ny) + a[j+nx*k] = s*b[j+nx*k] + a[j+nx*k]; + j += mx; + } + + return; +} + +/*--------------------------------------------------------------------*/ +__global__ void gcopy3(float a[], float b[], int nx, int ny) { +/* 2d copy a = b */ +/* one block of mx*my threads copies mx*my values */ +/* ((nx-1)/mx+1)*((ny-1)/my+1) independent blocks */ +/* local data */ + int j, k, js, ks, idx, idy, mx, my; + mx = blockDim.x; my = blockDim.y; + j = threadIdx.x; k = threadIdx.y; + idx = blockIdx.x; idy = blockIdx.y; + + ks = k + my*idy; + js = j + mx*idx; + if ((js < nx) && (ks < ny)) + a[js+nx*ks] = b[js+nx*ks]; + + return; +} + +/*--------------------------------------------------------------------*/ +__global__ void gtranspose2(float a[], float b[], int nx, int ny) { +/* a = transpose(b) */ +/* one block of mx*mx threads transposes mx*mx values */ +/* ((nx-1)/mx+1)*((ny-1)/mx+1) independent blocks */ +/* local data */ + int j, k, js, ks, idx, idy, joff, koff, mx, mxv; + extern __shared__ float s[]; + mx = blockDim.x; mxv = mx + 1; + j = threadIdx.x; k = threadIdx.y; + idx = blockIdx.x; idy = blockIdx.y; + koff = mx*idy; + joff = mx*idx; + + ks = k + koff; + js = j + joff; + if ((js < nx) && (ks < ny)) + s[j+mxv*k] = b[js+nx*ks]; +/* synchronize threads */ + __syncthreads(); + js = k + joff; + ks = j + koff; + if ((js < nx) && (ks < ny)) + a[ks+ny*js] = s[k+mxv*j]; + + return; +} + +/*--------------------------------------------------------------------*/ +__global__ void gsum1(float a[], float *sa, int nx) { +/* 1d serial sum reductions, each of length mx */ +/* sa = sum(a) */ +/* local data */ + int j, js, jb, mx, joff, mxm; + float t; + extern __shared__ float s[]; + mx = blockDim.x; + js = threadIdx.x; + jb = blockIdx.x; + joff = mx*jb; + + j = js + joff; +/* copy global data to shared memory */ + if (j < nx) s[js] = a[j]; +/* synchronize to make sure each thread in block has the data */ + __syncthreads(); + if (js==0) { + mxm = nx - joff; + if (mxm > mx) mxm = mx; +/* perform serial local sum reduction: result in t */ + t = 0.0f; + for (j = 0; j < mxm; j++) { + t += s[j]; + } +/* accumulate results to global memory for each block */ +/* for devices with compute capability 2.x */ + atomicAdd(&sa[0],t); + } + + return; +} + +/*--------------------------------------------------------------------*/ +__device__ void lsum2(float *sdata, int n) { +/* finds local sum of nths data items shared by threads */ +/* using binary tree method. input is modified. 
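   at each step the lower half of the active threads adds in the value held by
   its partner at offset k, k is then halved, and after log2(blockDim.x)
   iterations the block total is left in sdata[0].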
*/ +/* local data */ + int l, k; + float s; + l = threadIdx.x; + k = blockDim.x >> 1; + s = 0.0f; + + if (l < n) s = sdata[l]; + while (k > 0) { + if (l < k) { + if ((l+k) < n) { + s += sdata[l+k]; + sdata[l] = s; + } + } + __syncthreads(); + k >>= 1; + } + + return; +} + +/*--------------------------------------------------------------------*/ +__global__ void gsum2(float a[], float d[], int nx) { +/* segmented 1d sum reductions, each of length mx */ +/* forall (j = 1:nbx); d(j) = sum(a(1+mx*(j-1):min(nx,mx*j))) */ +/* parallel summation */ +/* local data */ + int j, js, jb, mx, joff, mxm; + extern __shared__ float s[]; + mx = blockDim.x; + js = threadIdx.x; + jb = blockIdx.x; + joff = mx*jb; + + j = js + joff; +/* copy global data to shared memory */ + if (j < nx) s[js] = a[j]; +/* synchronize to make sure each thread in block has the data */ + __syncthreads(); + mxm = nx - joff; + if (mxm > mx) mxm = mx; +/* perform parallel local sum reduction: result in s[0] */ + lsum2(s,mxm); +/* write out result to global memory for each block */ + if (js==0) d[jb] = s[0]; + + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_copy1(float *a, float *b, int mx, int nx) { +/* 2d copy of length nx, with block size mx */ +/* one block of mx threads copies mx values */ +/* ((nx-1)/mx+1) independent blocks */ +/* local data */ + int nbx; + nbx = (nx - 1)/mx + 1; + dim3 dimBlock(mx); + dim3 dimGrid(nbx); + crc = cudaGetLastError(); + + gcopy1<<>>(a,b,nx); + + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gcopy1 error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_copy2a(float *a, float *b, int mx, int nx, int ny) { +/* 2d copy a = b */ +/* one block of mx threads copies mx values */ +/* nbx*ny independent blocks */ +/* local data */ + int nbx; + nbx = (nx - 1)/mx + 1; + dim3 dimBlock(mx); + dim3 dimGrid(nbx,ny); + crc = cudaGetLastError(); + + gcopy2a<<>>(a,b,nx,ny); + + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gcopy2a error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_copy2b(float *a, float *b, int mx, int nx, int ny) { +/* 2d copy a = b */ +/* one block of mx threads copies nx values */ +/* ny independent blocks */ +/* local data */ + dim3 dimBlock(mx); + dim3 dimGrid(ny); + crc = cudaGetLastError(); + + gcopy2b<<>>(a,b,nx,ny); + + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gcopy2b error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_saxpy2(float *a, float *b, float s, int mx, int nx, + int ny) { +/* 2d vector multiply a = s*b + a */ +/* one block of mx threads copies nx values */ +/* ny independent blocks */ +/* local data */ + dim3 dimBlock(mx); + dim3 dimGrid(ny); + crc = cudaGetLastError(); + + gsaxpy2<<>>(a,b,s,nx,ny); + + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gsaxpy2 error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_copy3(float *a, float *b, int mx, int my, int nx, + int ny) { +/* 2d copy a = b */ +/* one block of mx*my threads copies mx*my 
values */ +/* ((nx-1)/mx+1)*((ny-1)/my+1) independent blocks */ +/* local data */ + int nbx, nby; + nbx = (nx - 1)/mx + 1; nby = (ny - 1)/my + 1; + dim3 dimBlock(mx,my); + dim3 dimGrid(nbx,nby); + crc = cudaGetLastError(); + + gcopy3<<>>(a,b,nx,ny); + + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gcopy3 error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_transpose2(float *a, float *b, int mx, int nx, + int ny) { +/* 2d transpose of length nx, ny, with block size mx, mx */ +/* one block of mx*mx threads transposes mx*mx values */ +/* ((nx-1)/mx+1)*((ny-1)/mx+1) independent blocks */ +/* local data */ + int nbx, nby, ns; + nbx = (nx - 1)/mx + 1; nby = (ny - 1)/mx + 1; + dim3 dimBlock(mx,mx); + dim3 dimGrid(nbx,nby); +/* calculate size of shared memory */ + ns = (mx + 1)*mx*sizeof(float); + crc = cudaGetLastError(); + + gtranspose2<<>>(a,b,nx,ny); + + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gtranspose2 error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_sum1(float *a, float *sa, int mx, int nx) { +/* 1d serial sum reductions, each of length mx */ +/* one block of mx threads sums mx values */ +/* ((nx-1)/mx+1) independent blocks */ +/* local data */ + int nbx, ns; + float t; + nbx = (nx - 1)/mx + 1; + dim3 dimBlock(mx); + dim3 dimGrid(nbx); + t = 0.0f; + gpu_fcopyin(&t,sa,1); +/* calculate size of shared memory */ + ns = mx*sizeof(float); + crc = cudaGetLastError(); + + gsum1<<>>(a,sa,nx); + + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gsum1 error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_sum2(float *a, float *d, int mx, int nx) { +/* segmented 1d parallel sum reductions, each of length mx */ +/* one block of mx threads sums mx values */ +/* ((nx-1)/mx+1) independent blocks */ +/* local data */ + int nbx, ns; + nbx = (nx - 1)/mx + 1; + dim3 dimBlock(mx); + dim3 dimGrid(nbx); +/* calculate size of shared memory */ + ns = mx*sizeof(float); + crc = cudaGetLastError(); + + gsum2<<>>(a,d,nx); + + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gsum2 error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_sum3(float *a, float *d, float *sa, int mx, + int nx) { +/* segmented 1d parallel sum reductions */ +/* one block of mx threads sums mx values */ +/* ((nx-1)/mx+1) independent blocks */ +/* local data */ + int nxs, nbx, n, ns; + nxs = nx; + nbx = (nxs - 1)/mx + 1; + dim3 dimBlock(mx); + dim3 dimGrid(nbx); +/* calculate size of shared memory */ + ns = mx*sizeof(float); + crc = cudaGetLastError(); + gsum2<<>>(a,d,nxs); +/* cudaThreadSynchronize(); */ + crc = cudaGetLastError(); + if (crc) { + printf("gsum2:0 error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } +/* write out result */ + if (nbx==1) { + dimGrid.x = 1; + crc = cudaGetLastError(); + gcopy1<<>>(sa,d,1); + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gcopy1:0 error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; + } +/* reduce further if necessary */ + if (nbx > 1) { + nxs = nbx; + nbx = (nxs - 
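+/* At this point d already holds nxs block-level partial sums from the
+   first gsum2 pass.  Each further pass reduces the previous pass's
+   output by a factor of up to mx, writing into sa and advancing the
+   running offset n, so sa acts as a scratch area of concatenated
+   partial-sum arrays; once a single partial remains, the final total
+   ends up in sa[0] (copied there by gcopy1 when the last pass left it
+   elsewhere in the scratch area). */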
1)/mx + 1; + dimGrid.x = nbx; + crc = cudaGetLastError(); + gsum2<<>>(d,sa,nxs); + if (nbx==1) + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gsum2:1 error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + n = 0; + } + if (nbx==1) + return; +/* iterate if necessary */ + while (nbx > 1) { + n += nbx; + nxs = nbx; + nbx = (nxs - 1)/mx + 1; + dimGrid.x = nbx; + crc = cudaGetLastError(); + gsum2<<>>(&sa[n-nxs],&sa[n],nxs); +/* cudaThreadSynchronize(); */ + crc = cudaGetLastError(); + if (crc) { + printf("gsum2:n error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + } +/* write out result */ + dimGrid.x = 1; + crc = cudaGetLastError(); + gcopy1<<>>(sa,&sa[n],1); + cudaThreadSynchronize(); + crc = cudaGetLastError(); + if (crc) { + printf("gcopy1:n error=%d:%s\n",crc,cudaGetErrorString(crc)); + exit(1); + } + return; +} + + +/* Interfaces to Fortran */ + +/*--------------------------------------------------------------------*/ +extern "C" void setgbsize_(int *nblock) { + setgbsize(*nblock); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" int getmmcc_() { +/* get major and minor computer capability */ + return getmmcc(); +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_fallocate_(unsigned long *gp_f, int *nsize, + int *irc) { +/* allocate global float memory on GPU, return pointer to Fortran */ + float *fptr; + gpu_fallocate(&fptr,*nsize,irc); + *gp_f = (long )fptr; + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_iallocate_(unsigned long *gp_i, int *nsize, + int *irc) { +/* allocate global integer memory on GPU, return pointer to Fortran */ + int *iptr; + gpu_iallocate(&iptr,*nsize,irc); + *gp_i = (long )iptr; + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_deallocate_(unsigned long *gp_d, int *irc) { +/* deallocate global memory on GPU, return pointer to Fortran */ + void *d; + d = (void *)*gp_d; + gpu_deallocate(d,irc); + *gp_d = 0; + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_fcopyin_(float *f, unsigned long *gp_f, + int *nsize) { +/* copy float array from main memory to global GPU memory */ + float *g_f; + g_f = (float *)*gp_f; + gpu_fcopyin(f,g_f,*nsize); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_fcopyout_(float *f, unsigned long *gp_f, + int *nsize) { +/* copy float array from global GPU memory to main memory */ + float *g_f; + g_f = (float *)*gp_f; + gpu_fcopyout(f,g_f,*nsize); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void emptykernel_() { + emptykernel(); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void init_cu_(int *dev, int *irc) { + init_cu(*dev,irc); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void end_cu_() { + end_cu(); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_copy1_(unsigned long *gp_a, unsigned long *gp_b, + int *mx, int *nx) { + float *a, *b; + a = (float *)*gp_a; + b = (float *)*gp_b; + gpu_copy1(a,b,*mx,*nx); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_copy2a_(unsigned long 
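+/* As in the wrappers above, the trailing-underscore names follow the
+   Fortran external naming convention, every argument arrives by
+   reference, and device arrays travel to and from Fortran as plain
+   integer handles (unsigned long) that are cast back to typed device
+   pointers before calling the C-level routine. */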
*gp_a, unsigned long *gp_b, + int *mx, int *nx, int *ny) { + float *a, *b; + a = (float *)*gp_a; + b = (float *)*gp_b; + gpu_copy2a(a,b,*mx,*nx,*ny); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_copy2b_(unsigned long *gp_a, unsigned long *gp_b, + int *mx, int *nx, int *ny) { + float *a, *b; + a = (float *)*gp_a; + b = (float *)*gp_b; + gpu_copy2b(a,b,*mx,*nx,*ny); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_saxpy2_(unsigned long *gp_a, unsigned long *gp_b, + float *s, int *mx, int *nx, int *ny) { + float *a, *b; + a = (float *)*gp_a; + b = (float *)*gp_b; + gpu_saxpy2(a,b,*s,*mx,*nx,*ny); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_copy3_(unsigned long *gp_a, unsigned long *gp_b, + int *mx, int *my, int *nx, int *ny) { + float *a, *b; + a = (float *)*gp_a; + b = (float *)*gp_b; + gpu_copy3(a,b,*mx,*my,*nx,*ny); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_transpose2_(unsigned long *gp_a, + unsigned long *gp_b, int *mx, int *nx, + int *ny) { + float *a, *b; + a = (float *)*gp_a; + b = (float *)*gp_b; + gpu_transpose2(a,b,*mx,*nx,*ny); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_sum1_(unsigned long *gp_a, unsigned long *gp_sa, + int *mx, int *nx) { + float *a, *sa; + a = (float *)*gp_a; + sa = (float *)*gp_sa; + gpu_sum1(a,sa,*mx,*nx); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_sum2_(unsigned long *gp_a, unsigned long *gp_d, + int *mx, int *nx) { + float *a, *d; + a = (float *)*gp_a; + d = (float *)*gp_d; + gpu_sum2(a,d,*mx,*nx); + return; +} + +/*--------------------------------------------------------------------*/ +extern "C" void gpu_sum3_(unsigned long *gp_a, unsigned long *gp_d, + unsigned long *gp_sa,int *mx, int *nx) { + float *a, *d, *sa; + a = (float *)*gp_a; + d = (float *)*gp_d; + sa = (float *)*gp_sa; + gpu_sum3(a,d,sa,*mx,*nx); + return; +} diff --git a/cuda_code/gpull_1.cu b/cuda_code/gpull_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..faadc7e64624ee5bc20b738eb7b42590feb6632f --- /dev/null +++ b/cuda_code/gpull_1.cu @@ -0,0 +1,114 @@ +#define GRB_USE_CUDA +#define private public + +#include +#include +#include + +#include +#include + +#include + +#include "graphblas/graphblas.hpp" +#include "test/test.hpp" + +int main( int argc, char** argv ) +{ + std::vector row_indices; + std::vector col_indices; + std::vector values; + graphblas::Index nrows, ncols, nvals; + + // Parse arguments + bool DEBUG = true; + + // Read in sparse matrix + if (argc < 2) { + fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]); + exit(1); + } else { + readMtx(argv[argc-1], &row_indices, &col_indices, &values, &nrows, &ncols, + &nvals, 0, DEBUG); + } + + // Matrix A + graphblas::Matrix a(nrows, ncols); + a.build( &row_indices, &col_indices, &values, nvals, GrB_NULL ); + a.nrows( &nrows ); + a.ncols( &ncols ); + a.nvals( &nvals ); + if( DEBUG ) a.print(); + + // Vector x + graphblas::Vector x(nrows); + CHECK( x.fill( 0.f ) ); + CHECK( x.setElement(1.f, 0) ); + CHECK( x.size( &nrows ) ); + if( DEBUG ) x.print(); + + // Vector y + graphblas::Vector y(nrows); + if( DEBUG ) y.print(); + + // Mask + graphblas::Vector m(nrows); + CHECK( m.fill(-1.f) ); + CHECK( m.setElement(0.f, 0) 
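+  /* The mask vector m is dense: -1 everywhere except element 0, which is
+     set to 0, and the descriptor below flips it with GrB_SCMP, so the
+     masked vxm writes only where the complemented mask allows.  The timed
+     section then repeats the masked y = x * A product on the
+     plus-multiplies semiring NUM_ITER times and reports the average
+     per-iteration time in milliseconds. */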
); + CHECK( m.size(&nrows) ); + if( DEBUG ) CHECK( m.print() ); + + // Descriptor + graphblas::Descriptor desc; + CHECK( desc.set(graphblas::GrB_MASK, graphblas::GrB_SCMP) ); + + // Warmup + CpuTimer warmup; + warmup.Start(); + graphblas::vxm( &y, &m, GrB_NULL, + graphblas::PlusMultipliesSemiring(), &x, &a, &desc ); + //graphblas::vxm( &y, GrB_NULL, GrB_NULL, &GrB_FP32AddMul, + // &x, &a, &desc ); + warmup.Stop(); + + CpuTimer cpu_vxm; + //cudaProfilerStart(); + cpu_vxm.Start(); + int NUM_ITER = 10; + for( int i=0; i( &y, &m, GrB_NULL, + graphblas::PlusMultipliesSemiring(), &x, &a, &desc ); + //graphblas::vxm( &y, GrB_NULL, GrB_NULL, + // &GrB_FP32AddMul, &x, &a, &desc ); + } + //cudaProfilerStop(); + cpu_vxm.Stop(); + + float flop = 0; + if( DEBUG ) std::cout << "warmup, " << warmup.ElapsedMillis() << ", " << + flop/warmup.ElapsedMillis()/1000000.0 << "\n"; + float elapsed_vxm = cpu_vxm.ElapsedMillis(); + std::cout << "vxm, " << elapsed_vxm/NUM_ITER << "\n"; + + if( DEBUG ) y.print(); + /*c.extractTuples( out_denseVal ); + for( int i=0; i +#include +// #include +#ifdef WITH_CUDA5 +# include +# define CUDA_SAFE_CALL checkCudaErrors +#else +# include +#endif +#include "cuda_pointer.h" +#define NTHREAD 128 + +#define PROFILE +#ifdef PROFILE +#include +static double get_wtime(){ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv.tv_sec + 1.e-6 * tv.tv_usec; +} +#else +static double get_wtime(){ + return 0.0; +} +#endif + +static float2 float2_split(double x){ + const int shift = 20; + float2 ret; + x *= (1< 0.f) phii = float2_accum(phii, pij); + } + phii = float2_regularize(phii); + } + phi[i] = phii; +} + +extern "C" void gpunb_devinit_(); + +void gpupot( + int n, + double m[], + double x[][3], + double pot[]){ + gpunb_devinit_(); + + double t0 = get_wtime(); +#if 0 + float2 *phi_d, *phi_h; + Particle *ptcl_d, *ptcl_h; +#else + cudaPointer phi; + cudaPointer ptcl; +#endif + int ng = NTHREAD * (n/NTHREAD + (n%NTHREAD ? 1 : 0)); + +#if 0 + cudaMalloc ((void **)&phi_d, ng * sizeof(float2)); + cudaMallocHost((void **)&phi_h, ng * sizeof(float2)); + cudaMalloc ((void **)&ptcl_d, ng * sizeof(Particle)); + cudaMallocHost((void **)&ptcl_h, ng * sizeof(Particle)); +#else + phi.allocate(ng); + ptcl.allocate(ng); +#endif + + // std::cout << n << " " << ng << std::endl; + for(int i=0; i>> (n, ptcl_d, phi_d); + pot_kernel <<>> (n, ptcl, phi); + + // cudaMemcpy(phi_h, phi_d, n * sizeof(float2), cudaMemcpyDeviceToHost); + phi.dtoh(n); + for(int i=0; i +#include +#include +#include +#include +#include +#include +#include "rotate.h" +#include "convert.h" +#include +#include + + +#define uint unsigned int + +#define IMUL(a, b) __mul24(a, b) + +#define TEXTON32 1 +#define TEXTON64 2 + +int* computeGoldenIntegrals(int width, int height, int nbins, int* inputImage) { + int* integrals = (int*)malloc(sizeof(int)*width*height*nbins); + memset(integrals, 0, sizeof(int) * width * height * nbins); + for(int bin = 0; bin < nbins; bin++) { + for(int row = 0; row < height; row++) { + for(int col = 0; col < width; col++) { + int integralValue = 0; + if (row == 0) { + if (col == 0) { + integralValue = ((inputImage[0] == bin) ? 1 : 0); + } else { + integralValue = integrals[(col - 1) * nbins + bin] + ((inputImage[col] == bin) ? 1 : 0); + } + } else { + if (col == 0) { + integralValue = integrals[((row - 1) * width) * nbins + bin] + ((inputImage[row * width] == bin) ? 
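+          /* Reference integral-image recurrence, per histogram bin:
+               I(r,c) = I(r-1,c) + I(r,c-1) - I(r-1,c-1) + [pixel(r,c) == bin]
+             with out-of-range terms treated as zero, which is what the
+             row == 0 and col == 0 special cases implement. */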
1 : 0); + } else { + integralValue = integrals[((row - 1) * width + col) * nbins + bin] + integrals[(row * width + col - 1)*nbins + bin] - integrals[((row - 1) * width + col - 1) * nbins + bin] + ((inputImage[row * width + col] == bin) ? 1 : 0); + } + } + integrals[(row * width + col)*nbins + bin] = integralValue; + } + } + } + return integrals; +} + +void checkIntegrals(int width, int height, int nbins, int* goldenIntegrals, int goldenIntegralPitch, int* suspectIntegrals, int suspectIntegralPitch) { + bool error = false; + for(int row = 0; row < height; row++) { + for(int col = 0; col < width; col++) { + for(int bin = 0; bin < nbins; bin++) { + if (goldenIntegrals[(row * width + col) * goldenIntegralPitch + bin] != + suspectIntegrals[(row * width + col) * suspectIntegralPitch + bin]) { + printf("Error at: %d, %d, %d\n", row, col, bin); + error = true; + } + } + } + } + if (!error) { + printf("Integrals check out!\n"); + } +} + +__global__ void integrateBins(int width, int height, int nbins, int* devImage, int binPitch, int* devIntegrals) { + __shared__ int pixels[16]; + const int blockX = blockDim.y * blockIdx.x; + const int threadX = threadIdx.y; + const int bin = threadIdx.x; + const int x = blockX + threadX; + if (x >= width) return; + if (bin > nbins) return; + int* imagePointer = devImage + x; + int* outputPointer = devIntegrals + binPitch * x + bin; + int accumulant = 0; + for(int y = 0; y < height; y++) { + if (bin == 0) { + pixels[threadX] = *imagePointer; + } + __syncthreads(); + if (pixels[threadX] == bin) accumulant++; + *outputPointer = accumulant; + imagePointer += width; + outputPointer += width * binPitch; + } +} + +__global__ void integrateBinsT(int width, int height, int nbins, int binPitch, int* devIntegrals) { + const int blockY = blockDim.y * blockIdx.x; + const int threadY = threadIdx.y; + const int bin = threadIdx.x; + const int y = blockY + threadY; + if (y >= height) return; + if (bin >= binPitch) return; + int* imagePointer = devIntegrals + binPitch * y * width + bin; + int accumulant = 0; + for(int x = 0; x < width; x++) { + accumulant += *imagePointer; + *imagePointer = accumulant; + imagePointer += binPitch; + } +} + +/** + * For a given orientation, computes the integral images for each of the histogram bins + */ +void formIntegralImages(int width, int height, int nbins, int* devImage, + int binPitch, int* devIntegrals) { + int pixelsPerCTA = 4; + dim3 gridDim = dim3((width - 1) / pixelsPerCTA + 1); + dim3 blockDim = dim3(nbins, pixelsPerCTA); + + integrateBins<<>>(width, height, nbins, devImage, binPitch, devIntegrals); + + gridDim = dim3((height - 1)/pixelsPerCTA + 1); + integrateBinsT<<>>(width, height, nbins, binPitch, devIntegrals); +} + + + +float* getImage(uint width, uint height, float* devImage) { + int imageSize = width * height * sizeof(float); + float* result = (float*)malloc(imageSize); + CUDA_SAFE_CALL(cudaMemcpy(result, devImage, imageSize, cudaMemcpyDeviceToHost)); + return result; +} + +int* getImage(uint width, uint height, int* devImage) { + int imageSize = width * height * sizeof(int); + int* result = (int*)malloc(imageSize); + CUDA_SAFE_CALL(cudaMemcpy(result, devImage, imageSize, cudaMemcpyDeviceToHost)); + return result; +} + +int findPitchInInts(int width) { + /* int* test; */ +/* size_t pitch; */ +/* cudaMallocPitch((void**)&test, &pitch, width * sizeof(int), 1); */ +/* cudaFree(test); */ +/* return pitch/sizeof(int); */ + return ((width - 1)/16 + 1) * 16; +} + + +int pixelPitch; +int binPitch; +int border; +int width; +int height; 
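+/* Illustration only: a minimal host-side sketch (not used elsewhere in this
+   file) of how a box-filtered histogram count is read back out of a per-bin
+   integral image.  It assumes the golden layout produced by
+   computeGoldenIntegrals above, i.e. integrals[(row*width + col)*nbins + bin],
+   and inclusive box corners (r1,c1)-(r2,c2). */
+static inline int goldenBinBoxSum(const int* integrals, int width, int nbins,
+                                  int bin, int r1, int c1, int r2, int c2) {
+    /* standard four-corner identity on the integral image */
+    int D = integrals[(r2 * width + c2) * nbins + bin];
+    int B = (r1 > 0) ? integrals[((r1 - 1) * width + c2) * nbins + bin] : 0;
+    int C = (c1 > 0) ? integrals[(r2 * width + (c1 - 1)) * nbins + bin] : 0;
+    int A = (r1 > 0 && c1 > 0)
+                ? integrals[((r1 - 1) * width + (c1 - 1)) * nbins + bin] : 0;
+    return D - B - C + A;   /* number of pixels equal to `bin` in the box */
+}
+/* e.g. the whole-image count for bin b would be
+   goldenBinBoxSum(integrals, width, nbins, b, 0, 0, height - 1, width - 1). */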
+int borderWidth; +int borderHeight; +int* devQuantized; +int* devMirrored; +float* devGradientA; +float* devGradientB; +int* devTurned; +int* devImageT; +int* devIntegralCol; +int* devIntegralColT; +int* devIntegralsT; +int* devIntegrals; +float* devGradients; +uint norients; +uint nscale; + +int initializeGradients(uint widthIn, uint heightIn, uint borderIn, uint maxbins, uint norientsIn, uint nscaleIn, uint textonChoice) { + width = widthIn; + height = heightIn; + border = borderIn; + norients = norientsIn; + nscale = nscaleIn; + borderWidth = width + 2 * border; + borderHeight = height + 2 * border; + + CUDA_SAFE_CALL(cudaMalloc((void**)&devGradients, sizeof(float) * norients * nscale * borderWidth * borderHeight)); + CUDA_SAFE_CALL(cudaMalloc((void**)&devQuantized, sizeof(int) * width * height)); + CUDA_SAFE_CALL(cudaMalloc((void**)&devMirrored, sizeof(int) * borderWidth * borderHeight)); + CUDA_SAFE_CALL(cudaMalloc((void**)&devGradientA, sizeof(float) * width * height)); + CUDA_SAFE_CALL(cudaMalloc((void**)&devGradientB, sizeof(float) * width * height)); + + int maxWidth = borderWidth + borderHeight; + int maxHeight = maxWidth; + int maxBins = 32; + if (textonChoice == TEXTON64) + maxBins = 64; + //pixelPitch = findPitchInInts(maxWidth * maxHeight); + binPitch = findPitchInInts(maxBins); + + + + CUDA_SAFE_CALL(cudaMalloc((void**)&devTurned, sizeof(int) * maxWidth * maxHeight)); + CUDA_SAFE_CALL(cudaMalloc((void**)&devIntegrals, sizeof(int) * binPitch * maxWidth * maxHeight)); + return binPitch; +} + + +void finalizeGradients() { + CUDA_SAFE_CALL(cudaFree(devIntegrals)); + CUDA_SAFE_CALL(cudaFree(devTurned)); + CUDA_SAFE_CALL(cudaFree(devGradientB)); + CUDA_SAFE_CALL(cudaFree(devGradientA)); + CUDA_SAFE_CALL(cudaFree(devMirrored)); + CUDA_SAFE_CALL(cudaFree(devQuantized)); + CUDA_SAFE_CALL(cudaFree(devGradients)); +} + + + +float* gradients(float* devImage, uint nbins, bool blur, float sigma, uint* radii, int textonChoice) { + + quantizeImage(width, height, nbins, devImage, devQuantized); + mirrorImage(width, height, border, devQuantized, devMirrored); + + for(int orientation = 0; orientation < norients/2; orientation++) { + float thetaPi = -float(orientation)/float(norients); + int newWidth; + int newHeight; + rotateImage(borderWidth, borderHeight, devMirrored, thetaPi, newWidth, newHeight, devTurned); + int* devTurnedImage = devTurned; + if (orientation == 0) { + devTurnedImage = devMirrored; + } + + + formIntegralImages(newWidth, newHeight, nbins, devTurnedImage, binPitch, devIntegrals); + + + for (int scale = 0; scale < nscale; scale++) { + + if (TEXTON32 == textonChoice) + { + dispatchGradient(false, width, height, border, nbins, thetaPi, newWidth, radii[scale], blur, (int)(sigma*(float)nbins), devIntegrals, binPitch, devGradientA, devGradientB); + } + else + { + + dispatchGradient_64(width, height, border, nbins, thetaPi, newWidth, radii[scale], blur, (int)(sigma*(float)nbins), devIntegrals, binPitch, devGradientA, devGradientB); + } + mirrorImage(width, height, border, devGradientA, &devGradients[borderWidth * borderHeight * (scale * norients + orientation + norients / 2)]); + mirrorImage(width, height, border, devGradientB, &devGradients[borderWidth * borderHeight * (scale * norients + orientation)]); + } + } + return devGradients; +} + +float* gradients(int* devImage, uint nbins, bool blur, float sigma, uint* radii, int textonChoice) { + + mirrorImage(width, height, border, devImage, devMirrored); + + for(int orientation = 0; orientation < norients/2; orientation++) { + float 
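+    /* Same pipeline as the float overload above: for each of the norients/2
+       base orientations the mirrored image is rotated by thetaPi (presumably
+       in units of pi, as the name suggests), per-bin integral images are
+       built for the rotated image, and each scale's gradient pair is written
+       into the devGradients stack at offsets
+       borderWidth*borderHeight*(scale*norients + orientation) and the paired
+       slot orientation + norients/2. */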
thetaPi = -float(orientation)/float(norients); + int newWidth; + int newHeight; + rotateImage(borderWidth, borderHeight, devMirrored, thetaPi, newWidth, newHeight, devTurned); + int* devTurnedImage = devTurned; + if (orientation == 0) { + devTurnedImage = devMirrored; + } + + + formIntegralImages(newWidth, newHeight, nbins, devTurnedImage, binPitch, devIntegrals); + + + for (int scale = 0; scale < nscale; scale++) { + + if (TEXTON32 == textonChoice) + { + dispatchGradient(true, width, height, border, nbins, thetaPi, newWidth, radii[scale], blur, (int)(sigma*(float)nbins), devIntegrals, binPitch, devGradientA, devGradientB); + } + else + { + dispatchGradient_64(width, height, border, nbins, thetaPi, newWidth, radii[scale], blur, (int)(sigma*(float)nbins), devIntegrals, binPitch, devGradientA, devGradientB); + + } + mirrorImage(width, height, border, devGradientA, &devGradients[borderWidth * borderHeight * (scale * norients + orientation + norients / 2)]); + mirrorImage(width, height, border, devGradientB, &devGradients[borderWidth * borderHeight * (scale * norients + orientation)]); + } + } + return devGradients; +} diff --git a/cuda_code/gram_7.cu b/cuda_code/gram_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..b14722833cc617a6294d76fe6e3ddd1f04e1a343 --- /dev/null +++ b/cuda_code/gram_7.cu @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "test_utils.h" + +namespace MLCommon { +namespace Matrix { + +// Get the offset of element [i,k]. +HDI int get_offset(int i, int k, int ld, bool is_row_major) +{ + return is_row_major ? i * ld + k : i + k * ld; +} + +struct GramMatrixInputs { + int n1; // feature vectors in matrix 1 + int n2; // featuer vectors in matrix 2 + int n_cols; // number of elements in a feature vector + bool is_row_major; + KernelParams kernel; + int ld1; + int ld2; + int ld_out; + // We will generate random input using the dimensions given here. + // The reference output is calculated by a custom kernel. +}; + +std::ostream& operator<<(std::ostream& os, const GramMatrixInputs& p) +{ + std::vector kernel_names{"linear", "poly", "rbf", "tanh"}; + os << "/" << p.n1 << "x" << p.n2 << "x" << p.n_cols << "/" + << (p.is_row_major ? 
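+     /* The parameter sets below exercise all four kernel types; the expected
+        Gram matrix entries (computed on the host by naiveKernel further down)
+        are, for each pair of feature vectors x1[i] and x2[j]:
+          linear:      K = x1.x2
+          polynomial:  K = (gamma * x1.x2 + coef0)^degree
+          tanh:        K = tanh(gamma * x1.x2 + coef0)
+          rbf:         K = exp(-gamma * ||x1 - x2||^2)                      */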
"RowMajor/" : "ColMajor/") << kernel_names[p.kernel.kernel] << "/ld_" + << p.ld1 << "x" << p.ld2 << "x" << p.ld_out; + return os; +} + +const std::vector inputs = { + {42, 137, 2, false, {KernelType::LINEAR}}, + {42, 137, 2, true, {KernelType::LINEAR}}, + {42, 137, 2, false, {KernelType::LINEAR}, 64, 179, 181}, + {42, 137, 2, true, {KernelType::LINEAR}, 64, 179, 181}, + {137, 42, 2, false, {KernelType::POLYNOMIAL, 2, 0.5, 2.4}}, + {137, 42, 2, true, {KernelType::POLYNOMIAL, 2, 0.5, 2.4}}, + {137, 42, 2, false, {KernelType::POLYNOMIAL, 2, 0.5, 2.4}, 159, 73, 144}, + {137, 42, 2, true, {KernelType::POLYNOMIAL, 2, 0.5, 2.4}, 159, 73, 144}, + {42, 137, 2, false, {KernelType::TANH, 0, 0.5, 2.4}}, + {42, 137, 2, true, {KernelType::TANH, 0, 0.5, 2.4}}, + {42, 137, 2, false, {KernelType::TANH, 0, 0.5, 2.4}, 64, 155, 49}, + {42, 137, 2, true, {KernelType::TANH, 0, 0.5, 2.4}, 64, 155, 143}, + {3, 4, 2, false, {KernelType::RBF, 0, 0.5}}, + {42, 137, 2, false, {KernelType::RBF, 0, 0.5}}, + {42, 137, 2, true, {KernelType::RBF, 0, 0.5}}, + // Distance kernel does not support LD parameter yet. + //{42, 137, 2, false, {KernelType::RBF, 0, 0.5}, 64, 155, 49}, + // {42, 137, 2, true, {KernelType::RBF, 0, 0.5}, 64, 155, 143}, +}; + +template +class GramMatrixTest : public ::testing::TestWithParam { + protected: + GramMatrixTest() + : params(GetParam()), + stream(0), + x1(0, stream), + x2(0, stream), + gram(0, stream), + gram_host(handle.get_host_allocator(), stream) + { + CUDA_CHECK(cudaStreamCreate(&stream)); + + if (params.ld1 == 0) { params.ld1 = params.is_row_major ? params.n_cols : params.n1; } + if (params.ld2 == 0) { params.ld2 = params.is_row_major ? params.n_cols : params.n2; } + if (params.ld_out == 0) { params.ld_out = params.is_row_major ? params.n2 : params.n1; } + // Derive the size of the ouptut from the offset of the last element. + size_t size = get_offset(params.n1 - 1, params.n_cols - 1, params.ld1, params.is_row_major) + 1; + x1.resize(size, stream); + size = get_offset(params.n2 - 1, params.n_cols - 1, params.ld2, params.is_row_major) + 1; + x2.resize(size, stream); + size = get_offset(params.n1 - 1, params.n2 - 1, params.ld_out, params.is_row_major) + 1; + gram.resize(size, stream); + gram_host.resize(gram.size()); + + raft::random::Rng r(42137ULL); + r.uniform(x1.data(), x1.size(), math_t(0), math_t(1), stream); + r.uniform(x2.data(), x2.size(), math_t(0), math_t(1), stream); + CUDA_CHECK(cudaMemsetAsync(gram.data(), 0, gram.size() * sizeof(math_t), stream)); + CUDA_CHECK(cudaMemsetAsync(gram_host.data(), 0, gram_host.size() * sizeof(math_t), stream)); + } + + ~GramMatrixTest() override { CUDA_CHECK_NO_THROW(cudaStreamDestroy(stream)); } + + // Calculate the Gram matrix on the host. 
+ void naiveKernel() + { + host_buffer x1_host(handle.get_host_allocator(), stream, x1.size()); + raft::update_host(x1_host.data(), x1.data(), x1.size(), stream); + host_buffer x2_host(handle.get_host_allocator(), stream, x2.size()); + raft::update_host(x2_host.data(), x2.data(), x2.size(), stream); + CUDA_CHECK(cudaStreamSynchronize(stream)); + + for (int i = 0; i < params.n1; i++) { + for (int j = 0; j < params.n2; j++) { + float d = 0; + for (int k = 0; k < params.n_cols; k++) { + if (params.kernel.kernel == KernelType::RBF) { + math_t diff = x1_host[get_offset(i, k, params.ld1, params.is_row_major)] - + x2_host[get_offset(j, k, params.ld2, params.is_row_major)]; + d += diff * diff; + } else { + d += x1_host[get_offset(i, k, params.ld1, params.is_row_major)] * + x2_host[get_offset(j, k, params.ld2, params.is_row_major)]; + } + } + int idx = get_offset(i, j, params.ld_out, params.is_row_major); + math_t v = 0; + switch (params.kernel.kernel) { + case (KernelType::LINEAR): gram_host[idx] = d; break; + case (KernelType::POLYNOMIAL): + v = params.kernel.gamma * d + params.kernel.coef0; + gram_host[idx] = std::pow(v, params.kernel.degree); + break; + case (KernelType::TANH): + gram_host[idx] = std::tanh(params.kernel.gamma * d + params.kernel.coef0); + break; + case (KernelType::RBF): gram_host[idx] = exp(-params.kernel.gamma * d); break; + } + } + } + } + + void runTest() + { + std::unique_ptr> kernel = std::unique_ptr>( + KernelFactory::create(params.kernel, handle.get_cublas_handle())); + + kernel->evaluate(x1.data(), + params.n1, + params.n_cols, + x2.data(), + params.n2, + gram.data(), + params.is_row_major, + stream, + params.ld1, + params.ld2, + params.ld_out); + naiveKernel(); + ASSERT_TRUE(raft::devArrMatchHost( + gram_host.data(), gram.data(), gram.size(), raft::CompareApprox(1e-6f))); + } + + raft::handle_t handle; + cudaStream_t stream; + GramMatrixInputs params; + + std::shared_ptr host_allocator; + + rmm::device_uvector x1; + rmm::device_uvector x2; + rmm::device_uvector gram; + raft::mr::host::buffer gram_host; +}; + +typedef GramMatrixTest GramMatrixTestFloat; +typedef GramMatrixTest GramMatrixTestDouble; + +TEST_P(GramMatrixTestFloat, Gram) { runTest(); } + +INSTANTIATE_TEST_SUITE_P(GramMatrixTests, GramMatrixTestFloat, ::testing::ValuesIn(inputs)); +}; // end namespace Matrix +}; // end namespace MLCommon diff --git a/cuda_code/graph_5.cu b/cuda_code/graph_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..bbaf58e083ea95583d8a53390658da999c8b88f4 --- /dev/null +++ b/cuda_code/graph_5.cu @@ -0,0 +1,127 @@ +#include "graph.h" +#include + Graph::Graph() { + h_nnodes = 0; + h_nedges = 0; + h_edges = nullptr; + h_offset = nullptr; + h_weights = nullptr; + d_edges = nullptr; + d_offset = nullptr; + d_weights = nullptr; + + } + + + void Graph::read(std::string filename) { + std::ifstream input(filename.c_str()); + if(!input.is_open()) { + std::cerr << "Could not open the file \"" << filename << "\"" << std::endl; + exit(1); + } + + // file is found + + input >> h_nnodes >> h_nedges; + +/******************************************************************/ +// Allocation starts + unsigned numbytes_offset = sizeof(uint64_t) * (h_nnodes+1); + unsigned numbytes_edges = sizeof(uint64_t) * h_nedges; + unsigned numbytes_weights = sizeof(unsigned) * h_nedges; + /***************************************************/ + // on host + h_offset = (uint64_t*)malloc(numbytes_offset); + //if(h_offset == NULL) + //{ + // printf("Memory allocation failed"); + // return; + //} + 
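+    // CSR layout reminder: h_offset has h_nnodes+1 entries and h_edges has
+    // h_nedges entries; the neighbours of vertex v live in
+    // h_edges[h_offset[v] .. h_offset[v+1]-1].  For example, the sorted edge
+    // list (0,1) (0,2) (1,3) (3,0) over 4 nodes gives
+    //   h_offset = {0, 2, 3, 3, 4}   (node 2 has no outgoing edges)
+    //   h_edges  = {1, 2, 3, 0}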
h_edges = (uint64_t*)malloc(numbytes_edges); + h_weights = (unsigned*)malloc(numbytes_weights); + memset(h_offset, 0, numbytes_offset); + memset(h_edges, 0, numbytes_edges); + memset(h_weights, 0, numbytes_weights); + /***************************************************/ +#if 1 + // getCSR() + // generating the CSR representation and populating the h_offset and h_edges array as the deliverable + + // Assumption: + // 1. There is a edge list representation of the graph available, sorted by source vertex ids. + // 2. The node ids always start from 0. + + // there are h_edges lines left in the file since + uint64_t srcPrev, srcCurr; // storing the ids of the previous and the current vertices + uint64_t offset = 0; // the offset in the h_edges array + uint64_t index = 0; // the index of the h_offset array to which the value of offset has to be written + input >> srcPrev >> h_edges[0] >> h_weights[0]; // reading the src and dest of the first edge + h_offset[index] = offset; + for (int i=1; i> srcCurr >> h_edges[i] >> h_weights[i]; +// if(srcCurr == srcPrev) { // we are in the middle of the edge list of the same source vertex +// ++offset; +// } + + ++offset; + if(srcPrev != srcCurr) { // srcCurr has a new source id +// ++offset; + uint64_t diff = srcCurr - srcPrev; + while(diff-- /*&& (index <= h_nnodes)*/ ) { // to account for the values of offset for the vertices that do not have any neighbors + ++index; + h_offset[index] = offset; + } + } + + srcPrev = srcCurr; // making the current node as the previous node, for the next run + } + + // putting the offset to 'h_nedges' for the last nodes that do not have any outgoing edges. + for(int i=index+1; i<=h_nnodes; i++) + h_offset[i] = h_nedges; + +#endif + + /***************************************************/ + + // on device + gpuErrchk(cudaMalloc(&d_offset, numbytes_offset)); + gpuErrchk(cudaMalloc(&d_edges, numbytes_edges)); + gpuErrchk(cudaMalloc(&d_weights, numbytes_weights)); + + //gpuErrchk(cudaMalloc(&d_nnodes, numbytes_edges)); + //gpuErrchk(cudaMalloc(&d_nedges, numbytes_edges)); + + /***************************************************/ + + // copying to device + + gpuErrchk(cudaMemcpy(d_offset, h_offset, numbytes_offset, cudaMemcpyHostToDevice)); + gpuErrchk(cudaMemcpy(d_edges, h_edges, numbytes_edges, cudaMemcpyHostToDevice)); + gpuErrchk(cudaMemcpy(d_weights, h_weights, numbytes_weights, cudaMemcpyHostToDevice)); + +} + + +void Graph::printGraph() { + std::cout << "offset array: " << std::endl; + for(int i=0; i 0) + startIndex = pInGPUPtrStartIds[nodeId-1]; + + //Iterate over the neighbors. + float curVal = 0.0f; + for(int neighCurIndex = startIndex; + neighCurIndex < endIndex; + ++neighCurIndex) + { + int nodeNeighId = pInGPUPtrNeighbors[neighCurIndex].x; + curVal += pInGPUPtrFeatures[nodeNeighId*pNumFeatures + featureOffset]; + } + + //Add the final value. + curVal += pInGPUPtrFeatures[nodeId*pNumFeatures + featureOffset]; + + //Normalize if necessary. + if(pNormalize) + curVal /= float(endIndex-startIndex+1); + + //Store the result. 
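+        // Net effect per (node, feature) pair:
+        //   out[n][f] = x[n][f] + sum over neighbours m of x[m][f]
+        // divided by (neighbour count + 1) when pNormalize is set, i.e. a
+        // mean over the node and the neighbours listed in pInGPUPtrNeighbors.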
+ pOutGPUPtrFeatures[nodeId*pNumFeatures + featureOffset] = curVal; + } +} + +__global__ void compute_graph_aggregation_grads_gpu_kernel( + const bool pNormalize, + const unsigned int pNumNodes, + const unsigned int pNumFeatures, + const unsigned int pNumNeighbors, + const float* __restrict__ pInGPUPtrGrads, + const int2* __restrict__ pInGPUPtrNeighbors, + const int* __restrict__ pInGPUPtrStartIds, + float* __restrict__ pOutGPUPtrGrads) +{ + int initIndex = mccnn::compute_global_index_gpu_funct(); + int totalThreads = mccnn::compute_total_threads_gpu_funct(); + + //Iterate over the nodes. + for(int curIndex = initIndex; + curIndex < pNumNodes*pNumFeatures; + curIndex += totalThreads) + { + //Get the ids. + int nodeId = curIndex/pNumFeatures; + int featureOffset = curIndex%pNumFeatures; + + //Get the neighbor range. + int startIndex = 0; + int endIndex = pInGPUPtrStartIds[nodeId]; + if(nodeId > 0) + startIndex = pInGPUPtrStartIds[nodeId-1]; + + //Get the input gradient. + float inGrad = pInGPUPtrGrads[nodeId*pNumFeatures + featureOffset]; + + //Normalize if necessary. + if(pNormalize) + inGrad /= float(endIndex-startIndex+1); + + //Iterate over the neighbors. + for(int neighCurIndex = startIndex; + neighCurIndex < endIndex; + ++neighCurIndex) + { + int nodeNeighId = pInGPUPtrNeighbors[neighCurIndex].x; + atomicAdd(&pOutGPUPtrGrads[nodeNeighId*pNumFeatures + featureOffset], inGrad); + } + + //Add the final value. + atomicAdd(&pOutGPUPtrGrads[nodeId*pNumFeatures + featureOffset], inGrad); + } +} + +///////////////////////// CPU + +void mccnn::compute_graph_aggregation_gpu( + std::unique_ptr& pDevice, + const bool pNormalize, + const unsigned int pNumNodes, + const unsigned int pNumFeatures, + const unsigned int pNumNeighbors, + const float* pInGPUPtrFeatures, + const int* pInGPUPtrNeighbors, + const int* pInGPUPtrStartIds, + float* pOutGPUPtrFeatures) +{ + //Get the cuda stream. + auto cudaStream = pDevice->getCUDAStream(); + +#ifdef DEBUG_INFO + cudaEvent_t start, stop; + cudaEventCreate(&start); + cudaEventCreate(&stop); + cudaEventRecord(start, cudaStream); +#endif + + //Get the device properties. + const GpuDeviceProperties& gpuProps = pDevice->get_device_properties(); + + //Calculate the ideal number of blocks for the selected block size. + unsigned int numMP = gpuProps.numMPs_; + unsigned int blockSize = gpuProps.warpSize_*2; + unsigned int numBlocks = pDevice->get_max_active_block_x_sm( + blockSize,(const void*)compute_graph_aggregation_gpu_kernel, 0); + pDevice->check_error(__FILE__, __LINE__); + + //Calculate the total number of blocks to execute. + unsigned int execBlocks = pNumNodes*pNumFeatures; + execBlocks += ((pNumNodes*pNumFeatures)%blockSize != 0)?1:0; + unsigned int totalNumBlocks = numMP*numBlocks; + totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks; + + //Execute the cuda kernel. 
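+    // Launch configuration: blockSize is two warps, numBlocks is the
+    // occupancy-limited block count per SM reported for this kernel, and the
+    // grid size is numMP*numBlocks capped by execBlocks so it never exceeds
+    // the amount of work.  Because the kernel strides over curIndex in steps
+    // of the total thread count, any grid size up to that cap still covers
+    // every (node, feature) pair.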
+ compute_graph_aggregation_gpu_kernel<<>>( + pNormalize, pNumNodes, pNumFeatures, pNumNeighbors, + pInGPUPtrFeatures, (const int2*)pInGPUPtrNeighbors, + pInGPUPtrStartIds, pOutGPUPtrFeatures); + pDevice->check_error(__FILE__, __LINE__); + +#ifdef DEBUG_INFO + cudaEventRecord(stop, cudaStream); + cudaEventSynchronize(stop); + float milliseconds = 0; + cudaEventElapsedTime(&milliseconds, start, stop); + + float gpuOccupancy = (float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_; + + fprintf(stderr, "### COMPUTE GRAPH AGGREGATION ###\n"); + fprintf(stderr, "Num nodes: %d\n", pNumNodes); + fprintf(stderr, "Num features: %d\n", pNumFeatures); + fprintf(stderr, "Occupancy: %f\n", gpuOccupancy); + fprintf(stderr, "Execution time: %f\n", milliseconds); + fprintf(stderr, "\n"); +#endif +} + +void mccnn::compute_graph_aggregation_grads_gpu( + std::unique_ptr& pDevice, + const bool pNormalize, + const unsigned int pNumNodes, + const unsigned int pNumFeatures, + const unsigned int pNumNeighbors, + const float* pInGPUPtrGradients, + const int* pInGPUPtrNeighbors, + const int* pInGPUPtrStartIds, + float* pOutGPUPtrGradients) +{ + //Get the cuda stream. + auto cudaStream = pDevice->getCUDAStream(); + +#ifdef DEBUG_INFO + cudaEvent_t start, stop; + cudaEventCreate(&start); + cudaEventCreate(&stop); + cudaEventRecord(start, cudaStream); +#endif + + //Get the device properties. + const GpuDeviceProperties& gpuProps = pDevice->get_device_properties(); + + //Initialize the gradient vector. + pDevice->memset(pOutGPUPtrGradients, 0, sizeof(float)*pNumFeatures*pNumNodes); + pDevice->check_error(__FILE__, __LINE__); + + //Calculate the ideal number of blocks for the selected block size. + unsigned int numMP = gpuProps.numMPs_; + unsigned int blockSize = gpuProps.warpSize_*2; + unsigned int numBlocks = pDevice->get_max_active_block_x_sm( + blockSize,(const void*)compute_graph_aggregation_gpu_kernel, 0); + pDevice->check_error(__FILE__, __LINE__); + + //Calculate the total number of blocks to execute. + unsigned int execBlocks = pNumNodes*pNumFeatures; + execBlocks += ((pNumNodes*pNumFeatures)%blockSize != 0)?1:0; + unsigned int totalNumBlocks = numMP*numBlocks; + totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks; + + //Execute the cuda kernel. + compute_graph_aggregation_grads_gpu_kernel<<>>( + pNormalize, pNumNodes, pNumFeatures, pNumNeighbors, + pInGPUPtrGradients, (const int2*)pInGPUPtrNeighbors, + pInGPUPtrStartIds, pOutGPUPtrGradients); + pDevice->check_error(__FILE__, __LINE__); + +#ifdef DEBUG_INFO + cudaEventRecord(stop, cudaStream); + cudaEventSynchronize(stop); + float milliseconds = 0; + cudaEventElapsedTime(&milliseconds, start, stop); + + float gpuOccupancy = (float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_; + + fprintf(stderr, "### COMPUTE GRAPH AGGREGATION GRADS ###\n"); + fprintf(stderr, "Num nodes: %d\n", pNumNodes); + fprintf(stderr, "Num features: %d\n", pNumFeatures); + fprintf(stderr, "Occupancy: %f\n", gpuOccupancy); + fprintf(stderr, "Execution time: %f\n", milliseconds); + fprintf(stderr, "\n"); +#endif +} diff --git a/cuda_code/graph_gpu_wrapper_2.cu b/cuda_code/graph_gpu_wrapper_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..93854d7f1ec3f6a6ffa454a4b65c8adcafd77002 --- /dev/null +++ b/cuda_code/graph_gpu_wrapper_2.cu @@ -0,0 +1,355 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h" +#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h" +#include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h" +namespace paddle { +namespace framework { +#ifdef PADDLE_WITH_HETERPS +std::string nodes[] = { + std::string("user\t37\ta 0.34\tb 13 14\tc hello\td abc"), + std::string("user\t96\ta 0.31\tb 15 10\tc 96hello\td abcd"), + std::string("user\t59\ta 0.11\tb 11 14"), + std::string("user\t97\ta 0.11\tb 12 11"), + std::string("item\t45\ta 0.21"), + std::string("item\t145\ta 0.21"), + std::string("item\t112\ta 0.21"), + std::string("item\t48\ta 0.21"), + std::string("item\t247\ta 0.21"), + std::string("item\t111\ta 0.21"), + std::string("item\t46\ta 0.21"), + std::string("item\t146\ta 0.21"), + std::string("item\t122\ta 0.21"), + std::string("item\t49\ta 0.21"), + std::string("item\t248\ta 0.21"), + std::string("item\t113\ta 0.21")}; +char node_file_name[] = "nodes.txt"; +std::vector user_feature_name = {"a", "b", "c", "d"}; +std::vector item_feature_name = {"a"}; +std::vector user_feature_dtype = {"float32", "int32", "string", + "string"}; +std::vector item_feature_dtype = {"float32"}; +std::vector user_feature_shape = {1, 2, 1, 1}; +std::vector item_feature_shape = {1}; +void prepare_file(char file_name[]) { + std::ofstream ofile; + ofile.open(file_name); + + for (auto x : nodes) { + ofile << x << std::endl; + } + ofile.close(); +} + +void GraphGpuWrapper::set_device(std::vector ids) { + for (auto device_id : ids) { + device_id_mapping.push_back(device_id); + } +} +std::vector> GraphGpuWrapper::get_all_id(int type, int idx, + int slice_num) { + return ((GpuPsGraphTable *)graph_table) + ->cpu_graph_table->get_all_id(type, idx, slice_num); +} +void GraphGpuWrapper::set_up_types(std::vector &edge_types, + std::vector &node_types) { + id_to_edge = edge_types; + for (size_t table_id = 0; table_id < edge_types.size(); table_id++) { + int res = edge_to_id.size(); + edge_to_id[edge_types[table_id]] = res; + } + id_to_feature = node_types; + for (size_t table_id = 0; table_id < node_types.size(); table_id++) { + int res = feature_to_id.size(); + feature_to_id[node_types[table_id]] = res; + } + table_feat_mapping.resize(node_types.size()); + this->table_feat_conf_feat_name.resize(node_types.size()); + this->table_feat_conf_feat_dtype.resize(node_types.size()); + this->table_feat_conf_feat_shape.resize(node_types.size()); +} + +void GraphGpuWrapper::make_partitions(int idx, int64_t byte_size, + int device_len) { + ((GpuPsGraphTable *)graph_table) + ->cpu_graph_table->make_partitions(idx, byte_size, device_len); +} +int32_t GraphGpuWrapper::load_next_partition(int idx) { + return ((GpuPsGraphTable *)graph_table) + ->cpu_graph_table->load_next_partition(idx); +} + +void GraphGpuWrapper::set_search_level(int level) { + ((GpuPsGraphTable *)graph_table)->cpu_graph_table->set_search_level(level); +} + +std::vector GraphGpuWrapper::get_partition(int 
idx, int num) { + return ((GpuPsGraphTable *)graph_table) + ->cpu_graph_table->get_partition(idx, num); +} +int32_t GraphGpuWrapper::get_partition_num(int idx) { + return ((GpuPsGraphTable *)graph_table) + ->cpu_graph_table->get_partition_num(idx); +} +void GraphGpuWrapper::make_complementary_graph(int idx, int64_t byte_size) { + ((GpuPsGraphTable *)graph_table) + ->cpu_graph_table->make_complementary_graph(idx, byte_size); +} +void GraphGpuWrapper::load_edge_file(std::string name, std::string filepath, + bool reverse) { + // 'e' means load edge + std::string params = "e"; + if (reverse) { + // 'e<' means load edges from $2 to $1 + params += "<" + name; + } else { + // 'e>' means load edges from $1 to $2 + params += ">" + name; + } + if (edge_to_id.find(name) != edge_to_id.end()) { + ((GpuPsGraphTable *)graph_table) + ->cpu_graph_table->Load(std::string(filepath), params); + } +} + +void GraphGpuWrapper::load_node_file(std::string name, std::string filepath) { + // 'n' means load nodes and 'node_type' follows + + std::string params = "n" + name; + + if (feature_to_id.find(name) != feature_to_id.end()) { + ((GpuPsGraphTable *)graph_table) + ->cpu_graph_table->Load(std::string(filepath), params); + } +} + +void GraphGpuWrapper::add_table_feat_conf(std::string table_name, + std::string feat_name, + std::string feat_dtype, + int feat_shape) { + if (feature_to_id.find(table_name) != feature_to_id.end()) { + int idx = feature_to_id[table_name]; + if (table_feat_mapping[idx].find(feat_name) == + table_feat_mapping[idx].end()) { + int res = (int)table_feat_mapping[idx].size(); + table_feat_mapping[idx][feat_name] = res; + } + int feat_idx = table_feat_mapping[idx][feat_name]; + VLOG(0) << "table_name " << table_name << " mapping id " << idx; + VLOG(0) << " feat name " << feat_name << " feat id" << feat_idx; + if (feat_idx < table_feat_conf_feat_name[idx].size()) { + // overide + table_feat_conf_feat_name[idx][feat_idx] = feat_name; + table_feat_conf_feat_dtype[idx][feat_idx] = feat_dtype; + table_feat_conf_feat_shape[idx][feat_idx] = feat_shape; + } else { + // new + table_feat_conf_feat_name[idx].push_back(feat_name); + table_feat_conf_feat_dtype[idx].push_back(feat_dtype); + table_feat_conf_feat_shape[idx].push_back(feat_shape); + } + } + VLOG(0) << "add conf over"; +} +void GraphGpuWrapper::init_search_level(int level) { search_level = level; } + +void GraphGpuWrapper::init_service() { + table_proto.set_task_pool_size(24); + table_proto.set_search_level(search_level); + table_proto.set_table_name("cpu_graph_table"); + table_proto.set_use_cache(false); + for (int i = 0; i < id_to_edge.size(); i++) + table_proto.add_edge_types(id_to_edge[i]); + for (int i = 0; i < id_to_feature.size(); i++) { + table_proto.add_node_types(id_to_feature[i]); + auto feat_node = id_to_feature[i]; + ::paddle::distributed::GraphFeature *g_f = table_proto.add_graph_feature(); + for (int x = 0; x < table_feat_conf_feat_name[i].size(); x++) { + g_f->add_name(table_feat_conf_feat_name[i][x]); + g_f->add_dtype(table_feat_conf_feat_dtype[i][x]); + g_f->add_shape(table_feat_conf_feat_shape[i][x]); + } + } + std::shared_ptr resource = + std::make_shared(device_id_mapping); + resource->enable_p2p(); + GpuPsGraphTable *g = new GpuPsGraphTable(resource, 1); + g->init_cpu_table(table_proto); + graph_table = (char *)g; +} + +void GraphGpuWrapper::upload_batch(int idx, + std::vector> &ids) { + GpuPsGraphTable *g = (GpuPsGraphTable *)graph_table; + // std::vector vec; + for (int i = 0; i < ids.size(); i++) { + // 
vec.push_back(g->cpu_graph_table->make_gpu_ps_graph(idx, ids[i])); + GpuPsCommGraph sub_graph = + g->cpu_graph_table->make_gpu_ps_graph(idx, ids[i]); + g->build_graph_on_single_gpu(sub_graph, i); + sub_graph.release_on_cpu(); + VLOG(0) << "sub graph on gpu " << i << " is built"; + } + // g->build_graph_from_cpu(vec); +} + +void GraphGpuWrapper::initialize() { + std::vector device_id_mapping; + for (int i = 0; i < 2; i++) device_id_mapping.push_back(i); + int gpu_num = device_id_mapping.size(); + ::paddle::distributed::GraphParameter table_proto; + table_proto.add_edge_types("u2u"); + table_proto.add_node_types("user"); + table_proto.add_node_types("item"); + ::paddle::distributed::GraphFeature *g_f = table_proto.add_graph_feature(); + + for (int i = 0; i < user_feature_name.size(); i++) { + g_f->add_name(user_feature_name[i]); + g_f->add_dtype(user_feature_dtype[i]); + g_f->add_shape(user_feature_shape[i]); + } + ::paddle::distributed::GraphFeature *g_f1 = table_proto.add_graph_feature(); + for (int i = 0; i < item_feature_name.size(); i++) { + g_f1->add_name(item_feature_name[i]); + g_f1->add_dtype(item_feature_dtype[i]); + g_f1->add_shape(item_feature_shape[i]); + } + prepare_file(node_file_name); + table_proto.set_shard_num(24); + + std::shared_ptr resource = + std::make_shared(device_id_mapping); + resource->enable_p2p(); + GpuPsGraphTable *g = new GpuPsGraphTable(resource, 1); + g->init_cpu_table(table_proto); + graph_table = (char *)g; + g->cpu_graph_table->Load(node_file_name, "nuser"); + g->cpu_graph_table->Load(node_file_name, "nitem"); + std::remove(node_file_name); + std::vector vec; + std::vector node_ids; + node_ids.push_back(37); + node_ids.push_back(96); + std::vector> node_feat(2, + std::vector(2)); + std::vector feature_names; + feature_names.push_back(std::string("c")); + feature_names.push_back(std::string("d")); + g->cpu_graph_table->get_node_feat(0, node_ids, feature_names, node_feat); + VLOG(0) << "get_node_feat: " << node_feat[0][0]; + VLOG(0) << "get_node_feat: " << node_feat[0][1]; + VLOG(0) << "get_node_feat: " << node_feat[1][0]; + VLOG(0) << "get_node_feat: " << node_feat[1][1]; + int n = 10; + std::vector ids0, ids1; + for (int i = 0; i < n; i++) { + g->cpu_graph_table->add_comm_edge(0, i, (i + 1) % n); + g->cpu_graph_table->add_comm_edge(0, i, (i - 1 + n) % n); + if (i % 2 == 0) ids0.push_back(i); + } + g->cpu_graph_table->build_sampler(0); + ids1.push_back(5); + vec.push_back(g->cpu_graph_table->make_gpu_ps_graph(0, ids0)); + vec.push_back(g->cpu_graph_table->make_gpu_ps_graph(0, ids1)); + vec[0].display_on_cpu(); + vec[1].display_on_cpu(); + g->build_graph_from_cpu(vec); +} +void GraphGpuWrapper::test() { + int64_t cpu_key[3] = {0, 1, 2}; + void *key; + platform::CUDADeviceGuard guard(0); + cudaMalloc((void **)&key, 3 * sizeof(int64_t)); + cudaMemcpy(key, cpu_key, 3 * sizeof(int64_t), cudaMemcpyHostToDevice); + auto neighbor_sample_res = + ((GpuPsGraphTable *)graph_table) + ->graph_neighbor_sample(0, (int64_t *)key, 2, 3); + int64_t *res = new int64_t[7]; + cudaMemcpy(res, neighbor_sample_res.val, 3 * 2 * sizeof(int64_t), + cudaMemcpyDeviceToHost); + int *actual_sample_size = new int[3]; + cudaMemcpy(actual_sample_size, neighbor_sample_res.actual_sample_size, + 3 * sizeof(int), + cudaMemcpyDeviceToHost); // 3, 1, 3 + + //{0,9} or {9,0} is expected for key 0 + //{0,2} or {2,0} is expected for key 1 + //{1,3} or {3,1} is expected for key 2 + for (int i = 0; i < 3; i++) { + VLOG(0) << "actual sample size for " << i << " is " + << actual_sample_size[i]; + for 
(int j = 0; j < actual_sample_size[i]; j++) { + VLOG(0) << "sampled an neighbor for node" << i << " : " << res[i * 2 + j]; + } + } +} +NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample_v3( + NeighborSampleQuery q, bool cpu_switch) { + return ((GpuPsGraphTable *)graph_table) + ->graph_neighbor_sample_v3(q, cpu_switch); +} + +// this function is contributed by Liwb5 +std::vector GraphGpuWrapper::graph_neighbor_sample( + int gpu_id, std::vector &key, int sample_size) { + int64_t *cuda_key; + platform::CUDADeviceGuard guard(gpu_id); + + cudaMalloc(&cuda_key, key.size() * sizeof(int64_t)); + cudaMemcpy(cuda_key, key.data(), key.size() * sizeof(int64_t), + cudaMemcpyHostToDevice); + + auto neighbor_sample_res = + ((GpuPsGraphTable *)graph_table) + ->graph_neighbor_sample(gpu_id, cuda_key, sample_size, key.size()); + + int *actual_sample_size = new int[key.size()]; + cudaMemcpy(actual_sample_size, neighbor_sample_res.actual_sample_size, + key.size() * sizeof(int), + cudaMemcpyDeviceToHost); // 3, 1, 3 + int cumsum = 0; + for (int i = 0; i < key.size(); i++) { + cumsum += actual_sample_size[i]; + } + /* VLOG(0) << "cumsum " << cumsum; */ + + std::vector cpu_key, res; + cpu_key.resize(key.size() * sample_size); + + cudaMemcpy(cpu_key.data(), neighbor_sample_res.val, + key.size() * sample_size * sizeof(int64_t), + cudaMemcpyDeviceToHost); + for (int i = 0; i < key.size(); i++) { + for (int j = 0; j < actual_sample_size[i]; j++) { + res.push_back(key[i]); + res.push_back(cpu_key[i * sample_size + j]); + } + } + /* for(int i = 0;i < res.size();i ++) { */ + /* VLOG(0) << i << " " << res[i]; */ + /* } */ + + cudaFree(cuda_key); + return res; +} + +NodeQueryResult GraphGpuWrapper::query_node_list(int gpu_id, int start, + int query_size) { + return ((GpuPsGraphTable *)graph_table) + ->query_node_list(gpu_id, start, query_size); +} +#endif +} +}; diff --git a/cuda_code/grid_collaboration.cu b/cuda_code/grid_collaboration.cu new file mode 100644 index 0000000000000000000000000000000000000000..974811af1d275052dc1b3e067bcd3dce17bede99 --- /dev/null +++ b/cuda_code/grid_collaboration.cu @@ -0,0 +1,366 @@ +/* + * Note: The tests in this file involve run-time work on the GPU. + * The way this is managed is with just a _single_ kernel for any + * and all possible testcase - which is very generic: It + * runs arbitrary test-case specific code, which is intended to + * produce a sequence of values. These values are not necessarily + * "results" - that depends on what it is you're running - but + * they're values to then _check_ afterwards on the host side. + */ + +#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN + +#include "common.cuh" + +#include +#include +#include +#include + +using std::size_t; + +#if __cplusplus < 201701L +#include +template +using optional = std::experimental::optional; +#else +template +#include +using optional = std::optional; +#endif + +template +const auto make_exact_comparison { optional{} }; + +namespace klcg = kat::linear_grid::collaborative::grid; +namespace klcb = kat::linear_grid::collaborative::block; +// namespace kcg = kat::collaborative::grid; +namespace kcb = kat::collaborative::block; +namespace kcw = kat::collaborative::warp; + +namespace kernels { + +template +__global__ void execute_testcase( + F testcase_device_function, + size_t num_values_to_populate, + T* __restrict__ values_to_populate, + const Is* __restrict__ ... 
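+    /* A single generic kernel serves every test case: it forwards to a
+       test-specific device lambda that fills values_to_populate, optionally
+       reading the variadic device input arrays Is...; the doctest assertions
+       themselves run later on the host against that buffer, since doctest
+       cannot execute on the device. */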
inputs + ) +{ + testcase_device_function(num_values_to_populate, values_to_populate, inputs...); +} + +} // namespace kernels + + +template +std::size_t set_width_for_up_to(T max) +{ +// assert(std::is_integral::value, "Only integer types supported for now"); + std::stringstream ss; + ss << std::dec << max; + return ss.str().length(); +} + +namespace detail { + +template +auto tolerance_gadget(std::true_type, T x, optional tolerance) { + auto eps = tolerance.value_or(0); + return doctest::Approx(x).epsilon(eps); +} + + +template +T tolerance_gadget(std::false_type, T x, optional) { return x; } + +} // namespace detail + +template +auto tolerance_gadget(T x, optional tolerance) +{ + constexpr const auto is_arithmetic = std::is_arithmetic< std::decay_t >::value; + return + detail::tolerance_gadget(std::integral_constant{}, x, tolerance); +} + + +// TODO: Take iterator templates rather than pointers +template +void check_results( + size_t num_values_to_check, + // perhaps add another parameter for specific individual-check details? + const T* __restrict__ actual_values, + F expected_value_retriever, + optional comparison_tolerance_fraction, + const Is* __restrict__... inputs) +{ + std::stringstream ss; + auto index_width = set_width_for_up_to(num_values_to_check); + + // TODO: Consider using the maximum/minimum result values to set field widths. + + for(size_t i = 0; i < num_values_to_check; i++) { + ss.str(""); + ss + << "Assertion " << std::setw(index_width) << (i+1) << " for testcase " << doctest::current_test_name() + // << " :\n" + << "(" << std::make_tuple(inputs[i]...) << ")" + ; + auto mismatch_message { ss.str() }; + if (comparison_tolerance_fraction) { + CHECK_MESSAGE(actual_values[i] == tolerance_gadget(expected_value_retriever(i), comparison_tolerance_fraction), mismatch_message); + } + else { + CHECK_MESSAGE(actual_values[i] == expected_value_retriever(i), mismatch_message); + } + } +} + +template +struct tag {}; + +/** + * @brief Executes a testcase intended to make certain checks using a GPU kernel + * which produces the values to check for. + * + * @note The actual checks are eventually conducted on the host side, since doctest + * code can't actually do anything useful on the GPU. So on the GPU side we "merely" + * compute the values to check and let the test logic peform the actual comparison later + * on. + */ +template +auto execute_testcase_on_gpu( + tag, + std::index_sequence, + K testcase_kernel, + F testcase_device_function, + cuda::launch_configuration_t launch_config, + size_t num_values_to_populate, + Is* __restrict__ ... inputs) +{ + cuda::device_t device { cuda::device::current::get() }; + auto device_side_results { cuda::memory::device::make_unique(device, num_values_to_populate) }; + cuda::memory::device::zero(device_side_results.get(), num_values_to_populate * sizeof(T)); // just to be on the safe side + auto host_side_results { std::vector(num_values_to_populate) }; + + auto make_device_side_input = [&device, num_values_to_populate](auto input, size_t n) { + using input_type = std::remove_reference_t; + auto device_side_input = cuda::memory::device::make_unique(device, n); + cuda::memory::copy(device_side_input.get(), input, num_values_to_populate * sizeof(input_type)); + return std::move(device_side_input); + }; + auto device_side_inputs = std::make_tuple( make_device_side_input(inputs, num_values_to_populate)... 
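+        /* For each variadic input array, make_device_side_input allocates a
+           device buffer and copies the host data in; the resulting tuple is
+           expanded back into kernel arguments via the index_sequence, and the
+           populated device results are copied into host_side_results for the
+           host-side checks. */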
); + ignore(device_side_inputs); // for the case of no inputs + + cuda::launch( + testcase_kernel, + launch_config, + testcase_device_function, + num_values_to_populate, + device_side_results.get(), + std::get(device_side_inputs).get()... ); + + cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(T) * num_values_to_populate); + + return host_side_results; +} + +template +void execute_non_uniform_testcase_on_gpu_and_check( + F testcase_device_function, + ExpectedResultRetriever expected_value_retriever, + size_t num_values_to_populate, + cuda::grid::dimensions_t grid_dimensions, + cuda::grid::block_dimensions_t block_dimensions, + optional comparison_tolerance_fraction, + Is* __restrict__ ... inputs) +{ + auto launch_config { cuda::make_launch_config(grid_dimensions, block_dimensions) }; + // TODO: Should we check that num_values_to_populate is equal to the number of grid threads? + + auto host_side_results = execute_testcase_on_gpu( + tag{}, + typename std::make_index_sequence {}, + kernels::execute_testcase, + testcase_device_function, + launch_config, + num_values_to_populate, + inputs... + ); + + check_results ( + num_values_to_populate, + // perhaps add another parameter for specific testcase details? + host_side_results.data(), + expected_value_retriever, + comparison_tolerance_fraction, + inputs...); +} + +TEST_SUITE("grid-level") { + +// Note: Types for instantiation are chosen based on what's actually available in CUDA + +TEST_CASE("at_grid_stride") +{ + using checked_value_type = uint32_t; + // No inputs, nor concrete expected results. + + auto testcase_device_function = [] KAT_DEV (size_t length, checked_value_type* results) { + auto f_inner = [&] (size_t pos) { + results[pos] = kat::linear_grid::grid_info::thread::id_in_grid(); + }; + klcg::at_grid_stride(length, f_inner); + }; + + cuda::grid::dimension_t num_grid_blocks { 2 }; + cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 2 }; + auto total_num_threads = num_grid_blocks * num_threads_per_block; + + auto expected_value_retriever = [total_num_threads] (size_t pos) { + // Which thread processes position pos? + return checked_value_type(pos % total_num_threads); + }; + + auto num_values_to_populate = total_num_threads * 2 + kat::warp_size / 2 - 1; + + execute_non_uniform_testcase_on_gpu_and_check( + testcase_device_function, + expected_value_retriever, + num_values_to_populate, num_grid_blocks, num_threads_per_block, + make_exact_comparison + ); +} + +TEST_CASE("at_block_stride") +{ + using checked_value_type = uint32_t; // The type for number of grids in a thread. Should we typedef that? + cuda::grid::dimension_t num_grid_blocks { 2 }; + cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 2 }; + auto total_num_threads = num_grid_blocks * num_threads_per_block; + + size_t length_to_cover = total_num_threads * 2 + kat::warp_size / 2 - 1; + // We don't actually create input data, we just need each element in the + // range 0 ... 
length_to_cover-1 to be attended to by some thread + // + // In this test case, there's a single common range which the whole grid covers + // (as opposed to block-level or warp-level collaboration) + + + auto testcase_device_function = [] KAT_DEV (size_t length, checked_value_type* results) { + auto f_inner = [&] (size_t pos) { +// printf("Thread %u in block %u got pos %u of %u\n", threadIdx.x, blockIdx.x, (unsigned) pos, (unsigned) length); + results[pos] = kat::linear_grid::grid_info::thread::id_in_grid(); + }; + auto serialization_factor = + length / kat::linear_grid::grid_info::grid::num_threads() + (length % kat::linear_grid::grid_info::grid::num_threads() != 0); + klcg::at_block_stride(length, f_inner, serialization_factor); + }; + + auto serialization_factor = div_rounding_up(length_to_cover, total_num_threads); + auto elements_processed_per_block = serialization_factor * num_threads_per_block; + +// std::cout << "length_to_cover = " << length_to_cover << ", num_threads_per_block = " << num_threads_per_block << ", elements_per_block = " << serialization_factor << '\n'; + + auto expected_value_retriever = [=] (size_t pos) { + // Which thread processes position pos? + + auto processing_block_index = pos / elements_processed_per_block; + auto processing_thread_index = pos % num_threads_per_block; + // which is the same as (pos % processing_block_index) % num_threads_per_block + return checked_value_type(processing_block_index * num_threads_per_block + processing_thread_index); + }; + +// for(int i = 0; i < length_to_cover; i++) { +// if (i % 10 == 0) { std::cout << '\n' << std::setw(3) << i << ": "; } +// std::cout << std::setw(3) << expected_value_retriever(i) << " "; +// } +// std::cout << "\n\n"; + + execute_non_uniform_testcase_on_gpu_and_check( + testcase_device_function, + expected_value_retriever, + length_to_cover, + num_grid_blocks, + num_threads_per_block, + make_exact_comparison + ); + + +} + +struct attending_threads_info { + struct { + uint32_t grid_size_minus_first, last; + // We use grid_size_minus_first rather than first, so that + // zero-initialization would be semantically acceptable + } extrema; + uint32_t num; +}; // Note: All of this gets zero-initialized + +std::ostream& operator<<(std::ostream& os, const attending_threads_info& ati) +{ + return os << "{ {" << ati.extrema.grid_size_minus_first << ", " << ati.extrema.last << " }, " << ati.num << " }"; +} + +bool operator==(const attending_threads_info& lhs, const attending_threads_info & rhs) +{ + return + lhs.extrema.grid_size_minus_first == rhs.extrema.grid_size_minus_first and + lhs.extrema.last == rhs.extrema.last and + lhs.num == rhs.num; +} + +TEST_CASE("warp_per_input_element::at_grid_stride") +{ + using checked_value_type = attending_threads_info; + cuda::grid::dimension_t num_grid_blocks { 2 }; + cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 15 }; + auto total_num_threads = num_grid_blocks * num_threads_per_block; + auto length_to_cover = total_num_threads / 4 + 1; + // We don't actually create input data, we just need each element in the + // range 0 ... 
length_to_cover-1 to be attended to by some full warp + auto num_values_to_populate = length_to_cover; + + auto testcase_device_function = + [=] KAT_DEV ( + size_t length_of_attending_threads_info, + checked_value_type* attending_threads_info) + { + namespace gi = kat::linear_grid::grid_info; + const auto my_index = gi::thread::id_in_grid(); + auto grid_size_minus_my_index = gi::grid::num_threads() - my_index; + auto f_inner = [&] (size_t pos) { +// printf("Thead %d of block %d is handling pos %lu\n", threadIdx.x, blockIdx.x, pos); + kat::atomic::increment(&attending_threads_info[pos].num); + kat::atomic::max(&attending_threads_info[pos].extrema.grid_size_minus_first, grid_size_minus_my_index); + kat::atomic::max(&attending_threads_info[pos].extrema.last, my_index); + }; + + klcg::warp_per_input_element::at_grid_stride(length_to_cover, f_inner); + }; + + auto expected_value_retriever = [=] (size_t pos) { + // Which threads have handled position pos? + + auto total_num_warps = total_num_threads / kat::warp_size; + auto modular_pos = pos % total_num_warps; + uint32_t first_thread_to_handle_element = modular_pos * kat::warp_size; + uint32_t grid_size_minus_first = total_num_threads - first_thread_to_handle_element; + uint32_t last = (modular_pos+1) * kat::warp_size - 1; + uint32_t num = kat::warp_size; + return attending_threads_info { { grid_size_minus_first, last }, num }; + }; + + execute_non_uniform_testcase_on_gpu_and_check( + testcase_device_function, + expected_value_retriever, + num_values_to_populate, num_grid_blocks, num_threads_per_block, + make_exact_comparison + ); +} + + +} // TEST_SUITE("grid-level") diff --git a/cuda_code/grmat_test.cu b/cuda_code/grmat_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..622b1022eeb559ab05a1c8af31340a5204d6d04c --- /dev/null +++ b/cuda_code/grmat_test.cu @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + * + * NVIDIA CORPORATION and its licensors retain all intellectual property + * and proprietary rights in and to this software, related documentation + * and any modifications thereto. Any use, reproduction, disclosure or + * distribution of this software and related documentation without an express + * license agreement from NVIDIA CORPORATION is strictly prohibited. 
+ * + */ + +// Grmat tests +// Author: Ramakrishna Prabhu ramakrishnap@nvidia.com + +#include "gtest/gtest.h" +#include "high_res_clock.h" +#include "cuda_profiler_api.h" +#include +#include "test_utils.h" +#include + +#include + +//#include "functions.h" +// do the perf measurements +// enabled by command line parameter s'--perf' +static int PERF = 0; + +// iterations for perf tests +// enabled by command line parameter '--perf-iters" +static int PERF_MULTIPLIER = 5; + +void dumy(void* in, void* out ) { + +} + + +void get_array_of_strings (char** argv, char* args, int& argc) +{ + char* tmp = nullptr; + tmp = strtok(args, " "); + for (int i = 0; (tmp != nullptr); i++) + { + argv[i] = (char *)malloc (sizeof(char)*(strlen(tmp)+1)); + strcpy (argv[i], tmp); + argc += 1; + tmp = strtok(nullptr, " "); + } +} + +void release_array (int argc, char** argv) +{ + if (argv != nullptr) + { + for (int i = 0; i < argc; i++) + { + if (argv[i] != nullptr) + { + free (argv[i]); + } + } + } +} + +typedef struct Grmat_Usecase_t { + std::string argv; + Grmat_Usecase_t(){ + } + Grmat_Usecase_t(std::string args){ + argv = args; + } + ~Grmat_Usecase_t(){ + } +} Grmat_Usecase; + +class Tests_Grmat : public ::testing::TestWithParam { + public: + Tests_Grmat() { } + static void SetupTestCase() { } + static void TearDownTestCase() { + if (PERF) { + for (unsigned int i = 0; i < grmat_time.size(); ++i) { + std::cout << grmat_time[i]/PERF_MULTIPLIER << std::endl; + } + } + } + virtual void SetUp() { } + virtual void TearDown() { } + + static std::vector grmat_time; + + // Check the coulmns of src and destination after the graph has been formed + + template + void run_check_configuration (const Grmat_Usecase& param) { + const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); + gdf_column col_sources, col_destinations; + + + gdf_dtype gdf_vertexId_type; + + if (sizeof (T) == 4) + gdf_vertexId_type = GDF_INT32; + else + gdf_vertexId_type = GDF_INT64; + + col_sources.dtype = gdf_vertexId_type; + col_sources.valid = nullptr; + col_destinations.dtype = gdf_vertexId_type; + col_destinations.valid = nullptr; + col_sources.null_count = 0; + col_destinations.null_count = 0; + col_sources.null_count = 0; + col_destinations.null_count = 0; + + int rmat_scale = 0, edge_factor = 0, undirected = false; + char* argv[32] = {0}; + int argc = 0; + std::string tmp_argv(param.argv.c_str()); + get_array_of_strings (argv, (char *)tmp_argv.c_str(), argc); + rmat_scale = atoi(strrchr(argv[1], '=')+1); + edge_factor = atoi(strrchr(argv[2], '=')+1); + for (int i = 0; i < argc; i++) + { + if (strcmp(argv[i], "--rmat_undirected") == 0) + { + undirected = true; + break; + } + } + release_array(argc, argv); + + size_t vertices = 1 << rmat_scale; + size_t edges = vertices * edge_factor * ((undirected == true)? 
2 : 1); + size_t vertices1 = 0, edges1 = 0; + if ((vertices < 1000) || (edge_factor < 8)) + { + return; + } + + size_t free_before, total_before; + cudaMemGetInfo (&free_before, &total_before); + + ASSERT_EQ(gdf_grmat_gen ((char *)param.argv.c_str(), vertices1, edges1, &col_sources, &col_destinations, nullptr), GDF_SUCCESS); + + size_t free_after, total_after; + cudaMemGetInfo (&free_after, &total_after); + + ASSERT_EQ((0.99*(1<= vertices1), 0); + ASSERT_EQ((0.99*(1<= edges1), 0); + size_t memory_occupied_before = total_before - free_before; + size_t memory_occupied_after = total_after - free_after; + size_t expected_amount_of_memory = (edges1 * sizeof (T) * (2) ); // 2 - sources and destination + + if (expected_amount_of_memory < total_after) + { + ASSERT_EQ((expected_amount_of_memory <= (memory_occupied_after-memory_occupied_before)), 1); + } + + cudaStream_t stream{nullptr}; + ALLOC_FREE_TRY(col_sources.data, stream); + ALLOC_FREE_TRY(col_destinations.data, stream); + + //size_t free_release, total_release; + //cudaMemGetInfo (&free_release, &total_release); + //ASSERT_EQ(((total_release - free_release) < expected_amount_of_memory) ,1); + } + + template + void run_check_max(const Grmat_Usecase& param) { + int rmat_scale = 0, edge_factor = 0, undirected = false;; + char* argv[32] = {0}; + int argc = 0; + std::string tmp_argv(param.argv.c_str()); + + get_array_of_strings (argv, (char *)tmp_argv.c_str(), argc); + + rmat_scale = atoi(strrchr(argv[1], '=')+1); + edge_factor = atoi(strrchr(argv[2], '=')+1); + for (int i = 0; i < argc; i++) + { + if (strcmp(argv[i], "--rmat_undirected") == 0) + { + undirected = true; + break; + } + } + release_array(argc, argv); + edge_factor = edge_factor * ((undirected == true)? 2 :1); + size_t max_vertices = (1<<26); + size_t max_size = max_vertices * 23 * 4; + size_t current_size = (sizeof(VertexId) * (1<current_test_info(); + gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; + gdf_column col_sources, col_destinations; + + gdf_dtype gdf_vertexId_type; + + if (sizeof (VertexId) == 4) + gdf_vertexId_type = GDF_INT32; + else + gdf_vertexId_type = GDF_INT64; + + col_sources.dtype = gdf_vertexId_type; + col_sources.valid = nullptr; + col_destinations.dtype = gdf_vertexId_type; + col_destinations.valid = nullptr; + + col_sources.null_count = 0; + col_destinations.null_count = 0; + + size_t vertices = 0, edges = 0; + + ASSERT_EQ(gdf_grmat_gen ((char *)param.argv.c_str(), vertices, edges, &col_sources, &col_destinations, nullptr), GDF_SUCCESS); + + ASSERT_EQ((vertices < (1 << 30)), 1); + cudaStream_t stream{nullptr}; + ALLOC_FREE_TRY(col_sources.data, stream); + ALLOC_FREE_TRY(col_destinations.data, stream); + + } + + template + void run_check_intergrity(const Grmat_Usecase& param) { + const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); + gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; + gdf_column col_sources, col_destinations; + + gdf_dtype gdf_vertexId_type; + + gdf_vertexId_type = GDF_INT32; + + col_sources.dtype = gdf_vertexId_type; + col_sources.valid = nullptr; + col_destinations.dtype = gdf_vertexId_type; + col_destinations.valid = nullptr; + + col_sources.null_count = 0; + col_destinations.null_count = 0; + + size_t vertices = 0, edges = 0; + + ASSERT_EQ(gdf_grmat_gen ((char *)param.argv.c_str(), vertices, edges, &col_sources, &col_destinations, nullptr), GDF_SUCCESS); + std::vector src1_h(edges), dest1_h(edges); + + (cudaMemcpy(&src1_h[0], col_sources.data, sizeof(int) * edges, 
cudaMemcpyDeviceToHost)); + (cudaMemcpy(&dest1_h[0], col_destinations.data, sizeof(int) * edges, cudaMemcpyDeviceToHost)); + + col_sources.valid = nullptr; + col_destinations.valid = nullptr; + col_sources.null_count = 0; + col_destinations.null_count = 0; + + ASSERT_EQ(gdf_edge_list_view(G.get(), &col_sources, &col_destinations, nullptr), GDF_SUCCESS); + std::vector src2_h(edges), dest2_h(edges); + + (cudaMemcpy(&src2_h[0], G.get()->edgeList->src_indices->data, sizeof(int) * edges, cudaMemcpyDeviceToHost)); + (cudaMemcpy(&dest2_h[0], G.get()->edgeList->dest_indices->data, sizeof(int) * edges, cudaMemcpyDeviceToHost)); + + ASSERT_EQ( eq(src1_h,src2_h), 0); + ASSERT_EQ( eq(dest1_h,dest2_h), 0); + + cudaStream_t stream{nullptr}; + ALLOC_FREE_TRY(col_sources.data, stream); + ALLOC_FREE_TRY(col_destinations.data, stream); + } + + template + void run_check_with_different_size(const Grmat_Usecase& param) { + const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); + gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; + gdf_column col_sources, col_destinations; + + gdf_dtype gdf_vertexId_type; + + if (sizeof (T1) == 4) + gdf_vertexId_type = GDF_INT32; + else + gdf_vertexId_type = GDF_INT64; + + col_sources.dtype = gdf_vertexId_type; + col_sources.valid = nullptr; + col_destinations.dtype = gdf_vertexId_type; + col_destinations.valid = nullptr; + + col_sources.null_count = 0; + col_destinations.null_count = 0; + + size_t vertices1 = 0, edges1 = 0; + + ASSERT_EQ(gdf_grmat_gen ((char *)param.argv.c_str(), vertices1, edges1, &col_sources, &col_destinations, nullptr), GDF_SUCCESS); + std::vector src1_h(edges1), dest1_h(edges1); + + cudaMemcpy(&src1_h[0], col_sources.data, sizeof(T1) * edges1, cudaMemcpyDeviceToHost); + cudaMemcpy(&dest1_h[0], col_destinations.data, sizeof(T1) * edges1, cudaMemcpyDeviceToHost); + + cudaStream_t stream{nullptr}; + ALLOC_FREE_TRY(col_sources.data, stream); + ALLOC_FREE_TRY(col_destinations.data, stream); + + if (sizeof (T2) == 4) + gdf_vertexId_type = GDF_INT32; + else + gdf_vertexId_type = GDF_INT64; + + col_sources.dtype = gdf_vertexId_type; + col_destinations.dtype = gdf_vertexId_type; + col_sources.valid = nullptr; + col_destinations.valid = nullptr; + + col_sources.null_count = 0; + col_destinations.null_count = 0; + + size_t vertices2 = 0, edges2 = 0; + + ASSERT_EQ(gdf_grmat_gen ((char *)param.argv.c_str(), vertices2, edges2, &col_sources, &col_destinations, nullptr), GDF_SUCCESS); + + std::vector src2_h(edges2), dest2_h(edges2); + + (cudaMemcpy(&src2_h[0], col_sources.data, sizeof(T2) * edges2, cudaMemcpyDeviceToHost)); + (cudaMemcpy(&dest2_h[0], col_destinations.data, sizeof(T2) * edges2, cudaMemcpyDeviceToHost)); + + ASSERT_EQ( eq(src1_h, src2_h), 0); + ASSERT_EQ( eq(dest1_h, dest2_h), 0); + + ALLOC_FREE_TRY(col_sources.data, stream); + ALLOC_FREE_TRY(col_destinations.data, stream); + } + + template + void run_current_test(const Grmat_Usecase& param) { + const ::testing::TestInfo* const test_info =::testing::UnitTest::GetInstance()->current_test_info(); + + gdf_graph_ptr G{new gdf_graph, gdf_graph_deleter}; + gdf_column col_sources, col_destinations; + gdf_error status = GDF_CUDA_ERROR; + float alpha = 0.85; + float tol = 1E-5f; + int max_iter = 500; + bool has_guess = false; + + HighResClock hr_clock; + double time_tmp; + gdf_column_ptr col_grmat; + gdf_dtype gdf_vertexId_type; + + if (sizeof (VertexId) == 4) + gdf_vertexId_type = GDF_INT32; + else + gdf_vertexId_type = GDF_INT64; + + // Currently, the page rank 
supports only int32 and doesn't support long + gdf_vertexId_type = GDF_INT32; + col_sources.dtype = gdf_vertexId_type; + col_sources.valid = nullptr; + col_destinations.dtype = gdf_vertexId_type; + col_destinations.valid = nullptr; + + col_sources.null_count = 0; + col_destinations.null_count = 0; + + size_t vertices = 0, edges = 0; + + ASSERT_EQ(gdf_grmat_gen ((char *)param.argv.c_str(), vertices, edges, &col_sources, &col_destinations, nullptr), GDF_SUCCESS); + + gdf_dtype_extra_info extra_info; + extra_info.time_unit = TIME_UNIT_NONE; + col_sources.dtype_info = extra_info; + col_sources.valid = nullptr; + col_destinations.dtype_info = extra_info; + col_destinations.valid = nullptr; + col_sources.null_count = 0; + col_destinations.null_count = 0; + std::vector grmat(vertices); + col_grmat = create_gdf_column(grmat); + + ASSERT_EQ(gdf_edge_list_view(G.get(), &col_sources, &col_destinations, nullptr),0); + if (manual_tanspose) + ASSERT_EQ(gdf_add_transposed_adj_list(G.get()),0); + + int device = 0; + (cudaGetDevice (&device)); + + (cudaDeviceSynchronize()); + if (PERF) { + hr_clock.start(); + for (int i = 0; i < PERF_MULTIPLIER; ++i) { + status = gdf_pagerank(G.get(), col_grmat.get(), nullptr, nullptr, alpha, tol, max_iter, has_guess); + (cudaDeviceSynchronize()); + } + hr_clock.stop(&time_tmp); + grmat_time.push_back(time_tmp); + } + else { + cudaProfilerStart(); + status = gdf_pagerank(G.get(), col_grmat.get(), nullptr, nullptr, alpha, tol, max_iter, has_guess); + cudaProfilerStop(); + (cudaDeviceSynchronize()); + } + cudaStream_t stream{nullptr}; + ALLOC_FREE_TRY (col_sources.data, stream); + ALLOC_FREE_TRY (col_destinations.data, stream); + + col_sources.data = nullptr; + col_destinations.data = nullptr; + EXPECT_EQ(status,0); + } +}; + +std::vector Tests_Grmat::grmat_time; + +TEST_P(Tests_Grmat, CheckFP32) { + run_current_test(GetParam()); + run_current_test(GetParam()); +} + +TEST_P(Tests_Grmat, CheckFP64) { + run_current_test(GetParam()); + run_current_test(GetParam()); +} + +TEST_P(Tests_Grmat, CheckInt32) +{ + run_check_max (GetParam()); +} + +TEST_P(Tests_Grmat, CheckInt64) +{ + run_check_max (GetParam()); +} + +TEST_P (Tests_Grmat, misc) +{ + run_check_configuration (GetParam()); + run_check_configuration (GetParam()); + run_check_intergrity (GetParam()); + run_check_with_different_size (GetParam()); + run_check_with_different_size (GetParam()); +} + +//--gtest_filter=*simple_test* +INSTANTIATE_TEST_CASE_P(simple_test, Tests_Grmat, + ::testing::Values( Grmat_Usecase("grmat --rmat_scale=16 --rmat_edgefactor=14 --device=0 --normalized --quiet") + ,Grmat_Usecase("grmat --rmat_scale=16 --rmat_edgefactor=16 --device=0 --rmat_undirected --quiet") + ,Grmat_Usecase("grmat --rmat_scale=17 --rmat_edgefactor=22 --device=0 --normalized --quiet") + ) + ); + + +int main(int argc, char **argv) { + srand(42); + ::testing::InitGoogleTest(&argc, argv); + for (int i = 0; i < argc; i++) { + if (strcmp(argv[i], "--perf") == 0) + PERF = 1; + if (strcmp(argv[i], "--perf-iters") == 0) + PERF_MULTIPLIER = atoi(argv[i+1]); + } + + return RUN_ALL_TESTS(); +} + + diff --git a/cuda_code/group_argmin_6.cu b/cuda_code/group_argmin_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..f98f6aca36143cd23d3f944b973cd14ff77283e7 --- /dev/null +++ b/cuda_code/group_argmin_6.cu @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include + +namespace cudf { +namespace groupby { +namespace detail { +std::unique_ptr group_argmin(column_view const& values, + size_type num_groups, + rmm::device_vector const& group_labels, + column_view const& key_sort_order, + rmm::mr::device_memory_resource* mr, + cudaStream_t stream) +{ + auto indices = type_dispatcher(values.type(), + reduce_functor{}, + values, + num_groups, + group_labels, + rmm::mr::get_default_resource(), + stream); + + // The functor returns the index of minimum in the sorted values. + // We need the index of minimum in the original unsorted values. + // So use indices to gather the sort order used to sort `values`. + // Gather map cannot be null so we make a view with the mask removed. + // The values in data buffer of indices corresponding to null values was + // initialized to ARGMIN_SENTINEL which is an out of bounds index value (-1) + // and causes the gathered value to be null. + column_view null_removed_indices( + data_type(type_to_id()), + indices->size(), + static_cast(indices->view().template data())); + auto result_table = + cudf::detail::gather(table_view({key_sort_order}), + null_removed_indices, + indices->nullable() ? cudf::detail::out_of_bounds_policy::IGNORE + : cudf::detail::out_of_bounds_policy::NULLIFY, + cudf::detail::negative_index_policy::NOT_ALLOWED, + mr, + stream); + + return std::move(result_table->release()[0]); +} + +} // namespace detail +} // namespace groupby +} // namespace cudf diff --git a/cuda_code/group_norm_kernel_8.cu b/cuda_code/group_norm_kernel_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..53ce77fa37b113c9016206d16f027b7bf422b0ee --- /dev/null +++ b/cuda_code/group_norm_kernel_8.cu @@ -0,0 +1,998 @@ +#define TORCH_ASSERT_ONLY_METHOD_OPERATORS +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef AT_PER_OPERATOR_HEADERS +#include +#else +#include +#endif + +namespace at { +namespace native { + +namespace { + +constexpr int kCUDANumThreads = 256; +constexpr int kReduceTileSize = 32; + +template +__global__ void RowwiseMomentsCUDAKernel( + int64_t N, + T eps, + const T* X, + T* mean, + T* rstd) { + using T_ACC = acc_type; + using WelfordType = WelfordData; + using WelfordOp = + WelfordOps>; + + const int64_t i = blockIdx.x; + WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false}; + WelfordType val(0, 0, 0, 0); + for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { + const int64_t index = i * N + j; + val = welford_op.reduce(val, static_cast(X[index]), index); + } + if (blockDim.x <= C10_WARP_SIZE) { + val = cuda_utils::WarpReduce(val, welford_op); + } else { + // There will be a warning if we declare a __shared__ WelfordType array. 
+ // https://github.com/pytorch/pytorch/pull/13967 + __shared__ typename std::aligned_storage< + sizeof(WelfordType), + alignof(WelfordType)>::type val_shared[C10_WARP_SIZE]; + WelfordType* val_shared_ptr = reinterpret_cast(val_shared); + val = cuda_utils::BlockReduce( + val, + welford_op, + /*identity_element=*/WelfordType(0, 0, 0, 0), + val_shared_ptr); + } + if (threadIdx.x == 0) { + T_ACC m1; + T_ACC m2; + thrust::tie(m2, m1) = welford_op.project(val); + mean[i] = m1; + rstd[i] = c10::cuda::compat::rsqrt(m2 + static_cast(eps)); + } +} + +template +__global__ void ComputeFusedParamsCUDAKernel( + int64_t N, + int64_t C, + int64_t group, + const T* mean, + const T* rstd, + const T* gamma, + const T* beta, + acc_type* a, + acc_type* b) { + using T_ACC = acc_type; + const int64_t index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < N * C) { + const int64_t ng = index / (C / group); + const int64_t c = index % C; + const T_ACC scale = (gamma == nullptr) + ? static_cast(rstd[ng]) + : static_cast(rstd[ng]) * static_cast(gamma[c]); + a[index] = scale; + b[index] = -scale * static_cast(mean[ng]) + + ((beta == nullptr) ? 0 : static_cast(beta[c])); + } +} + +template +__global__ void Compute1dBackwardFusedParamsCUDAKernel( + int64_t C, + int64_t group, + const T* dY, + const T* X, + const T* mean, + const T* rstd, + const T* gamma, + acc_type* c2, + acc_type* c3) { + using T_ACC = acc_type; + const int64_t G = group; + const int64_t D = C / G; + const int64_t n = blockIdx.x; + const int64_t g = blockIdx.y; + const int64_t ng = n * G + g; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + for (int64_t i = threadIdx.x; i < D; i += blockDim.x) { + const int64_t index = ng * D + i; + const int64_t c = g * D + i; + const T_ACC gamma_v = + gamma == nullptr ? T_ACC(1) : static_cast(gamma[c]); + sum1 += dY[index] * X[index] * gamma_v; + sum2 += dY[index] * gamma_v; + } + if (blockDim.x <= C10_WARP_SIZE) { + sum1 = cuda_utils::WarpReduceSum(sum1); + sum2 = cuda_utils::WarpReduceSum(sum2); + } else { + __shared__ T_ACC ds_shared[C10_WARP_SIZE]; + __shared__ T_ACC db_shared[C10_WARP_SIZE]; + sum1 = cuda_utils::BlockReduceSum(sum1, ds_shared); + sum2 = cuda_utils::BlockReduceSum(sum2, db_shared); + } + if (threadIdx.x == 0) { + const T_ACC s = T_ACC(1) / static_cast(D); + const T_ACC x = (sum2 * static_cast(mean[ng]) - sum1) * + static_cast(rstd[ng]) * static_cast(rstd[ng]) * + static_cast(rstd[ng]) * s; + c2[ng] = x; + c3[ng] = -x * static_cast(mean[ng]) - + sum2 * static_cast(rstd[ng]) * s; + } +} + +template +__global__ void GammaBeta1dBackwardCUDAKernel1( + int64_t N, + int64_t C, + int64_t group, + const T* dY, + const T* X, + const T* mean, + const T* rstd, + T* dgamma, + T* dbeta) { + using T_ACC = acc_type; + const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; + if (c < C) { + const int64_t G = group; + const int64_t D = C / G; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + for (int64_t n = 0; n < N; ++n) { + const int64_t nc = n * C + c; + const int64_t ng = n * G + c / D; + const T_ACC dy_acc = static_cast(dY[nc]); + const T_ACC x_acc = static_cast(X[nc]); + sum1 += (dgamma == nullptr) + ? T_ACC(0) + : ((dy_acc * x_acc - dy_acc * static_cast(mean[ng])) * + static_cast(rstd[ng])); + sum2 += (dbeta == nullptr) ? 
T_ACC(0) : dy_acc; + } + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } +} + +template +__global__ void GammaBeta1dBackwardCUDAKernel2( + int64_t N, + int64_t C, + int64_t group, + const T* dY, + const T* X, + const T* mean, + const T* rstd, + T* dgamma, + T* dbeta) { + using T_ACC = acc_type; + __shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1]; + __shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1]; + const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; + T_ACC dg_sum1 = 0; + T_ACC dg_sum2 = 0; + T_ACC db_sum1 = 0; + T_ACC db_sum2 = 0; + if (c < C) { + const int64_t G = group; + const int64_t D = C / G; + // Accumulate each 32 cols into a 32 * 32 tile. + // Since the blockDim is (32, 16), accumulate twice for 1st and 2nd 16 rows + // of a 32 contiguous elements. + for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) { + const int64_t n1 = n; + const int64_t n2 = n + blockDim.y; + const int64_t nc1 = n1 * C + c; + const int64_t nc2 = n2 * C + c; + const int64_t ng1 = n1 * G + c / D; + const int64_t ng2 = n2 * G + c / D; + const T_ACC dy1_acc = static_cast(dY[nc1]); + const T_ACC x1_acc = static_cast(X[nc1]); + dg_sum1 += dgamma == nullptr + ? T_ACC(0) + : ((dy1_acc * x1_acc - dy1_acc * static_cast(mean[ng1])) * + static_cast(rstd[ng1])); + db_sum1 += dbeta == nullptr ? T_ACC(0) : dy1_acc; + if (n2 < N) { + const T_ACC dy2_acc = static_cast(dY[nc2]); + const T_ACC x2_acc = static_cast(X[nc2]); + dg_sum2 += dgamma == nullptr + ? T_ACC(0) + : ((dy2_acc * x2_acc - dy2_acc * static_cast(mean[ng2])) * + static_cast(rstd[ng2])); + db_sum2 += dbeta == nullptr ? T_ACC(0) : dy2_acc; + } + } + } + + // Write accumulated tile to shared memory. + g_shared[threadIdx.y][threadIdx.x] = dg_sum1; + g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; + b_shared[threadIdx.y][threadIdx.x] = db_sum1; + b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; + __syncthreads(); + + // Do warp reduce for the 1st 16 cols in the tile. + T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; + T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; + sum1 = cuda_utils::WarpReduceSum(sum1); + sum2 = cuda_utils::WarpReduceSum(sum2); + if (threadIdx.x == 0) { + const int64_t c = blockIdx.x * blockDim.x + threadIdx.y; + if (c < C) { + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } + } + + // Do warp reduce for the 2nd 16 cols in the tile. 
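  // What the two reduction passes do: after the __syncthreads() above,
  // tile row r of g_shared / b_shared holds the partial sums for batch
  // rows n with n % 32 == r, and tile column x holds channel
  // blockIdx.x * blockDim.x + x. Reading g_shared[threadIdx.x][threadIdx.y]
  // transposes that layout, so the 32 lanes of one warp (fixed threadIdx.y)
  // together hold all 32 partial sums of a single channel, and
  // WarpReduceSum can collapse them with register shuffles. For example,
  // with blockIdx.x == 0 and threadIdx.y == 3, the pass above reduced
  // g_shared[0..31][3] into dgamma[3] / dbeta[3]; the pass below handles
  // the upper half of the tile, i.e. channel 3 + blockDim.y == 19. The
  // kReduceTileSize + 1 padding keeps these transposed reads free of
  // shared-memory bank conflicts.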
+ sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; + sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; + sum1 = cuda_utils::WarpReduceSum(sum1); + sum2 = cuda_utils::WarpReduceSum(sum2); + if (threadIdx.x == 0) { + const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; + if (c < C) { + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } + } +} + +template +__global__ void ComputeInternalGradientsCUDAKernel( + int64_t HxW, + const T* dY, + const T* X, + acc_type* ds, + acc_type* db) { + using T_ACC = acc_type; + const int64_t nc = blockIdx.x; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + for (int64_t hw = threadIdx.x; hw < HxW; hw += blockDim.x) { + const int64_t index = nc * HxW + hw; + sum1 += static_cast(dY[index]) * static_cast(X[index]); + sum2 += static_cast(dY[index]); + } + if (blockDim.x <= C10_WARP_SIZE) { + sum1 = cuda_utils::WarpReduceSum(sum1); + sum2 = cuda_utils::WarpReduceSum(sum2); + } else { + __shared__ T_ACC ds_shared[C10_WARP_SIZE]; + __shared__ T_ACC db_shared[C10_WARP_SIZE]; + sum1 = cuda_utils::BlockReduceSum(sum1, ds_shared); + sum2 = cuda_utils::BlockReduceSum(sum2, db_shared); + } + if (threadIdx.x == 0) { + ds[nc] = sum1; + db[nc] = sum2; + } +} + +template +__global__ void ComputeBackwardFusedParamsCUDAKernel( + int64_t C, + int64_t HxW, + int64_t group, + const T* mean, + const T* rstd, + const T* gamma, + const acc_type* ds, + const acc_type* db, + acc_type* c2, + acc_type* c3) { + using T_ACC = acc_type; + const int64_t G = group; + const int64_t D = C / G; + const int64_t n = blockIdx.x; + const int64_t g = blockIdx.y; + const int64_t ng = n * G + g; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + for (int64_t i = threadIdx.x; i < D; i += blockDim.x) { + const int64_t index = ng * D + i; + const int64_t c = g * D + i; + const T_ACC gamma_v = + gamma == nullptr ? T_ACC(1) : static_cast(gamma[c]); + sum1 += ds[index] * gamma_v; + sum2 += db[index] * gamma_v; + } + if (blockDim.x <= C10_WARP_SIZE) { + sum1 = cuda_utils::WarpReduceSum(sum1); + sum2 = cuda_utils::WarpReduceSum(sum2); + } else { + __shared__ T_ACC ds_shared[C10_WARP_SIZE]; + __shared__ T_ACC db_shared[C10_WARP_SIZE]; + sum1 = cuda_utils::BlockReduceSum(sum1, ds_shared); + sum2 = cuda_utils::BlockReduceSum(sum2, db_shared); + } + if (threadIdx.x == 0) { + const T_ACC s = T_ACC(1) / static_cast(D * HxW); + const T_ACC x = (sum2 * static_cast(mean[ng]) - sum1) * + static_cast(rstd[ng]) * static_cast(rstd[ng]) * + static_cast(rstd[ng]) * s; + c2[ng] = x; + c3[ng] = -x * static_cast(mean[ng]) - + sum2 * static_cast(rstd[ng]) * s; + } +} + +template +__global__ void GammaBetaBackwardCUDAKernel1( + int64_t N, + int64_t C, + int64_t group, + const T* mean, + const T* rstd, + const acc_type* ds, + const acc_type* db, + T* dgamma, + T* dbeta) { + using T_ACC = acc_type; + const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; + if (c < C) { + const int64_t G = group; + const int64_t D = C / G; + T_ACC sum1 = 0; + T_ACC sum2 = 0; + for (int64_t n = 0; n < N; ++n) { + const int64_t nc = n * C + c; + const int64_t ng = n * G + c / D; + sum1 += (dgamma == nullptr) + ? T_ACC(0) + : ((ds[nc] - db[nc] * static_cast(mean[ng])) * + static_cast(rstd[ng])); + sum2 += (dbeta == nullptr) ? 
T_ACC(0) : db[nc]; + } + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } +} + +template +__global__ void GammaBetaBackwardCUDAKernel2( + int64_t N, + int64_t C, + int64_t group, + const T* mean, + const T* rstd, + const acc_type* ds, + const acc_type* db, + T* dgamma, + T* dbeta) { + using T_ACC = acc_type; + __shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1]; + __shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1]; + const int64_t c = blockIdx.x * blockDim.x + threadIdx.x; + T_ACC dg_sum1 = 0; + T_ACC dg_sum2 = 0; + T_ACC db_sum1 = 0; + T_ACC db_sum2 = 0; + if (c < C) { + const int64_t G = group; + const int64_t D = C / G; + // Accumulate each 32 cols into a 32 * 32 tile. + // Since the blockDim is (32, 16), accumulate twice for 1st and 2nd 16 rows + // of a 32 contiguous elements. + for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) { + const int64_t n1 = n; + const int64_t n2 = n + blockDim.y; + const int64_t nc1 = n1 * C + c; + const int64_t nc2 = n2 * C + c; + const int64_t ng1 = n1 * G + c / D; + const int64_t ng2 = n2 * G + c / D; + dg_sum1 += dgamma == nullptr + ? T_ACC(0) + : ((ds[nc1] - db[nc1] * static_cast(mean[ng1])) * + static_cast(rstd[ng1])); + db_sum1 += dbeta == nullptr ? T_ACC(0) : db[nc1]; + if (n2 < N) { + dg_sum2 += dgamma == nullptr + ? T_ACC(0) + : ((ds[nc2] - db[nc2] * static_cast(mean[ng2])) * + static_cast(rstd[ng2])); + db_sum2 += dbeta == nullptr ? T_ACC(0) : db[nc2]; + } + } + } + + // Write accumulated tile to shared memory. + g_shared[threadIdx.y][threadIdx.x] = dg_sum1; + g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; + b_shared[threadIdx.y][threadIdx.x] = db_sum1; + b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; + __syncthreads(); + + // Do warp reduce for the 1st 16 cols in the tile. + T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; + T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; + sum1 = cuda_utils::WarpReduceSum(sum1); + sum2 = cuda_utils::WarpReduceSum(sum2); + if (threadIdx.x == 0) { + const int64_t c = blockIdx.x * blockDim.x + threadIdx.y; + if (c < C) { + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } + } + + // Do warp reduce for the 2st 16 cols in the tile. 
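  // In formula form, the per-channel result produced by this kernel is
  //   dgamma[c] = sum_n (ds[n,c] - db[n,c] * mean[n,g]) * rstd[n,g]
  //   dbeta[c]  = sum_n  db[n,c]
  // with g = c / (C / group), and ds[n,c] = sum_hw dY * X,
  // db[n,c] = sum_hw dY precomputed by ComputeInternalGradientsCUDAKernel.
  // This follows from Y = (X - mean) * rstd * gamma + beta, so
  // dL/dgamma_c = sum_{n,hw} dY * (X - mean) * rstd and
  // dL/dbeta_c = sum_{n,hw} dY; it is the same reduction as in
  // GammaBeta1dBackwardCUDAKernel2, just with the HxW loop already folded
  // into ds / db.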
+ sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; + sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; + sum1 = cuda_utils::WarpReduceSum(sum1); + sum2 = cuda_utils::WarpReduceSum(sum2); + if (threadIdx.x == 0) { + const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; + if (c < C) { + if (dgamma != nullptr) { + dgamma[c] = sum1; + } + if (dbeta != nullptr) { + dbeta[c] = sum2; + } + } + } +} + +template +void GroupNorm1dForward( + const Tensor& X, + const Tensor& mean, + const Tensor& rstd, + const Tensor& gamma, + const Tensor& beta, + int64_t N, + int64_t C, + int64_t group, + Tensor& Y) { + using T_ACC = acc_type; + const int64_t G = group; + const int64_t D = C / G; + if (gamma.defined() && beta.defined()) { + auto iter = TensorIteratorConfig() + .resize_outputs(false) + .add_owned_output(Y.view({N, G, D})) + .add_owned_input(X.view({N, G, D})) + .add_owned_input(mean.view({N, G, 1})) + .add_owned_input(rstd.view({N, G, 1})) + .add_owned_input(gamma.view({1, G, D})) + .add_owned_input(beta.view({1, G, D})) + .build(); + gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma, T beta) -> T { + return (static_cast(x) - static_cast(mean)) * + static_cast(rstd) * static_cast(gamma) + + static_cast(beta); + }); + } else if (gamma.defined()) { + auto iter = TensorIteratorConfig() + .resize_outputs(false) + .add_owned_output(Y.view({N, G, D})) + .add_owned_input(X.view({N, G, D})) + .add_owned_input(mean.view({N, G, 1})) + .add_owned_input(rstd.view({N, G, 1})) + .add_owned_input(gamma.view({1, G, D})) + .build(); + gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma) -> T { + return (static_cast(x) - static_cast(mean)) * + static_cast(rstd) * static_cast(gamma); + }); + } else if (beta.defined()) { + auto iter = TensorIteratorConfig() + .resize_outputs(false) + .add_owned_output(Y.view({N, G, D})) + .add_owned_input(X.view({N, G, D})) + .add_owned_input(mean.view({N, G, 1})) + .add_owned_input(rstd.view({N, G, 1})) + .add_owned_input(beta.view({1, G, D})) + .build(); + gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T beta) -> T { + return (static_cast(x) - static_cast(mean)) * + static_cast(rstd) + + static_cast(beta); + }); + } else { + auto iter = TensorIteratorConfig() + .resize_outputs(false) + .add_owned_output(Y.view({N * G, D})) + .add_owned_input(X.view({N * G, D})) + .add_owned_input(mean.view({N * G, 1})) + .add_owned_input(rstd.view({N * G, 1})) + .build(); + gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T { + return (static_cast(x) - static_cast(mean)) * + static_cast(rstd); + }); + } + AT_CUDA_CHECK(cudaGetLastError()); +} + +template +void GroupNormKernelImplInternal( + const Tensor& X, + const Tensor& gamma, + const Tensor& beta, + int64_t N, + int64_t C, + int64_t HxW, + int64_t group, + T eps, + Tensor& Y, + Tensor& mean, + Tensor& rstd) { + using T_ACC = acc_type; + TORCH_CHECK(X.numel() == N * C * HxW); + TORCH_CHECK(!gamma.defined() || gamma.numel() == C); + TORCH_CHECK(!beta.defined() || beta.numel() == C); + if (N == 0) { + return; + } + const int64_t G = group; + const int64_t D = C / G; + const T* X_data = X.data_ptr(); + T* mean_data = mean.data_ptr(); + T* rstd_data = rstd.data_ptr(); + + cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); + const int64_t num_threads = D * HxW < cuda_utils::kCUDABlockReduceNumThreads + ? 
at::cuda::warp_size() + : cuda_utils::kCUDABlockReduceNumThreads; + RowwiseMomentsCUDAKernel<<>>( + D * HxW, eps, X_data, mean_data, rstd_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (HxW == 1) { + GroupNorm1dForward(X, mean, rstd, gamma, beta, N, C, G, Y); + } else if (!gamma.defined() && !beta.defined()) { + auto iter = TensorIteratorConfig() + .resize_outputs(false) + .add_owned_output(Y.view({N * G, D * HxW})) + .add_owned_input(X.view({N * G, D * HxW})) + .add_owned_input(mean.view({N * G, 1})) + .add_owned_input(rstd.view({N * G, 1})) + .build(); + gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T { + return (static_cast(x) - static_cast(mean)) * + static_cast(rstd); + }); + } else { + const auto kAccType = + (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) + ? kFloat + : X.scalar_type(); + Tensor a = at::empty({N, C}, X.options().dtype(kAccType)); + Tensor b = at::empty({N, C}, X.options().dtype(kAccType)); + const T* gamma_data = gamma.defined() ? gamma.data_ptr() : nullptr; + const T* beta_data = beta.defined() ? beta.data_ptr() : nullptr; + T_ACC* a_data = a.data_ptr(); + T_ACC* b_data = b.data_ptr(); + + // TODO: Since there is some issues in gpu_kernel_multiple_outputs, we are + // using maunal kernel here. Make it using gpu_kernel_multiple_outputs once + // the issue fixed. + const int64_t B = (N * C + kCUDANumThreads - 1) / kCUDANumThreads; + ComputeFusedParamsCUDAKernel<<>>( + N, C, G, mean_data, rstd_data, gamma_data, beta_data, a_data, b_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + auto iter = TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .resize_outputs(false) + .add_owned_output(Y.view({N * C, HxW})) + .add_owned_input(X.view({N * C, HxW})) + .add_owned_input(a.view({N * C, 1})) + .add_owned_input(b.view({N * C, 1})) + .build(); + gpu_kernel(iter, [] GPU_LAMBDA(T x, T_ACC a, T_ACC b) -> T { + return a * static_cast(x) + b; + }); + } + AT_CUDA_CHECK(cudaGetLastError()); +} + +void GroupNormKernelImpl( + const Tensor& X, + const Tensor& gamma, + const Tensor& beta, + int64_t N, + int64_t C, + int64_t HxW, + int64_t group, + double eps, + Tensor& Y, + Tensor& mean, + Tensor& rstd) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + X.scalar_type(), + "GroupNormKernelImpl", + [&]() { + GroupNormKernelImplInternal( + X, + gamma, + beta, + N, + C, + HxW, + group, + static_cast(eps), + Y, + mean, + rstd); + }); +} + +template +void GroupNorm1dBackward( + const Tensor dY, + const Tensor X, + const Tensor mean, + const Tensor rstd, + const Tensor gamma, + int64_t N, + int64_t C, + int64_t group, + Tensor& dX, + Tensor& dgamma, + Tensor& dbeta) { + using T_ACC = acc_type; + const int64_t G = group; + const int64_t D = C / G; + const T* dY_data = dY.data_ptr(); + const T* X_data = X.data_ptr(); + const T* mean_data = mean.data_ptr(); + const T* rstd_data = rstd.data_ptr(); + + cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); + if (dX.defined()) { + const T* gamma_data = gamma.defined() ? gamma.data_ptr() : nullptr; + const auto kAccType = + (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) + ? kFloat + : X.scalar_type(); + Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType)); + Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType)); + T_ACC* c2_data = c2.data_ptr(); + T_ACC* c3_data = c3.data_ptr(); + const int64_t num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads + ? 
at::cuda::warp_size() + : cuda_utils::kCUDABlockReduceNumThreads; + Compute1dBackwardFusedParamsCUDAKernel + <<>>( + C, + G, + dY_data, + X_data, + mean_data, + rstd_data, + gamma_data, + c2_data, + c3_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (gamma.defined()) { + auto iter = TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .resize_outputs(false) + .add_owned_output(dX.view({N, G, D})) + .add_owned_input(dY.view({N, G, D})) + .add_owned_input(X.view({N, G, D})) + .add_owned_input(rstd.view({N, G, 1})) + .add_owned_input(gamma.view({1, G, D})) + .add_owned_input(c2.view({N, G, 1})) + .add_owned_input(c3.view({N, G, 1})) + .build(); + gpu_kernel( + iter, + [] GPU_LAMBDA(T dy, T x, T rstd, T gamma, T_ACC c2, T_ACC c3) -> T { + const T_ACC c1 = + static_cast(rstd) * static_cast(gamma); + return c1 * static_cast(dy) + c2 * static_cast(x) + + c3; + }); + } else { + auto iter = TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .resize_outputs(false) + .add_owned_output(dX.view({N * G, D})) + .add_owned_input(dY.view({N * G, D})) + .add_owned_input(X.view({N * G, D})) + .add_owned_input(rstd.view({N * G, 1})) + .add_owned_input(c2.view({N * G, 1})) + .add_owned_input(c3.view({N * G, 1})) + .build(); + gpu_kernel( + iter, [] GPU_LAMBDA(T dy, T x, T rstd, T_ACC c2, T_ACC c3) -> T { + const T_ACC c1 = static_cast(rstd); + return c1 * static_cast(dy) + c2 * static_cast(x) + + c3; + }); + } + } + if (dgamma.defined() || dbeta.defined()) { + T* dgamma_data = dgamma.defined() ? dgamma.data_ptr() : nullptr; + T* dbeta_data = dbeta.defined() ? dbeta.data_ptr() : nullptr; + if (N <= 128) { + const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads; + GammaBeta1dBackwardCUDAKernel1<<>>( + N, + C, + G, + dY_data, + X_data, + mean_data, + rstd_data, + dgamma_data, + dbeta_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } else { + const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize; + // The algorithm for colwise reduction here is to accumulate each 32 cols + // to a 32 * 32 tile and write the tile to shared memmory. Then do warp + // reduce for each col in the tile. So here the blockDim must be (32, 16). + constexpr int kThreadX = kReduceTileSize; + constexpr int kThreadY = kReduceTileSize / 2; + GammaBeta1dBackwardCUDAKernel2 + <<>>( + N, + C, + G, + dY_data, + X_data, + mean_data, + rstd_data, + dgamma_data, + dbeta_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + } +} + +template +void GroupNormBackwardKernelImplInternal( + const Tensor& dY, + const Tensor& X, + const Tensor& mean, + const Tensor& rstd, + const Tensor& gamma, + int64_t N, + int64_t C, + int64_t HxW, + int64_t group, + Tensor& dX, + Tensor& dgamma, + Tensor& dbeta) { + using T_ACC = acc_type; + const int64_t G = group; + const int64_t D = C / G; + TORCH_CHECK(dY.numel() == N * C * HxW); + TORCH_CHECK(X.numel() == N * C * HxW); + TORCH_CHECK(mean.numel() == N * G); + TORCH_CHECK(rstd.numel() == N * G); + TORCH_CHECK(!gamma.defined() || gamma.numel() == C); + cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); + + if (N == 0) { + if (dgamma.defined()) { + dgamma.fill_(T(0)); + } + if (dbeta.defined()) { + dbeta.fill_(T(0)); + } + return; + } + + const T* dY_data = dY.data_ptr(); + const T* X_data = X.data_ptr(); + const T* mean_data = mean.data_ptr(); + const T* rstd_data = rstd.data_ptr(); + const T* gamma_data = gamma.defined() ? gamma.data_ptr() : nullptr; + const auto kAccType = + (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) + ? 
kFloat + : X.scalar_type(); + Tensor ds = at::empty({N, C}, X.options().dtype(kAccType)); + Tensor db = at::empty({N, C}, X.options().dtype(kAccType)); + T_ACC* ds_data = ds.data_ptr(); + T_ACC* db_data = db.data_ptr(); + + if (HxW == 1) { + GroupNorm1dBackward( + dY, X, mean, rstd, gamma, N, C, G, dX, dgamma, dbeta); + return; + } + + int warp_size = at::cuda::warp_size(); + int64_t num_threads = HxW < cuda_utils::kCUDABlockReduceNumThreads + ? warp_size + : cuda_utils::kCUDABlockReduceNumThreads; + ComputeInternalGradientsCUDAKernel<<>>( + HxW, dY_data, X_data, ds_data, db_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (dX.defined()) { + Tensor c1 = at::empty({0}, X.options().dtype(kAccType)); + Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType)); + Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType)); + T_ACC* c2_data = c2.data_ptr(); + T_ACC* c3_data = c3.data_ptr(); + + if (gamma.defined()) { + auto iter = TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .add_output(c1) + .add_owned_input(rstd.view({N, G, 1})) + .add_owned_input(gamma.view({1, G, D})) + .build(); + gpu_kernel(iter, [] GPU_LAMBDA(T rstd, T gamma) -> T_ACC { + return static_cast(rstd) * static_cast(gamma); + }); + } + + num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads + ? warp_size + : cuda_utils::kCUDABlockReduceNumThreads; + ComputeBackwardFusedParamsCUDAKernel + <<>>( + C, + HxW, + G, + mean_data, + rstd_data, + gamma_data, + ds_data, + db_data, + c2_data, + c3_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + + if (gamma.defined()) { + auto iter = TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .resize_outputs(false) + .add_owned_output(dX.view({N * G, D, HxW})) + .add_owned_input(dY.view({N * G, D, HxW})) + .add_owned_input(X.view({N * G, D, HxW})) + .add_owned_input(c1.view({N * G, D, 1})) + .add_owned_input(c2.view({N * G, 1, 1})) + .add_owned_input(c3.view({N * G, 1, 1})) + .build(); + gpu_kernel( + iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T { + return c1 * static_cast(dy) + c2 * static_cast(x) + + c3; + }); + } else { + auto iter = TensorIteratorConfig() + .check_all_same_dtype(std::is_same::value) + .resize_outputs(false) + .add_owned_output(dX.view({N * G, D * HxW})) + .add_owned_input(dY.view({N * G, D * HxW})) + .add_owned_input(X.view({N * G, D * HxW})) + .add_owned_input(rstd.view({N * G, 1})) + .add_owned_input(c2.view({N * G, 1})) + .add_owned_input(c3.view({N * G, 1})) + .build(); + gpu_kernel( + iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T { + return c1 * static_cast(dy) + c2 * static_cast(x) + + c3; + }); + } + } + if (dgamma.defined() || dbeta.defined()) { + T* dgamma_data = dgamma.defined() ? dgamma.data_ptr() : nullptr; + T* dbeta_data = dbeta.defined() ? dbeta.data_ptr() : nullptr; + if (N <= 128) { + // For small batch size, do colwise reduce directly. + const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads; + GammaBetaBackwardCUDAKernel1<<>>( + N, + C, + G, + mean_data, + rstd_data, + ds_data, + db_data, + dgamma_data, + dbeta_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } else { + const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize; + // The algorithm for colwise reduction here is to accumulate each 32 cols + // to a 32 * 32 tile and write the tile to shared memmory. Then do warp + // reduce for each col in the tile. So here the blockDim must be (32, 16). 
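    // A minimal, self-contained sketch of the tile-plus-warp-reduce pattern
    // described in the comment above. The kernel name (colwise_sum_demo),
    // TILE constant and launch shape are made up for this illustration and
    // are not part of this file; it simply sums each column of an N x C
    // matrix the same way the gamma/beta reductions below do.

    #include <cuda_runtime.h>
    #include <cstdio>
    #include <vector>

    constexpr int TILE = 32;  // tile width == warp size

    __device__ float warp_reduce_sum(float v) {
      for (int offset = TILE / 2; offset > 0; offset /= 2)
        v += __shfl_down_sync(0xffffffff, v, offset);
      return v;
    }

    // out[c] = sum over rows of in[n * C + c];
    // launch with dim3 block(TILE, TILE / 2) and grid.x = ceil(C / TILE).
    __global__ void colwise_sum_demo(const float* in, float* out, int N, int C) {
      __shared__ float tile[TILE][TILE + 1];  // +1 pads away bank conflicts
      const int c = blockIdx.x * TILE + threadIdx.x;
      float sum1 = 0.f, sum2 = 0.f;
      if (c < C) {
        // Two row phases per iteration, mirroring the "accumulate twice for
        // 1st and 2nd 16 rows" scheme used above.
        for (int n = threadIdx.y; n < N; n += blockDim.y * 2) {
          sum1 += in[n * C + c];
          const int n2 = n + blockDim.y;
          if (n2 < N) sum2 += in[n2 * C + c];
        }
      }
      tile[threadIdx.y][threadIdx.x] = sum1;
      tile[threadIdx.y + blockDim.y][threadIdx.x] = sum2;
      __syncthreads();

      // Transposed read: the warp with a fixed threadIdx.y now holds the 32
      // partial sums of one column and collapses them with shuffles.
      float s = warp_reduce_sum(tile[threadIdx.x][threadIdx.y]);
      if (threadIdx.x == 0) {
        const int out_c = blockIdx.x * TILE + threadIdx.y;
        if (out_c < C) out[out_c] = s;
      }
      s = warp_reduce_sum(tile[threadIdx.x][threadIdx.y + blockDim.y]);
      if (threadIdx.x == 0) {
        const int out_c = blockIdx.x * TILE + threadIdx.y + blockDim.y;
        if (out_c < C) out[out_c] = s;
      }
    }

    int main() {
      const int N = 100, C = 70;
      std::vector<float> h(N * C, 1.0f);
      float *d_in, *d_out;
      cudaMalloc(&d_in, N * C * sizeof(float));
      cudaMalloc(&d_out, C * sizeof(float));
      cudaMemcpy(d_in, h.data(), N * C * sizeof(float), cudaMemcpyHostToDevice);
      colwise_sum_demo<<<(C + TILE - 1) / TILE, dim3(TILE, TILE / 2)>>>(d_in, d_out, N, C);
      std::vector<float> out(C);
      cudaMemcpy(out.data(), d_out, C * sizeof(float), cudaMemcpyDeviceToHost);
      printf("out[0] = %.1f (expected %d)\n", out[0], N);  // all-ones input -> N
      cudaFree(d_in);
      cudaFree(d_out);
      return 0;
    }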
+ constexpr int kThreadX = kReduceTileSize; + constexpr int kThreadY = kReduceTileSize / 2; + GammaBetaBackwardCUDAKernel2 + <<>>( + N, + C, + G, + mean_data, + rstd_data, + ds_data, + db_data, + dgamma_data, + dbeta_data); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + } + } +} + +void GroupNormBackwardKernelImpl( + const Tensor& dY, + const Tensor& X, + const Tensor& mean, + const Tensor& rstd, + const Tensor& gamma, + int64_t N, + int64_t C, + int64_t HxW, + int64_t group, + Tensor& dX, + Tensor& dgamma, + Tensor& dbeta) { + AT_DISPATCH_FLOATING_TYPES_AND2( + at::ScalarType::Half, + at::ScalarType::BFloat16, + X.scalar_type(), + "GroupNormBackwardKernelImpl", + [&]() { + GroupNormBackwardKernelImplInternal( + dY, X, mean, rstd, gamma, N, C, HxW, group, dX, dgamma, dbeta); + }); +} + +} // namespace + +REGISTER_DISPATCH(GroupNormKernel, &GroupNormKernelImpl); +REGISTER_DISPATCH(GroupNormBackwardKernel, &GroupNormBackwardKernelImpl); + +} // namespace native +} // namespace at diff --git a/cuda_code/group_norm_op_20.cu b/cuda_code/group_norm_op_20.cu new file mode 100644 index 0000000000000000000000000000000000000000..27174630227c8123a31cb1c70d5eb5f5b3ee5107 --- /dev/null +++ b/cuda_code/group_norm_op_20.cu @@ -0,0 +1,292 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include "paddle/fluid/operators/group_norm_op.h" + +namespace paddle { +namespace operators { + +template +__global__ void GroupNormForwardGetMeanAndVar(const T* x, int N, int C, + int imsize, int groups, + int group_size, T* mean, T* var) { + int gid = blockIdx.y; + int cid = blockIdx.x; + int bid = blockIdx.z; + int number = min(group_size, static_cast(C - gid * group_size)); + int ccid = gid * group_size + cid; + if (ccid >= C) return; + T x_mean = 0, x_var = 0; + for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { + T val = x[(bid * C + ccid) * imsize + imid]; + x_mean += val; + x_var += val * val; + } + x_mean /= number * imsize; + x_var /= number * imsize; + __shared__ T s_mem[2]; + if (threadIdx.x == 0) { + s_mem[0] = s_mem[1] = 0; + } + __syncthreads(); + paddle::platform::CudaAtomicAdd(&s_mem[0], x_mean); + paddle::platform::CudaAtomicAdd(&s_mem[1], x_var); + __syncthreads(); + if (threadIdx.x == 0) { + paddle::platform::CudaAtomicAdd(&mean[bid * groups + gid], s_mem[0]); + paddle::platform::CudaAtomicAdd(&var[bid * groups + gid], s_mem[1]); + } +} + +template +__global__ void GroupNormForward(const T* x, const T* mean, const T* var, + const T* scale, const T* bias, int N, int C, + int imsize, int groups, int group_size, + T epsilon, T* y, T* real_var) { + int gid = blockIdx.y; + int cid = blockIdx.x; + int bid = blockIdx.z; + int ccid = gid * group_size + cid; + if (ccid >= C) return; + T x_mean = mean[bid * groups + gid]; + T x_var = var[bid * groups + gid]; + x_var = x_var - x_mean * x_mean; + T var_inv = 1.0 / sqrt(x_var + epsilon); + if (cid == 0 && threadIdx.x == 0) real_var[bid * groups + gid] = x_var; + for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { + T val = x[(bid * C + ccid) * imsize + imid]; + val = (val - x_mean) * var_inv; + if (scale) val *= scale[gid * group_size + cid]; + if (bias) val += bias[gid * group_size + cid]; + y[(bid * C + ccid) * imsize + imid] = val; + } +} + +template +class GroupNormKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + auto* scale = ctx.Input("Scale"); + auto* bias = ctx.Input("Bias"); + auto* x = ctx.Input("X"); + + auto* y = ctx.Output("Y"); + auto* mean = ctx.Output("Mean"); + auto* var = ctx.Output("Variance"); + const auto groups = ctx.Attr("groups"); + + const auto x_dims = x->dims(); + const int group_size = (x_dims[1] - 1) / groups + 1; + + y->mutable_data(ctx.GetPlace()); + mean->mutable_data(ctx.GetPlace()); + var->mutable_data(ctx.GetPlace()); + math::SetConstant set_zero; + auto& dev_ctx = ctx.template device_context(); + Tensor temp_var; + temp_var.mutable_data(var->dims(), ctx.GetPlace()); + + set_zero(dev_ctx, mean, static_cast(0)); + set_zero(dev_ctx, &temp_var, static_cast(0)); + + auto* x_data = x->data(); + auto* y_data = y->data(); + auto* mean_data = mean->data(); + auto* var_data = var->data(); + auto* temp_var_data = temp_var.data(); + + const T* scale_data = nullptr; + if (scale) scale_data = scale->data(); + const T* bias_data = nullptr; + if (bias) bias_data = bias->data(); + + int imsize = x_dims[2] * x_dims[3]; + int block_size = std::min(512, imsize); + dim3 grid(group_size, groups, x_dims[0]); + dim3 threads(block_size, 1, 1); + GroupNormForwardGetMeanAndVar<<>>( + x_data, x_dims[0], x_dims[1], imsize, groups, group_size, mean_data, + temp_var_data); + GroupNormForward<<>>( + x_data, mean_data, temp_var_data, scale_data, bias_data, x_dims[0], 
+ x_dims[1], imsize, groups, group_size, epsilon, y_data, var_data); + } +}; + +template +__global__ void GroupNormBackwardGetMeanAndVar( + const T* x, const T* mean, const T* var, const T* scale, const T* d_y, + int N, int C, int imsize, int groups, int group_size, T epsilon, T* d_x, + T* d_mean, T* d_var, T* d_scale, T* d_bias) { + int gid = blockIdx.y; + int cid = blockIdx.x; + int bid = blockIdx.z; + int number = min(group_size, static_cast(C - gid * group_size)); + int ccid = gid * group_size + cid; + if (ccid >= C) return; + T x_mean = mean[bid * groups + gid]; + T x_var = var[bid * groups + gid]; + T var_inv = 1.0 / sqrt(x_var + epsilon); + T d_var_inv = 0, d_x_mean = 0; + T d_mean_data = 0, d_var_data = 0, d_scale_data = 0, d_bias_data = 0; + + for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { + T tmp = x[(bid * C + ccid) * imsize + imid]; + T val = (tmp - x_mean) * var_inv; + T dval = d_y[(bid * C + ccid) * imsize + imid]; + if (d_bias) d_bias_data += dval; + if (d_scale) d_scale_data += val * dval; + if (scale) dval = dval * scale[ccid]; + d_var_data += (tmp - x_mean) * dval; + T d_tmp = dval * var_inv; + if (d_x) d_x[(bid * C + ccid) * imsize + imid] = d_tmp; + d_mean_data -= d_tmp; + } + + __shared__ T s_mem[4]; + if (threadIdx.x == 0) { + s_mem[0] = s_mem[1] = 0; + if (d_scale) s_mem[2] = 0; + if (d_bias) s_mem[3] = 0; + } + __syncthreads(); + paddle::platform::CudaAtomicAdd(&s_mem[0], d_mean_data); + paddle::platform::CudaAtomicAdd(&s_mem[1], d_var_data); + if (d_scale) paddle::platform::CudaAtomicAdd(&s_mem[2], d_scale_data); + if (d_bias) paddle::platform::CudaAtomicAdd(&s_mem[3], d_bias_data); + __syncthreads(); + if (threadIdx.x == 0) { + paddle::platform::CudaAtomicAdd(&d_mean[bid * groups + gid], s_mem[0]); + paddle::platform::CudaAtomicAdd(&d_var[bid * groups + gid], s_mem[1]); + if (d_scale) paddle::platform::CudaAtomicAdd(&d_scale[ccid], s_mem[2]); + if (d_bias) paddle::platform::CudaAtomicAdd(&d_bias[ccid], s_mem[3]); + } +} + +template +__global__ void GroupNormBackward(const T* x, const T* mean, const T* var, + const T* d_mean, const T* d_var, int N, int C, + int imsize, int groups, int group_size, + T epsilon, T* d_x) { + int gid = blockIdx.y; + int cid = blockIdx.x; + int bid = blockIdx.z; + int number = min(group_size, static_cast(C - gid * group_size)); + int ccid = gid * group_size + cid; + if (ccid >= C) return; + T x_mean = mean[bid * groups + gid]; + T x_var = var[bid * groups + gid]; + T d_x_mean = d_mean[bid * groups + gid]; + T d_var_inv = d_var[bid * groups + gid]; + + T d_x_var = + -1.0 / (2 * (x_var + epsilon) * sqrt(x_var + epsilon)) * d_var_inv; + d_x_mean -= 2 * d_x_var * x_mean; + d_x_var /= number * imsize; + d_x_mean /= number * imsize; + for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) { + T tmp = x[(bid * C + ccid) * imsize + imid]; + if (d_x) + d_x[(bid * C + ccid) * imsize + imid] += d_x_mean + tmp * 2 * d_x_var; + } +} + +template +class GroupNormGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + auto* x = ctx.Input("X"); + auto* mean = ctx.Input("Mean"); + auto* var = ctx.Input("Variance"); + auto* scale = ctx.Input("Scale"); + auto* d_y = ctx.Input(framework::GradVarName("Y")); + const auto groups = ctx.Attr("groups"); + + // init output + auto* d_x = ctx.Output(framework::GradVarName("X")); + auto* d_scale = ctx.Output(framework::GradVarName("Scale")); + auto* d_bias = 
ctx.Output(framework::GradVarName("Bias")); + + const auto& x_dims = x->dims(); + const int group_size = (x_dims[1] - 1) / groups + 1; + + T* d_x_data = nullptr; + if (d_x) { + d_x->mutable_data(ctx.GetPlace()); + d_x_data = d_x->data(); + } + math::SetConstant set_zero; + auto& dev_ctx = ctx.template device_context(); + + Tensor temp_var; + temp_var.mutable_data(var->dims(), ctx.GetPlace()); + set_zero(dev_ctx, &temp_var, static_cast(0)); + T* temp_var_data = temp_var.data(); + + Tensor temp_mean; + temp_mean.mutable_data(var->dims(), ctx.GetPlace()); + set_zero(dev_ctx, &temp_mean, static_cast(0)); + T* temp_mean_data = temp_mean.data(); + + auto* x_data = x->data(); + auto* y_data = d_y->data(); + auto* mean_data = mean->data(); + auto* var_data = var->data(); + T* d_scale_data = nullptr; + if (d_scale) { + d_scale->mutable_data(ctx.GetPlace()); + set_zero(dev_ctx, d_scale, static_cast(0)); + d_scale_data = d_scale->data(); + } + T* d_bias_data = nullptr; + if (d_bias) { + d_bias->mutable_data(ctx.GetPlace()); + set_zero(dev_ctx, d_bias, static_cast(0)); + d_bias_data = d_bias->data(); + } + + const T* scale_data = nullptr; + if (scale) scale_data = scale->data(); + + int imsize = x_dims[2] * x_dims[3]; + int block_size = std::min(512, imsize); + dim3 grid(group_size, groups, x_dims[0]); + dim3 threads(block_size, 1, 1); + GroupNormBackwardGetMeanAndVar<<>>( + x_data, mean_data, var_data, scale_data, y_data, x_dims[0], x_dims[1], + imsize, groups, group_size, epsilon, d_x_data, temp_mean_data, + temp_var_data, d_scale_data, d_bias_data); + GroupNormBackward<<>>( + x_data, mean_data, var_data, temp_mean_data, temp_var_data, x_dims[0], + x_dims[1], imsize, groups, group_size, epsilon, d_x_data); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + group_norm, + ops::GroupNormKernel, + ops::GroupNormKernel); +REGISTER_OP_CUDA_KERNEL( + group_norm_grad, + ops::GroupNormGradKernel, + ops::GroupNormGradKernel); diff --git a/cuda_code/groupby_24.cu b/cuda_code/groupby_24.cu new file mode 100644 index 0000000000000000000000000000000000000000..569b545c35a138a3428491de66cfbeb2d89297d0 --- /dev/null +++ b/cuda_code/groupby_24.cu @@ -0,0 +1,341 @@ + +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include
+#include
+#include +#include +#include +#include "groupby_kernels.cuh" +#include "groupby/common/legacy/aggregation_requests.hpp" +#include "groupby/common/legacy/type_info.hpp" +#include "groupby/common/legacy/utils.hpp" +#include +#include +#include +#include +#include + +namespace cudf { +namespace groupby { + +namespace hash { +namespace { + +template +auto build_aggregation_map(table const& input_keys, table const& input_values, + device_table const& d_input_keys, + device_table const& d_input_values, + std::vector const& ops, Options options, + cudaStream_t stream) { + cudf::size_type constexpr unused_key{std::numeric_limits::max()}; + cudf::size_type constexpr unused_value{ + std::numeric_limits::max()}; + CUDF_EXPECTS(input_keys.num_rows() < unused_key, + "Groupby input size too large."); + + // The exact output size is unknown a priori, therefore, use the input size as + // an upper bound. + cudf::size_type const output_size_estimate{input_keys.num_rows()}; + + cudf::table sparse_output_values{ + output_size_estimate, + target_dtypes(column_dtypes(input_values), ops), + column_dtype_infos(input_values), + values_have_nulls, + false, + stream}; + + initialize_with_identity(sparse_output_values, ops, stream); + + auto d_sparse_output_values = + device_table::create(sparse_output_values, stream); + rmm::device_vector d_ops(ops); + + // If we ignore null keys, then nulls are not equivalent + bool const null_keys_are_equal{not options.ignore_null_keys}; + bool const skip_key_rows_with_nulls{keys_have_nulls and + not null_keys_are_equal}; + + row_hasher hasher{d_input_keys}; + row_equality_comparator rows_equal{ + d_input_keys, d_input_keys, null_keys_are_equal}; + + using map_type = + concurrent_unordered_map; + + auto map = map_type::create(compute_hash_table_size(input_keys.num_rows()), + unused_key, unused_value, hasher, rows_equal); + + // TODO: Explore optimal block size and work per thread. + cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256}; + + if (skip_key_rows_with_nulls) { + auto row_bitmask{cudf::row_bitmask(input_keys, stream)}; + build_aggregation_map + <<>>(*map, d_input_keys, d_input_values, + *d_sparse_output_values, d_ops.data().get(), + row_bitmask.data().get()); + } else { + build_aggregation_map + <<>>(*map, d_input_keys, d_input_values, + *d_sparse_output_values, d_ops.data().get(), nullptr); + } + CHECK_STREAM(stream); + + return std::make_pair(std::move(map), sparse_output_values); +} + +template +auto extract_results(table const& input_keys, table const& input_values, + device_table const& d_input_keys, + table const& sparse_output_values, Map const& map, + cudaStream_t stream) { + + cudf::table output_keys{ + cudf::allocate_like( + input_keys, + keys_have_nulls ? RETAIN : NEVER, + stream)}; + cudf::table output_values{ + cudf::allocate_like( + sparse_output_values, + values_have_nulls ? 
RETAIN : NEVER, + stream)}; + + auto d_sparse_output_values = + device_table::create(sparse_output_values, stream); + + auto d_output_keys = device_table::create(output_keys, stream); + auto d_output_values = device_table::create(output_values, stream); + + cudf::size_type* d_result_size{nullptr}; + RMM_TRY(RMM_ALLOC(&d_result_size, sizeof(cudf::size_type), stream)); + CUDA_TRY(cudaMemsetAsync(d_result_size, 0, sizeof(cudf::size_type), stream)); + + cudf::util::cuda::grid_config_1d grid_params{input_keys.num_rows(), 256}; + + extract_groupby_result + <<>>(map, d_input_keys, *d_output_keys, *d_sparse_output_values, + *d_output_values, d_result_size); + + CHECK_STREAM(stream); + + cudf::size_type result_size{-1}; + CUDA_TRY(cudaMemcpyAsync(&result_size, d_result_size, sizeof(cudf::size_type), + cudaMemcpyDeviceToHost, stream)); + + // Update size and null count of output columns + auto update_column = [result_size](gdf_column* col) { + CUDF_EXPECTS(col != nullptr, "Attempt to update Null column."); + col->size = result_size; + set_null_count(*col); + return col; + }; + + std::transform(output_keys.begin(), output_keys.end(), output_keys.begin(), + update_column); + std::transform(output_values.begin(), output_values.end(), + output_values.begin(), update_column); + + return std::make_pair(output_keys, output_values); +} + +/**---------------------------------------------------------------------------* + * @brief Computes the groupby operation for a set of keys, values, and + * operators using a hash-based implementation. + * + * The algorithm has two primary steps: + * 1.) Build a hash map + * 2.) Extract the non-empty entries from the hash table + * + * 1.) The hash map is built by inserting every row `i` from the `keys` and + * `values` tables as a single (key,value) pair. When the pair is inserted, if + * the key was not already present in the map, then the corresponding value is + * simply copied to the output. If the key was already present in the map, + * then the inserted `values` row is aggregated with the existing row. This + * aggregation is done for every element `j` in the row by applying aggregation + * operation `j` between the new and existing element. + * + * This process yields a hash map and table holding the resulting aggregation + * rows. The aggregation output table is sparse, i.e., not every row is + * populated. This is because the size of the output is not known a priori, and + * so the output aggregation table is allocated to be as large as the input (the + * upper bound of the output size). + * + * 2.) The final result is materialized by extracting the non-empty keys from + * the hash map and the non-empty rows from the sparse output aggregation table. + * Every non-empty key and value row is appended to the output key and value + * tables. 
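+ *
+ * As a small illustration: grouping keys {a, b, a} with values {1, 2, 3} under
+ * a SUM aggregation inserts (a,1) and (b,2) as new map entries and then
+ * aggregates the 3 into a's row, leaving the sparse rows {a:4, b:2, <empty>};
+ * step 2 compacts these into the dense output keys {a, b} and values {4, 2}.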
+ * + * @tparam keys_have_nulls Indicates keys have one or more null values + * @tparam values_have_nulls Indicates values have one or more null values + * @param keys Table whose rows are used as keys of the groupby + * @param values Table whose rows are aggregated in the groupby + * @param ops Set of aggregation operations to perform for each element in a row + * in the values table + * @param options Options to control behavior of the groupby operation + * @param stream CUDA stream on which all memory allocations and kernels will be + * executed + * @return A pair of the output keys table and output values table + *---------------------------------------------------------------------------**/ +template +auto compute_hash_groupby(cudf::table const& keys, cudf::table const& values, + std::vector const& ops, Options options, + cudaStream_t stream) { + CUDF_EXPECTS(values.num_columns() == static_cast(ops.size()), + "Size mismatch between number of value columns and number of " + "aggregations."); + + // An "aggregation request" is the combination of a `gdf_column*` to a column + // of values, and an aggregation operation enum indicating the aggregation + // requested to be performed on the column + std::vector original_requests(values.num_columns()); + std::transform(values.begin(), values.end(), ops.begin(), + original_requests.begin(), + [](gdf_column const* col, operators op) { + return std::make_pair(const_cast(col), op); + }); + + // Some aggregations are "compound", meaning they need be satisfied via the + // composition of 1 or more "simple" aggregation requests. For example, MEAN + // is satisfied via the division of the SUM by the COUNT aggregation. We + // translate these compound requests into simple requests, and compute the + // groupby operation for these simple requests. Later, we translate the simple + // requests back to compound request results. 
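+  // For example, a single MEAN request on a value column is decomposed here
+  // into a SUM and a COUNT simple request; compute_original_requests below
+  // rebuilds the MEAN column from those two simple results.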
+ std::vector simple_agg_columns = + compound_to_simple(original_requests); + + std::vector simple_values_columns; + std::vector simple_operators; + for (auto const& p : simple_agg_columns) { + const AggRequestType& agg_req_type = p.first; + simple_values_columns.push_back( + const_cast(agg_req_type.first)); + simple_operators.push_back(agg_req_type.second); + } + + cudf::table simple_values_table{simple_values_columns}; + + auto const d_input_keys = device_table::create(keys); + auto const d_input_values = device_table::create(simple_values_table); + + // Step 1: Build hash map + auto result = build_aggregation_map( + keys, simple_values_table, *d_input_keys, *d_input_values, + simple_operators, options, stream); + + auto const map{std::move(result.first)}; + cudf::table sparse_output_values{result.second}; + + // Step 2: Extract non-empty entries + cudf::table output_keys; + cudf::table simple_output_values; + std::tie(output_keys, simple_output_values) = + extract_results( + keys, values, *d_input_keys, sparse_output_values, *map, stream); + + // Delete intermediate results storage + sparse_output_values.destroy(); + + // If any of the original requests were compound, compute them from the + // results of simple aggregation requests + cudf::table final_output_values = compute_original_requests( + original_requests, simple_agg_columns, simple_output_values, stream); + + return std::make_pair(output_keys, final_output_values); +} + +/**---------------------------------------------------------------------------* + * @brief Returns appropriate callable instantiation of `compute_hash_groupby` + * based on presence of null values in keys and values. + * + * @param keys The groupby key columns + * @param values The groupby value columns + * @return Instantiated callable of compute_hash_groupby + *---------------------------------------------------------------------------**/ +auto groupby_null_specialization(table const& keys, table const& values) { + if (cudf::has_nulls(keys)) { + if (cudf::has_nulls(values)) { + return compute_hash_groupby; + } else { + return compute_hash_groupby; + } + } else { + if (cudf::has_nulls(values)) { + return compute_hash_groupby; + } else { + return compute_hash_groupby; + } + } +} + +} // namespace +namespace detail { +std::pair groupby(cudf::table const& keys, + cudf::table const& values, + std::vector const& ops, + Options options, + cudaStream_t stream = 0) { + CUDF_EXPECTS(keys.num_rows() == values.num_rows(), + "Size mismatch between number of rows in keys and values."); + + verify_operators(values, ops); + + // Empty inputs + if (keys.num_rows() == 0) { + return std::make_pair( + cudf::empty_like(keys), + cudf::table(0, target_dtypes(column_dtypes(values), ops), + column_dtype_infos(values))); + } + + auto compute_groupby = groupby_null_specialization(keys, values); + + cudf::table output_keys; + cudf::table output_values; + std::tie(output_keys, output_values) = + compute_groupby(keys, values, ops, options, stream); + + update_nvcategories(keys, output_keys, values, output_values); + + return std::make_pair(output_keys, output_values); +} +} // namespace detail + +std::pair groupby(cudf::table const& keys, + cudf::table const& values, + std::vector const& ops, + Options options) { + return detail::groupby(keys, values, ops, options); +} +} // namespace hash +} // namespace groupby +} // namespace cudf diff --git a/cuda_code/groupby_39.cu b/cuda_code/groupby_39.cu new file mode 100644 index 
0000000000000000000000000000000000000000..133246cce1e83980fe472a0cd5ebe97c87ecc415 --- /dev/null +++ b/cuda_code/groupby_39.cu @@ -0,0 +1,428 @@ +/* + * Copyright (c) 2019-2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "group_reductions.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace cudf { +namespace groupby { +namespace detail { +/** + * @brief Functor to dispatch aggregation with + * + * This functor is to be used with `aggregation_dispatcher` to compute the + * appropriate aggregation. If the values on which to run the aggregation are + * unchanged, then this functor should be re-used. This is because it stores + * memoised sorted and/or grouped values and re-using will save on computation + * of these values. + */ +struct store_result_functor { + store_result_functor(size_type col_idx, + column_view const& values, + sort::sort_groupby_helper& helper, + cudf::detail::result_cache& cache, + cudaStream_t stream, + rmm::mr::device_memory_resource* mr) + : col_idx(col_idx), values(values), helper(helper), cache(cache), stream(stream), mr(mr) + { + } + + template + void operator()(aggregation const& agg) + { + } + + private: + /** + * @brief Get the grouped values + * + * Computes the grouped values from @p values on first invocation and returns + * the stored result on subsequent invocation + */ + column_view get_grouped_values() + { + // TODO (dm): After implementing single pass multi-agg, explore making a + // cache of all grouped value columns rather than one at a time + if (grouped_values) + return grouped_values->view(); + else if (sorted_values) + // TODO (dm): When we implement scan, it wouldn't be ok to return sorted + // values when asked for grouped values. Change this then. 
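+      // Until scan support arrives, values sorted within each group are in
+      // particular grouped, so reusing the memoised sorted values is correct
+      // for the aggregations handled here.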
+ return sorted_values->view(); + else + grouped_values = helper.grouped_values(values); + return grouped_values->view(); + }; + + /** + * @brief Get the grouped and sorted values + * + * Computes the grouped and sorted (within each group) values from @p values + * on first invocation and returns the stored result on subsequent invocation + */ + column_view get_sorted_values() + { + if (not sorted_values) sorted_values = helper.sorted_values(values); + return sorted_values->view(); + }; + + private: + size_type col_idx; ///< Index of column in requests being operated on + sort::sort_groupby_helper& helper; ///< Sort helper + cudf::detail::result_cache& cache; ///< cache of results to store into + column_view const& values; ///< Column of values to group and aggregate + + cudaStream_t stream; ///< CUDA stream on which to execute kernels + rmm::mr::device_memory_resource* mr; ///< Memory resource to allocate space for results + + std::unique_ptr sorted_values; ///< Memoised grouped and sorted values + std::unique_ptr grouped_values; ///< Memoised grouped values +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + cache.add_result( + col_idx, + agg, + get_grouped_values().nullable() + ? detail::group_count_valid( + get_grouped_values(), helper.group_labels(), helper.num_groups(), mr, stream) + : detail::group_count_all(helper.group_offsets(), helper.num_groups(), mr, stream)); +} + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + cache.add_result( + col_idx, agg, detail::group_count_all(helper.group_offsets(), helper.num_groups(), mr, stream)); +} + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + cache.add_result(col_idx, + agg, + detail::group_sum( + get_grouped_values(), helper.num_groups(), helper.group_labels(), mr, stream)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + cache.add_result(col_idx, + agg, + detail::group_argmax(get_grouped_values(), + helper.num_groups(), + helper.group_labels(), + helper.key_sort_order(), + mr, + stream)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + cache.add_result(col_idx, + agg, + detail::group_argmin(get_grouped_values(), + helper.num_groups(), + helper.group_labels(), + helper.key_sort_order(), + mr, + stream)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + auto result = [&]() { + if (cudf::is_fixed_width(values.type())) { + return detail::group_min( + get_grouped_values(), helper.num_groups(), helper.group_labels(), mr, stream); + } else { + auto argmin_agg = make_argmin_aggregation(); + operator()(*argmin_agg); + column_view argmin_result = cache.get_result(col_idx, *argmin_agg); + + // We make a view of ARGMIN result without a null mask and gather using + // this mask. The values in data buffer of ARGMIN result corresponding + // to null values was initialized to ARGMIN_SENTINEL which is an out of + // bounds index value and causes the gathered value to be null. 
+ column_view null_removed_map( + data_type(type_to_id()), + argmin_result.size(), + static_cast(argmin_result.template data())); + auto transformed_result = + cudf::detail::gather(table_view({values}), + null_removed_map, + argmin_result.nullable() ? cudf::detail::out_of_bounds_policy::IGNORE + : cudf::detail::out_of_bounds_policy::NULLIFY, + cudf::detail::negative_index_policy::NOT_ALLOWED, + mr, + stream); + return std::move(transformed_result->release()[0]); + } + }(); + + cache.add_result(col_idx, agg, std::move(result)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + auto result = [&]() { + if (cudf::is_fixed_width(values.type())) { + return detail::group_max( + get_grouped_values(), helper.num_groups(), helper.group_labels(), mr, stream); + } else { + auto argmax_agg = make_argmax_aggregation(); + operator()(*argmax_agg); + column_view argmax_result = cache.get_result(col_idx, *argmax_agg); + + // We make a view of ARGMAX result without a null mask and gather using + // this mask. The values in data buffer of ARGMAX result corresponding + // to null values was initialized to ARGMAX_SENTINEL which is an out of + // bounds index value and causes the gathered value to be null. + column_view null_removed_map( + data_type(type_to_id()), + argmax_result.size(), + static_cast(argmax_result.template data())); + auto transformed_result = + cudf::detail::gather(table_view({values}), + null_removed_map, + argmax_result.nullable() ? cudf::detail::out_of_bounds_policy::IGNORE + : cudf::detail::out_of_bounds_policy::NULLIFY, + cudf::detail::negative_index_policy::NOT_ALLOWED, + mr, + stream); + return std::move(transformed_result->release()[0]); + } + }(); + + cache.add_result(col_idx, agg, std::move(result)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + auto sum_agg = make_sum_aggregation(); + auto count_agg = make_count_aggregation(); + operator()(*sum_agg); + operator()(*count_agg); + column_view sum_result = cache.get_result(col_idx, *sum_agg); + column_view count_result = cache.get_result(col_idx, *count_agg); + + // TODO (dm): Special case for timestamp. Add target_type_impl for it. 
+ // Blocked until we support operator+ on timestamps + auto result = + cudf::detail::binary_operation(sum_result, + count_result, + binary_operator::DIV, + cudf::detail::target_type(values.type(), aggregation::MEAN), + mr, + stream); + cache.add_result(col_idx, agg, std::move(result)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + auto var_agg = static_cast(agg); + auto mean_agg = make_mean_aggregation(); + auto count_agg = make_count_aggregation(); + operator()(*mean_agg); + operator()(*count_agg); + column_view mean_result = cache.get_result(col_idx, *mean_agg); + column_view group_sizes = cache.get_result(col_idx, *count_agg); + + auto result = detail::group_var(get_grouped_values(), + mean_result, + group_sizes, + helper.group_labels(), + var_agg._ddof, + mr, + stream); + cache.add_result(col_idx, agg, std::move(result)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + auto std_agg = static_cast(agg); + auto var_agg = make_variance_aggregation(std_agg._ddof); + operator()(*var_agg); + column_view var_result = cache.get_result(col_idx, *var_agg); + + auto result = cudf::detail::unary_operation(var_result, unary_op::SQRT, mr, stream); + cache.add_result(col_idx, agg, std::move(result)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + auto count_agg = make_count_aggregation(); + operator()(*count_agg); + column_view group_sizes = cache.get_result(col_idx, *count_agg); + auto quantile_agg = static_cast(agg); + + auto result = detail::group_quantiles(get_sorted_values(), + group_sizes, + helper.group_offsets(), + helper.num_groups(), + quantile_agg._quantiles, + quantile_agg._interpolation, + mr, + stream); + cache.add_result(col_idx, agg, std::move(result)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + auto count_agg = make_count_aggregation(); + operator()(*count_agg); + column_view group_sizes = cache.get_result(col_idx, *count_agg); + + auto result = detail::group_quantiles(get_sorted_values(), + group_sizes, + helper.group_offsets(), + helper.num_groups(), + {0.5}, + interpolation::LINEAR, + mr, + stream); + cache.add_result(col_idx, agg, std::move(result)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + auto nunique_agg = static_cast(agg); + + auto result = detail::group_nunique(get_sorted_values(), + helper.group_labels(), + helper.num_groups(), + helper.group_offsets(), + nunique_agg._null_handling, + mr, + stream); + cache.add_result(col_idx, agg, std::move(result)); +}; + +template <> +void store_result_functor::operator()(aggregation const& agg) +{ + if (cache.has_result(col_idx, agg)) return; + + auto nth_element_agg = static_cast(agg); + + auto count_agg = make_count_aggregation(nth_element_agg._null_handling); + if (count_agg->kind == aggregation::COUNT_VALID) + operator()(*count_agg); + else if (count_agg->kind == aggregation::COUNT_ALL) + operator()(*count_agg); + else + CUDF_FAIL("Wrong count aggregation kind"); + column_view group_sizes = cache.get_result(col_idx, *count_agg); + + cache.add_result(col_idx, + agg, + detail::group_nth_element(get_grouped_values(), + group_sizes, + helper.group_labels(), + helper.group_offsets(), + 
helper.num_groups(), + nth_element_agg._n, + nth_element_agg._null_handling, + mr, + stream)); +} +} // namespace detail + +// Sort-based groupby +std::pair, std::vector> groupby::sort_aggregate( + std::vector const& requests, + cudaStream_t stream, + rmm::mr::device_memory_resource* mr) +{ + // We're going to start by creating a cache of results so that aggs that + // depend on other aggs will not have to be recalculated. e.g. mean depends on + // sum and count. std depends on mean and count + cudf::detail::result_cache cache(requests.size()); + + for (size_t i = 0; i < requests.size(); i++) { + auto store_functor = + detail::store_result_functor(i, requests[i].values, helper(), cache, stream, mr); + for (size_t j = 0; j < requests[i].aggregations.size(); j++) { + // TODO (dm): single pass compute all supported reductions + cudf::detail::aggregation_dispatcher( + requests[i].aggregations[j]->kind, store_functor, *requests[i].aggregations[j]); + } + } + + auto results = detail::extract_results(requests, cache); + + return std::make_pair(helper().unique_keys(mr, stream), std::move(results)); +} +} // namespace groupby +} // namespace cudf diff --git a/cuda_code/gspmm.cu b/cuda_code/gspmm.cu new file mode 100644 index 0000000000000000000000000000000000000000..cd6eabd7e861621dae0b1038c539d31622dbcaca --- /dev/null +++ b/cuda_code/gspmm.cu @@ -0,0 +1,177 @@ +#define GRB_USE_APSPIE +#define private public + +#include +#include +#include + +#include +#include +#include + +#include "graphblas/mmio.hpp" +#include "graphblas/util.hpp" +#include "graphblas/graphblas.hpp" + +#include +#include + +int main( int argc, char** argv ) +{ + std::vector row_indices; + std::vector col_indices; + std::vector values; + graphblas::Index nrows, ncols, nvals; + + // Parse arguments + namespace po = boost::program_options; + po::variables_map vm; + parseArgs( argc, argv, vm ); + parseArgs( argc, argv, vm ); + int TA, TB, NT, NUM_ITER, MAX_NCOLS; + bool ROW_MAJOR, DEBUG; + std::string mode; + if( vm.count("ta") ) + TA = vm["ta"].as(); + if( vm.count("tb") ) + TB = vm["tb"].as(); + if( vm.count("nt") ) + NT = vm["nt"].as(); + if( vm.count("max_ncols") ) + MAX_NCOLS= vm["max_ncols"].as(); + + // default values of TA, TB, NT will be used + graphblas::Descriptor desc; + desc.set( graphblas::GrB_MODE, graphblas::GrB_FIXEDROW ); + desc.set( graphblas::GrB_NT, NT ); + desc.set( graphblas::GrB_TA, TA ); + desc.set( graphblas::GrB_TB, TB ); + + if( vm.count("debug") ) + DEBUG = vm["debug"].as(); + if( vm.count("iter") ) + NUM_ITER = vm["iter"].as(); + if( vm.count("mode") ) { + mode = vm["mode"].as(); + } + + // cuSPARSE (column major) + if( mode=="cusparse" ) { + ROW_MAJOR = false; + desc.set( graphblas::GrB_MODE, graphblas::GrB_CUSPARSE ); + // fixed # of threads per row (row major) + } else if( mode=="fixedrow" ) { + ROW_MAJOR = true; + desc.set( graphblas::GrB_MODE, graphblas::GrB_FIXEDROW ); + // fixed # of threads per column (col major) + } else if( mode=="fixedcol" ) { + ROW_MAJOR = false; + desc.set( graphblas::GrB_MODE, graphblas::GrB_FIXEDCOL ); + // variable # of threads per row (row major) + } else if( mode=="mergepath" ) { + ROW_MAJOR = true; + desc.set( graphblas::GrB_MODE, graphblas::GrB_MERGEPATH ); + } + + // Info + if( DEBUG ) { + std::cout << "ta: " << TA << "\n"; + std::cout << "tb: " << TB << "\n"; + std::cout << "nt: " << NT << "\n"; + std::cout << "row: " << ROW_MAJOR << "\n"; + std::cout << "debug: " << DEBUG << "\n"; + } + + // Read in sparse matrix + if (argc < 2) { + fprintf(stderr, "Usage: %s 
[matrix-market-filename]\n", argv[0]); + exit(1); + } else { + readMtx( argv[argc-1], row_indices, col_indices, values, nrows, ncols, + nvals, DEBUG ); + } + + // Matrix A + graphblas::Matrix a(nrows, ncols); + a.build( row_indices, col_indices, values, nvals ); + a.nrows( nrows ); + a.ncols( ncols ); + a.nvals( nvals ); + if( DEBUG ) a.print(); + + // Matrix B + graphblas::Index MEM_SIZE = 1000000000; // 2x4=8GB GPU memory for dense + graphblas::Index max_ncols = std::min( MEM_SIZE/nrows/32*32, MAX_NCOLS ); + if( DEBUG ) std::cout << "Restricting col to: " << max_ncols << std::endl; + + graphblas::Matrix b(ncols, max_ncols); + std::vector denseVal; + + graphblas::Index a_nvals; + a.nvals( a_nvals ); + int num_blocks = (a_nvals+NT-1)/NT; + int num_segreduce = num_blocks*max_ncols; + CUDA( cudaMalloc( &desc.descriptor_.d_limits_, + (num_blocks+1)*sizeof(graphblas::Index) )); + CUDA( cudaMalloc( &desc.descriptor_.d_carryin_, + num_blocks*max_ncols*sizeof(float) )); + CUDA( cudaMalloc( &desc.descriptor_.d_carryout_, + num_segreduce*sizeof(float) )); + + // Row major order + if( ROW_MAJOR ) + for( int i=0; i c(nrows, max_ncols); + graphblas::Semiring op; + + graphblas::GpuTimer gpu_mxm; + cudaProfilerStart(); + gpu_mxm.Start(); + graphblas::mxm( c, graphblas::GrB_NULL, graphblas::GrB_NULL, op, a, b, desc ); + gpu_mxm.Stop(); + cudaProfilerStop(); + float elapsed_mxm = gpu_mxm.ElapsedMillis(); + std::cout << "mxm: " << elapsed_mxm << " ms\n"; + //ROW_MAJOR=false; + + std::vector out_denseVal; + if( DEBUG ) c.print(); + c.extractTuples( out_denseVal ); + int count = 0, correct=0; + for( int i=0; i +#include +#include +#include + +#include "utilities.h" +#include + +#include + +int main(int argn, char *argv[]) +{ + + // Host problem definition + + const int m = 4; + const int n = 4; + int batchCount = 2; + int batchStride = m; + + cuDoubleComplex hdl[] = {make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(5, 0), make_cuDoubleComplex(1, 0)}; + cuDoubleComplex hd[] = {make_cuDoubleComplex(1, 0), make_cuDoubleComplex(4, 0), make_cuDoubleComplex(6, 0), make_cuDoubleComplex(9, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(2, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0)}; + cuDoubleComplex hdu[] = {make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(7, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(3, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0)}; + + cuDoubleComplex hx[] = {make_cuDoubleComplex(1, 0), make_cuDoubleComplex(2, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(3, 0), + make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(4, 0), + make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(5, 0), make_cuDoubleComplex(6, 0), + make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(7, 0), make_cuDoubleComplex(8, 0), + make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), + make_cuDoubleComplex(2, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(3, 0), make_cuDoubleComplex(4, 0), + make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(5, 0), make_cuDoubleComplex(1, 0), + make_cuDoubleComplex(1, 0), make_cuDoubleComplex(1, 0), make_cuDoubleComplex(6, 0), make_cuDoubleComplex(7, 0)}; + + 
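+    // hdl, hd and hdu hold the sub-, main- and super-diagonals of the
+    // batchCount = 2 tridiagonal systems of size m = 4, stored back to back
+    // (batchStride = m). gtsv2StridedBatch solves a single right-hand side per
+    // system in place in dx, which is why only the first m * batchCount entries
+    // of hx_result below differ from hx.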
cuDoubleComplex hx_result[] = {make_cuDoubleComplex(0.553030, 0), make_cuDoubleComplex(0.446970, 0), make_cuDoubleComplex(-0.340909, 0), make_cuDoubleComplex(0.371212, 0), + make_cuDoubleComplex(1.600000, 0), make_cuDoubleComplex(-0.600000, 0), make_cuDoubleComplex(0.200000, 0), make_cuDoubleComplex(3.800000, 0), + make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(5.000000, 0), make_cuDoubleComplex(6.000000, 0), + make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(7.000000, 0), make_cuDoubleComplex(8.000000, 0), + make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(1.000000, 0), + make_cuDoubleComplex(2.000000, 0), make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(3.000000, 0), make_cuDoubleComplex(4.000000, 0), + make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(5.000000, 0), make_cuDoubleComplex(1.000000, 0), + make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(1.000000, 0), make_cuDoubleComplex(6.000000, 0), make_cuDoubleComplex(7.000000, 0)}; + + // Device memory management + cuDoubleComplex *ddl, *dd, *ddu, *dx; + + CHECK_CUDA( cudaMalloc((void**) &ddl, m * batchCount * sizeof(cuDoubleComplex))); + CHECK_CUDA( cudaMalloc((void**) &dd, m * batchCount * sizeof(cuDoubleComplex))); + CHECK_CUDA( cudaMalloc((void**) &ddu, m * batchCount * sizeof(cuDoubleComplex))); + CHECK_CUDA( cudaMalloc((void**) &dx, m * n * batchCount * sizeof(cuDoubleComplex)) ); + + CHECK_CUDA( cudaMemcpy(ddl, hdl, m * batchCount * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice) ); + CHECK_CUDA( cudaMemcpy(dd, hd, m * batchCount * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice) ); + CHECK_CUDA( cudaMemcpy(ddu, hdu, m * batchCount * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice) ); + CHECK_CUDA( cudaMemcpy(dx, hx, m * n * batchCount * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice) ); + + // CUSPARSE APIs + cusparseHandle_t handle = NULL; + CHECK_CUSPARSE(cusparseCreate(&handle)); + + size_t bufferSizeInBytes; + void *pBuffer = 0; + + cusparseZgtsv2StridedBatch_bufferSizeExt(handle, m, ddl, dd, ddu, dx, batchCount, batchStride, &bufferSizeInBytes); + + cudaMalloc((void**)&pBuffer, bufferSizeInBytes); + + cusparseZgtsv2StridedBatch(handle, m, ddl, dd, ddu, dx, batchCount, batchStride, pBuffer); + + // device result check + CHECK_CUDA( cudaMemcpy(hx, dx, m * n * batchCount * sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost) ); + + int correct = 1; + for (int i = 0; i < (m * n * batchCount); i++) { + if((fabs(hx[i].x - hx_result[i].x) > 0.000001)) { + correct = 0; + break; + } + } + if (correct) + printf("gtsv2Batched test PASSED\n"); + else + printf("gtsv2Batched test FAILED: wrong result\n"); + + // step 6: free resources + + // device memory deallocation + CHECK_CUDA(cudaFree(pBuffer)); + CHECK_CUDA(cudaFree(dx)); + CHECK_CUDA(cudaFree(ddl)); + CHECK_CUDA(cudaFree(dd)); + CHECK_CUDA(cudaFree(ddu)); + + // destroy + CHECK_CUSPARSE(cusparseDestroy(handle)); + + return EXIT_SUCCESS; +} \ No newline at end of file diff --git a/cuda_code/gumbel_softmax_op_6.cu b/cuda_code/gumbel_softmax_op_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..bf0ac667411d86ed1a888258fddb518e562f5719 --- /dev/null +++ b/cuda_code/gumbel_softmax_op_6.cu @@ -0,0 +1,173 @@ +/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#pragma once + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/gumbel_softmax_op.h" + +#if defined(__NVCC__) || defined(__HIPCC__) +#ifdef __NVCC__ +#include "cub/cub.cuh" +#endif +#ifdef __HIPCC__ +#include +namespace cub = hipcub; +#endif + +#include +#include +#include +#include +#include "paddle/fluid/framework/generator.h" +#include "paddle/fluid/memory/memcpy.h" + +namespace paddle { +namespace operators { + +template +using KeyValuePair = cub::KeyValuePair; + +template +struct UniformCUDAGenerator { + T min_, max_; + unsigned int seed_; + unsigned int offset_ = 0; + HOSTDEVICE UniformCUDAGenerator(T min, T max, unsigned int seed) + : min_(min), max_(max), seed_(seed) {} + HOSTDEVICE UniformCUDAGenerator(T min, T max, unsigned int seed, + unsigned int offset) + : min_(min), max_(max), seed_(seed), offset_(offset) {} + + HOSTDEVICE T operator()(const unsigned int n) const { + thrust::minstd_rand rng; + rng.seed(seed_); + thrust::uniform_real_distribution dist(min_, max_); + rng.discard(n + offset_); + return dist(rng); + } +}; + +template +__global__ void OneHotCUDAKernel(const int64_t height, const int64_t width, + const int64_t size_out_axis, const T init, + const T* in, T* out) { + typedef cub::BlockReduce, BlockDim> BlockReduce; + __shared__ typename BlockReduce::TempStorage temp_storage; + + for (int64_t idx = blockIdx.x; idx < height; idx += gridDim.x) { + KeyValuePair kv_pair = {-1, init}; + int h = idx / size_out_axis; + int w = idx % size_out_axis; + cub::ArgMax reducer; + for (int k = threadIdx.x; k < width; k += blockDim.x) { + kv_pair = reducer( + {k, in[h * width * size_out_axis + k * size_out_axis + w]}, kv_pair); + } + kv_pair = BlockReduce(temp_storage).Reduce(kv_pair, reducer); + if (threadIdx.x == 0) { + int index = static_cast(kv_pair.key); + out[h * width * size_out_axis + index * size_out_axis + w] = 1; + } + __syncthreads(); + } +} + +template +struct OneHotGenerator { + static void Transform(const platform::CUDADeviceContext& context, + const Tensor& X, Tensor* Out, int axis) { + const int size_to_axis = SizeToAxis(axis, X.dims()); + const int size_from_axis = SizeFromAxis(axis, X.dims()); + const int size_out_axis = SizeOutAxis(axis, X.dims()); + constexpr int thread_size = 512; + int64_t max_grid_dimx = context.GetCUDAMaxGridDimSize().x; + int64_t height = size_to_axis * size_out_axis; + int block_size = height < max_grid_dimx ? 
height : max_grid_dimx; + + Tensor input_tensor; + input_tensor.mutable_data(Out->dims(), platform::CUDAPlace()); + TensorCopy(*Out, context.GetPlace(), &input_tensor); + math::set_constant(context, Out, 0.0); + OneHotCUDAKernel< + T, thread_size><<>>( + height, size_from_axis / size_out_axis, size_out_axis, + std::numeric_limits::lowest(), input_tensor.data(), + Out->data()); + } +}; + +template +__global__ void AddGumbelNoiseCUDAKernel(const T* input_data, T* output_data, + T* noise, const float temperature, + int64_t n) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + int step = blockDim.x * gridDim.x; + for (int64_t i = index; i < n; i += step) { + T gumbel_noise = -log(-log(noise[i])); + output_data[i] = (gumbel_noise + input_data[i]) / temperature; + } +} + +template +struct GumbleNoiseGenerator { + static void Transform(const platform::CUDADeviceContext& context, + const T* input_data, T* output_data, int size_to_axis, + int size_from_axis, const float temperature) { + Tensor random_tensor; + int64_t size = size_to_axis * size_from_axis; + T* random_data = + random_tensor.mutable_data({size}, platform::CUDAPlace()); + thrust::counting_iterator index_sequence_begin(0); + const unsigned int seed = std::random_device()(); + + // generate gumbel noise + int device_id = + BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace()).GetDeviceId(); + auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id); + if (gen_cuda->GetIsInitPy()) { + auto seed_offset = gen_cuda->IncrementOffset(1); + int gen_offset = size * seed_offset.second; + thrust::transform( + index_sequence_begin, index_sequence_begin + size, + thrust::device_ptr(random_data), + UniformCUDAGenerator(0.00001, 1, seed_offset.first, gen_offset)); + } else { + thrust::transform(index_sequence_begin, index_sequence_begin + size, + thrust::device_ptr(random_data), + UniformCUDAGenerator(0.00001, 1, seed)); + } + + // add gumbel noise to X + const int thread_size = 512; + int64_t block_size = (size + thread_size) / thread_size; + AddGumbelNoiseCUDAKernel< + T><<>>( + input_data, output_data, random_data, temperature, size); + } +}; + +#endif +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +namespace plat = paddle::platform; +REGISTER_OP_CUDA_KERNEL( + gumbel_softmax, ops::GumbelSoftmaxKernel, + ops::GumbelSoftmaxKernel); +REGISTER_OP_CUDA_KERNEL( + gumbel_softmax_grad, + ops::GumbelSoftmaxGradKernel, + ops::GumbelSoftmaxGradKernel); diff --git a/cuda_code/h2d_bytes.cu b/cuda_code/h2d_bytes.cu new file mode 100644 index 0000000000000000000000000000000000000000..95766fb9ba95fe7f46cdefa3e144e97a38857789 --- /dev/null +++ b/cuda_code/h2d_bytes.cu @@ -0,0 +1,246 @@ +#include +#include +#include + +#include +#include +#include + +// 80 x 80 +//const int N = 80 * 80; + +// 160 x 160 +//const int N = 160 * 160; + +// 320 x 320 +//const int N = 320 * 320; + +// 640 x 640 +//const int N = 640 * 640; + +// 1k x 4 +//const int N = 1000; + +// 10k x 4 +//const int N = 1000 * 10; + +// 100k x 4 +//const int N = 1000 * 100; + +// 1M x 4 +//const int N = 1000 * 1000; + +// 10M x 4 +//const int N = 1000 * 1000 * 10; + +// 0.25m x 4 +//const int N = 1 << 18; + +// 0.5m x 4 +//const int N = 1 << 19; + +// 1m x 4 +//const int N = 1 << 20; + +// 2m x 4 +//const int N = 1 << 21; + +// 4m x 4 +//const int N = 1 << 22; + +// 8m x 4 = 32M bytes +//const int N = 1 << 23; + +#define FLTSIZE sizeof(float) +#define BYTSIZE sizeof(char) + +inline int BLK(int data, int blocksize) +{ + return (data + blocksize - 1) 
/ blocksize; +} + +__global__ void kernel_vectorAdd (const float* __restrict__ a_d, + const float* __restrict__ b_d, + const int N, + const int offset, + float *c_d) +{ + int tid = threadIdx.x + __mul24(blockIdx.x, blockDim.x); + + if(tid < N) { + c_d[tid + offset] = a_d[tid + offset] + b_d[tid + offset]; + } +} + +int main( int argc, char **argv) +{ + int devid = 0 ; + + int num_streams = 8; + + int N = 80 * 80; + + if(argc >= 2) + num_streams = atoi(argv[1]); + + if(argc >= 3) + devid = atoi(argv[2]); + + if(argc >= 4) + N = atoi(argv[3]); + + printf("N,%d\n", N); + + cudaSetDevice(devid); + + /* + printf("\nrunning %d cuda streams on device %d\n", num_streams, devid); + + cudaDeviceProp prop; + cudaGetDeviceProperties(&prop, devid); + printf("Device Number: %d\n", devid); + printf(" Device name: %s\n", prop.name); + printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); + printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); + printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); + printf(" Concurrent copy and execution: %s\n", (prop.deviceOverlap ? "Yes" : "No")); + printf(" Concurrent kernels: %d\n", (prop.concurrentKernels)); + printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n", + (prop.deviceOverlap ? "Yes" : "No"), prop.asyncEngineCount); +*/ + + // allocate streams + cudaStream_t *streams = (cudaStream_t *) malloc(num_streams * sizeof(cudaStream_t)); + + // init + for (int i = 0; i < num_streams; i++) { + checkCudaErrors(cudaStreamCreate(&(streams[i]))); + } + + //------------------------------------------------------------------------// + // allocate data on the host + //------------------------------------------------------------------------// + size_t databytes = N * BYTSIZE; + + //float *a_h = (float*) malloc ( N * num_streams * BYTSIZE); + //float *b_h = (float*) malloc ( N * num_streams * BYTSIZE); + //float *c_h = (float*) malloc ( N * num_streams * BYTSIZE); + + float *a_h = NULL; + checkCudaErrors(cudaMallocHost((void **)&a_h, N * num_streams * BYTSIZE)); + + float *b_h = NULL; + checkCudaErrors(cudaMallocHost((void **)&b_h, N * num_streams * BYTSIZE)); + + float *c_h = NULL; + checkCudaErrors(cudaMallocHost((void **)&c_h, N * num_streams * BYTSIZE)); + + for(int i=0; i< N * num_streams; i++) { + a_h[i] = 1.1f; + b_h[i] = 2.2f; + } + + //------------------------------------------------------------------------// + // allocate data on the device + //------------------------------------------------------------------------// + float *a_d; + float *b_d; + float *c_d; + cudaMalloc((void**)&a_d, N * num_streams * BYTSIZE); + cudaMalloc((void**)&b_d, N * num_streams * BYTSIZE); + cudaMalloc((void**)&c_d, N * num_streams * BYTSIZE); + + // kernel configuration + dim3 threads = dim3(256, 1, 1); + dim3 blocks = dim3(BLK(N, threads.x), 1, 1); + + // create cuda event handles + cudaEvent_t start, stop; + checkCudaErrors(cudaEventCreate(&start)); + checkCudaErrors(cudaEventCreate(&stop)); + + cudaEventRecord(start,0); + + // copy data to deivce + for (int i = 0; i < num_streams; i++) { + int offset = i * N; + cudaMemcpyAsync(&a_d[offset], &a_h[offset], databytes, cudaMemcpyHostToDevice, streams[i]); + cudaMemcpyAsync(&b_d[offset], &b_h[offset], databytes, cudaMemcpyHostToDevice, streams[i]); + } + + /* + // launch one worker kernel per stream + for (int i = 0; i < num_streams; i++) { + int offset = i * N; + kernel_vectorAdd <<< blocks, threads, 0, streams[i] >>> (a_d, + b_d, + N, + offset, + 
c_d); + } + + // copy data back to host + for (int i = 0; i < num_streams; i++) { + int offset = i * N; + cudaMemcpyAsync(&c_h[offset], &c_d[offset], databytes, cudaMemcpyDeviceToHost, streams[i]); + } + */ + + // required for async copy + //cudaDeviceSynchronize(); + + //cudaEventSynchronize(stop); + cudaEventRecord(stop, 0); + + // have CPU do some work while waiting for stage 1 to finish + unsigned long int counter=0; + while (cudaEventQuery(stop) == cudaErrorNotReady) + { + counter++; + } + + + + float gpuTime_ms= 0; + cudaEventElapsedTime(&gpuTime_ms, start, stop); + + //printf("runtime (ms) : %f\n", gpuTime_ms); + +/* + // check data + bool success = 1; + for(int i=0; i< N * num_streams; i++) { + if (abs(c_h[i] - 3.3f) > 1e-6) { + fprintf(stderr, "%d : %f (error)!\n", i, c_h[i]); + success = 0; + break; + } + } + + if(success) { + printf("\nSuccess! Exit.\n"); + } + */ + + //------------------------------------------------------------------------// + // free + //------------------------------------------------------------------------// + for (int i = 0; i < num_streams; i++) { + checkCudaErrors(cudaStreamDestroy(streams[i])); + } + + checkCudaErrors(cudaEventDestroy(start)); + checkCudaErrors(cudaEventDestroy(stop)); + + cudaFreeHost(a_h); + cudaFreeHost(b_h); + cudaFreeHost(c_h); + + cudaFree(a_d); + cudaFree(b_d); + cudaFree(c_d); + + cudaDeviceReset(); + + return 0; +} diff --git a/cuda_code/half_float_ops.cu b/cuda_code/half_float_ops.cu new file mode 100644 index 0000000000000000000000000000000000000000..650f1414a0d8a181ce14d8a0e96280496c7bc27c --- /dev/null +++ b/cuda_code/half_float_ops.cu @@ -0,0 +1,74 @@ +/** + * Copyright (c) 2016-present, Facebook, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "caffe2/operators/half_float_ops.h" + +#include "caffe2/core/context_gpu.h" + +#ifdef CAFFE_HAS_CUDA_FP16 + +namespace caffe2 { +namespace { +__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) { + CUDA_1D_KERNEL_LOOP(i, N) { + Y[i] = __float2half(X[i]); + } +} + +__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) { + CUDA_1D_KERNEL_LOOP(i, N) { + Y[i] = __half2float(X[i]); + } +} +} + +template <> +bool FloatToHalfOp::RunOnDevice() { + auto& X = Input(0); + auto* Y = Output(0); + Y->ResizeLike(X); + FloatToHalfKernel<<< + CAFFE_GET_BLOCKS(X.size()), + CAFFE_CUDA_NUM_THREADS, + 0, + context_.cuda_stream()>>>( + X.size(), + X.data(), + reinterpret_cast(Y->mutable_data())); + return true; +} + +template <> +bool HalfToFloatOp::RunOnDevice() { + auto& X = Input(0); + auto* Y = Output(0); + Y->ResizeLike(X); + HalfToFloatKernel<<< + CAFFE_GET_BLOCKS(X.size()), + CAFFE_CUDA_NUM_THREADS, + 0, + context_.cuda_stream()>>>( + X.size(), + reinterpret_cast(X.data()), + Y->mutable_data()); + return true; +} + +REGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfOp); +REGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatOp); +} // namespace caffe2 + +#endif // CAFFE_HAS_CUDA_FP16 diff --git a/cuda_code/hdf5_output_layer_17.cu b/cuda_code/hdf5_output_layer_17.cu new file mode 100644 index 0000000000000000000000000000000000000000..b994825228510185d373e4e7c6c433c3ff08b35e --- /dev/null +++ b/cuda_code/hdf5_output_layer_17.cu @@ -0,0 +1,49 @@ +// Copyright 2014 BVLC and contributors. + +#include + +#include "hdf5.h" +#include "hdf5_hl.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/util/io.hpp" +#include "caffe/vision_layers.hpp" + +namespace caffe { +using std::vector; + +template +Dtype HDF5OutputLayer::Forward_gpu(const vector*>& bottom, + vector*>* top) { + CHECK_GE(bottom.size(), 2); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + label_blob_.Reshape(bottom[1]->num(), bottom[1]->channels(), + bottom[1]->height(), bottom[1]->width()); + const int data_datum_dim = bottom[0]->count() / bottom[0]->num(); + const int label_datum_dim = bottom[1]->count() / bottom[1]->num(); + + for (int i = 0; i < bottom[0]->num(); ++i) { + CUDA_CHECK(cudaMemcpy(&data_blob_.mutable_cpu_data()[i * data_datum_dim], + &bottom[0]->gpu_data()[i * data_datum_dim], + sizeof(Dtype) * data_datum_dim, cudaMemcpyDeviceToHost)); + CUDA_CHECK(cudaMemcpy(&label_blob_.mutable_cpu_data()[i * label_datum_dim], + &bottom[1]->gpu_data()[i * label_datum_dim], + sizeof(Dtype) * label_datum_dim, cudaMemcpyDeviceToHost)); + } + SaveBlobs(); + return Dtype(0.); +} + +template +void HDF5OutputLayer::Backward_gpu(const vector*>& top, + const bool propagate_down, vector*>* bottom) { + return; +} + +INSTANTIATE_CLASS(HDF5OutputLayer); + +} // namespace caffe diff --git a/cuda_code/hdia_dspmv.cu b/cuda_code/hdia_dspmv.cu new file mode 100644 index 0000000000000000000000000000000000000000..d3fac58c850280050df506798a72bf19cefb0673 --- /dev/null +++ b/cuda_code/hdia_dspmv.cu @@ -0,0 +1,33 @@ +/* + * spGPU - Sparse matrices on GPU library. + * + * Copyright (C) 2010 - 2014 + * Davide Barbieri - University of Rome Tor Vergata + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 3 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "cudadebug.h" +#include "cudalang.h" + +extern "C" +{ +#include "core.h" +#include "hdia.h" +} + +#include "debug.h" + +#define ENABLE_CACHE +#define VALUE_TYPE double +#define TYPE_SYMBOL D +#define TEX_FETCH_TYPE int2 +#include "hdia_spmv_base.cuh" + diff --git a/cuda_code/helloWorld_3.cu b/cuda_code/helloWorld_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..61e6526eb939c82fd75205b98a4690c73e4a5bdd --- /dev/null +++ b/cuda_code/helloWorld_3.cu @@ -0,0 +1,12 @@ +#include + +__global__ void hello() { + printf("Hello world from device"); +} + +int main() { + hello<<<1, 1>>>(); + printf("Hello world from host"); + cudaDeviceSynchronize(); + return 0; +} diff --git a/cuda_code/hello_23.cu b/cuda_code/hello_23.cu new file mode 100644 index 0000000000000000000000000000000000000000..72ac8d47fab73bfb159b9b1e1e24959827bcc496 --- /dev/null +++ b/cuda_code/hello_23.cu @@ -0,0 +1,13 @@ +#include "hello.h" + +extern "C" +__global__ void hellofromGPU(void) +{ + printf("GPU:hello sunyi\n"); +} + +void showhello(void) +{ + hellofromGPU <<<1,10>>>(); + cudaDeviceSynchronize(); +} diff --git a/cuda_code/hello_44.cu b/cuda_code/hello_44.cu new file mode 100644 index 0000000000000000000000000000000000000000..8f3dafda76e5c0e1f1a674a2a4f594b2229f986d --- /dev/null +++ b/cuda_code/hello_44.cu @@ -0,0 +1,45 @@ +// +// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of NVIDIA CORPORATION nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// + +#include + +#include "hello.h" +#include "helpers.h" + +extern "C" { +__constant__ Params params; +} + +extern "C" +__global__ void __raygen__draw_solid_color() +{ + uint3 launch_index = optixGetLaunchIndex(); + RayGenData* rtData = (RayGenData*)optixGetSbtDataPointer(); + params.image[launch_index.y * params.image_width + launch_index.x] = + make_color( make_float3( rtData->r, rtData->g, rtData->b ) ); +} diff --git a/cuda_code/hello_5.cu b/cuda_code/hello_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..f3eec273747c0fff01fdac884a36ae792c97e80c --- /dev/null +++ b/cuda_code/hello_5.cu @@ -0,0 +1,20 @@ +/* + * hello.cu: + * + * + */ + +#include + +__global__ void mykernel() +{ + +} + +int main() +{ + mykernel<<<1,1>>>(); + printf("Hello, CUDA World!\n"); + return 0; +} + diff --git a/cuda_code/hello_61.cu b/cuda_code/hello_61.cu new file mode 100644 index 0000000000000000000000000000000000000000..233c948ed8c703d99cb4fbb9e5a375cfe7cb45cf --- /dev/null +++ b/cuda_code/hello_61.cu @@ -0,0 +1,50 @@ +// ========================================================================== +// SeqAn - The Library for Sequence Analysis +// ========================================================================== +// Copyright (c) 2006-2016, Knut Reinert, FU Berlin +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of Knut Reinert or the FU Berlin nor the names of +// its contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +// DAMAGE. 
+// +// ========================================================================== +// Author: Enrico Siragusa +// ========================================================================== + +#include + +using namespace seqan; + +SEQAN_GLOBAL +void helloCUDA() +{ + printf("Hello CUDA!\n"); +} + +int main() +{ + helloCUDA<<<1,1>>>(); + cudaDeviceSynchronize(); + return 0; +} diff --git a/cuda_code/hello_world_02.cu b/cuda_code/hello_world_02.cu new file mode 100644 index 0000000000000000000000000000000000000000..531866c74c4252205583b3e3b0a9a2a5b8294f53 --- /dev/null +++ b/cuda_code/hello_world_02.cu @@ -0,0 +1,83 @@ +#include + +// Note: Needs compute capability > 2.0, so compile with: +// nvcc hello_world_02.cu -arch=compute_20 -code=sm_20,compute_20 -o hello_world_02.out +// Other notes: can have trouble when N is large... +// Default buffer is ~8MB +// cuCtxSetLimit(CU_LIMIT_PRINTF_FIFO_SIZE, ...) + + +#include +#include + +#define N 20000 +#define GRID_D1 20 +#define GRID_D2 2 +#define BLOCK_D1 512 +#define BLOCK_D2 1 +#define BLOCK_D3 1 + +__global__ void hello(void) +{ + int myblock = blockIdx.x + blockIdx.y * gridDim.x; + int blocksize = blockDim.x * blockDim.y * blockDim.z; + int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; + int idx = myblock * blocksize + subthread; + if (idx < N){ + printf("Hello world! My block index is (%d,%d) [Grid dims=(%d,%d)], 3D-thread index within block=(%d,%d,%d) => thread index=%d\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y, threadIdx.x, threadIdx.y, threadIdx.y, idx); + } else { + printf("Hello world! My block index is (%d,%d) [Grid dims=(%d,%d)], 3D-thread index within block=(%d,%d,%d) => thread index=%d [### this thread would not be used for N=%d ###]\n", blockIdx.x, blockIdx.y, gridDim.x, gridDim.y, threadIdx.x, threadIdx.y, threadIdx.y, idx, N); + } +} + + +int main(int argc,char **argv) +{ + // Increase buffer size for illustration... 
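+  // The device-side printf output goes through a fixed-size circular FIFO
+  // (default ~8 MB, per the note at the top of this file), so output from the
+  // thousands of printing threads launched below can be lost once it fills.
+  // The code that follows queries the current limit with
+  // cudaDeviceGetLimit(cudaLimitPrintfFifoSize) and enlarges it with
+  // cudaDeviceSetLimit() before the kernel launch.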
+ size_t factor_increase = 4; + // First, find buffer size: + size_t buffer_size; + cudaError_t stat; + stat = cudaDeviceGetLimit(&buffer_size,cudaLimitPrintfFifoSize); + printf("Buffer size = %u\n",buffer_size); + if (stat == cudaSuccess){ + // Increase by a factor: + stat = cudaDeviceSetLimit(cudaLimitPrintfFifoSize, buffer_size*factor_increase); + stat = cudaDeviceGetLimit(&buffer_size,cudaLimitPrintfFifoSize); + printf("*NEW* Buffer size = %u\n",buffer_size); + if (stat == cudaSuccess){ + printf("Successfully increased printf buffer size...\n"); + } else { + printf("Failed to increase printf buffer size...\n"); + } + } else { + printf("Failed to retrieve printf buffer size...\n"); + } + + const dim3 blockSize(BLOCK_D1, BLOCK_D2, BLOCK_D3); + const dim3 gridSize(GRID_D1, GRID_D2, 1); + int nthreads = BLOCK_D1*BLOCK_D2*BLOCK_D3*GRID_D1*GRID_D2; + if (nthreads < N){ + printf("\n============ NOT ENOUGH THREADS TO COVER N=%d ===============\n\n",N); + } else { + printf("Launching %d threads (N=%d)\n",nthreads,N); + } + + // launch the kernel + hello<<>>(); + cudaError_t cudaerr = cudaDeviceSynchronize(); + if (cudaerr){ + printf("kernel launch failed with error \"%s\".\n", + cudaGetErrorString(cudaerr)); + } else { + printf("kernel launch success!\n"); + } + + printf("That's all!\n"); + + return 0; +} + + + + diff --git a/cuda_code/hip-pinned-shadow_1.cu b/cuda_code/hip-pinned-shadow_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..c58f7097af6ff8c4e437f0a8ae77efdbfe5937c9 --- /dev/null +++ b/cuda_code/hip-pinned-shadow_1.cu @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -triple amdgcn -fcuda-is-device -std=c++11 -fvisibility hidden -fapply-global-visibility-to-externs \ +// RUN: -emit-llvm -o - -x hip %s -fsyntax-only -verify +// RUN: %clang_cc1 -triple x86_64 -std=c++11 \ +// RUN: -emit-llvm -o - -x hip %s -fsyntax-only -verify + +#define __device__ __attribute__((device)) +#define __constant__ __attribute__((constant)) +#define __hip_pinned_shadow__ __attribute((hip_pinned_shadow)) + +struct textureReference { + int a; +}; + +template +struct texture : public textureReference { +texture() { a = 1; } +}; + +__hip_pinned_shadow__ texture tex; +__device__ __hip_pinned_shadow__ texture tex2; // expected-error{{'hip_pinned_shadow' and 'device' attributes are not compatible}} + // expected-error@-1{{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables}} + // expected-note@-2{{conflicting attribute is here}} +__constant__ __hip_pinned_shadow__ texture tex3; // expected-error{{'hip_pinned_shadow' and 'constant' attributes are not compatible}} + // expected-error@-1{{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables}} + // expected-note@-2{{conflicting attribute is here}} diff --git a/cuda_code/hist_cut_2.cu b/cuda_code/hist_cut_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..4c0a838a4bebecc35fc8dac2ab673eeab321dbf8 --- /dev/null +++ b/cuda_code/hist_cut_2.cu @@ -0,0 +1,176 @@ +// +// Created by qinbin on 2018/5/9. 
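+// hist_cut_2.cu: builds per-feature quantile sketches, weighted by each
+// instance's hessian, and prunes them to derive the histogram cut points
+// used for binning (see HistCut::get_cut_points below).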
+// + +#include "thundergbm/hist_cut.h" +#include "thundergbm/quantile_sketch.h" +#include "thundergbm/syncarray.h" +#include +#include +#include + +#include "thundergbm/util/device_lambda.cuh" +#include "thrust/unique.h" + +void HistCut::get_cut_points(SparseColumns &columns, InsStat &stats, int max_num_bins, int n_instances) { + LOG(TRACE) << "get cut points"; + LOG(DEBUG) << "val = " << columns.csc_val; + LOG(DEBUG) << "idx = " << columns.csc_row_idx; + LOG(DEBUG) << "ptr = " << columns.csc_col_ptr; + int n_features = columns.n_column; +// std::cout<<"n_featrues:"< sketchs(n_features); + //kFactor times more cut point candidates are considered when building the summary. + const int kFactor = 8; + for (int i = 0; i < n_features; i++) { + sketchs[i].Init(n_instances, 1.0 / (max_num_bins * kFactor)); + } + float_type *val_ptr = columns.csc_val.host_data(); + int *row_ptr = columns.csc_row_idx.host_data(); + int *col_ptr = columns.csc_col_ptr.host_data(); + auto stat_gh_ptr = stats.gh_pair.host_data(); +// std::cout<<"before add"<= col_ptr[i]; j--) { + float_type val = val_ptr[j]; + float_type weight = stat_gh_ptr[row_ptr[j]].h; + sketchs[i].Add(val, weight); + } + } +// std::cout<<"after add"< n_summary(n_features); +// summary n_summary[n_features]; +// std::cout<<"before prune"<>>(deviceImage, img_size, deviceHistos); + cudaDeviceSynchronize(); + kernelTime1.stop(); + // check whether the kernel invocation was successful + checkCudaCall(cudaGetLastError()); + + // reduce local histgrams of blocks to one final histogram + int reduce_blocks = (int) ceil(blocks / 2.0); + if(reduce_blocks % 2 != 0) { + reduce_blocks++; + } + int last_reduction = blocks; + + while(reduce_blocks >= 1) { + // execute reduce kernel + kernelTime1.start(); + reduceKernel<<>>(deviceHistos, reduce_blocks, last_reduction); + cudaDeviceSynchronize(); + kernelTime1.stop(); + // check whether the kernel invocation was successful + checkCudaCall(cudaGetLastError()); + + if(floor(reduce_blocks / 2.0) == 0) { + break; + } + + last_reduction = reduce_blocks; + reduce_blocks = (int) ceil(reduce_blocks / 2.0); + if(reduce_blocks % 2 != 0 && reduce_blocks != 1) { + reduce_blocks++; + } + } + + // copy result back + memoryTime.start(); + checkCudaCall(cudaMemcpy(histogram, deviceHistos, hist_size * sizeof(unsigned int), cudaMemcpyDeviceToHost)); + memoryTime.stop(); + + checkCudaCall(cudaFree(deviceImage)); + checkCudaCall(cudaFree(deviceHistos)); + + cout << "histogram (kernel): \t\t" << kernelTime1 << endl; + cout << "histogram (memory): \t\t" << memoryTime << endl; + cout << "histogram total: \t\t = " << (kernelTime1.getTimeInSeconds() + memoryTime.getTimeInSeconds()) << " seconds" << endl; +} + +/** + * Calculates the histogram in parallel where each thread does one pixel of the image. + */ +__global__ void histogramKernelSimple(unsigned char* __restrict__ image, long img_size, unsigned int* __restrict__ histogram) { + int i = threadIdx.x + blockDim.x * blockIdx.x; + if(i < img_size) { + atomicAdd(&histogram[image[i]], 1); + } +} + +/** + * Prepares the GPU for kernel execution of the simple histogram kernel. 
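+ * Allocates device buffers for the image and the hist_size-entry histogram,
+ * zeroes the histogram with cudaMemset, and launches one thread per pixel
+ * (1024-thread blocks, grid rounded up to cover img_size); each thread
+ * atomically increments one global histogram bin. The result is copied back
+ * and kernel and memory-transfer timings are reported separately.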
+ */ +void histogramCudaSimple(unsigned char* image, long img_size, unsigned int* histogram) { + int threadBlockSize = 1024; + int blocks; + + // calculate number of blocks based on img_size + blocks = img_size / threadBlockSize; + if(img_size % threadBlockSize != 0) { + blocks++; + } + + // allocate the vectors on the GPU + unsigned char* deviceImage = NULL; + checkCudaCall(cudaMalloc((void **) &deviceImage, img_size * sizeof(unsigned char))); + if (deviceImage == NULL) { + cout << "could not allocate memory!" << endl; + return; + } + unsigned int* deviceHisto = NULL; + checkCudaCall(cudaMalloc((void **) &deviceHisto, hist_size * sizeof(unsigned int))); + if (deviceHisto == NULL) { + checkCudaCall(cudaFree(deviceImage)); + cout << "could not allocate memory!" << endl; + return; + } + + timer kernelTime1 = timer("kernelTime"); + timer memoryTime = timer("memoryTime"); + + // copy the original vectors to the GPU + memoryTime.start(); + checkCudaCall(cudaMemcpy(deviceImage, image, img_size*sizeof(unsigned char), cudaMemcpyHostToDevice)); + checkCudaCall(cudaMemset(deviceHisto, 0, hist_size * sizeof(unsigned int))); + memoryTime.stop(); + + // execute kernel + kernelTime1.start(); + histogramKernelSimple<<>>(deviceImage, img_size, deviceHisto); + cudaDeviceSynchronize(); + kernelTime1.stop(); + + // check whether the kernel invocation was successful + checkCudaCall(cudaGetLastError()); + + // copy result back + memoryTime.start(); + checkCudaCall(cudaMemcpy(histogram, deviceHisto, hist_size * sizeof(unsigned int), cudaMemcpyDeviceToHost)); + memoryTime.stop(); + + checkCudaCall(cudaFree(deviceImage)); + checkCudaCall(cudaFree(deviceHisto)); + + cout << "histogram simple (kernel): \t" << kernelTime1 << endl; + cout << "histogram simple (memory): \t" << memoryTime << endl; + cout << "histogram simple total: \t = " << (kernelTime1.getTimeInSeconds() + memoryTime.getTimeInSeconds()) << " seconds" << endl; +} + +/** + * Calculates the histogram sequentially. 
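+ * Runs entirely on the host in a single pass over the image and reports the
+ * elapsed time, so it can be compared against the CUDA timings above.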
+ */ +void histogramSeq(unsigned char* image, long img_size, unsigned int* histogram) { + int i; + + timer sequentialTime = timer("Sequential"); + + for (i=0; i + +#define BLOCK_SIZE 1024 +#define HISTOGRAM_SIZE 256 + +__global__ void RGB2GS(unsigned char *output, float *input, int size) { + int tid = blockIdx.x * blockDim.x + threadIdx.x; + int idx = tid / 3; + if (idx < size) { + unsigned char r = (unsigned char)(255 * input[idx * 3]); + unsigned char g = (unsigned char)(255 * input[idx * 3 + 1]); + unsigned char b = (unsigned char)(255 * input[idx * 3 + 2]); + output[idx] = (unsigned char)(0.21 * r + 0.71 * g + 0.07 * b); + } + __syncthreads(); +} + +__global__ void GS2RGB(float *output, unsigned char *input, int size) { + int tid = blockIdx.x * blockDim.x + threadIdx.x; + int idx = tid / 3; + if (idx < size) { + float tmp = input[idx] / 255.0; + output[3 * idx] = tmp; + output[3 * idx + 1] = tmp; + output[3 * idx + 2] = tmp; + } + __syncthreads(); +} + +__global__ void calcHistogram(float *histogram, unsigned char *input, int size, + bool priv) { + if (priv) { + // privatization opt, faster atomic Op with smem + __shared__ float cache[HISTOGRAM_SIZE]; + if (threadIdx.x < HISTOGRAM_SIZE) + cache[threadIdx.x] = 0; + __syncthreads(); + int tid = blockDim.x * blockIdx.x + threadIdx.x; + int stride = blockDim.x * gridDim.x; + while (tid < size) { + atomicAdd(&(cache[input[tid]]), 1); + tid += stride; + } + __syncthreads(); + if (threadIdx.x < HISTOGRAM_SIZE) + atomicAdd(&(histogram[threadIdx.x]), cache[threadIdx.x]); + } else { + int tid = blockDim.x * blockIdx.x + threadIdx.x; + int stride = blockDim.x * gridDim.x; + while (tid < size) { + atomicAdd(&(histogram[input[tid]]), 1); + tid += stride; + } + } +} + +__global__ void calcCDF(float *cdf, float *histogram, int size) { + // Kogge-Stone Scan + __shared__ float scan[HISTOGRAM_SIZE]; + int tid = blockDim.x * blockIdx.x + threadIdx.x; + if (tid < size) + scan[threadIdx.x] = histogram[tid]; + for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) { + __syncthreads(); + if (threadIdx.x >= stride) { + scan[threadIdx.x] += scan[threadIdx.x - stride]; + } + } + __syncthreads(); + for (int i = 0; i < HISTOGRAM_SIZE; i++) { + cdf[i] = scan[i] / size; + } +} + +__global__ void equalHistogram(float *outputImageData, float *inputImageData, + float *cdf, int size) { + int tid = blockDim.x * blockIdx.x + threadIdx.x; + float minimum = cdf[0]; + if (tid < size) { + unsigned char tmp = (unsigned char)(255 * inputImageData[tid]); + tmp = 255 * (cdf[tmp] - minimum) / (1 - minimum); + tmp = min(255, tmp); + outputImageData[tid] = tmp / 255.0; + } + __syncthreads(); +} + +int main(int argc, char **argv) { + wbArg_t args; + int imageWidth; + int imageHeight; + int imageChannels; + int imageSize; + int gramDataSize; + const char *inputImageFile; + // declare host-side var + wbImage_t inputImage; + wbImage_t outputImage; + float *hostInputImageData; + float *hostOutputImageData; + // declare device-side var + float *deviceInputImageData; + unsigned char *deviceGSImageData; + float *deviceHistogram; + float *cdf; + float *deviceOutputImageData; + // import args, input files and get params + args = wbArg_read(argc, argv); + inputImageFile = wbArg_getInputFile(args, 0); + inputImage = wbImport(inputImageFile); + hostInputImageData = wbImage_getData(inputImage); + imageWidth = wbImage_getWidth(inputImage); + imageHeight = wbImage_getHeight(inputImage); + imageChannels = wbImage_getChannels(inputImage); + outputImage = wbImage_new(imageWidth, imageHeight, 
imageChannels); + hostOutputImageData = wbImage_getData(outputImage); + imageSize = imageWidth * imageHeight; + size_t imageDataSize = imageSize * imageChannels * sizeof(float); + gramDataSize = HISTOGRAM_SIZE * sizeof(float); + // device memory allocation & copy + cudaMalloc((void **)&deviceInputImageData, imageDataSize); + cudaMalloc((void **)&deviceGSImageData, imageSize); + cudaMalloc((void **)&deviceHistogram, gramDataSize); + cudaMalloc((void **)&cdf, gramDataSize); + cudaMalloc((void **)&deviceOutputImageData, imageDataSize); + cudaMemcpy(deviceInputImageData, hostInputImageData, imageDataSize, + cudaMemcpyHostToDevice); + int gridDimRGB = ceil(float(imageSize * imageChannels) / BLOCK_SIZE); + int gridDimGS = ceil(float(imageSize) / BLOCK_SIZE); + // launch CUDA kernels + RGB2GS<<>>(deviceGSImageData, deviceInputImageData, + imageSize); + // GS2RGB<<>> (deviceGSImageData, deviceOutputImageData, + // imageSize); // debug-only + calcHistogram<<>>(deviceHistogram, deviceGSImageData, + imageSize, true); + clock_t t0 = clock(); + for(int i = 0; i < 300000; i++) { + calcCDF<<<1, HISTOGRAM_SIZE>>>( + cdf, deviceHistogram, imageSize); + } + clock_t t1 = clock(); + printf("Time elapsed: %f\n", float(t1 - t0)/CLOCKS_PER_SEC); + equalHistogram<<>>(deviceOutputImageData, + deviceInputImageData, cdf, + imageSize * imageChannels); + // transfer results deivce to host + cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageDataSize, + cudaMemcpyDeviceToHost); + + wbSolution(args, outputImage); + + // free allocated memory + cudaFree(deviceInputImageData); + cudaFree(deviceHistogram); + cudaFree(cdf); + cudaFree(deviceOutputImageData); + wbImage_delete(outputImage); + wbImage_delete(inputImage); + + return 0; +} diff --git a/cuda_code/hl_table_apply.cu b/cuda_code/hl_table_apply.cu new file mode 100644 index 0000000000000000000000000000000000000000..7411ae35d382833253e3ceabe36b3a1938138028 --- /dev/null +++ b/cuda_code/hl_table_apply.cu @@ -0,0 +1,124 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "hl_base.h" +#include "hl_cuda.h" +#include "hl_device_functions.cuh" +#include "paddle/legacy/utils/Logging.h" + +template +__global__ void KeMatrixAddRows(real* output, + int ldo, + real* table, + int ldt, + int* ids, + int numSamples, + int tableSize, + int dim) { + int idx = threadIdx.x; + int idy = blockIdx.x + threadIdx.y * gridDimX; + + while (idy < numSamples) { + int tableId = ids[idy]; + if ((0 <= tableId) && (tableId < tableSize)) { + real* out = output + idy * ldo; + real* tab = table + tableId * ldt; + for (int i = idx; i < dim; i += blockDimX) { + if (AddRow) { + paddle::paddleAtomicAdd(&tab[i], out[i]); + } else { + out[i] += tab[i]; + } + } + } + idy += blockDimY * gridDimX; + } +} + +void hl_matrix_select_rows(real* output, + int ldo, + real* table, + int ldt, + int* ids, + int numSamples, + int tableSize, + int dim) { + CHECK_NOTNULL(output); + CHECK_NOTNULL(table); + CHECK_NOTNULL(ids); + + dim3 threads(128, 8); + dim3 grid(8, 1); + KeMatrixAddRows<128, 8, 8, 0><<>>( + output, ldo, table, ldt, ids, numSamples, tableSize, dim); + + CHECK_SYNC("hl_matrix_select_rows failed"); +} + +void hl_matrix_add_to_rows(real* table, + int ldt, + real* input, + int ldi, + int* ids, + int numSamples, + int tableSize, + int dim) { + CHECK_NOTNULL(input); + CHECK_NOTNULL(table); + CHECK_NOTNULL(ids); + + dim3 threads(128, 8); + dim3 grid(8, 1); + KeMatrixAddRows<128, 8, 8, 1><<>>( + input, ldi, table, ldt, ids, numSamples, tableSize, dim); + + CHECK_SYNC("hl_matrix_add_to_rows failed"); +} + +template +__global__ void KeVectorSelect( + T* dst, int sized, const T* src, int sizes, const int* ids, int sizei) { + int idx = threadIdx.x + blockDimX * blockIdx.x; + while (idx < sizei) { + int index = ids[idx]; + // check(index < sizes); + dst[idx] = src[index]; + idx += blockDimX * gridDimX; + } +} + +template +void hl_vector_select_from( + T* dst, int sized, const T* src, int sizes, const int* ids, int sizei) { + CHECK_NOTNULL(dst); + CHECK_NOTNULL(src); + CHECK_NOTNULL(ids); + CHECK_EQ(sized, sizei); + + dim3 threads(512, 1); + dim3 grid(8, 1); + KeVectorSelect<<>>( + dst, sized, src, sizes, ids, sizei); + + CHECK_SYNC("hl_vector_select_from failed"); +} + +template void hl_vector_select_from(real* dst, + int sized, + const real* src, + int sizes, + const int* ids, + int sizei); +template void hl_vector_select_from( + int* dst, int sized, const int* src, int sizes, const int* ids, int sizei); diff --git a/cuda_code/hll_2.cu b/cuda_code/hll_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..55a39dbacca8b0d8626ba34dd01db25cdfea4867 --- /dev/null +++ b/cuda_code/hll_2.cu @@ -0,0 +1,375 @@ +// Copyright (c) 2017-2018 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
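+
+// Each Thrust call in this file is issued twice behind the RUN_ON_DEVICE
+// macro: with the thrust::cuda::par.on(cudaStream) execution policy when
+// compiled for the GPU, and with thrust::host otherwise, so the same HLL
+// aggregation logic runs on either backend.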
+ +#include +#include +#include +#include "query/algorithm.hpp" + +CGoCallResHandle HyperLogLog(DimensionColumnVector prevDimOut, + DimensionColumnVector curDimOut, + uint32_t *prevValuesOut, + uint32_t *curValuesOut, + int prevResultSize, + int curBatchSize, + bool isLastBatch, + uint8_t **hllVectorPtr, + size_t *hllVectorSizePtr, + uint16_t **hllRegIDCountPerDimPtr, + void *cudaStream, + int device) { + CGoCallResHandle resHandle = {nullptr, nullptr}; + try { +#ifdef RUN_ON_DEVICE + cudaSetDevice(device); +#endif + resHandle.res = + reinterpret_cast(ares::hyperloglog(prevDimOut, + curDimOut, + prevValuesOut, + curValuesOut, + prevResultSize, + curBatchSize, + isLastBatch, + hllVectorPtr, + hllVectorSizePtr, + hllRegIDCountPerDimPtr, + cudaStream)); + CheckCUDAError("HyperLogLog"); + return resHandle; + } + catch (std::exception &e) { + std::cerr << "Exception happend when doing HyperLogLog:" << e.what() + << std::endl; + resHandle.pStrErr = strdup(e.what()); + } + return resHandle; +} + +namespace ares { + +// hll sort batch sort current batch using higher 48bits of the hash produced +// from dim values + 16bit reg_id from hll value, +// Note: we store the actual dim values of the current batch into the +// prevDimOut.DimValues vector, but we write the index vector, hash vector, hll +// value vector into the curDimOut, index vector will be initialized to range +// [prevResultSize, preResultSize + curBatchSize) +void sortCurrentBatch(uint8_t *dimValues, uint64_t *hashValues, + uint32_t *indexVector, + uint8_t numDimsPerDimWidth[NUM_DIM_WIDTH], + int vectorCapacity, uint32_t *curValuesOut, + int prevResultSize, int curBatchSize, void *cudaStream) { + DimensionHashIterator hashIter(dimValues, indexVector, numDimsPerDimWidth, + vectorCapacity); + auto zippedValueIter = thrust::make_zip_iterator( + thrust::make_tuple(indexVector, curValuesOut)); +#ifdef RUN_ON_DEVICE + thrust::transform( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + hashIter, hashIter + curBatchSize, curValuesOut, hashValues, + HLLHashFunctor()); + thrust::stable_sort_by_key( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + hashValues, hashValues + curBatchSize, + zippedValueIter); +#else + thrust::transform(thrust::host, hashIter, hashIter + curBatchSize, + curValuesOut, hashValues, HLLHashFunctor()); + thrust::stable_sort_by_key(thrust::host, hashValues, + hashValues + curBatchSize, zippedValueIter); +#endif +} + +// prepareHeadFlags prepares dimHeadFlags determines whether a element is the +// head of a dimension partition +template +void prepareHeadFlags(uint64_t *hashVector, DimHeadIter dimHeadFlags, + int resultSize, void *cudaStream) { + HLLDimNotEqualFunctor dimNotEqual; +// TODO(jians): if we see performance issue here, we can try to use custome +// kernel to utilize shared memory +#ifdef RUN_ON_DEVICE + thrust::transform( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + hashVector, hashVector + resultSize - 1, hashVector + 1, dimHeadFlags + 1, + dimNotEqual); +#else + thrust::transform(thrust::host, hashVector, hashVector + resultSize - 1, + hashVector + 1, dimHeadFlags + 1, dimNotEqual); +#endif +} + +// createAndCopyHLLVector creates the hll vector based on +// scanned count of reg_id counts per dimension and copy hll value +// reg_id < 4096, copy hll measure value in sparse format +// reg_id >= 4096, copy hll measure value in dense format +void createAndCopyHLLVector(uint64_t *hashVector, + uint8_t **hllVectorPtr, + size_t *hllVectorSizePtr, + uint16_t **hllRegIDCountPerDimPtr, + unsigned 
int *dimCumCount, + uint32_t *values, + int resultSizeWithRegIDs, + int resultSize, + void *cudaStream) { + HLLRegIDHeadFlagIterator regIDHeadFlagIterator(hashVector); +#ifdef RUN_ON_DEVICE + // allocate dimRegIDCount vector + cudaMalloc(reinterpret_cast(hllRegIDCountPerDimPtr), + (size_t)resultSize * sizeof(uint16_t)); + // produce dimRegIDCount vector + thrust::reduce_by_key( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + dimCumCount, dimCumCount + resultSizeWithRegIDs, + regIDHeadFlagIterator, thrust::make_discard_iterator(), + *hllRegIDCountPerDimPtr); + + // iterator for get byte count for each dim according to reg id count + auto hllDimByteCountIter = thrust::make_transform_iterator( + *hllRegIDCountPerDimPtr, HLLDimByteCountFunctor()); + + auto hllDimRegIDCountIter = thrust::make_transform_iterator( + *hllRegIDCountPerDimPtr, CastFunctor()); + // get dim reg id cumulative count (cumulative count of reg_id per each + // dimension value) + thrust::device_vector hllDimRegIDCumCount(resultSize + 1, 0); + thrust::device_vector hllVectorOffsets(resultSize + 1, 0); + thrust::device_vector hllRegIDCumCount(resultSizeWithRegIDs); + thrust::inclusive_scan( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + hllDimRegIDCountIter, hllDimRegIDCountIter + resultSize, + hllDimRegIDCumCount.begin() + 1); + thrust::inclusive_scan( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + hllDimByteCountIter, hllDimByteCountIter + resultSize, + hllVectorOffsets.begin() + 1); + thrust::inclusive_scan( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + regIDHeadFlagIterator, + regIDHeadFlagIterator + resultSizeWithRegIDs, + hllRegIDCumCount.begin()); +#else + // allocate dimRegIDCount vector + *hllRegIDCountPerDimPtr = reinterpret_cast( + malloc((size_t) resultSize * sizeof(uint16_t))); + // produce dimRegIDCount vector + thrust::reduce_by_key( + thrust::host, dimCumCount, dimCumCount + resultSizeWithRegIDs, + regIDHeadFlagIterator, thrust::make_discard_iterator(), + *hllRegIDCountPerDimPtr); + // iterator for get byte count for each dim according to reg id count + auto hllDimByteCountIter = thrust::make_transform_iterator( + *hllRegIDCountPerDimPtr, HLLDimByteCountFunctor()); + + auto hllDimRegIDCountIter = thrust::make_transform_iterator( + *hllRegIDCountPerDimPtr, CastFunctor()); + // get dim reg id cumulative count (cumulative count of reg_id per each + // dimension value) + thrust::host_vector hllDimRegIDCumCount(resultSize + 1, 0); + thrust::host_vector hllVectorOffsets(resultSize + 1, 0); + thrust::host_vector hllRegIDCumCount(resultSizeWithRegIDs); + thrust::inclusive_scan(thrust::host, hllDimRegIDCountIter, + hllDimRegIDCountIter + resultSize, + hllDimRegIDCumCount.begin() + 1); + thrust::inclusive_scan(thrust::host, hllDimByteCountIter, + hllDimByteCountIter + resultSize, + hllVectorOffsets.begin() + 1); + thrust::inclusive_scan(thrust::host, regIDHeadFlagIterator, + regIDHeadFlagIterator + resultSizeWithRegIDs, + hllRegIDCumCount.begin()); +#endif + *hllVectorSizePtr = hllVectorOffsets[resultSize]; + HLLValueOutputIterator hllValueOutputIter( + dimCumCount, values, thrust::raw_pointer_cast(hllRegIDCumCount.data()), + thrust::raw_pointer_cast(hllDimRegIDCumCount.data()), + thrust::raw_pointer_cast(hllVectorOffsets.data())); + +#ifdef RUN_ON_DEVICE + // allocate dense vector + cudaMalloc(reinterpret_cast(hllVectorPtr), *hllVectorSizePtr); + cudaMemset(*hllVectorPtr, 0, *hllVectorSizePtr); + thrust::transform_if( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + 
hllValueOutputIter, hllValueOutputIter + resultSizeWithRegIDs, + regIDHeadFlagIterator, thrust::make_discard_iterator(), + CopyHLLFunctor(*hllVectorPtr), thrust::identity()); +#else + // allocate dense vector + *hllVectorPtr = reinterpret_cast(malloc(*hllVectorSizePtr)); + memset(*hllVectorPtr, 0, *hllVectorSizePtr); + thrust::transform_if( + thrust::host, hllValueOutputIter, + hllValueOutputIter + resultSizeWithRegIDs, regIDHeadFlagIterator, + thrust::make_discard_iterator(), CopyHLLFunctor(*hllVectorPtr), + thrust::identity()); +#endif +} + +// copyDim is the same as regular dimension copy in regular reduce operations +void copyDim(DimensionColumnVector inputKeys, + DimensionColumnVector outputKeys, int outputLength, + void *cudaStream) { + DimensionColumnPermutateIterator iterIn( + inputKeys.DimValues, outputKeys.IndexVector, inputKeys.VectorCapacity, + outputLength, inputKeys.NumDimsPerDimWidth); + + DimensionColumnOutputIterator iterOut(outputKeys.DimValues, + inputKeys.VectorCapacity, outputLength, + inputKeys.NumDimsPerDimWidth); + + int numDims = 0; + for (int i = 0; i < NUM_DIM_WIDTH; i++) { + numDims += inputKeys.NumDimsPerDimWidth[i]; + } +#ifdef RUN_ON_DEVICE + thrust::copy(thrust::cuda::par.on(reinterpret_cast(cudaStream)), + iterIn, iterIn + numDims * 2 * outputLength, iterOut); +#else + thrust::copy(thrust::host, iterIn, iterIn + numDims * 2 * outputLength, + iterOut); +#endif +} + +// merge merges previous batch results with current batch results +// based on hash value (asce) and hll value (desc) +void merge(uint64_t *inputHashValues, uint64_t *outputHashValues, + uint32_t *inputValues, uint32_t *outputValues, + uint32_t *inputIndexVector, uint32_t *outputIndexVector, + int prevResultSize, int curBatchResultSize, void *cudaStream) { + auto zippedPrevBatchMergeKey = thrust::make_zip_iterator( + thrust::make_tuple(inputHashValues, inputValues)); + auto zippedCurBatchMergeKey = thrust::make_zip_iterator(thrust::make_tuple( + inputHashValues + prevResultSize, inputValues + prevResultSize)); + auto zippedOutputKey = thrust::make_zip_iterator( + thrust::make_tuple(outputHashValues, outputValues)); + +#ifdef RUN_ON_DEVICE + thrust::merge_by_key( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + zippedPrevBatchMergeKey, zippedPrevBatchMergeKey + prevResultSize, + zippedCurBatchMergeKey, zippedCurBatchMergeKey + curBatchResultSize, + inputIndexVector, inputIndexVector + prevResultSize, zippedOutputKey, + outputIndexVector, HLLMergeComparator()); +#else + thrust::merge_by_key( + thrust::host, zippedPrevBatchMergeKey, + zippedPrevBatchMergeKey + prevResultSize, zippedCurBatchMergeKey, + zippedCurBatchMergeKey + curBatchResultSize, inputIndexVector, + inputIndexVector + prevResultSize, zippedOutputKey, outputIndexVector, + HLLMergeComparator()); +#endif +} + +int reduceCurrentBatch(uint64_t *inputHashValues, + uint32_t *inputIndexVector, + uint32_t *inputValues, + uint64_t *outputHashValues, + uint32_t *outputIndexVector, + uint32_t *outputValues, + int length, + void *cudaStream) { + thrust::equal_to binaryPred; + thrust::maximum maxOp; + ReduceByHashFunctor > reduceFunc(maxOp); + auto zippedInputIter = thrust::make_zip_iterator( + thrust::make_tuple(inputIndexVector, inputValues)); + auto zippedOutputIter = thrust::make_zip_iterator( + thrust::make_tuple(outputIndexVector, outputValues)); +#ifdef RUN_ON_DEVICE + auto resEnd = thrust::reduce_by_key( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + inputHashValues, inputHashValues + length, zippedInputIter, + 
outputHashValues, zippedOutputIter, binaryPred, reduceFunc); +#else + auto resEnd = thrust::reduce_by_key( + thrust::host, inputHashValues, inputHashValues + length, zippedInputIter, + outputHashValues, zippedOutputIter, binaryPred, reduceFunc); +#endif + return thrust::get<0>(resEnd) - outputHashValues; +} + +int makeHLLVector(uint64_t *hashValues, uint32_t *indexVector, + uint32_t *values, int resultSize, uint8_t **hllVectorPtr, + size_t *hllVectorSizePtr, uint16_t **hllRegIDCountPerDimPtr, + void *cudaStream) { +#ifdef RUN_ON_DEVICE + thrust::device_vector dimHeadFlags(resultSize, 1); +#else + thrust::host_vector dimHeadFlags(resultSize, 1); +#endif + prepareHeadFlags(hashValues, dimHeadFlags.begin(), resultSize, cudaStream); + +#ifdef RUN_ON_DEVICE + int reducedResultSize = + thrust::remove_if( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + indexVector, indexVector + resultSize, dimHeadFlags.begin(), + thrust::detail::equal_to_value(0)) - + indexVector; + thrust::inclusive_scan( + thrust::cuda::par.on(reinterpret_cast(cudaStream)), + dimHeadFlags.begin(), dimHeadFlags.end(), dimHeadFlags.begin()); +#else + int reducedResultSize = + thrust::remove_if(thrust::host, indexVector, indexVector + resultSize, + dimHeadFlags.begin(), + thrust::detail::equal_to_value(0)) - + indexVector; + thrust::inclusive_scan(thrust::host, dimHeadFlags.begin(), dimHeadFlags.end(), + dimHeadFlags.begin()); +#endif + createAndCopyHLLVector(hashValues, hllVectorPtr, hllVectorSizePtr, + hllRegIDCountPerDimPtr, + thrust::raw_pointer_cast(dimHeadFlags.data()), values, + resultSize, reducedResultSize, cudaStream); + return reducedResultSize; +} + +// the steps for hyperloglog: +// 1. sort current batch +// 2. reduce current batch +// 3. merge current batch result with result from previous batches +// 4. (last batch only) create dense hll vector +// 5. 
copy dimension values +int hyperloglog(DimensionColumnVector prevDimOut, + DimensionColumnVector curDimOut, uint32_t *prevValuesOut, + uint32_t *curValuesOut, int prevResultSize, int curBatchSize, + bool isLastBatch, uint8_t **hllVectorPtr, + size_t *hllVectorSizePtr, uint16_t **hllRegIDCountPerDimPtr, + void *cudaStream) { + sortCurrentBatch(prevDimOut.DimValues, curDimOut.HashValues, + curDimOut.IndexVector, curDimOut.NumDimsPerDimWidth, + curDimOut.VectorCapacity, curValuesOut, prevResultSize, + curBatchSize, cudaStream); + int curResultSize = reduceCurrentBatch( + curDimOut.HashValues, curDimOut.IndexVector, curValuesOut, + prevDimOut.HashValues + prevResultSize, + prevDimOut.IndexVector + prevResultSize, prevValuesOut + prevResultSize, + curBatchSize, cudaStream); + + merge(prevDimOut.HashValues, curDimOut.HashValues, prevValuesOut, + curValuesOut, prevDimOut.IndexVector, curDimOut.IndexVector, + prevResultSize, curResultSize, cudaStream); + + int resSize = prevResultSize + curResultSize; + if (isLastBatch && resSize > 0) { + resSize = makeHLLVector( + curDimOut.HashValues, curDimOut.IndexVector, curValuesOut, resSize, + hllVectorPtr, hllVectorSizePtr, hllRegIDCountPerDimPtr, cudaStream); + } + copyDim(prevDimOut, curDimOut, resSize, cudaStream); + return resSize; +} + +} // namespace ares diff --git a/cuda_code/hmm.cu b/cuda_code/hmm.cu new file mode 100644 index 0000000000000000000000000000000000000000..053aeb1ad484d7108bf569ef0f62cfd84d641feb --- /dev/null +++ b/cuda_code/hmm.cu @@ -0,0 +1,1436 @@ +#include "logsum.h" +#include "hmm.h" +#include "f5c.h" +#include "cuda_runtime.h" +#include "device_launch_parameters.h" +#include +#include + +#define eslINFINITY INFINITY +#define PSR9_KMER_SKIP 0 +#define PSR9_BAD_EVENT 1 +#define PSR9_MATCH 2 +#define PSR9_NUM_STATES 3 +#define HMT_FROM_SAME_M 0 +#define HMT_FROM_PREV_M 1 +#define HMT_FROM_SAME_B 2 +#define HMT_FROM_PREV_B 3 +#define HMT_FROM_PREV_K 4 + +#define HMT_FROM_SOFT 5 +#define HMT_NUM_MOVEMENT_TYPES 6 +#define HAF_ALLOW_PRE_CLIP 1 +#define HAF_ALLOW_POST_CLIP 2 +#define BAD_EVENT_PENALTY 0.0f +#define TRANS_START_TO_CLIP 0.5 +#define TRANS_CLIP_SELF 0.9 + +#define MIN_SEPARATION 10 +#define MIN_FLANK 10 + +#define MAX_EVENT_TO_BP_RATIO 20 + +#define METHYLATED_SYMBOL 'M' + +const char* complement_dna = "TGCA"; +const uint8_t rank_dna[256] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + +#define CUDA_CHK() \ + { gpu_assert(__FILE__, __LINE__); } + + static inline void gpu_assert(const char* file, uint64_t line) { + cudaError_t code = cudaGetLastError(); + if (code != cudaSuccess) { + fprintf(stdout, "[%s::ERROR]\033[1;31m Cuda error: %s \n in file : %s line number : %lu\033[0m\n", + __func__, cudaGetErrorString(code), file, line); + if (code == cudaErrorLaunchTimeout) { + ERROR("%s", 
"The kernel timed out. You have to first disable the cuda " + "time out."); + fprintf( + stdout, + "On Ubuntu do the following\nOpen the file /etc/X11/xorg.conf\nYou " + "will have a section about your NVIDIA device. Add the following " + "line to it.\nOption \"Interactive\" \"0\"\nIf you do not have a " + "section about your NVIDIA device in /etc/X11/xorg.conf or you do " + "not have a file named /etc/X11/xorg.conf, run the command sudo " + "nvidia-xconfig to generate a xorg.conf file and do as above.\n\n"); + } + exit(-1); + } + } + +static inline uint64_t cuda_freemem(int32_t devicenum) { + + uint64_t freemem, total; + cudaMemGetInfo(&freemem, &total); + CUDA_CHK(); + // fprintf(stdout, "[%s] (%lld) %.2f GB free of total %.2f GB GPU memory\n",__func__,freemem / double(1024 * 1024 * 1024), freemem,total / double(1024 * 1024 * 1024)); + + return freemem; +} + +static inline uint64_t tegra_freemem(int32_t devicenum) { + + uint64_t freemem, total; + cudaMemGetInfo(&freemem, &total); + CUDA_CHK(); + + // RAM //from tegrastats + FILE* f = fopen("/proc/meminfo", "r"); + int64_t totalRAMkB = -1, freeRAMkB = -1, memAvailablekB=-1, buffersRAMkB = -1, cachedRAMkB = -1; + + if(f) + { + // add if (blah) {} to get around compiler warning + if (fscanf(f, "MemTotal: %ld kB\n", &totalRAMkB)) {} + if (fscanf(f, "MemFree: %ld kB\n", &freeRAMkB)) {} + if (fscanf(f, "MemAvailable: %ld kB\n", &memAvailablekB)) {} + if (fscanf(f, "Buffers: %ld kB\n", &buffersRAMkB)) {} + if (fscanf(f, "Cached: %ld kB\n", &cachedRAMkB)) {} + fclose(f); + } + if(totalRAMkB>0 && freeRAMkB>0 && buffersRAMkB>0 && cachedRAMkB>0){ + freemem += (cachedRAMkB+buffersRAMkB)*1024; + } + else{ + WARNING("%s","Reading /proc/meminfo failed. Inferred free GPU memory might be wrong."); + } + + // fprintf(stdout, "[%s] %.2f GB free of total %.2f GB GPU memory\n",__func__,freemem / double(1024 * 1024 * 1024),total / double(1024 * 1024 * 1024)); + + return freemem; +} + + + + +#define log_inv_sqrt_2pi -0.918938f // Natural logarithm + +__device__ float +log_normal_pdf(float x, float gp_mean, float gp_stdv, float gp_log_stdv) { + /*INCOMPLETE*/ + // float log_inv_sqrt_2pi = -0.918938f; // Natural logarithm + float a = (x - gp_mean) / gp_stdv; + return log_inv_sqrt_2pi - gp_log_stdv + (-0.5f * a * a); + // return 1; +} + +__device__ float +log_probability_match_r9(scalings_t scaling, model_t cpgmodel, event_t* event, + int event_idx, uint32_t kmer_rank) { + + // assert(kmer_rank < 15625); + + float unscaledLevel = event[event_idx].mean; + float scaledLevel = unscaledLevel; + float gp_mean = scaling.scale * cpgmodel.level_mean + scaling.shift; + float gp_stdv = cpgmodel.level_stdv * scaling.var; + float gp_log_stdv = cpgmodel.level_log_stdv + scaling.log_var; + + float lp = log_normal_pdf(scaledLevel, gp_mean, gp_stdv, gp_log_stdv); + + return lp; +} + +__device__ double +add_logs(const double a, const double b) +{ + //return a+b; + if(a == -INFINITY && b == -INFINITY) + return -INFINITY; + + if(a > b) { + double diff = b - a; + return a + log(1.0 + exp(diff)); + } else { + double diff = a - b; + return b + log(1.0 + exp(diff)); +} +} + +static inline uint32_t get_rank(char base) { + if (base == 'A') { //todo: do we neeed simple alpha? 
+ return 0; + } else if (base == 'C') { + return 1; + } else if (base == 'G') { + return 2; + } else if (base == 'M') { + return 3; + } else if (base == 'T') { + return 4; + } else { + WARNING("A None ACGMT base found : %c", base); + return 0; + } +} + +// reverse-complement a DNA string +std::string reverse_complement(const std::string& str) { + std::string out(str.length(), 'A'); + size_t i = 0; // input + int j = str.length() - 1; // output + while (i < str.length()) { + // complement a single base + assert(str[i] != METHYLATED_SYMBOL); + out[j--] = complement_dna[rank_dna[(int)str[i++]]]; + } + return out; +} + +// return the lexicographic rank of the kmer amongst all strings of +// length k for this alphabet +static inline uint32_t get_kmer_rank(const char* str, uint32_t k) { + uint32_t p = 1; + uint32_t r = 0; + + // from last base to first + for (uint32_t i = 0; i < k; ++i) { + //r += rank(str[k - i - 1]) * p; + //p *= size(); + r += get_rank(str[k - i - 1]) * p; + p *= 5; + } + return r; +} + +struct RecognitionMatch +{ + unsigned offset; // the matched position in the recognition site + unsigned length; // the length of the match, 0 indicates no match + bool covers_methylated_site; // does the match cover an M base? +}; + +const uint32_t num_recognition_sites = 1; +const uint32_t recognition_length = 2; +const char* recognition_sites[] = { "CG" }; +const char* recognition_sites_methylated[] = { "MG" }; +const char* recognition_sites_methylated_complement[] = { "GM" }; + +// Check whether a recognition site starts at position i of str +inline RecognitionMatch match_to_site(const std::string& str, size_t i, const char* recognition, size_t rl) +{ + RecognitionMatch match; + match.length = 0; + match.offset = 0; + match.covers_methylated_site = false; + + // Case 1: str is a substring of recognition + const char* p = strstr(recognition, str.c_str()); + if(i == 0 && p != NULL) { + match.offset = p - recognition; + match.length = str.length(); + } else { + // Case 2: the suffix str[i..n] is a prefix of recognition + size_t cl = std::min(rl, str.length() - i); + if(str.compare(i, cl, recognition, cl) == 0) { + match.offset = 0; + match.length = cl; + } + } + + //printf("Match site: %s %s %s %d %d\n", str.c_str(), str.substr(i).c_str(), recognition, match.offset, match.length); + if(match.length > 0) { + match.covers_methylated_site = + str.substr(i, match.length).find_first_of(METHYLATED_SYMBOL) != std::string::npos; + } + + return match; +} + +// If the alphabet supports methylated bases, convert str +// to a methylated string using the recognition sites +std::string methylate(const std::string& str) +{ + std::string out(str); + size_t i = 0; + while(i < out.length()) { + size_t stride = 1; + + // Does this location match a recognition site? 
+ for(size_t j = 0; j < num_recognition_sites; ++j) { + + RecognitionMatch match = match_to_site(str, i, recognition_sites[j], recognition_length); + // Require the recognition site to be completely matched + if(match.length == recognition_length) { + // Replace by the methylated version + out.replace(i, recognition_length, recognition_sites_methylated[j]); + stride = match.length; // skip to end of match + break; + } + } + + i += stride; + } + return out; +} + +// reverse-complement a string meth aware +// when the string contains methylated bases, the methylation +// symbol transfered to the output strand in the appropriate position +std::string reverse_complement_meth(const std::string& str) +{ + std::string out(str.length(), 'A'); + size_t i = 0; // input + int j = str.length() - 1; // output + while(i < str.length()) { + int recognition_index = -1; + RecognitionMatch match; + + // Does this location (partially) match a methylated recognition site? + for(size_t j = 0; j < num_recognition_sites; ++j) { + match = match_to_site(str, i, recognition_sites_methylated[j], recognition_length); + if(match.length > 0 && match.covers_methylated_site) { + recognition_index = j; + break; + } + } + + // If this subsequence matched a methylated recognition site, + // copy the complement of the site to the output + if(recognition_index != -1) { + for(size_t k = match.offset; k < match.offset + match.length; ++k) { + out[j--] = recognition_sites_methylated_complement[recognition_index][k]; + i += 1; + } + } else { + // complement a single base + assert(str[i] != METHYLATED_SYMBOL); + //out[j--] = complement(str[i++]); + out[j--] = complement_dna[rank_dna[(int)str[i++]]]; + } + } + return out; +} + +__global__ void group_motif(ptr_t* read_ptr, char* read, uint32_t* cpg_sites, uint32_t* group_start, + uint32_t* group_end, uint32_t* group_size, uint32_t* num_site, uint32_t num_reads ) +{ + int thread_id = blockDim.x * blockIdx.x + threadIdx.x; + // printf("%d, %d, %d\n", thread_id,read_idx[thread_id], has_events[thread_id]); + + if(thread_id < num_reads) + { + + uint32_t site_index = 0; + + // Scan the sequence for CpGs + for (ptr_t j = read_ptr[thread_id]; j < (read_ptr[thread_id+1]-1); j++) + { + if(read[j] == 'C' && read[j+1] == 'G') { + cpg_sites[thread_id*1400 + site_index] = j-read_ptr[thread_id]; + site_index++; + } + } + num_site[thread_id] = site_index; + + + // Batch the CpGs together into groups that are separated by some minimum distance + int curr_idx = 0; + int group_index = 0; + while(curr_idx < site_index) + { + int end_idx = curr_idx + 1; + while(end_idx < site_index) { + if(cpg_sites[thread_id*1400 + end_idx] - cpg_sites[thread_id*1400 + end_idx - 1] > MIN_SEPARATION) + break; + end_idx += 1; + } + group_start[thread_id * 1400+group_index] = curr_idx; + group_end[thread_id * 1400+group_index] = end_idx; + group_index++; + curr_idx = end_idx; + } + + group_size[thread_id] = group_index; + + } +} + +__global__ void lookUpEvent(uint32_t* cpg_sites, uint32_t* group_start, uint32_t* group_end, uint32_t* group_size, + uint32_t* sub_start_pos, uint32_t* sub_end_pos, uint32_t* ref_start, ptr_t* alignment_ptr, AlignedPair* alignment, uint32_t* num_rows, uint32_t* num_cols, + uint32_t* event_start_idx, uint32_t* event_stop_idx,int8_t* event_stride, uint32_t* n_kmers, uint32_t* n_events, + uint32_t num_reads, uint32_t kmer_size) +{ + uint32_t i = blockDim.x * blockIdx.x + threadIdx.x; + + uint32_t read_id = i/1400; + uint32_t group_id = i%1400; + + if(group_id < group_size[read_id]) + { + + 
uint32_t start_idx = group_start[i]; + uint32_t end_idx = group_end[i]; + + if(start_idx>0||end_idx>0) + { + // the coordinates on the reference substring for this group of sites + sub_start_pos[i] = cpg_sites[read_id * 1400 + start_idx] - MIN_FLANK; + sub_end_pos[i] = cpg_sites[read_id * 1400 + end_idx - 1] + MIN_FLANK; + uint32_t span = cpg_sites[read_id * 1400 + end_idx - 1] - cpg_sites[read_id * 1400 + start_idx]; + + // skip if too close to the start of the read alignment or + // if the reference range is too large to efficiently call + event_start_idx[i] = -1; + event_stop_idx[i] = -1; + + if(sub_start_pos[i]>2000000||sub_start_pos[i]<=0) + sub_start_pos[i] = 0; + + + if(sub_start_pos[i] > MIN_SEPARATION && span <= 200) + { + + + uint32_t calling_start = sub_start_pos[i] + ref_start[read_id]; + uint32_t calling_end = sub_end_pos[i] + ref_start[read_id]; + + ptr_t start_index = alignment_ptr[read_id]; + ptr_t end_index = alignment_ptr[read_id+1]; + int e1 = -1, e2 = -1; + bool left_bounded, right_bounded; + + for(ptr_t i = start_index; i= calling_start) + { + e1 = alignment[i].read_pos; + break; + } + } + + for(ptr_t i = start_index; i= calling_end) + { + e2 = alignment[i].read_pos; + break; + } + } + + double ratio = fabs((double)(e2 - e1)) / (calling_start - calling_end); + if (abs(e2 - e1) <= 10 || ratio > MAX_EVENT_TO_BP_RATIO) { + e1=-1; + e2=-1; + } + + + + if(e1>=0&&e2>=0) + { + event_start_idx[i] = e1; + event_stop_idx[i] = e2; + } + else + { + event_start_idx[i] = 0; + event_stop_idx[i] = 0; + } + + uint32_t length_m_seq = sub_end_pos[i] - sub_start_pos[i] + 1; + uint32_t n_states; + if(length_m_seq>0) + { + n_kmers[i] = length_m_seq- kmer_size + 1; + n_states = PSR9_NUM_STATES * (n_kmers[i] + 2); // + 2 for explicit terminal states + }else{ + n_kmers[i]=0; + n_states = 0; + } + + if(event_stop_idx[i] > event_start_idx[i]) + n_events[i] = event_stop_idx[i] - event_start_idx[i] + 1; + else + n_events[i] = event_start_idx[i] - event_stop_idx[i] + 1; + if(n_events[i]>1000) + n_events[i]=0; + + num_rows[i] = n_events[i] + 1; + num_cols[i] = n_states; + event_stride[i] = event_start_idx[i] <= event_stop_idx[i] ? 
1 : -1; + + } + } + + } +} + + + + +__global__ void profile_initialize_kernel(uint32_t* group_size, float* matrix, ptr_t* matrix_ptr, uint32_t* num_rows, + uint32_t* num_cols, uint32_t n_group) +{ + + int thread_id = blockDim.x * blockIdx.x + threadIdx.x; + + if(thread_id < n_group) + { + uint32_t read_id = thread_id/1400; + uint32_t group_id = thread_id%1400; + + if(group_id1) + { + uint32_t event_idx = event_start_idx[thread_id] + (num_events - 1) * event_stride[thread_id]; + // assert(event_idx == event_stop_idx[thread_id]); + // if(event_idx != event_stop_idx[thread_id]) + // { + + post_flank[post_ptr-2] = log(TRANS_START_TO_CLIP) + + -3.0f + + log(1 - TRANS_CLIP_SELF); + for(int i = post_ptr - 3; i >= post_flank_ptr[thread_id]; --i) { + post_flank[i] = log(TRANS_CLIP_SELF) + + -3.0f + + post_flank[i + 1]; + } + // } + } + } + } + +} + + +__global__ void profile_fill_kernel(uint32_t* group_size, BlockTransitions* transitions, uint32_t* trans_ptr,float* matrix,ptr_t* matrix_ptr, + uint32_t* num_rows, uint32_t* num_cols, ptr_t* kmer_ranks_ptr, uint32_t* kmer_ranks, uint32_t* event_start_idx, int8_t* event_stride, + scalings_t* scalings, model_t* cpgmodels, ptr_t* event_ptr, event_t* event_table, HMMUpdateScores* scores, uint32_t hmm_flags, float* pre_flank, + ptr_t* pre_flank_ptr, float* post_flank, ptr_t* post_flank_ptr, float* lp_end, uint32_t n_group) +{ + int thread_id = blockDim.x * blockIdx.x + threadIdx.x; + if(thread_id < n_group) + { + uint32_t read_id = thread_id/1400; + uint32_t group_id = thread_id%1400; + + if(group_idn_bam_rec; + + uint32_t num_groups = num_reads*1400; + + if(core->total_num_reads == 0) + + { + core->host_read_ptr = (ptr_t*)malloc(sizeof(ptr_t)*(num_reads+1)); + core->host_alignment_ptr = (ptr_t*)malloc(sizeof(ptr_t) * (num_reads+1)); + core->host_ref_start_pos = (uint32_t*)malloc(sizeof(uint32_t) * num_reads); + core->host_event_ptr = (ptr_t*)malloc(sizeof(ptr_t)*num_reads); + core->host_scalings = (scalings_t*)malloc(sizeof(scalings_t)*num_reads); + core->bam_rec = (bam1_t**)malloc(sizeof(bam1_t*) * num_reads); + core->qname = (char**)malloc(sizeof(char*)*num_reads); + + } + else{ + core->host_read_ptr = (ptr_t*)realloc(core->host_read_ptr, sizeof(ptr_t)*(core->total_num_reads+num_reads+1)); + core->host_alignment_ptr = (ptr_t*)realloc(core->host_alignment_ptr , sizeof(ptr_t)*(core->total_num_reads+num_reads+1)); + core->host_ref_start_pos = (uint32_t*)realloc(core->host_ref_start_pos , sizeof(uint32_t)*(core->total_num_reads+num_reads)); + core->host_event_ptr = (ptr_t*)realloc(core->host_event_ptr , sizeof(ptr_t)*(core->total_num_reads+num_reads)); + core->host_scalings = (scalings_t*)realloc(core->host_scalings, sizeof(scalings_t)*(core->total_num_reads+num_reads)); + core->bam_rec = (bam1_t**)realloc(core->bam_rec,sizeof(bam1_t*)*(core->total_num_reads+num_reads)); + core->qname = (char**)realloc(core->qname, sizeof(char*)*(core->total_num_reads+num_reads)); + } + + uint32_t nreads_new = 0; + + for (uint32_t i = 0; i < num_reads; i++) { + + + if(!db->read_stat_flag[i]) + { + uint32_t j = nreads_new + core->total_num_reads; + core->host_read_ptr[j] = core->sum_read_len; + std::string ref_seq = db->fasta_cache[i]; + ref_seq = disambiguate(ref_seq); + core->ref_seq.push_back(ref_seq); + core->sum_read_len += (ref_seq.size() + 1); //with null term + + core->host_alignment_ptr[j] = core->sum_alignment; + AlignedPair *event_align_record = NULL; + ptr_t event_align_record_size = + get_event_alignment_record(db->bam_rec[i], db->read_len[i], 
db->base_to_event_map[i], &event_align_record, core->kmer_size); + + core->sum_alignment += event_align_record_size; + + core->host_ref_start_pos[j]=db->bam_rec[i]->core.pos; + core->host_event_ptr[j] = core->sum_n_events; + core->sum_n_events += db->et[i].n; + core->host_scalings[j]=db->scalings[i]; + core->bam_rec[j]= bam_init1(); + memcpy(core->bam_rec[j],db->bam_rec[i], 8); + char* bam_qname = bam_get_qname(db->bam_rec[i]); + core->qname[j]=(char*)malloc(sizeof(char)*40); + strcpy(core->qname[j], bam_qname); + nreads_new++; + } +} + + + + core->host_read_ptr[nreads_new+core->total_num_reads] = core->sum_read_len; + core->host_alignment_ptr[nreads_new+core->total_num_reads] = core->sum_alignment; + + if(core->total_num_reads==0) + { + core->host_read = (char*)malloc(sizeof(char) * core->sum_read_len); + core->host_alignment = (AlignedPair*)malloc(sizeof(AlignedPair) * core->sum_alignment); + core->host_event_table = (event_t*)malloc(sizeof(event_t)* core->sum_n_events); + core->host_rc = (uint8_t*)malloc(sizeof(uint8_t) * core->sum_read_len); + } + else + { + core->host_read = (char*)realloc(core->host_read, sizeof(char)*core->sum_read_len); + core->host_alignment = (AlignedPair*)realloc(core->host_alignment, sizeof(AlignedPair)*core->sum_alignment); + core->host_event_table = (event_t*)realloc(core->host_event_table, sizeof(event_t)*core->sum_n_events); + core->host_rc = (uint8_t*)realloc(core->host_rc, sizeof(uint8_t)*core->sum_read_len); + } + + nreads_new=0; + for (uint32_t i = 0; i < num_reads; i++) { + + + if(!db->read_stat_flag[i]) + { + uint32_t j = nreads_new + core->total_num_reads; + ptr_t idx = core->host_read_ptr[j]; + std::string ref_seq = db->fasta_cache[i]; + ref_seq = disambiguate(ref_seq); + strcpy(&core->host_read[idx], ref_seq.c_str()); + + core->host_rc[j] = bam_is_rev(db->bam_rec[i]); + AlignedPair *event_align_record = NULL; + int32_t event_align_record_size = + get_event_alignment_record(db->bam_rec[i], db->read_len[i], db->base_to_event_map[i], &event_align_record, core->kmer_size); + + ptr_t alignment_idx = core->host_alignment_ptr[j]; + + memcpy(&core->host_alignment[alignment_idx], event_align_record, sizeof(AlignedPair) * event_align_record_size); + + idx = core->host_event_ptr[j]; + memcpy(&core->host_event_table[idx], db->et[i].event, sizeof(event_t) * db->et[i].n); + nreads_new++; + } +} + + core->total_num_reads += nreads_new; + + if(batch_id%3==2) + { + + profile_hmm_score_cuda(core, db); + + } + +} + +void profile_hmm_score_cuda(core_t *core, db_t* db) +{ + + + // fprintf(stdout,"meth_cuda start\n"); + + uint32_t num_reads = core->total_num_reads; + uint32_t num_groups = num_reads*1400; + //cuda data + char* read; + ptr_t* read_ptr; //index pointer for flattedned "reads" + uint32_t* cpg_sites ; + uint32_t* group_start ; + uint32_t* group_end ; + uint32_t* group_size ; + uint32_t* sub_start_pos; + uint32_t* sub_end_pos; + + ptr_t* alignment_ptr; + AlignedPair* alignment; + uint32_t* ref_start_pos; + + uint32_t* n_kmers; + uint32_t* n_events; + + float* matrix; + ptr_t* matrix_ptr; + uint32_t* num_rows; + uint32_t* num_cols; + uint32_t* trans_ptr; + BlockTransitions* transitions; + double* events_per_base; + + float* pre_flank; + ptr_t* pre_flank_ptr; + float* post_flank; + ptr_t* post_flank_ptr; + uint32_t* event_start_idx; + uint32_t* event_stop_idx; + int8_t* event_stride; + ptr_t* kmer_ranks_ptr; + uint32_t* kmer_ranks; + uint32_t* mcpg_kmer_ranks; + + ptr_t* event_ptr; + event_t* event_table; + scalings_t* scalings; + model_t* cpgmodels; + + 
HMMUpdateScores* scores; + float* lp_end; + float* mcpg_lp_end; + + + // fprintf(stdout,"num_reads %ld size * num_groups %ld sum_alignment %ld\n", num_reads, num_groups*sizeof(uint32_t),core->sum_alignment); + + /* for(int i=0;i<10;i++) + { + fprintf(stderr,"\n i %d\n",i); + for(int ptr=core->host_read_ptr[i]; ptrhost_read_ptr[i+1]; ptr++) + fprintf(stderr,"%c", core->host_read[ptr]); + }*/ + + cudaMalloc((void**)&read, core->sum_read_len*sizeof(char)); + CUDA_CHK(); + cudaMalloc((void **)&read_ptr, sizeof(ptr_t)*(num_reads+1)); + CUDA_CHK(); + cudaMalloc((void **)&cpg_sites, sizeof(uint32_t)*num_groups); + CUDA_CHK(); + cudaMalloc((void **)&group_start, sizeof(uint32_t)*num_groups); + CUDA_CHK(); + cudaMalloc((void **)&group_end, sizeof(uint32_t)*num_groups); + CUDA_CHK(); + cudaMalloc((void **)&group_size, sizeof(uint32_t)*num_reads); + CUDA_CHK(); + cudaMalloc((void **)&sub_start_pos, sizeof(uint32_t) * num_groups); + CUDA_CHK(); + cudaMalloc((void **)&sub_end_pos, sizeof(uint32_t) * num_groups); + CUDA_CHK(); + + cudaMalloc((void **)&ref_start_pos,sizeof(uint32_t)*num_reads ); + CUDA_CHK(); + cudaMalloc((void **)&alignment_ptr,sizeof(ptr_t) * (num_reads+1)); + CUDA_CHK(); + cudaMalloc((void **)&alignment,sizeof(AlignedPair) * core->sum_alignment); + CUDA_CHK(); + + cudaMalloc((void**)&num_rows, num_groups*sizeof(uint32_t)); + CUDA_CHK(); + cudaMalloc((void**)&num_cols, num_groups*sizeof(uint32_t)); + CUDA_CHK(); + ptr_t allo_size = num_groups*sizeof(uint32_t); + cudaMalloc((void**)&event_start_idx, allo_size); + CUDA_CHK(); + cudaMalloc((void**)&event_stop_idx, allo_size); + CUDA_CHK(); + cudaMalloc((void**)&event_stride, sizeof(int8_t)*num_groups); + CUDA_CHK(); + cudaMalloc((void**)&n_kmers, num_groups*sizeof(uint32_t)); + CUDA_CHK(); + cudaMalloc((void**)&n_events, num_groups*sizeof(uint32_t)); + CUDA_CHK(); + + + // fprintf(stdout,"start cudaMemcpy\n"); + + cudaMemcpy(read_ptr, core->host_read_ptr, sizeof(ptr_t)*(num_reads+1), cudaMemcpyHostToDevice); + CUDA_CHK(); + cudaMemcpy(read, core->host_read, core->sum_read_len * sizeof(char), cudaMemcpyHostToDevice); + CUDA_CHK(); + cudaMemcpy(ref_start_pos, core->host_ref_start_pos, sizeof(uint32_t)*num_reads, cudaMemcpyHostToDevice); + CUDA_CHK(); + cudaMemcpy(alignment_ptr, core->host_alignment_ptr, sizeof(ptr_t) * (num_reads+1), cudaMemcpyHostToDevice); + CUDA_CHK(); + cudaMemcpy(alignment, core->host_alignment, sizeof(AlignedPair) * core->sum_alignment, cudaMemcpyHostToDevice); + CUDA_CHK(); + + uint32_t* num_site; + cudaMalloc((void **)&num_site, sizeof(uint32_t)*num_reads); + CUDA_CHK(); + + int threadPerBlock = 512; + int blockPerGrid = num_reads/threadPerBlock + 1; + + group_motif <<< blockPerGrid, threadPerBlock >>> (read_ptr, read, cpg_sites, group_start, group_end, + group_size, num_site, num_reads); + cudaDeviceSynchronize(); + CUDA_CHK(); + + uint32_t* host_group_start = (uint32_t*)malloc(sizeof(uint32_t)*num_groups); + MALLOC_CHK(host_group_start); + cudaMemcpy(host_group_start, group_start, sizeof(uint32_t)*num_groups, cudaMemcpyDeviceToHost); + CUDA_CHK(); + + uint32_t* host_group_end = (uint32_t*)malloc(sizeof(uint32_t)*num_groups); + MALLOC_CHK(host_group_end); + cudaMemcpy(host_group_end, group_end, sizeof(uint32_t)*num_groups, cudaMemcpyDeviceToHost); + CUDA_CHK(); + + uint32_t* host_group_size = (uint32_t*)malloc(sizeof(uint32_t)*num_reads); + MALLOC_CHK(host_group_size); + cudaMemcpy(host_group_size, group_size, sizeof(uint32_t)*num_reads, cudaMemcpyDeviceToHost); + CUDA_CHK(); + +/* + for(int i =0;i<10;i++) + { + 
fprintf(stderr,"read %d\n",i); + for(int group=0;group>>(cpg_sites, group_start, group_end, group_size, + sub_start_pos, sub_end_pos, ref_start_pos, alignment_ptr, alignment, num_rows, num_cols, + event_start_idx, event_stop_idx, event_stride, n_kmers, n_events, + num_reads, core->kmer_size); + cudaDeviceSynchronize(); + CUDA_CHK(); + uint32_t* host_sub_start_pos = (uint32_t*)malloc(sizeof(uint32_t) * num_groups); + uint32_t* host_sub_end_pos = (uint32_t*)malloc(sizeof(uint32_t) * num_groups); + uint32_t* host_n_kmers = (uint32_t*)malloc(sizeof(uint32_t) * num_groups); + uint32_t* host_n_events = (uint32_t*)malloc(sizeof(uint32_t) * num_groups); + uint32_t* host_num_cols = (uint32_t*)malloc(sizeof(uint32_t) * num_groups); + uint32_t* host_event_start = (uint32_t*)malloc(sizeof(uint32_t)*num_groups); + uint32_t* host_event_stop = (uint32_t*)malloc(sizeof(uint32_t)*num_groups); + + cudaMemcpy(host_n_kmers, n_kmers, num_groups*sizeof(uint32_t),cudaMemcpyDeviceToHost); + CUDA_CHK(); + cudaMemcpy(host_n_events, n_events, num_groups*sizeof(uint32_t),cudaMemcpyDeviceToHost); + CUDA_CHK(); + cudaMemcpy(host_sub_start_pos, sub_start_pos, num_groups*sizeof(uint32_t),cudaMemcpyDeviceToHost); + CUDA_CHK(); + cudaMemcpy(host_sub_end_pos, sub_end_pos, num_groups*sizeof(uint32_t),cudaMemcpyDeviceToHost); + CUDA_CHK(); + cudaMemcpy(host_num_cols, num_cols,num_groups*sizeof(uint32_t),cudaMemcpyDeviceToHost ); + CUDA_CHK(); + cudaMemcpy(host_event_start, event_start_idx, num_groups*sizeof(uint32_t),cudaMemcpyDeviceToHost); + CUDA_CHK(); + cudaMemcpy(host_event_stop, event_stop_idx, num_groups*sizeof(uint32_t),cudaMemcpyDeviceToHost); + CUDA_CHK(); + + + + ptr_t* host_kmer_ranks_ptr = (ptr_t*)malloc(sizeof(ptr_t)*num_groups); + uint32_t* host_kmer_ranks = (uint32_t*)malloc(sizeof(uint32_t)*num_groups*250); + + + uint32_t* host_mcpg_kmer_ranks = (uint32_t*)malloc(sizeof(uint32_t)*num_groups*250); + + ptr_t sum_kmer_ranks = 0; + + for(uint32_t i = 0; i < num_groups; i++) + { + uint32_t read_id = i/1400; + + host_kmer_ranks_ptr[i] = sum_kmer_ranks; + if(host_sub_start_pos[i] > MIN_SEPARATION && (host_sub_end_pos[i]- host_sub_start_pos[i]-2*MIN_FLANK)<200) + { + std::string subseq = core->ref_seq[read_id].substr(host_sub_start_pos[i], host_sub_end_pos[i] - host_sub_start_pos[i] + 1); + std::string rc_subseq = reverse_complement(subseq); + // Methylate all CpGs in the sequence and score again + std::string mcpg_subseq = methylate(subseq); + std::string rc_mcpg_subseq = reverse_complement_meth(mcpg_subseq); + + const char* m_seq = subseq.c_str(); + const char* m_rc_seq = rc_subseq.c_str(); + + const char* m_mcpg_seq = mcpg_subseq.c_str(); + const char* m_rc_mcpg_seq = rc_mcpg_subseq.c_str(); + + uint32_t num_kmers = host_num_cols[i]/ PSR9_NUM_STATES - 2; + int32_t seq_len = strlen(m_seq); + int32_t mcpg_seq_len = strlen(m_mcpg_seq); + + + for(size_t ki = 0; ki < num_kmers; ++ki){ + const char* substring = 0; + const char* mcpg_substring = 0; + if(core->host_rc[read_id]==0){ + substring=m_seq+ki; + mcpg_substring = m_mcpg_seq+ki; + } + else{ + substring=m_rc_seq+seq_len-ki-core->kmer_size; + mcpg_substring = m_rc_mcpg_seq + mcpg_seq_len -ki-core->kmer_size; + } + + host_kmer_ranks[host_kmer_ranks_ptr[i]+ki] = get_kmer_rank(substring,core->kmer_size); + host_mcpg_kmer_ranks[host_kmer_ranks_ptr[i]+ki] = get_kmer_rank(mcpg_substring, core->kmer_size); + + } + sum_kmer_ranks+= num_kmers; + } + } + + ptr_t* host_matrix_ptr = (ptr_t*)malloc(sizeof(ptr_t)*num_groups); + + uint32_t* host_trans_ptr = 
(uint32_t*)malloc(sizeof(uint32_t)*(num_groups+1)); + + ptr_t* host_pre_flank_ptr = (ptr_t*)malloc(sizeof(ptr_t)*(num_groups+1)); + ptr_t* host_post_flank_ptr = (ptr_t*)malloc(sizeof(ptr_t)*(num_groups+1)); + + + ptr_t matrix_size = 0; + ptr_t num_kmers = 0; + ptr_t pre_flank_size = 0; + ptr_t post_flank_size = 0; + uint32_t ngroup_new = 0; + + for(uint32_t r = 0; rkmer_size-1)>0) + n_states = PSR9_NUM_STATES * (n_kmers + 2); + else + { + n_states = 0; + n_kmers = 0; + } + + host_matrix_ptr[p]=matrix_size; + matrix_size = matrix_size + n_rows*n_states; + + + host_trans_ptr[p] = num_kmers; + num_kmers += n_kmers; + + + host_pre_flank_ptr[p] = pre_flank_size; + host_post_flank_ptr[p] = post_flank_size; + pre_flank_size += n_events+1; + post_flank_size += n_events; + ngroup_new++; + } + + host_trans_ptr[1400*r+host_group_size[r]] = num_kmers; + host_pre_flank_ptr[1400*r+host_group_size[r]] = pre_flank_size; + host_post_flank_ptr[1400*r+host_group_size[r]] = post_flank_size; + +} + + host_trans_ptr[ngroup_new] = num_kmers; + host_pre_flank_ptr[ngroup_new] = pre_flank_size; + host_post_flank_ptr[ngroup_new] = post_flank_size; + + + uint32_t hmm_flags = HAF_ALLOW_PRE_CLIP | HAF_ALLOW_POST_CLIP; + + cudaMalloc((void**)&kmer_ranks_ptr, sizeof(ptr_t)*num_groups); + CUDA_CHK(); + + cudaMalloc((void**)&kmer_ranks, sizeof(uint32_t)*num_groups*250); + CUDA_CHK(); + + /* cudaMalloc((void**)&mcpg_kmer_ranks, sizeof(uint32_t)*num_groups*250); + CUDA_CHK(); +*/ + cudaMalloc((void**)&matrix, matrix_size*sizeof(float)); + CUDA_CHK(); + + cudaMalloc((void**)&matrix_ptr, num_groups*sizeof(ptr_t)); + CUDA_CHK(); + + cudaMalloc((void**)&trans_ptr, sizeof(uint32_t)*(num_groups+1)); + CUDA_CHK(); + + uint32_t block_size = sizeof(BlockTransitions)*num_kmers; + + cudaMalloc((void**)&transitions, block_size); + CUDA_CHK(); + + cudaMalloc((void**)&events_per_base, sizeof(double)*num_reads); + CUDA_CHK(); + + cudaMalloc((void**)&pre_flank, sizeof(float)*pre_flank_size); + CUDA_CHK(); + cudaMalloc((void**)&pre_flank_ptr, sizeof(ptr_t)*(num_groups+1)); + CUDA_CHK(); + cudaMalloc((void**)&post_flank, sizeof(float)*post_flank_size); + CUDA_CHK(); + cudaMalloc((void**)&post_flank_ptr, sizeof(ptr_t)*(num_groups+1)); + CUDA_CHK(); + + cudaMalloc((void**)&event_ptr, sizeof(ptr_t)*num_reads); + CUDA_CHK(); + cudaMalloc((void**)&event_table, sizeof(event_t) * core->sum_n_events); + CUDA_CHK(); + cudaMalloc((void**)&scalings, sizeof(scalings_t) *num_reads); + CUDA_CHK(); + cudaMalloc((void**)&cpgmodels,MAX_NUM_KMER_METH * sizeof(model_t)); + CUDA_CHK(); + + cudaMalloc((void**)&scores, sizeof(HMMUpdateScores)*num_groups); + CUDA_CHK(); + cudaMalloc((void**)&lp_end, sizeof(float)*num_groups); + CUDA_CHK(); + /*cudaMalloc((void**)&mcpg_lp_end, sizeof(float)*num_groups); + CUDA_CHK();*/ + + + +// fprintf(stdout,"after cudaMalloc\n"); + int32_t cuda_device_num = core->opt.cuda_dev_id; + cudaDeviceProp prop; + cudaGetDeviceProperties(&prop, cuda_device_num); + uint64_t free_mem = 0; + free_mem=cuda_freemem(cuda_device_num); + + cudaMemcpy(kmer_ranks_ptr, host_kmer_ranks_ptr, sizeof(ptr_t)*num_groups, cudaMemcpyHostToDevice); + CUDA_CHK(); + + cudaMemcpy(kmer_ranks, host_kmer_ranks, sizeof(uint32_t)*num_groups*250, cudaMemcpyHostToDevice); + CUDA_CHK(); + + + cudaMemcpy(trans_ptr, host_trans_ptr, sizeof(uint32_t)*(num_groups+1), cudaMemcpyHostToDevice); + CUDA_CHK(); + + cudaMemcpy(events_per_base, db->events_per_base, sizeof(double)*num_reads, cudaMemcpyHostToDevice); + CUDA_CHK(); + + cudaMemcpy(pre_flank_ptr, host_pre_flank_ptr, 
sizeof(ptr_t)*(num_groups+1), cudaMemcpyHostToDevice); + CUDA_CHK(); + + cudaMemcpy(post_flank_ptr, host_post_flank_ptr, sizeof(ptr_t)*(num_groups+1), cudaMemcpyHostToDevice); + CUDA_CHK(); + + cudaMemcpy(event_ptr, core->host_event_ptr, sizeof(ptr_t)*num_reads, cudaMemcpyHostToDevice); + CUDA_CHK(); + cudaMemcpy(event_table, core->host_event_table, sizeof(event_t) * core->sum_n_events, cudaMemcpyHostToDevice); + CUDA_CHK(); + cudaMemcpy(scalings, core->host_scalings, sizeof(scalings_t) * num_reads, cudaMemcpyHostToDevice); + CUDA_CHK(); + cudaMemcpy(cpgmodels, core->cpgmodel, MAX_NUM_KMER_METH * sizeof(model_t), cudaMemcpyHostToDevice); + CUDA_CHK(); + cudaMemcpy(matrix_ptr, host_matrix_ptr, num_groups * sizeof(ptr_t), cudaMemcpyHostToDevice); + CUDA_CHK(); + // fprintf(stdout,"cudaMemcpy end\n"); + + + threadPerBlock = 512; + blockPerGrid = num_groups/threadPerBlock + 1; + profile_initialize_kernel<<>>(group_size, matrix, matrix_ptr, num_rows, num_cols, num_groups); + cudaDeviceSynchronize(); + CUDA_CHK(); + + // fprintf(stdout,"profile_initialize_kernel end\n"); + + threadPerBlock = 512; + blockPerGrid = num_groups/threadPerBlock + 1; + calculate_transitions<<>>(group_size, transitions, trans_ptr, events_per_base,num_groups); + cudaDeviceSynchronize(); + CUDA_CHK(); + + // fprintf(stdout,"calculate_transitions end\n"); + + + threadPerBlock = 512; + blockPerGrid = num_groups/threadPerBlock + 1; + flank_fill_kernel<<>>(group_size, pre_flank, pre_flank_ptr, post_flank, post_flank_ptr, + event_start_idx, event_stride, event_stop_idx, num_groups); + cudaDeviceSynchronize(); + CUDA_CHK(); + + //fprintf(stdout,"flank_fill_kernel end\n"); + // float* host_post_flank = (float*)malloc(sizeof(float)*post_flank_size); + // cudaMemcpy(host_post_flank, post_flank, sizeof(float)*post_flank_size, cudaMemcpyDeviceToHost); + + + threadPerBlock = 512; + blockPerGrid = num_groups/threadPerBlock + 1; + profile_fill_kernel<<>>(group_size, transitions, trans_ptr,matrix,matrix_ptr, num_rows, num_cols, + kmer_ranks_ptr, kmer_ranks, event_start_idx, event_stride, scalings, cpgmodels, event_ptr, event_table, + scores, hmm_flags, pre_flank, pre_flank_ptr, post_flank, post_flank_ptr, + lp_end, num_groups); + + cudaDeviceSynchronize(); + CUDA_CHK(); + + // fprintf(stdout,"profile_fill_kernel end\n"); + + core->unmethylated_score = (float*)malloc(sizeof(float)*num_groups); + core->methylated_score = (float*)malloc(sizeof(float)*num_groups); + uint32_t* host_cpg_sites = (uint32_t*)malloc(sizeof(uint32_t)*num_groups); + core->site_score_map = (std::map **)malloc(sizeof(std::map *) * core->total_num_reads); + for (int i = 0; i < core->total_num_reads; ++i) { + core->site_score_map[i] = new std::map; + } + cudaMemcpy(core->unmethylated_score, lp_end, num_groups*sizeof(float),cudaMemcpyDeviceToHost); + CUDA_CHK(); + + cudaMemcpy(kmer_ranks, host_mcpg_kmer_ranks, sizeof(uint32_t)*num_groups*250, cudaMemcpyHostToDevice); + CUDA_CHK(); + + threadPerBlock = 512; + blockPerGrid = num_groups/threadPerBlock + 1; + profile_initialize_kernel<<>>(group_size, matrix, matrix_ptr, num_rows, num_cols, num_groups); + cudaDeviceSynchronize(); + CUDA_CHK(); + + threadPerBlock = 512; + blockPerGrid = num_groups/threadPerBlock + 1; + profile_fill_kernel<<>>(group_size, transitions, trans_ptr,matrix,matrix_ptr, num_rows, num_cols, + kmer_ranks_ptr, kmer_ranks, event_start_idx, event_stride, scalings, cpgmodels, event_ptr, event_table, + scores, hmm_flags, pre_flank, pre_flank_ptr, post_flank, post_flank_ptr, + lp_end, num_groups); + 
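+    // Second scoring pass: the same matrices and kernels are reused, but
+    // kmer_ranks was overwritten above with the methylated k-mer ranks, so
+    // after this launch lp_end holds the methylated log-probabilities
+    // (copied into core->methylated_score below).
+    //
+    // Launch-configuration sketch (illustration only): the grid size used
+    // throughout, num_groups/threadPerBlock + 1, always rounds up but
+    // leaves one spare block when num_groups divides evenly; the usual
+    // ceil-division form would be
+    //   int blockPerGrid = (num_groups + threadPerBlock - 1) / threadPerBlock;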
cudaDeviceSynchronize(); + CUDA_CHK(); + + cudaMemcpy(core->methylated_score, lp_end, num_groups*sizeof(float),cudaMemcpyDeviceToHost); + CUDA_CHK(); + cudaMemcpy(host_cpg_sites, cpg_sites, num_groups*sizeof(uint32_t),cudaMemcpyDeviceToHost); + CUDA_CHK(); + + for(int r=0;rmethylated_score[i],core->unmethylated_score[i]); + } + } + + // Aggregate score + for(int r=0;r<10;r++) + { + for(int g=0;g0||end_idx>0)&&host_event_start[i]>0&&host_event_stop[i]>0) + { + + uint32_t start_position = host_cpg_sites[r*1400+start_idx] + core->host_ref_start_pos[i/1400]; + auto iter = core->site_score_map[i/1400]->find(start_position); + if (iter == core->site_score_map[i/1400]->end()) { + // insert new score into the map + ScoredSite ss; + //ss.chromosome = contig; + ss.start_position = start_position; + ss.end_position = host_cpg_sites[r*1400+end_idx - 1] + core->host_ref_start_pos[i/1400]; + ss.n_cpg = end_idx - start_idx; + + // extract the CpG site(s) with a k-mers worth of surrounding context + size_t site_output_start = host_cpg_sites[r*1400+start_idx] - core->kmer_size + 1; + size_t site_output_end = host_cpg_sites[r*1400+end_idx - 1] + core->kmer_size; + + // fprintf(stdout,"\n output_start %d output_end %d \n",site_output_start, site_output_end); + + int k =0; + for(int j=core->host_read_ptr[i/1400]+site_output_start; + jhost_read_ptr[i/1400]+site_output_start+100&&j< core->host_read_ptr[i/1400]+site_output_end; j++) + { + // fprintf(stdout,"%c",core->host_read[j]); + ss.sequence[k] = core->host_read[j]; + k++; + } + ss.sequence[k]=0; + + // fprintf(stdout,"\n%s\n",ss.sequence); + + // insert into the map + iter = + core->site_score_map[i/1400]->insert(std::make_pair(start_position, ss)).first; + + + // set strand-specific score + // upon output below the strand scores will be summed + int strand_idx=0; + //iter->second.ll_methylated[strand_idx] = methylated_score; + iter->second.ll_unmethylated[strand_idx] = core->unmethylated_score[i]; + iter->second.ll_methylated[strand_idx] = core->methylated_score[i]; + iter->second.strands_scored += 1; + } + } + } + } + + //cuda data + cudaFree(read); + cudaFree(read_ptr); + cudaFree(cpg_sites); + cudaFree(group_start); + cudaFree(group_end); + cudaFree(group_size); + cudaFree(ref_start_pos); + cudaFree(alignment_ptr); + cudaFree(alignment); + cudaFree(num_rows); + cudaFree(num_cols); + cudaFree(event_start_idx); + cudaFree(event_stop_idx); + cudaFree(event_stride); + cudaFree(n_kmers); + cudaFree(n_events); + + + cudaFree(matrix); + cudaFree(matrix_ptr); + + cudaFree(trans_ptr); + cudaFree(transitions); + cudaFree(events_per_base); + + cudaFree(pre_flank); + cudaFree(pre_flank_ptr); + cudaFree(post_flank); + cudaFree(post_flank_ptr); + cudaFree(event_start_idx); + cudaFree(kmer_ranks); + // cudaFree(mcpg_kmer_ranks); + cudaFree(event_ptr); + cudaFree(event_table); + cudaFree(scalings); + cudaFree(cpgmodels); + cudaFree(scores); + cudaFree(lp_end); + // cudaFree(mcpg_lp_end); + + // core->total_num_reads = 0; + core->sum_read_len = 0; + core->sum_alignment = 0; + core->sum_n_events = 0; + + free(core->host_read_ptr); + free(core->host_alignment_ptr); + free(core->host_ref_start_pos); + free(core->host_event_ptr); + free(core->host_read); + free(core->host_alignment); + free(core->host_event_table); + free(core->host_scalings); + + free(core->unmethylated_score); + free(core->methylated_score); + // free(host_cpg_sites); + free(host_group_start); + free(host_group_end); + + core->ref_seq.clear(); + + +} diff --git a/cuda_code/holtwinters_test_4.cu 
b/cuda_code/holtwinters_test_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..08eb62aa2e0ea9643e177619e4e54bfd2c311097 --- /dev/null +++ b/cuda_code/holtwinters_test_4.cu @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "time_series_datasets.h" + +namespace ML { + +template +struct HoltWintersInputs { + T* dataset_h; + T* test; + int n; + int h; + int batch_size; + int frequency; + ML::SeasonalType seasonal; + int start_periods; + T epsilon; + T mae_tolerance; +}; + +template +class HoltWintersTest : public ::testing::TestWithParam> { + public: + void basicTest() + { + params = ::testing::TestWithParam>::GetParam(); + dataset_h = params.dataset_h; + test = params.test; + n = params.n; + h = params.h; + batch_size = params.batch_size; + frequency = params.frequency; + ML::SeasonalType seasonal = params.seasonal; + start_periods = params.start_periods; + epsilon = params.epsilon; + mae_tolerance = params.mae_tolerance; + + CUDA_CHECK(cudaStreamCreate(&stream)); + + ML::HoltWinters::buffer_size( + n, + batch_size, + frequency, + &leveltrend_seed_len, // = batch_size + &season_seed_len, // = frequency*batch_size + &components_len, // = (n-w_len)*batch_size + &error_len, // = batch_size + &leveltrend_coef_offset, // = (n-wlen-1)*batch_size (last row) + &season_coef_offset); // = (n-wlen-frequency)*batch_size(last freq rows) + + raft::allocate(level_ptr, components_len, stream); + raft::allocate(trend_ptr, components_len, stream); + raft::allocate(season_ptr, components_len, stream); + raft::allocate(SSE_error_ptr, batch_size, stream); + raft::allocate(forecast_ptr, batch_size * h, stream); + + raft::allocate(data, batch_size * n); + raft::update_device(data, dataset_h, batch_size * n, stream); + + raft::handle_t handle; + handle.set_stream(stream); + + ML::HoltWinters::fit(handle, + n, + batch_size, + frequency, + start_periods, + seasonal, + epsilon, + data, + level_ptr, + trend_ptr, + season_ptr, + SSE_error_ptr); + + ML::HoltWinters::forecast(handle, + n, + batch_size, + frequency, + h, + seasonal, + level_ptr, + trend_ptr, + season_ptr, + forecast_ptr); + + CUDA_CHECK(cudaStreamSynchronize(stream)); + } + + void SetUp() override { basicTest(); } + + void TearDown() override + { + CUDA_CHECK(cudaFree(data)); + CUDA_CHECK(cudaFree(level_ptr)); + CUDA_CHECK(cudaFree(trend_ptr)); + CUDA_CHECK(cudaFree(season_ptr)); + CUDA_CHECK(cudaFree(SSE_error_ptr)); + CUDA_CHECK(cudaFree(forecast_ptr)); + CUDA_CHECK(cudaStreamDestroy(stream)); + } + + public: + cudaStream_t stream; + HoltWintersInputs params; + T *dataset_h, *test; + T* data; + int n, h; + int leveltrend_seed_len, season_seed_len, components_len; + int leveltrend_coef_offset, season_coef_offset; + int error_len; + int batch_size, frequency, start_periods; + T *SSE_error_ptr, *level_ptr, *trend_ptr, *season_ptr, *forecast_ptr; + T epsilon, 
mae_tolerance; +}; + +const std::vector> inputsf = {{additive_trainf.data(), + additive_testf.data(), + 90, + 10, + 1, + 25, + ML::SeasonalType::ADDITIVE, + 2, + 2.24e-3, + 1e-6}, + {multiplicative_trainf.data(), + multiplicative_testf.data(), + 132, + 12, + 1, + 12, + ML::SeasonalType::MULTIPLICATIVE, + 2, + 2.24e-3, + 3e-2}, + {additive_normalized_trainf.data(), + additive_normalized_testf.data(), + 90, + 10, + 1, + 25, + ML::SeasonalType::ADDITIVE, + 2, + 2.24e-3, + 1e-6}, + {multiplicative_normalized_trainf.data(), + multiplicative_normalized_testf.data(), + 132, + 12, + 1, + 12, + ML::SeasonalType::MULTIPLICATIVE, + 2, + 2.24e-3, + 2.5e-1}}; + +const std::vector> inputsd = {{additive_traind.data(), + additive_testd.data(), + 90, + 10, + 1, + 25, + ML::SeasonalType::ADDITIVE, + 2, + 2.24e-7, + 1e-6}, + {multiplicative_traind.data(), + multiplicative_testd.data(), + 132, + 12, + 1, + 12, + ML::SeasonalType::MULTIPLICATIVE, + 2, + 2.24e-7, + 3e-2}, + {additive_normalized_traind.data(), + additive_normalized_testd.data(), + 90, + 10, + 1, + 25, + ML::SeasonalType::ADDITIVE, + 2, + 2.24e-7, + 1e-6}, + {multiplicative_normalized_traind.data(), + multiplicative_normalized_testd.data(), + 132, + 12, + 1, + 12, + ML::SeasonalType::MULTIPLICATIVE, + 2, + 2.24e-7, + 5e-2}}; + +template +void normalise(T* data, int len) +{ + T min = *std::min_element(data, data + len); + T max = *std::max_element(data, data + len); + for (int i = 0; i < len; i++) { + data[i] = (data[i] - min) / (max - min); + } +} + +template +T calculate_MAE(T* test, T* forecast, int batch_size, int h) +{ + normalise(test, batch_size * h); + normalise(forecast, batch_size * h); + std::vector ae(batch_size * h); + for (int i = 0; i < batch_size * h; i++) { + ae[i] = raft::abs(test[i] - forecast[i]); + } + std::sort(ae.begin(), ae.end()); + T mae; + if (h % 2 == 0) { + mae = (ae[h / 2 - 1] + ae[h / 2]) / 2; + } else { + mae = ae[(int)h / 2]; + } + return mae; +} + +typedef HoltWintersTest HoltWintersTestF; +TEST_P(HoltWintersTestF, Fit) +{ + std::vector forecast_h(batch_size * h); + raft::update_host(forecast_h.data(), forecast_ptr, batch_size * h, stream); + raft::print_host_vector("forecast", forecast_h.data(), batch_size * h, std::cout); + float mae = calculate_MAE(test, forecast_h.data(), batch_size, h); + CUML_LOG_DEBUG("MAE: %f", mae); + ASSERT_TRUE(mae < mae_tolerance); +} + +typedef HoltWintersTest HoltWintersTestD; +TEST_P(HoltWintersTestD, Fit) +{ + std::vector forecast_h(batch_size * h); + raft::update_host(forecast_h.data(), forecast_ptr, batch_size * h, stream); + raft::print_host_vector("forecast", forecast_h.data(), batch_size * h, std::cout); + double mae = calculate_MAE(test, forecast_h.data(), batch_size, h); + CUML_LOG_DEBUG("MAE: %f", mae); + ASSERT_TRUE(mae < mae_tolerance); +} + +INSTANTIATE_TEST_CASE_P(HoltWintersTests, HoltWintersTestF, ::testing::ValuesIn(inputsf)); +INSTANTIATE_TEST_CASE_P(HoltWintersTests, HoltWintersTestD, ::testing::ValuesIn(inputsd)); + +} // namespace ML diff --git a/cuda_code/holtwinters_test_8.cu b/cuda_code/holtwinters_test_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..a9a18dc7912fb30868bd34a53cbf5e3598e9ecd5 --- /dev/null +++ b/cuda_code/holtwinters_test_8.cu @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include "holtwinters/holtwinters.h" +#include "time_series_datasets.h" + +namespace ML { + +using namespace MLCommon; + +template +struct HoltWintersInputs { + T *dataset_h; + T *test; + int n; + int h; + int batch_size; + int frequency; + ML::SeasonalType seasonal; + int start_periods; + T epsilon; + T mae_tolerance; +}; + +template +class HoltWintersTest : public ::testing::TestWithParam> { + public: + void basicTest() { + params = ::testing::TestWithParam>::GetParam(); + dataset_h = params.dataset_h; + test = params.test; + n = params.n; + h = params.h; + batch_size = params.batch_size; + frequency = params.frequency; + ML::SeasonalType seasonal = params.seasonal; + start_periods = params.start_periods; + epsilon = params.epsilon; + mae_tolerance = params.mae_tolerance; + + CUDA_CHECK(cudaStreamCreate(&stream)); + + ML::HoltWinters::buffer_size( + n, batch_size, frequency, + &leveltrend_seed_len, // = batch_size + &season_seed_len, // = frequency*batch_size + &components_len, // = (n-w_len)*batch_size + &error_len, // = batch_size + &leveltrend_coef_offset, // = (n-wlen-1)*batch_size (last row) + &season_coef_offset); // = (n-wlen-frequency)*batch_size(last freq rows) + + allocate(level_ptr, components_len, stream); + allocate(trend_ptr, components_len, stream); + allocate(season_ptr, components_len, stream); + allocate(SSE_error_ptr, batch_size, stream); + allocate(forecast_ptr, batch_size * h, stream); + + allocate(data, batch_size * n); + updateDevice(data, dataset_h, batch_size * n, stream); + + cumlHandle handle; + handle.setStream(stream); + + ML::HoltWinters::fit(handle, n, batch_size, frequency, start_periods, + seasonal, epsilon, data, level_ptr, trend_ptr, + season_ptr, SSE_error_ptr); + + ML::HoltWinters::forecast(handle, n, batch_size, frequency, h, seasonal, + level_ptr, trend_ptr, season_ptr, forecast_ptr); + + CUDA_CHECK(cudaStreamSynchronize(stream)); + } + + void SetUp() override { basicTest(); } + + void TearDown() override { + CUDA_CHECK(cudaFree(data)); + CUDA_CHECK(cudaFree(level_ptr)); + CUDA_CHECK(cudaFree(trend_ptr)); + CUDA_CHECK(cudaFree(season_ptr)); + CUDA_CHECK(cudaFree(SSE_error_ptr)); + CUDA_CHECK(cudaFree(forecast_ptr)); + CUDA_CHECK(cudaStreamDestroy(stream)); + } + + public: + cudaStream_t stream; + HoltWintersInputs params; + T *dataset_h, *test; + T *data; + int n, h; + int leveltrend_seed_len, season_seed_len, components_len; + int leveltrend_coef_offset, season_coef_offset; + int error_len; + int batch_size, frequency, start_periods; + T *SSE_error_ptr, *level_ptr, *trend_ptr, *season_ptr, *forecast_ptr; + T epsilon, mae_tolerance; +}; + +const std::vector> inputsf = { + {additive_trainf.data(), additive_testf.data(), 90, 10, 1, 25, + ML::SeasonalType::ADDITIVE, 2, 2.24e-3, 1e-6}, + {multiplicative_trainf.data(), multiplicative_testf.data(), 132, 12, 1, 12, + ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-3, 3e-2}, + {additive_normalized_trainf.data(), additive_normalized_testf.data(), 90, 10, + 1, 25, ML::SeasonalType::ADDITIVE, 2, 2.24e-3, 1e-6}, + 
{multiplicative_normalized_trainf.data(), + multiplicative_normalized_testf.data(), 132, 12, 1, 12, + ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-3, 2.5e-1}}; + +const std::vector> inputsd = { + {additive_traind.data(), additive_testd.data(), 90, 10, 1, 25, + ML::SeasonalType::ADDITIVE, 2, 2.24e-7, 1e-6}, + {multiplicative_traind.data(), multiplicative_testd.data(), 132, 12, 1, 12, + ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-7, 3e-2}, + {additive_normalized_traind.data(), additive_normalized_testd.data(), 90, 10, + 1, 25, ML::SeasonalType::ADDITIVE, 2, 2.24e-7, 1e-6}, + {multiplicative_normalized_traind.data(), + multiplicative_normalized_testd.data(), 132, 12, 1, 12, + ML::SeasonalType::MULTIPLICATIVE, 2, 2.24e-7, 5e-2}}; + +template +void normalise(T *data, int len) { + T min = *std::min_element(data, data + len); + T max = *std::max_element(data, data + len); + for (int i = 0; i < len; i++) { + data[i] = (data[i] - min) / (max - min); + } +} + +template +T calculate_MAE(T *test, T *forecast, int batch_size, int h) { + normalise(test, batch_size * h); + normalise(forecast, batch_size * h); + std::vector ae(batch_size * h); + for (int i = 0; i < batch_size * h; i++) { + ae[i] = abs(test[i] - forecast[i]); + } + std::sort(ae.begin(), ae.end()); + T mae; + if (h % 2 == 0) { + mae = (ae[h / 2 - 1] + ae[h / 2]) / 2; + } else { + mae = ae[(int)h / 2]; + } + return mae; +} + +typedef HoltWintersTest HoltWintersTestF; +TEST_P(HoltWintersTestF, Fit) { + std::vector forecast_h(batch_size * h); + updateHost(forecast_h.data(), forecast_ptr, batch_size * h, stream); + myPrintHostVector("forecast", forecast_h.data(), batch_size * h); + float mae = calculate_MAE(test, forecast_h.data(), batch_size, h); + std::cout << "MAE: " << mae << std::endl; + ASSERT_TRUE(mae < mae_tolerance); +} + +typedef HoltWintersTest HoltWintersTestD; +TEST_P(HoltWintersTestD, Fit) { + std::vector forecast_h(batch_size * h); + updateHost(forecast_h.data(), forecast_ptr, batch_size * h, stream); + myPrintHostVector("forecast", forecast_h.data(), batch_size * h); + double mae = calculate_MAE(test, forecast_h.data(), batch_size, h); + std::cout << "MAE: " << mae << std::endl; + ASSERT_TRUE(mae < mae_tolerance); +} + +INSTANTIATE_TEST_CASE_P(HoltWintersTests, HoltWintersTestF, + ::testing::ValuesIn(inputsf)); +INSTANTIATE_TEST_CASE_P(HoltWintersTests, HoltWintersTestD, + ::testing::ValuesIn(inputsd)); + +} // namespace ML \ No newline at end of file diff --git a/cuda_code/homogeneityScore_5.cu b/cuda_code/homogeneityScore_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..05c7b2bf87c220ece4af83c5bc29d39d23fbbce5 --- /dev/null +++ b/cuda_code/homogeneityScore_5.cu @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include +#include +#include +#include "common/cuml_allocator.hpp" +#include "metrics/homogeneityScore.h" +#include "test_utils.h" + +namespace MLCommon { +namespace Metrics { + +//parameter structure definition +struct homogeneityParam { + int nElements; + int lowerLabelRange; + int upperLabelRange; + bool sameArrays; + double tolerance; +}; + +//test fixture class +template +class homogeneityTest : public ::testing::TestWithParam { + protected: + //the constructor + void SetUp() override { + //getting the parameters + params = ::testing::TestWithParam::GetParam(); + + nElements = params.nElements; + lowerLabelRange = params.lowerLabelRange; + upperLabelRange = params.upperLabelRange; + + //generating random value test input + std::vector arr1(nElements, 0); + std::vector arr2(nElements, 0); + std::random_device rd; + std::default_random_engine dre(rd()); + std::uniform_int_distribution intGenerator(lowerLabelRange, + upperLabelRange); + + std::generate(arr1.begin(), arr1.end(), + [&]() { return intGenerator(dre); }); + if (params.sameArrays) { + arr2 = arr1; + } else { + std::generate(arr2.begin(), arr2.end(), + [&]() { return intGenerator(dre); }); + } + + //allocating and initializing memory to the GPU + + CUDA_CHECK(cudaStreamCreate(&stream)); + MLCommon::allocate(truthClusterArray, nElements, true); + MLCommon::allocate(predClusterArray, nElements, true); + + MLCommon::updateDevice(truthClusterArray, &arr1[0], (int)nElements, stream); + MLCommon::updateDevice(predClusterArray, &arr2[0], (int)nElements, stream); + std::shared_ptr allocator( + new defaultDeviceAllocator); + + //calculating the golden output + double truthMI, truthEntropy; + + truthMI = MLCommon::Metrics::mutualInfoScore( + truthClusterArray, predClusterArray, nElements, lowerLabelRange, + upperLabelRange, allocator, stream); + truthEntropy = + MLCommon::Metrics::entropy(truthClusterArray, nElements, lowerLabelRange, + upperLabelRange, allocator, stream); + + if (truthEntropy) { + truthHomogeneity = truthMI / truthEntropy; + } else + truthHomogeneity = 1.0; + + if (nElements == 0) truthHomogeneity = 1.0; + + //calling the homogeneity CUDA implementation + computedHomogeneity = MLCommon::Metrics::homogeneityScore( + truthClusterArray, predClusterArray, nElements, lowerLabelRange, + upperLabelRange, allocator, stream); + } + + //the destructor + void TearDown() override { + CUDA_CHECK(cudaFree(truthClusterArray)); + CUDA_CHECK(cudaFree(predClusterArray)); + CUDA_CHECK(cudaStreamDestroy(stream)); + } + + //declaring the data values + homogeneityParam params; + T lowerLabelRange, upperLabelRange; + T* truthClusterArray = nullptr; + T* predClusterArray = nullptr; + int nElements = 0; + double truthHomogeneity = 0; + double computedHomogeneity = 0; + cudaStream_t stream; +}; + +//setting test parameter values +const std::vector inputs = { + {199, 1, 10, false, 0.000001}, {200, 15, 100, false, 0.000001}, + {100, 1, 20, false, 0.000001}, {10, 1, 10, false, 0.000001}, + {198, 1, 100, false, 0.000001}, {300, 3, 99, false, 0.000001}, + {199, 1, 10, true, 0.000001}, {200, 15, 100, true, 0.000001}, + {100, 1, 20, true, 0.000001}, {10, 1, 10, true, 0.000001}, + {198, 1, 100, true, 0.000001}, {300, 3, 99, true, 0.000001}}; + +//writing the test suite +typedef homogeneityTest homogeneityTestClass; +TEST_P(homogeneityTestClass, Result) { + ASSERT_NEAR(computedHomogeneity, truthHomogeneity, params.tolerance); +} +INSTANTIATE_TEST_CASE_P(homogeneity, homogeneityTestClass, + ::testing::ValuesIn(inputs)); + +} //end 
namespace Metrics +} //end namespace MLCommon diff --git a/cuda_code/horizontal_path_aggregation_test.cu b/cuda_code/horizontal_path_aggregation_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..2116440d638e6b843f4f69d90b7f7d3f2bcc0bdc --- /dev/null +++ b/cuda_code/horizontal_path_aggregation_test.cu @@ -0,0 +1,56 @@ +#include +#include "horizontal_path_aggregation.hpp" +#include "internal.h" +#include "path_aggregation_test.hpp" +#include "generator.hpp" +#include "test_utility.hpp" + +#include "debug.hpp" + +TEST_P(PathAggregationTest, RandomLeft2Right){ + static constexpr size_t width = 631, height = 479, disparity = 128; + + const auto left = generate_random_sequence(width * height); + const auto right = generate_random_sequence(width * height); + const auto expect = path_aggregation( + left, right, width, height, disparity, min_disp_, p1_, p2_, 1, 0); + + const auto d_left = to_device_vector(left); + const auto d_right = to_device_vector(right); + thrust::device_vector d_cost(width * height * disparity); + sgm::path_aggregation::enqueue_aggregate_left2right_path( + d_cost.data().get(), + d_left.data().get(), + d_right.data().get(), + width, height, p1_, p2_, min_disp_, 0); + cudaStreamSynchronize(0); + CudaKernelCheck(); + + const auto actual = to_host_vector(d_cost); + EXPECT_EQ(actual, expect); + debug_compare(actual.data(), expect.data(), width, height, disparity); +} + +TEST_P(PathAggregationTest, RandomRight2Left){ + static constexpr size_t width = 640, height = 480, disparity = 64; + + const auto left = generate_random_sequence(width * height); + const auto right = generate_random_sequence(width * height); + const auto expect = path_aggregation( + left, right, width, height, disparity, min_disp_, p1_, p2_, -1, 0); + + const auto d_left = to_device_vector(left); + const auto d_right = to_device_vector(right); + thrust::device_vector d_cost(width * height * disparity); + sgm::path_aggregation::enqueue_aggregate_right2left_path( + d_cost.data().get(), + d_left.data().get(), + d_right.data().get(), + width, height, p1_, p2_, min_disp_, 0); + cudaStreamSynchronize(0); + CudaKernelCheck(); + + const auto actual = to_host_vector(d_cost); + EXPECT_EQ(actual, expect); + debug_compare(actual.data(), expect.data(), width, height, disparity); +} diff --git a/cuda_code/host_device_vector_20.cu b/cuda_code/host_device_vector_20.cu new file mode 100644 index 0000000000000000000000000000000000000000..39a0fbe9efb07672193bbff70aedfee06d8977b4 --- /dev/null +++ b/cuda_code/host_device_vector_20.cu @@ -0,0 +1,419 @@ +/*! 
+ * Copyright 2017 XGBoost contributors + */ + +#include +#include + +#include +#include +#include + +#include "xgboost/data.h" +#include "xgboost/host_device_vector.h" +#include "xgboost/tree_model.h" +#include "device_helpers.cuh" + +namespace xgboost { + +// the handler to call instead of cudaSetDevice; only used for testing +static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT + +void SetCudaSetDeviceHandler(void (*handler)(int)) { + cudaSetDeviceHandler = handler; +} + +template +class HostDeviceVectorImpl { + public: + HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) { + if (device >= 0) { + gpu_access_ = GPUAccess::kWrite; + SetDevice(); + data_d_->resize(size, v); + } else { + data_h_.resize(size, v); + } + } + + // Initializer can be std::vector or std::initializer_list + template + HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) { + if (device >= 0) { + gpu_access_ = GPUAccess::kWrite; + LazyResizeDevice(init.size()); + Copy(init); + } else { + data_h_ = init; + } + } + + HostDeviceVectorImpl(HostDeviceVectorImpl&& that) : + device_{that.device_}, + data_h_{std::move(that.data_h_)}, + data_d_{std::move(that.data_d_)}, + gpu_access_{that.gpu_access_} {} + + ~HostDeviceVectorImpl() { + if (device_ >= 0) { + SetDevice(); + } + } + + size_t Size() const { + return HostCanRead() ? data_h_.size() : data_d_ ? data_d_->size() : 0; + } + + int DeviceIdx() const { return device_; } + + T* DevicePointer() { + LazySyncDevice(GPUAccess::kWrite); + return data_d_->data().get(); + } + + const T* ConstDevicePointer() { + LazySyncDevice(GPUAccess::kRead); + return data_d_->data().get(); + } + + common::Span DeviceSpan() { + LazySyncDevice(GPUAccess::kWrite); + return {data_d_->data().get(), Size()}; + } + + common::Span ConstDeviceSpan() { + LazySyncDevice(GPUAccess::kRead); + return {data_d_->data().get(), Size()}; + } + + void Fill(T v) { // NOLINT + if (HostCanWrite()) { + std::fill(data_h_.begin(), data_h_.end(), v); + } else { + gpu_access_ = GPUAccess::kWrite; + SetDevice(); + thrust::fill(data_d_->begin(), data_d_->end(), v); + } + } + + void Copy(HostDeviceVectorImpl* other) { + CHECK_EQ(Size(), other->Size()); + SetDevice(other->device_); + // Data is on host. 
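+    // Access-state note: HostCanAccess() tests gpu_access_ <= access and
+    // DeviceCanAccess() tests gpu_access_ >= access (see below), i.e. the
+    // levels are ordered kNone < kRead < kWrite.  HostCanWrite() therefore
+    // means gpu_access_ == kNone: the host owns the data outright, so when
+    // both vectors are host-writable the copy can stay entirely on the CPU.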
+ if (HostCanWrite() && other->HostCanWrite()) { + std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); + return; + } + SetDevice(); + CopyToDevice(other); + } + + void Copy(const std::vector& other) { + CHECK_EQ(Size(), other.size()); + if (HostCanWrite()) { + std::copy(other.begin(), other.end(), data_h_.begin()); + } else { + CopyToDevice(other.data()); + } + } + + void Copy(std::initializer_list other) { + CHECK_EQ(Size(), other.size()); + if (HostCanWrite()) { + std::copy(other.begin(), other.end(), data_h_.begin()); + } else { + CopyToDevice(other.begin()); + } + } + + void Extend(HostDeviceVectorImpl* other) { + auto ori_size = this->Size(); + this->Resize(ori_size + other->Size(), T()); + if (HostCanWrite() && other->HostCanRead()) { + auto& h_vec = this->HostVector(); + auto& other_vec = other->HostVector(); + CHECK_EQ(h_vec.size(), ori_size + other->Size()); + std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size); + } else { + auto ptr = other->ConstDevicePointer(); + SetDevice(); + CHECK_EQ(this->DeviceIdx(), other->DeviceIdx()); + dh::safe_cuda(cudaMemcpyAsync(this->DevicePointer() + ori_size, + ptr, + other->Size() * sizeof(T), + cudaMemcpyDeviceToDevice)); + } + } + + std::vector& HostVector() { + LazySyncHost(GPUAccess::kNone); + return data_h_; + } + + const std::vector& ConstHostVector() { + LazySyncHost(GPUAccess::kRead); + return data_h_; + } + + void SetDevice(int device) { + if (device_ == device) { return; } + if (device_ >= 0) { + LazySyncHost(GPUAccess::kNone); + } + device_ = device; + if (device_ >= 0) { + LazyResizeDevice(data_h_.size()); + } + } + + void Resize(size_t new_size, T v) { + if (new_size == Size()) { return; } + if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) { + // fast on-device resize + gpu_access_ = GPUAccess::kWrite; + SetDevice(); + data_d_->resize(new_size, v); + } else { + // resize on host + LazySyncHost(GPUAccess::kNone); + data_h_.resize(new_size, v); + } + } + + void LazySyncHost(GPUAccess access) { + if (HostCanAccess(access)) { return; } + if (HostCanRead()) { + // data is present, just need to deny access to the device + gpu_access_ = access; + return; + } + gpu_access_ = access; + if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); } + SetDevice(); + dh::safe_cuda(cudaMemcpy(data_h_.data(), + data_d_->data().get(), + data_d_->size() * sizeof(T), + cudaMemcpyDeviceToHost)); + } + + void LazySyncDevice(GPUAccess access) { + if (DeviceCanAccess(access)) { return; } + if (DeviceCanRead()) { + // deny read to the host + gpu_access_ = access; + return; + } + // data is on the host + LazyResizeDevice(data_h_.size()); + SetDevice(); + dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), + data_h_.data(), + data_d_->size() * sizeof(T), + cudaMemcpyHostToDevice)); + gpu_access_ = access; + } + + bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; } + bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); } + bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); } + bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; } + bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); } + bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); } + GPUAccess Access() const { return gpu_access_; } + + private: + int device_{-1}; + std::vector data_h_{}; + std::unique_ptr> data_d_{}; + GPUAccess gpu_access_{GPUAccess::kNone}; + + void 
CopyToDevice(HostDeviceVectorImpl* other) { + if (other->HostCanWrite()) { + CopyToDevice(other->data_h_.data()); + } else { + LazyResizeDevice(Size()); + gpu_access_ = GPUAccess::kWrite; + SetDevice(); + dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(), + data_d_->size() * sizeof(T), cudaMemcpyDefault)); + } + } + + void CopyToDevice(const T* begin) { + LazyResizeDevice(Size()); + gpu_access_ = GPUAccess::kWrite; + SetDevice(); + dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), begin, + data_d_->size() * sizeof(T), cudaMemcpyDefault)); + } + + void LazyResizeDevice(size_t new_size) { + if (data_d_ && new_size == data_d_->size()) { return; } + SetDevice(); + data_d_->resize(new_size); + } + + void SetDevice() { + CHECK_GE(device_, 0); + if (cudaSetDeviceHandler == nullptr) { + dh::safe_cuda(cudaSetDevice(device_)); + } else { + (*cudaSetDeviceHandler)(device_); + } + + if (!data_d_) { + data_d_.reset(new dh::device_vector); + } + } +}; + +template +HostDeviceVector::HostDeviceVector(size_t size, T v, int device) + : impl_(new HostDeviceVectorImpl(size, v, device)) {} + +template +HostDeviceVector::HostDeviceVector(std::initializer_list init, int device) + : impl_(new HostDeviceVectorImpl(init, device)) {} + +template +HostDeviceVector::HostDeviceVector(const std::vector& init, int device) + : impl_(new HostDeviceVectorImpl(init, device)) {} + +template +HostDeviceVector::HostDeviceVector(HostDeviceVector&& other) + : impl_(new HostDeviceVectorImpl(std::move(*other.impl_))) {} + +template +HostDeviceVector& HostDeviceVector::operator=(HostDeviceVector&& other) { + if (this == &other) { return *this; } + + std::unique_ptr> new_impl( + new HostDeviceVectorImpl(std::move(*other.impl_))); + delete impl_; + impl_ = new_impl.release(); + return *this; +} + +template +HostDeviceVector::~HostDeviceVector() { + delete impl_; + impl_ = nullptr; +} + +template +size_t HostDeviceVector::Size() const { return impl_->Size(); } + +template +int HostDeviceVector::DeviceIdx() const { return impl_->DeviceIdx(); } + +template +T* HostDeviceVector::DevicePointer() { + return impl_->DevicePointer(); +} + +template +const T* HostDeviceVector::ConstDevicePointer() const { + return impl_->ConstDevicePointer(); +} + +template +common::Span HostDeviceVector::DeviceSpan() { + return impl_->DeviceSpan(); +} + +template +common::Span HostDeviceVector::ConstDeviceSpan() const { + return impl_->ConstDeviceSpan(); +} + +template +void HostDeviceVector::Fill(T v) { + impl_->Fill(v); +} + +template +void HostDeviceVector::Copy(const HostDeviceVector& other) { + impl_->Copy(other.impl_); +} + +template +void HostDeviceVector::Copy(const std::vector& other) { + impl_->Copy(other); +} + +template +void HostDeviceVector::Copy(std::initializer_list other) { + impl_->Copy(other); +} + +template +void HostDeviceVector::Extend(HostDeviceVector const& other) { + impl_->Extend(other.impl_); +} + +template +std::vector& HostDeviceVector::HostVector() { return impl_->HostVector(); } + +template +const std::vector& HostDeviceVector::ConstHostVector() const { + return impl_->ConstHostVector(); +} + +template +bool HostDeviceVector::HostCanRead() const { + return impl_->HostCanRead(); +} + +template +bool HostDeviceVector::HostCanWrite() const { + return impl_->HostCanWrite(); +} + +template +bool HostDeviceVector::DeviceCanRead() const { + return impl_->DeviceCanRead(); +} + +template +bool HostDeviceVector::DeviceCanWrite() const { + return impl_->DeviceCanWrite(); +} + +template +GPUAccess 
HostDeviceVector::DeviceAccess() const { + return impl_->Access(); +} + +template +void HostDeviceVector::SetDevice(int device) const { + impl_->SetDevice(device); +} + +template +void HostDeviceVector::Resize(size_t new_size, T v) { + impl_->Resize(new_size, v); +} + +// explicit instantiations are required, as HostDeviceVector isn't header-only +template class HostDeviceVector; +template class HostDeviceVector; +template class HostDeviceVector; // bst_node_t +template class HostDeviceVector; +template class HostDeviceVector; +template class HostDeviceVector; +template class HostDeviceVector; // bst_row_t +template class HostDeviceVector; // bst_feature_t +template class HostDeviceVector; + +#if defined(__APPLE__) +/* + * On OSX: + * + * typedef unsigned int uint32_t; + * typedef unsigned long long uint64_t; + * typedef unsigned long __darwin_size_t; + */ +template class HostDeviceVector; +#endif // defined(__APPLE__) + +} // namespace xgboost diff --git a/cuda_code/how-many-concurrent-blocks.cu b/cuda_code/how-many-concurrent-blocks.cu new file mode 100644 index 0000000000000000000000000000000000000000..1715af78d3f3fc0ab4706e23d6ea81ae335bb9e2 --- /dev/null +++ b/cuda_code/how-many-concurrent-blocks.cu @@ -0,0 +1,176 @@ +// CS 87 - Final Project +// Maria-Elena Solano +// +// This utility simply counts how many CPU cores are in this node +// + +#include // C's standard I/O library +#include // C's standard library +#include // C's exact width int types +#include // C's POSIX API +#include // CUDA runtime library + + +// macro/constant definitions +#define cuda_try(X) ((X) != cudaSuccess) +#define perror_out(X) perror(X), fflush(stderr) +#define stderr_out(...) fprintf(stderr, __VA_ARGS__), \ + fflush(stderr) +#define print_out(...) printf(__VA_ARGS__), fflush(stdout) + + +// simple helper container (used in how_many_warp_schedulers_per_SM) +typedef struct sm_to_ws{ + int sm; + int ws; +} sm_to_ws_t; + + +void print_count(); +void print_count_verbose(); +int how_many_warp_schedulers_per_SM(int arch_major_ver, int arch_minor_ver); + + +int main(int argc, char** argv){ + int ret; // return value from getopt, and + + // Greedily read all the command line options provided. + while((ret = getopt(argc, argv, "v")) != -1){ + switch(ret){ + // If option -v, display the full calculation instead + case 'v':{ + goto verbose; + } + } + } + + // Print the number of cores + print_count(); + + // And return + goto done; + + +verbose: + // If 'verbose' mode, display the model, and the full calculation + print_count_verbose(); + +done: + exit(EXIT_SUCCESS); +} + + +// This function prints out the number of independent blocks in the GPU. +// +void print_count(){ + cudaError_t ret; // return value of CUDA calls + int dev, devs; // number of devices + cudaDeviceProp pr; // device properties + uint32_t p; // number of concurrent blocks + + // Count how many devices are there. If err or no devices, set p=0 and print. 
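+  // Worked example (hypothetical device, for illustration only): a Kepler
+  // SM 3.5 part reporting 13 multiprocessors would give
+  // 13 SMs x 4 warp schedulers per SM = 52 concurrent blocks by this estimate.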
+ if(cuda_try(ret = cudaGetDeviceCount(&devs)) || devs == 0){ + stderr_out("cudaGetDeviceCount error: %s\n", cudaGetErrorString(ret)); + p = 0; + goto print; + } + + // Get the device properties of the last device + dev = devs - 1; + cudaSetDevice(dev); + cudaGetDeviceProperties(&pr, dev); + + // Compute the number of concurrent blocks according to the formula + // + // # of SMs x # of warp schedulers per SM + // + p=pr.multiProcessorCount*how_many_warp_schedulers_per_SM(pr.major,pr.minor); + +print: + print_out("%u\n", p); + + return; +} + + +// This function prints out the number of independent blocks in the GPU, +// showing how the calculation was made: that is, # of SMs x # of warp sche- +// dulers per SM available in the given model. +// +void print_count_verbose(){ + cudaError_t ret; // return value of CUDA calls + int dev, devs; // number of devices + cudaDeviceProp pr; // device properties + + // Count how many devices are there. If err, return. + if(cuda_try(ret = cudaGetDeviceCount(&devs))){ + stderr_out("cudaGetDeviceCount error: %s\n", cudaGetErrorString(ret)); + return; + } + + // If no devices, notify the user and return + if(devs == 0){ + print_out(" (No CUDA-enabled GPUs in this machine.)\n"); + return; + } + + // Get the device properties of the last device + dev = devs - 1; + cudaSetDevice(dev); + cudaGetDeviceProperties(&pr, dev); + + // Show the calculation + // + // # of SMs x # of warp schedulers per SM + // + print_out(" (%u SMs x %u warp schedulers per SM)\n", + pr.multiProcessorCount, + how_many_warp_schedulers_per_SM(pr.major, pr.minor)); + + return; +} + +// This function determines how many warp schedulers per SM are there in the +// GPU given ts major and minor architectural version. +// +// Adapted from helper_cuda.h in the CUDA SDK: +// (/usr/local/cuda/samples/common/inc/helper_cuda.h). 
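+// The lookup below keys each table entry by the packed version
+// (major << 4) + minor, so e.g. SM 3.5 is matched as 0x35 and SM 5.2 as
+// 0x52; versions not listed fall through the table and return 0.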
+// +// major, minor: major and minor architecture version +// +// returns: number of warp schedulers per SM +// +int how_many_warp_schedulers_per_SM(int major, int minor){ + int i; + sm_to_ws_t t[13]; // Lookup table + + // Tesla architecture (1 warp scheduler per SM) + t[0] .sm = 0x10; t[0] .ws = 1; // Tesla (SM 1.0) G80 class + t[1] .sm = 0x11; t[1] .ws = 1; // Tesla (SM 1.1) G8X class + t[2] .sm = 0x12; t[2] .ws = 1; // Tesla (SM 1.2) G9X class + t[3] .sm = 0x13; t[3] .ws = 1; // Tesla (SM 1.3) GT200 class + + // Fermi architecture (2 warp schedulers per SM) + t[4] .sm = 0x20; t[4] .ws = 2; // Fermi (SM 2.0) GF100 class + t[5] .sm = 0x21; t[5] .ws = 2; // Fermi (SM 2.1) GF10x class + + // Kepler architecture (4 warp schedulers per SM) + t[6] .sm = 0x30; t[6] .ws = 4; // Kepler (SM 3.0) GK10x class + t[7] .sm = 0x32; t[7] .ws = 4; // Kepler (SM 3.2) GK10x class + t[8] .sm = 0x35; t[8] .ws = 4; // Kepler (SM 3.5) GK11x class + t[9] .sm = 0x37; t[9] .ws = 4; // Kepler (SM 3.7) GK21x class + + // Maxwell architecture (4 warp schedulers per SM) + t[10].sm = 0x50; t[10].ws = 4; // Maxwell (SM 5.0) GM10x class + t[11].sm = 0x52; t[11].ws = 4; // Maxwell (SM 5.2) GM20x class + + // Unknown architecture + t[12].sm = -1; t[12].ws = -1; // Unknown + + for(i=0; i<13; i++){ + if(t[i].sm == ((major << 4) + minor)){ + return t[i].ws; + } + } + return 0; +} diff --git a/cuda_code/ic_kernels.cu b/cuda_code/ic_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..7c62f8be83bb411b929951f422ee6bc024b09a04 --- /dev/null +++ b/cuda_code/ic_kernels.cu @@ -0,0 +1,99 @@ +/************************************************************* +Copyright (c) 2017-2021, the Ginkgo authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*************************************************************/ + +#include "core/factorization/ic_kernels.hpp" + + +#include + + +#include "cuda/base/cusparse_bindings.hpp" +#include "cuda/base/device_guard.hpp" + + +namespace gko { +namespace kernels { +namespace cuda { +/** + * @brief The ic factorization namespace. 
+ * + * @ingroup factor + */ +namespace ic_factorization { + + +template +void compute(std::shared_ptr exec, + matrix::Csr* m) +{ + const auto id = exec->get_device_id(); + auto handle = exec->get_cusparse_handle(); + gko::cuda::device_guard g{id}; + auto desc = cusparse::create_mat_descr(); + auto info = cusparse::create_ic0_info(); + + // get buffer size for IC + IndexType num_rows = m->get_size()[0]; + IndexType nnz = m->get_num_stored_elements(); + size_type buffer_size{}; + cusparse::ic0_buffer_size(handle, num_rows, nnz, desc, + m->get_const_values(), m->get_const_row_ptrs(), + m->get_const_col_idxs(), info, buffer_size); + + Array buffer{exec, buffer_size}; + + // set up IC(0) + cusparse::ic0_analysis(handle, num_rows, nnz, desc, m->get_const_values(), + m->get_const_row_ptrs(), m->get_const_col_idxs(), + info, CUSPARSE_SOLVE_POLICY_USE_LEVEL, + buffer.get_data()); + + cusparse::ic0(handle, num_rows, nnz, desc, m->get_values(), + m->get_const_row_ptrs(), m->get_const_col_idxs(), info, + CUSPARSE_SOLVE_POLICY_USE_LEVEL, buffer.get_data()); + + // CUDA 11.4 has a use-after-free bug on Turing +#if (CUDA_VERSION >= 11040) + exec->synchronize(); +#endif + + cusparse::destroy(info); + cusparse::destroy(desc); +} + +GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_IC_COMPUTE_KERNEL); + + +} // namespace ic_factorization +} // namespace cuda +} // namespace kernels +} // namespace gko diff --git a/cuda_code/im2col_89.cu b/cuda_code/im2col_89.cu new file mode 100644 index 0000000000000000000000000000000000000000..bf7894243919571c2ab15d53690b1ef05bfcc6ee --- /dev/null +++ b/cuda_code/im2col_89.cu @@ -0,0 +1,432 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/math/im2col.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { +namespace math { + +template +__global__ void im2col(const T* data_im, int num_outs, int im_height, + int im_width, int dilation_h, int dilation_w, + int filter_height, int filter_width, int stride_height, + int stride_width, int padding_height, int padding_width, + int col_height, int col_width, T* data_col) { + const int index = + (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; + if (index < num_outs) { + int w_out = index % col_width; + int h_out = (index / col_width) % col_height; + int channel_in = index / col_width / col_height; + int channel_out = channel_in * filter_height * filter_width; + int h_in = h_out * stride_height - padding_height; + int w_in = w_out * stride_width - padding_width; + + data_col += (channel_out * col_height + h_out) * col_width + w_out; + data_im += (channel_in * im_height + h_in) * im_width + w_in; + for (int i = 0; i < filter_height; ++i) { + for (int j = 0; j < filter_width; ++j) { + int rIdx = h_in + i * dilation_h; + int cIdx = w_in + j * dilation_w; + *data_col = + (rIdx >= im_height || rIdx < 0 || cIdx >= im_width || cIdx < 0) + ? 
0 + : data_im[i * dilation_h * im_width + j * dilation_w]; + data_col += col_height * col_width; + } + } + } +} + +/* + * im = [input_channels, input_height, input_width] + * col = + * [input_channels, filter_height, filter_width, output_height, output_width] + */ +template +class Im2ColFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& im, const std::vector& dilation, + const std::vector& stride, + const std::vector& padding, framework::Tensor* col) { + PADDLE_ENFORCE(im.dims().size() == 3); + PADDLE_ENFORCE(col->dims().size() == 5); + + int im_channels = im.dims()[0]; + int im_height = im.dims()[1]; + int im_width = im.dims()[2]; + int filter_height = col->dims()[1]; + int filter_width = col->dims()[2]; + int col_height = col->dims()[3]; + int col_width = col->dims()[4]; + + PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - + (dilation[0] * (filter_height - 1) + 1)) / + stride[0] + + 1, + col_height, + "Output_height and padding(padding_up, padding_down) are " + "inconsistent."); + PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - + (dilation[1] * (filter_width - 1) + 1)) / + stride[1] + + 1, + col_width, + "col_width and padding(padding_left, padding_right) are " + "inconsistent."); + + int num_outputs = im_channels * col_height * col_width; + int blocks = (num_outputs + 1024 - 1) / 1024; + int block_x = 512; + int block_y = (blocks + 512 - 1) / 512; + dim3 threads(1024, 1); + dim3 grid(block_x, block_y); + im2col<<(context) + .stream()>>>( + im.data(), num_outputs, im_height, im_width, dilation[0], + dilation[1], filter_height, filter_width, stride[0], stride[1], + padding[0], padding[1], col_height, col_width, col->data()); + } +}; + +template +__global__ void col2im(int n, const T* data_col, int im_height, int im_width, + int dilation_h, int dilation_w, int filter_height, + int filter_width, int stride_height, int stride_width, + int padding_height, int padding_width, int col_height, + int col_width, T* data_im) { + const int index = + (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; + + const int d_filter_height = dilation_h * (filter_height - 1) + 1; + const int d_filter_width = dilation_w * (filter_width - 1) + 1; + + if (index < n) { + T val = 0; + int w = index % im_width + padding_width; + int h = (index / im_width) % im_height + padding_height; + int c = index / (im_width * im_height); + + // compute the start and end of the output + int w_col_start = + (w < d_filter_width) ? 0 : (w - d_filter_width) / stride_width + 1; + int w_col_end = min(w / stride_width + 1, col_width); + int h_col_start = + (h < d_filter_height) ? 
0 : (h - d_filter_height) / stride_height + 1; + int h_col_end = min(h / stride_height + 1, col_height); + + for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { + for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { + int h_off = (h - h_col * stride_height); + int w_off = (w - w_col * stride_width); + if (h_off % dilation_h == 0 && w_off % dilation_w == 0) { + h_off /= dilation_h; + w_off /= dilation_w; + int data_col_index = + (((c * filter_height + h_off) * filter_width + w_off) * + col_height + + h_col) * + col_width + + w_col; + + val += data_col[data_col_index]; + } + } + } + data_im[index] = val; + } +} + +/* + * im = [input_channels, input_height, input_width] + * col = + * [input_channels, filter_height, filter_width, output_height, output_width] + */ +template +class Col2ImFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& col, + const std::vector& dilation, + const std::vector& stride, + const std::vector& padding, framework::Tensor* im) { + PADDLE_ENFORCE(im->dims().size() == 3); + PADDLE_ENFORCE(col.dims().size() == 5); + + int im_channels = im->dims()[0]; + int im_height = im->dims()[1]; + int im_width = im->dims()[2]; + int filter_height = col.dims()[1]; + int filter_width = col.dims()[2]; + int col_height = col.dims()[3]; + int col_width = col.dims()[4]; + + PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - + (dilation[0] * (filter_height - 1) + 1)) / + stride[0] + + 1, + col_height, + "Output_height and padding(padding_up, padding_down) are " + "inconsistent."); + PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - + (dilation[1] * (filter_width - 1) + 1)) / + stride[1] + + 1, + col_width, + "col_width and padding(padding_left, padding_right) are " + "inconsistent."); + + size_t num_kernels = im_channels * im_height * im_width; + + size_t blocks = (num_kernels + 1024 - 1) / 1024; + size_t block_x = 512; + size_t block_y = (blocks + 512 - 1) / 512; + dim3 threads(1024, 1); + dim3 grid(block_x, block_y); + + // To avoid involving atomic operations, we will launch one kernel per + // bottom dimension, and then in the kernel add up the top dimensions. 
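+ // Each of the num_kernels threads owns exactly one input-image element and
+ // serially sums every column entry that maps back onto it. The 1024-thread
+ // blocks are arranged on a (512 x block_y) 2-D grid so that neither grid
+ // dimension grows too large for big images.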
+ col2im<<(context) + .stream()>>>( + num_kernels, col.data(), im_height, im_width, dilation[0], + dilation[1], filter_height, filter_width, stride[0], stride[1], + padding[0], padding[2], col_height, col_width, im->data()); + } +}; + +template class Im2ColFunctor; +template class Im2ColFunctor; +template class Col2ImFunctor; +template class Col2ImFunctor; + +template +__global__ void im2colOCF(const T* im_data, int im_channels, int im_height, + int im_width, int filter_height, int filter_width, + int stride_height, int stride_width, + int padding_height, int padding_width, int col_height, + int col_width, T* col_data) { + int swid = blockIdx.x; + int shid = blockIdx.y; + for (int channelid = threadIdx.z; channelid < im_channels; + channelid += blockDim.z) { + for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) { + for (int idx = threadIdx.x; idx < filter_width; idx += blockDim.x) { + int width_offset = idx + swid * stride_width - padding_width; + int height_offset = idy + shid * stride_height - padding_height; + int im_offset = width_offset + height_offset * im_width + + channelid * im_height * im_width; + + int col_offset = idx + idy * filter_width + + channelid * filter_height * filter_width + + (shid * col_width + swid) * + (im_channels * filter_height * filter_width); + + col_data[col_offset] = + (height_offset >= im_height || height_offset < 0 || + width_offset >= im_width || width_offset < 0) + ? T(0) + : im_data[im_offset]; + } + } + } +} + +/* + * im = [input_channels, input_height, input_width] + * col = + * [output_height, output_width, input_channels, filter_height, filter_width] + */ +template +class Im2ColFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& im, const std::vector& dilation, + const std::vector& stride, + const std::vector& padding, framework::Tensor* col) { + PADDLE_ENFORCE(im.dims().size() == 3); + PADDLE_ENFORCE(col->dims().size() == 5); + int im_channels = im.dims()[0]; + int im_height = im.dims()[1]; + int im_width = im.dims()[2]; + int filter_height = col->dims()[3]; + int filter_width = col->dims()[4]; + int col_height = col->dims()[0]; + int col_width = col->dims()[1]; + + PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - + (dilation[0] * (filter_height - 1) + 1)) / + stride[0] + + 1, + col_height, + "Output_height and padding(padding_up, padding_down) are " + "inconsistent."); + PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - + (dilation[1] * (filter_width - 1) + 1)) / + stride[1] + + 1, + col_width, + "col_width and padding(padding_left, padding_right) are " + "inconsistent."); + + int block_dim_x = 0; + int block_dim_y = 0; + if (filter_height <= 4 && filter_width <= 4) { + block_dim_x = 4; + block_dim_y = 4; + } else if (filter_height <= 8 && filter_width <= 8) { + block_dim_x = 8; + block_dim_y = 8; + } else if (filter_height <= 16 && filter_width <= 16) { + block_dim_x = 16; + block_dim_y = 16; + } else { + block_dim_x = 32; + block_dim_y = 32; + } + + int block_dim_z = 1024 / block_dim_x / block_dim_y; + dim3 threads(block_dim_x, block_dim_y, std::min(block_dim_z, im_channels)); + dim3 grid(col_width, col_height); + im2colOCF<<(context) + .stream()>>>( + im.data(), im_channels, im_height, im_width, filter_height, + filter_width, stride[0], stride[1], padding[0], padding[1], col_height, + col_width, col->data()); + } +}; + +template +__global__ void col2imOCF(const T* col_data, int im_channels, int im_height, + int im_width, int filter_height, int filter_width, + 
int stride_height, int stride_width, + int padding_height, int padding_width, int col_height, + int col_width, T* im_data) { + int swid = blockIdx.x; + int shid = blockIdx.y; + for (int channelid = threadIdx.z; channelid < im_channels; + channelid += blockDim.z) { + for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) { + for (int idx = threadIdx.x; idx < filter_width; idx += blockDim.x) { + int width_offset = idx + swid * stride_width - padding_width; + int height_offset = idy + shid * stride_height - padding_height; + int im_offset = width_offset + height_offset * im_width + + channelid * im_height * im_width; + + int col_offset = idx + idy * filter_width + + channelid * filter_height * filter_width + + (shid * col_width + swid) * + (im_channels * filter_height * filter_width); + + if (height_offset >= 0 && height_offset < im_height && + width_offset >= 0 && width_offset < im_width) { + paddle::platform::CudaAtomicAdd(im_data + im_offset, + col_data[col_offset]); + } + } + } + } +} + +/* + * im = [input_channels, input_height, input_width] + * col = + * [output_height, output_width, input_channels, filter_height, filter_width] + */ +template +class Col2ImFunctor { + public: + void operator()(const platform::DeviceContext& context, + const framework::Tensor& col, + const std::vector& dilation, + const std::vector& stride, + const std::vector& padding, framework::Tensor* im) { + PADDLE_ENFORCE(im->dims().size() == 3); + PADDLE_ENFORCE(col.dims().size() == 5); + int im_channels = im->dims()[0]; + int im_height = im->dims()[1]; + int im_width = im->dims()[2]; + int filter_height = col.dims()[3]; + int filter_width = col.dims()[4]; + int col_height = col.dims()[0]; + int col_width = col.dims()[1]; + + PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - + (dilation[0] * (filter_height - 1) + 1)) / + stride[0] + + 1, + col_height, + "Output_height and padding(padding_up, padding_down) are " + "inconsistent."); + PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - + (dilation[1] * (filter_width - 1) + 1)) / + stride[1] + + 1, + col_width, + "col_width and padding(padding_left, padding_right) are " + "inconsistent."); + + int block_dim_x = 0; + int block_dim_y = 0; + if (filter_height <= 4 && filter_width <= 4) { + block_dim_x = 4; + block_dim_y = 4; + } else if (filter_height <= 8 && filter_width <= 8) { + block_dim_x = 8; + block_dim_y = 8; + } else if (filter_height <= 16 && filter_width <= 16) { + block_dim_x = 16; + block_dim_y = 16; + } else { + block_dim_x = 32; + block_dim_y = 32; + } + + int block_dim_z = 1024 / block_dim_x / block_dim_y; + dim3 threads(block_dim_x, block_dim_y, std::min(block_dim_z, im_channels)); + dim3 grid(col_width, col_height); + col2imOCF<<(context) + .stream()>>>( + col.data(), im_channels, im_height, im_width, filter_height, + filter_width, stride[0], stride[1], padding[0], padding[1], col_height, + col_width, im->data()); + } +}; + +template class Im2ColFunctor; +template class Im2ColFunctor; +template class Col2ImFunctor; +template class Col2ImFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/cuda_code/im2row.cu b/cuda_code/im2row.cu new file mode 100644 index 0000000000000000000000000000000000000000..76429fe0237b55d1dfadc51e292ea0daffd87bca --- /dev/null +++ b/cuda_code/im2row.cu @@ -0,0 +1,122 @@ +/* + * im2row.cu + * + * Created on: Dec 27, 2021 + * Author: Maciej Kozarzewski + */ + +#include +#include + +#include "activations.cuh" +#include "utilities.hpp" + +#include +#include + +#include 
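+ // The anonymous namespace below holds the device-side helpers: remainder()
+ // peels one coordinate off a flattened index while dividing the index in
+ // place, and kernel_im2row_conv2d() scatters each input element into every
+ // kernel position of the row matrix that references it.
+ //
+ // For reference, unpacking a flat NHWC index with remainder() looks like
+ // this (H, W, F are illustrative placeholders for the tensor extents):
+ //   int tmp = tid;              // tid == ((b * H + h) * W + w) * F + f
+ //   int f = remainder(tmp, F);  // tmp becomes (b * H + h) * W + w
+ //   int w = remainder(tmp, W);  // tmp becomes b * H + h
+ //   int h = remainder(tmp, H);  // tmp becomes b
+ //   int b = tmp;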
+ +namespace +{ + __device__ int remainder(int &number, int divisor) + { + int tmp = number / divisor; + int result = number - divisor * tmp; + number = tmp; + return result; + } + + template + __launch_bounds__(256, 8) + __global__ void kernel_im2row_conv2d(const T *input, TensorShape inputShape, T *matrix, TensorShape outputShape, int2 kernelSize, int2 padding, bool invert, + T paddingValue) + { + int ext_input_height = inputShape.height - 2 * padding.x; + int ext_input_width = inputShape.width - 2 * padding.y; + + int volume = inputShape.batch * ext_input_height * ext_input_width * inputShape.filters; + for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < volume; tid += gridDim.x * blockDim.x) + { + int tmp = tid; + int in_f = remainder(tmp, inputShape.filters); + int in_w = remainder(tmp, ext_input_width) + padding.y; + int in_h = remainder(tmp, ext_input_height) + padding.x; + int in_b = remainder(tmp, inputShape.batch); + + T value = paddingValue; + if (in_h >= 0 and in_h < inputShape.height and in_w >= 0 and in_w < inputShape.width) + value = input[inputShape.offset_at(in_b, in_h, in_w, in_f)]; + + for (int i = 0; i < kernelSize.x; i++) + for (int j = 0; j < kernelSize.y; j++) + { + int x = in_h + i - kernelSize.x / 2; + int y = in_w + j - kernelSize.y / 2; + int offset = i * kernelSize.y + j; + if (invert == false) + offset = (kernelSize.x * kernelSize.y - 1) - offset; + if (x >= 0 and x < outputShape.height and y >= 0 and y < outputShape.width) + { + int tile_idx = in_b * outputShape.height * outputShape.width + x * outputShape.width + y; + int asdf = (tile_idx * kernelSize.x * kernelSize.y + offset) * inputShape.filters + in_f; + matrix[asdf] = value; + } + } + } + } +} + +namespace avocado +{ + namespace backend + { + using namespace BACKEND_NAMESPACE; + + bool is_conv(int expectedSize, const TensorDescriptor &wDesc) noexcept + { + for (int i = 0; i < wDesc.nbDims() - 2; i++) + if (wDesc.dimension(1 + i) != expectedSize) + return false; + return true; + } + + avStatus_t cudaIm2Row(avContextDescriptor_t context, const avConvolutionDescriptor_t config, const avTensorDescriptor_t filterDesc, + const avTensorDescriptor_t srcDesc, const avMemoryDescriptor_t srcMem, const avTensorDescriptor_t rowDesc, avMemoryDescriptor_t rowMem) + { + getContext(context).setDevice(); + const bool invert = (getConvolution(config).mode == AVOCADO_CROSS_CORRELATION_MODE); + TensorShape input_shape(getTensor(srcDesc)); + TensorShape output_shape(getConvolution(config).getOutputShape(getTensor(srcDesc), getTensor(filterDesc))); + + const int2 kernel_size = { getTensor(filterDesc).dimension(1), getTensor(filterDesc).dimension(2) }; + const int2 padding = { getConvolution(config).padding[0], getConvolution(config).padding[1] }; + + dim3 blockSize(256); + dim3 gridSize(std::min(2048, (getTensor(srcDesc).volume() + 255) / 256)); + cudaStream_t stream = getContext(context).getStream(); + +#define KERNEL_LAUNCH(type)\ + {type padding_value = getConvolution(config).getPaddingValue();\ + kernel_im2row_conv2d<<>>(getPointer(srcMem), input_shape, getPointer(rowMem), output_shape, kernel_size, padding, invert, padding_value);\ + break;} + + switch (dataTypeSize(getTensor(srcDesc).dtype())) + { + default: + case 1: + KERNEL_LAUNCH(char) + case 2: + KERNEL_LAUNCH(short) + case 4: + KERNEL_LAUNCH(int) + case 8: + KERNEL_LAUNCH(int2) + case 16: + KERNEL_LAUNCH(int4) + } +#undef KERNEL_LAUNCH + return checkForErrors(); + } + + } /* namespace backend */ +} /* namespace avocado */ diff --git 
a/cuda_code/imageKernels_2.cu b/cuda_code/imageKernels_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..4537dce8663f60d4fae486c258fa632835f4982b --- /dev/null +++ b/cuda_code/imageKernels_2.cu @@ -0,0 +1,120 @@ +/* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of NVIDIA CORPORATION nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include + +// convert floating point rgba color to 32-bit integer +__device__ unsigned int rgbaFloatToInt(float4 rgba) { + rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] + rgba.y = __saturatef(rgba.y); + rgba.z = __saturatef(rgba.z); + rgba.w = __saturatef(rgba.w); + return ((unsigned int)(rgba.w * 255.0f) << 24) | + ((unsigned int)(rgba.z * 255.0f) << 16) | + ((unsigned int)(rgba.y * 255.0f) << 8) | + ((unsigned int)(rgba.x * 255.0f)); +} + +//////////////////////////////////////////////////////////////////////////////// +//! Rotate an image using texture lookups +//! 
@param outputData output data in global memory +//////////////////////////////////////////////////////////////////////////////// +static __global__ void transformKernel(unsigned int *outputData, int width, + int height, float theta, + cudaTextureObject_t tex) { + // calculate normalized texture coordinates + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + float u = (float)x - (float)width / 2; + float v = (float)y - (float)height / 2; + float tu = u * cosf(theta) - v * sinf(theta); + float tv = v * cosf(theta) + u * sinf(theta); + + tu /= (float)width; + tv /= (float)height; + + // read from texture and write to global memory + float4 pix = tex2D(tex, tu + 0.5f, tv + 0.5f); + unsigned int pixelInt = rgbaFloatToInt(pix); + outputData[y * width + x] = pixelInt; +} + +static __global__ void rgbToGrayscaleKernel(unsigned int *rgbaImage, + size_t imageWidth, + size_t imageHeight) { + size_t gidX = blockDim.x * blockIdx.x + threadIdx.x; + + uchar4 *pixArray = (uchar4 *)rgbaImage; + + for (int pixId = gidX; pixId < imageWidth * imageHeight; + pixId += gridDim.x * blockDim.x) { + uchar4 dataA = pixArray[pixId]; + unsigned char grayscale = + (unsigned char)(dataA.x * 0.3 + dataA.y * 0.59 + dataA.z * 0.11); + uchar4 dataB = make_uchar4(grayscale, grayscale, grayscale, 0); + pixArray[pixId] = dataB; + } +} + +void launchGrayScaleKernel(unsigned int *d_rgbaImage, + std::string image_filename, size_t imageWidth, + size_t imageHeight, cudaStream_t stream) { + int numThreadsPerBlock = 1024; + int numOfBlocks = (imageWidth * imageHeight) / numThreadsPerBlock; + + rgbToGrayscaleKernel<<>>( + d_rgbaImage, imageWidth, imageHeight); + + unsigned int *outputData; + checkCudaErrors(cudaMallocHost(&outputData, sizeof(unsigned int) * imageWidth * imageHeight)); + checkCudaErrors(cudaMemcpyAsync( + outputData, d_rgbaImage, sizeof(unsigned int) * imageWidth * imageHeight, + cudaMemcpyDeviceToHost, stream)); + checkCudaErrors(cudaStreamSynchronize(stream)); + + char outputFilename[1024]; + strcpy(outputFilename, image_filename.c_str()); + strcpy(outputFilename + image_filename.length() - 4, "_out.ppm"); + sdkSavePPM4ub(outputFilename, (unsigned char *)outputData, imageWidth, + imageHeight); + printf("Wrote '%s'\n", outputFilename); + + checkCudaErrors(cudaFreeHost(outputData)); +} + +void rotateKernel(cudaTextureObject_t &texObj, const float angle, + unsigned int *d_outputData, const int imageWidth, + const int imageHeight, cudaStream_t stream) { + dim3 dimBlock(8, 8, 1); + dim3 dimGrid(imageWidth / dimBlock.x, imageHeight / dimBlock.y, 1); + + transformKernel<<>>(d_outputData, imageWidth, + imageHeight, angle, texObj); +} diff --git a/cuda_code/image_generator_4.cu b/cuda_code/image_generator_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..3969ee7bd08802fb68d358b6061d3909aadc0be9 --- /dev/null +++ b/cuda_code/image_generator_4.cu @@ -0,0 +1,243 @@ +/* + * Software License Agreement (BSD License) + * + * Point Cloud Library (PCL) - www.pointclouds.org + * Copyright (c) 2011, Willow Garage, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Willow Garage, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include "device.hpp" + +using namespace pcl::device; + +namespace pcl { +namespace device { +struct ImageGenerator { + enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 }; + + PtrStep vmap; + PtrStep nmap; + + LightSource light; + + mutable PtrStepSz dst; + + __device__ __forceinline__ void operator()() const { + int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; + int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; + + if (x >= dst.cols || y >= dst.rows) + return; + + float3 v, n; + v.x = vmap.ptr(y)[x]; + n.x = nmap.ptr(y)[x]; + + uchar3 color = make_uchar3(0, 0, 0); + + if (!isnan(v.x) && !isnan(n.x)) { + v.y = vmap.ptr(y + dst.rows)[x]; + v.z = vmap.ptr(y + 2 * dst.rows)[x]; + + n.y = nmap.ptr(y + dst.rows)[x]; + n.z = nmap.ptr(y + 2 * dst.rows)[x]; + + float weight = 1.f; + + for (int i = 0; i < light.number; ++i) { + float3 vec = normalized(light.pos[i] - v); + + weight *= fabs(dot(vec, n)); + } + + int br = (int)(205 * weight) + 50; + br = max(0, min(255, br)); + color = make_uchar3(br, br, br); + } + dst.ptr(y)[x] = color; + } +}; + +__global__ void generateImageKernel(const ImageGenerator ig) { ig(); } +} // namespace device +} // namespace pcl + +void pcl::device::generateImage(const MapArr &vmap, const MapArr &nmap, + const LightSource &light, + PtrStepSz dst) { + ImageGenerator ig; + ig.vmap = vmap; + ig.nmap = nmap; + ig.light = light; + ig.dst = dst; + + dim3 block(ImageGenerator::CTA_SIZE_X, ImageGenerator::CTA_SIZE_Y); + dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); + + generateImageKernel<<>>(ig); + cudaSafeCall(cudaGetLastError()); + cudaSafeCall(cudaDeviceSynchronize()); +} + +////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace pcl { +namespace device { +__global__ void generateDepthKernel(const float3 R_inv_row3, const float3 t, + const PtrStep vmap, + PtrStepSz depth) { + int x = threadIdx.x + blockIdx.x * blockDim.x; + int y = threadIdx.y + blockIdx.y * blockDim.y; + + if (x < depth.cols && y < depth.rows) { + unsigned short result = 0; + + float3 v_g; + v_g.x = vmap.ptr(y)[x]; + if (!isnan(v_g.x)) { + v_g.y = vmap.ptr(y + depth.rows)[x]; + v_g.z = vmap.ptr(y + 2 * depth.rows)[x]; + + float v_z = dot(R_inv_row3, v_g - t); + + result = static_cast(v_z * 1000); + } + 
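+ // result stays 0 for invalid vertices; otherwise v_z is scaled by 1000
+ // (metres to millimetres, assuming the vertex map is metric).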
depth.ptr(y)[x] = result; + } +} +} // namespace device +} // namespace pcl + +void pcl::device::generateDepth(const Mat33 &R_inv, const float3 &t, + const MapArr &vmap, DepthMap &dst) { + dim3 block(32, 8); + dim3 grid(divUp(dst.cols(), block.x), divUp(dst.rows(), block.y)); + + generateDepthKernel<<>>(R_inv.data[2], t, vmap, dst); + cudaSafeCall(cudaGetLastError()); + cudaSafeCall(cudaDeviceSynchronize()); +} + +////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace pcl { +namespace device { +__global__ void generateNormalKernel(const Mat33 R_inv, const float3 t, + const PtrStep vmap, + const PtrStep nmap, + PtrStepSz normal) { + int x = threadIdx.x + blockIdx.x * blockDim.x; + int y = threadIdx.y + blockIdx.y * blockDim.y; + + if (x < normal.cols && y < normal.rows) { + float3 v, n; + v.x = vmap.ptr(y)[x]; + n.x = nmap.ptr(y)[x]; + + uchar3 color = make_uchar3(0, 0, 0); + + if (!isnan(v.x) && !isnan(n.x)) { + v.y = vmap.ptr(y + normal.rows)[x]; + v.z = vmap.ptr(y + 2 * normal.rows)[x]; + + n.y = nmap.ptr(y + normal.rows)[x]; + n.z = nmap.ptr(y + 2 * normal.rows)[x]; + + int rr = (int)(127.5 * (dot(R_inv.data[0], n) + 1.0)); + rr = max(0, min(255, rr)); + int gg = (int)(127.5 * (dot(R_inv.data[1], n) + 1.0)); + gg = max(0, min(255, gg)); + int bb = (int)(127.5 * (1.0 - dot(R_inv.data[2], n))); + bb = max(0, min(255, bb)); + + color = make_uchar3(rr, gg, bb); + } + normal.ptr(y)[x] = color; + } +} +} // namespace device +} // namespace pcl + +void pcl::device::generateNormal(const Mat33 &R_inv, const float3 &t, + const MapArr &vmap, const MapArr &nmap, + PtrStepSz dst) { + dim3 block(32, 8); + dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); + + generateNormalKernel<<>>(R_inv, t, vmap, nmap, dst); + cudaSafeCall(cudaGetLastError()); + cudaSafeCall(cudaDeviceSynchronize()); +} + +////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +namespace pcl { +namespace device { +__global__ void paint3DViewKernel(const PtrStep colors, + PtrStepSz dst, float colors_weight) { + int x = threadIdx.x + blockIdx.x * blockDim.x; + int y = threadIdx.y + blockIdx.y * blockDim.y; + + if (x < dst.cols && y < dst.rows) { + uchar3 value = dst.ptr(y)[x]; + uchar3 color = colors.ptr(y)[x]; + + if (value.x != 0 || value.y != 0 || value.z != 0) { + float cx = + value.x * (1.f - colors_weight) + color.x * colors_weight; + float cy = + value.y * (1.f - colors_weight) + color.y * colors_weight; + float cz = + value.z * (1.f - colors_weight) + color.z * colors_weight; + + value.x = min(255, max(0, __float2int_rn(cx))); + value.y = min(255, max(0, __float2int_rn(cy))); + value.z = min(255, max(0, __float2int_rn(cz))); + } + + dst.ptr(y)[x] = value; + } +} +} // namespace device +} // namespace pcl + +void pcl::device::paint3DView(const PtrStep &colors, + PtrStepSz dst, float colors_weight) { + dim3 block(32, 8); + dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); + + colors_weight = min(1.f, max(0.f, colors_weight)); + + paint3DViewKernel<<>>(colors, dst, colors_weight); + cudaSafeCall(cudaGetLastError()); + cudaSafeCall(cudaDeviceSynchronize()); +} \ No newline at end of file diff --git a/cuda_code/img_act_manycolor_kepler_fff_3.cu b/cuda_code/img_act_manycolor_kepler_fff_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..4c7ff1fc8988bf4f9aa0714d0bb02da027edd153 --- /dev/null +++ b/cuda_code/img_act_manycolor_kepler_fff_3.cu @@ -0,0 
+1,39 @@ +/** + * \file dnn/src/cuda/local/cuda-convnet2/img_acts/img_act_manycolor_kepler_fff.cu + * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + */ +/** + * Copyright 2014 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * -------------------------------------------------------------------------- + * * This file has been modified by Megvii ("Megvii Modifications"). + * * All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved. + * -------------------------------------------------------------------------- + */ +#include "img_act_manycolor_kepler.cuh" +namespace megdnn { +namespace cuda { + +IMG_MANY_COLOR_K(false, false, false) + + +} // namespace cuda +} // namespace megdnn diff --git a/cuda_code/index_2.cu b/cuda_code/index_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..168dbedd6972c447c4e9cebcf161409ddd18e51b --- /dev/null +++ b/cuda_code/index_2.cu @@ -0,0 +1,81 @@ +/* +* Copyright 2019-2020 NVIDIA CORPORATION. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +#include +#include +#include "index_gpu.cuh" +#include "minimizer.hpp" + +namespace claraparabricks +{ + +namespace genomeworks +{ + +namespace cudamapper +{ + +std::unique_ptr Index::create_index_async(DefaultDeviceAllocator allocator, + const io::FastaParser& parser, + const IndexDescriptor& descriptor, + const std::uint64_t kmer_size, + const std::uint64_t window_size, + const bool hash_representations, + const double filtering_parameter, + const cudaStream_t cuda_stream_generation, + const cudaStream_t cuda_stream_copy) +{ + GW_NVTX_RANGE(profiler, "create_index_async"); + return std::make_unique>(allocator, + parser, + descriptor, + kmer_size, + window_size, + hash_representations, + filtering_parameter, + cuda_stream_generation, + cuda_stream_copy); +} + +std::unique_ptr IndexHostCopyBase::create_host_copy_async(const Index& index, + const read_id_t first_read_id, + const std::uint64_t kmer_size, + const std::uint64_t window_size, + const cudaStream_t cuda_stream) +{ + GW_NVTX_RANGE(profiler, "cache_D2H"); + return std::make_unique(index, + first_read_id, + kmer_size, + window_size, + cuda_stream); +} + +IndexNotReadyException::IndexNotReadyException(const std::string& function_name) + : message_("Index::" + function_name + "() has been accessed before a call to wait_to_be_ready()") +{ +} + +const char* IndexNotReadyException::what() const noexcept +{ + return message_.c_str(); +} + +} // namespace cudamapper + +} // namespace genomeworks + +} // namespace claraparabricks diff --git a/cuda_code/indexing_op_6.cu b/cuda_code/indexing_op_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..cb0152486d05dd174869c01d6ead0ff6172896a2 --- /dev/null +++ b/cuda_code/indexing_op_6.cu @@ -0,0 +1,570 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2017 by Contributors + * \file indexing_op.cu + * \brief GPU implementation of indexing operator + * \author Siyi Li, Chi Zhang +*/ + +#include "./indexing_op.h" +#include "./util/tensor_util-inl.cuh" +#include "./util/tensor_util-inl.h" + +namespace mxnet { +namespace op { + +/*! \brief If there are out-of-bound indices, out will be assigned to 1. 
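+ * The flag is a single char in device memory: CheckIndexOutOfBound() below
+ * initializes it, maps this check over all indices, and copies the flag back
+ * to the host.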
+ */ + +struct is_valid_check { + template + MSHADOW_XINLINE static void Map(int i, char* out, const DType* data, + const DType min, const DType max) { + if (data[i] < min || data[i] > max) *out = 1; + } +}; + + +struct AddTakeGradRspGPUKernel { + template + __device__ __forceinline__ static void Map(int tid, + DType* out, + const nnvm::dim_t* prefix_sum, + const IType* data, + const DType* ograd, + const nnvm::dim_t row_length) { + using nnvm::dim_t; + const dim_t data_i = tid / row_length; + const dim_t grad_i = tid % row_length; + const dim_t irow = static_cast(data[data_i]); + const dim_t rsp_row = prefix_sum[irow] - 1; + const DType val = ograd[data_i * row_length + grad_i]; + atomicAdd(static_cast(&(out[rsp_row*row_length+grad_i])), val); + } +}; + +/* + * \brief kernel for backward computation for take, executed with deterministic order + * \param thread_id the thread id + * \param out the output gradient data + * \param lookup_table the table to lookup the position of an id in gradient array + * \param sorted_data the sorted data input + * \param original_idx the original indices of the sorted data input + * \param ograd head gradient + * \param row_length the output dimension + * \param num_threads_per_row the number of threads to process a row together + * \param SZ the number of features a thread is responsible for + */ +template +struct AddTakeGradRspDeterministicKernel { + template + __device__ __forceinline__ static void Map(int thread_id, + DType* out, + const nnvm::dim_t* lookup_table, + const nnvm::dim_t* sorted_data, + const nnvm::dim_t data_size, + const nnvm::dim_t* original_idx, + const DType* ograd, + const nnvm::dim_t row_length, + const nnvm::dim_t num_threads_per_row) { + using nnvm::dim_t; + int tid = thread_id / num_threads_per_row; + const int feature_start = thread_id % num_threads_per_row * SZ; + int num_features = SZ; + if (feature_start + num_features > row_length) { + num_features = row_length - feature_start; + } + if (tid == 0 || sorted_data[tid - 1] != sorted_data[tid]) { + DType acc[SZ]; + #pragma unroll + for (int i = 0; i < SZ; i++) { + acc[i] = 0; + } + const dim_t data = sorted_data[tid]; + const dim_t row_id = lookup_table[data]; + const dim_t out_offset = row_id * row_length + feature_start; + do { + const dim_t idx = original_idx[tid]; + const dim_t ograd_offset = idx * row_length + feature_start; + for (int i = 0; i < num_features; i++) { + acc[i] += ograd[ograd_offset + i]; + } + tid++; + } while (tid < data_size && sorted_data[tid - 1] == sorted_data[tid]); + for (int i = 0; i < num_features; i++) { + out[out_offset + i] += acc[i]; + } + } + } +}; + +/*! \brief name the struct Take instead of take + * to avoid conflict with the take function in mshadow + */ +template +struct TakeGPU { + // assume that idx have been flattened to a 1-D tensor (N,) + // assume that out_data and in_data have been flattened to 2-D tensors, (N, M) and (K, M) + // M is the number of columns of in_data and out_data + // K is the number of rows of in_data + // i is the index of out_data + template + MSHADOW_XINLINE static void Map(int i, DType* out_data, const DType* in_data, + const IType* idx, const int64_t M, const int64_t K) { + int64_t j = static_cast(idx[i/M]); + if (clip) { + if (j <= 0) j = 0; + else if (j >= K) j = K - 1; + } else { + j = j % K; + j += (j < 0) ? 
K : 0; + } + out_data[i] = in_data[j * M + i % M]; + } +}; + +/* + * \brief returns true if all indices are between [min, max] + * \param s the stream + * \param data_ptr the indices on the stream + * \param data_size the number of indices to examine + * \param min the expected min value for indices + * \param max the expected max value for indices + * \param is_valid_ptr the temparary workspace + */ +template +bool CheckIndexOutOfBound(mshadow::Stream *s, const DType* data_ptr, size_t data_size, + const DType min, const DType max, char* is_valid_ptr) { + using namespace mxnet_op; + int32_t is_valid = 0; + Kernel::Launch(s, 1, is_valid_ptr); + Kernel::Launch(s, data_size, is_valid_ptr, data_ptr, min, max); + CUDA_CALL(hipMemcpy(&is_valid, is_valid_ptr, sizeof(char), + hipMemcpyDeviceToHost)); + return is_valid == 0; +} + +// Embedding forward implementation with dense weight +template<> +void EmbeddingOpForwardDnsImpl(mshadow::Stream* s, + const TBlob& data, + const TBlob& weight, + const OpReqType req, + const TBlob& output) { + using namespace mxnet_op; + const mxnet::TShape& ishape = data.shape_; + const mxnet::TShape& oshape = output.shape_; + + MSHADOW_TYPE_SWITCH(output.type_flag_, DType, { + MSHADOW_TYPE_SWITCH(data.type_flag_, IType, { + Tensor idx = data.get_with_shape( + Shape1(ishape.ProdShape(0, ishape.ndim())), s); + Tensor wmat = weight.get(s); + Tensor out = output.get_with_shape( + Shape2(oshape.ProdShape(0, oshape.ndim()-1), oshape[oshape.ndim()-1]), s); + Kernel, gpu>::Launch(s, oshape.Size(), out.dptr_, wmat.dptr_, + idx.dptr_, wmat.shape_[1], wmat.shape_[0]); + }); + }); +} + +template<> +void SparseEmbeddingOpForwardRspImpl(const OpContext& ctx, + const TBlob& data, + const NDArray& weight, + const OpReqType req, + const TBlob& output) { + if (req == kNullOp) return; + using namespace rowsparse; + using namespace mxnet_op; + mshadow::Stream* s = ctx.get_stream(); + // zeros weight + if (req == kWriteTo && !weight.storage_initialized()) { + size_t out_size = output.shape_.Size(); + MSHADOW_TYPE_SWITCH(output.type_flag_, DType, { + Fill(s, TBlob(output.dptr(), mshadow::Shape1(out_size), + gpu::kDevMask), kWriteTo, 0); + }) + return; + } + // check out-of-bound indices + MSHADOW_TYPE_SWITCH(data.type_flag_, DType, { + DType min = 0; + DType max = static_cast(weight.shape()[0] - 1); + DType* data_ptr = data.dptr(); + size_t data_size = data.shape_.Size(); + Tensor workspace = ctx.requested[0] + .get_space_typed(Shape1(1), s); + char* is_valid_ptr = reinterpret_cast(workspace.dptr_); + bool is_valid = CheckIndexOutOfBound(s, data_ptr, data_size, min, max, is_valid_ptr); + CHECK(is_valid) << "SparseEmbedding input contains data out of bound"; + }) + // the weight is actually dense + if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) { + EmbeddingOpForwardDnsImpl(s, data, weight.data(), req, output); + } else { + EmbeddingOpForwardRspImpl(s, data, weight, req, output); + } +} + +template +void SparseEmbeddingDeterministicKernelLaunch(const OpContext& ctx, + const TBlob& ograd, + const TBlob& data, + const OpReqType req, + const NDArray& output) { + using namespace mshadow; + using namespace mxnet_op; + using namespace expr; + using namespace rowsparse; + using nnvm::dim_t; + mshadow::Stream *s = ctx.get_stream(); + const dim_t num_rows = output.shape()[0]; + const dim_t row_length = output.shape()[1]; + const dim_t data_size = static_cast(data.shape_.Size()); + // temp resource declarations + dim_t* lookup_table = NULL; + void* temp_storage = NULL; + dim_t* sorted_data = 
NULL; + dim_t* original_idx = NULL; + // calculate number of bytes for temp resources + size_t lookup_table_bytes = num_rows * sizeof(dim_t); + size_t sorted_data_storage_bytes = data_size * sizeof(dim_t); + size_t original_idx_storage_bytes = data_size * sizeof(dim_t); + size_t sort_workspace_size = SortByKeyWorkspaceSize(data_size); + size_t unique_workspace_bytes = 0; + // estimate unique temp space + IType* data_ptr = data.dptr(); + size_t *null_ptr = nullptr; + // unique operations will be applied on sorted data + hipcub::DeviceSelect::Unique(NULL, unique_workspace_bytes, sorted_data, sorted_data, + null_ptr, data_size, Stream::GetStream(s)); + // One more space reserved for unique count + size_t temp_workspace_bytes = std::max(unique_workspace_bytes, + sort_workspace_size); + size_t total_storage_bytes = lookup_table_bytes + sorted_data_storage_bytes + + original_idx_storage_bytes + temp_workspace_bytes; + + // request resource and split it. layout is: + // lookup_table, sorted_data, original_idx, temp_storage + Tensor workspace = ctx.requested[0] + .get_space_typed(Shape1(total_storage_bytes), s); + lookup_table = reinterpret_cast(workspace.dptr_); + sorted_data = reinterpret_cast(workspace.dptr_ + lookup_table_bytes); + original_idx = reinterpret_cast(workspace.dptr_ + lookup_table_bytes + + sorted_data_storage_bytes); + temp_storage = workspace.dptr_ + total_storage_bytes - temp_workspace_bytes; + + // check out-of-bound indices + { + IType min = 0; + IType max = static_cast(output.shape()[0] - 1); + IType* data_ptr = data.dptr(); + size_t data_size = data.shape_.Size(); + bool is_valid = CheckIndexOutOfBound(s, data_ptr, data_size, min, max, + reinterpret_cast(temp_storage)); + CHECK(is_valid) << "Embedding input contains data out of bound"; + } + + // make a copy of the data, to be sorted + TBlob sorted_data_blob(sorted_data, Shape1(data_size), gpu::kDevMask); + auto sorted_data_tensor = sorted_data_blob.FlatTo1D(s); + mxnet_op::copy(s, sorted_data_blob, data); + + // generate original idx + Tensor original_idx_tensor(original_idx, Shape1(data_size), s); + Kernel::Launch(s, data_size, 1, static_cast(0), + static_cast(1), kWriteTo, original_idx); + // sort data with its original idx + int num_bits = common::ilog2ui(num_rows - 1); + char* temp_storage_ptr = reinterpret_cast(temp_storage); + Tensor temp_storage_tensor(temp_storage_ptr, + Shape1(sort_workspace_size), s); + SortByKey(sorted_data_tensor, original_idx_tensor, true, + &temp_storage_tensor, 0, num_bits); + + // compute unique row ids based on sorted values. 
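+ // The kIdx aux array is over-allocated by one element: DeviceSelect::Unique
+ // writes the compacted (unique) row ids at the front and the number of
+ // unique rows at grad_row_idx + data_size, which is copied back to the host
+ // as nnr below.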
+ output.CheckAndAllocAuxData(kIdx, Shape1(data_size + 1)); + + // fill row_idx array of output matrix, using the row_flg values + RType* grad_row_idx = output.aux_data(kIdx).dptr(); + hipcub::DeviceSelect::Unique(temp_storage_ptr, unique_workspace_bytes, sorted_data, + grad_row_idx, grad_row_idx + data_size, data_size, Stream::GetStream(s)); + + dim_t nnr = 0; + CUDA_CALL(hipMemcpy(&nnr, grad_row_idx + data_size, sizeof(RType), + hipMemcpyDeviceToHost)); + CHECK_EQ(output.shape().ndim(), 2) << "Unexcepted ndim"; + output.CheckAndAllocData(Shape2(nnr, output.shape()[1])); + output.set_aux_shape(kIdx, Shape1(nnr)); + + // generate lookup table + Kernel::Launch(s, nnr, lookup_table, grad_row_idx); + + // accumulate gradients + DType* grad_data = output.data().dptr(); + Fill(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), + kWriteTo, 0); + const int SZ = 4; + const nnvm::dim_t num_threads_per_row = (row_length + SZ - 1) / SZ; + Kernel, gpu>::Launch(s, data_size * num_threads_per_row, + grad_data, lookup_table, sorted_data, data_size, original_idx, + ograd.dptr(), row_length, num_threads_per_row); +} + +inline void SparseEmbeddingOpBackwardDeterministicRspImpl(const OpContext& ctx, + const TBlob& ograd, + const TBlob& data, + const OpReqType req, + const NDArray& output) { + using nnvm::dim_t; + if (req == kNullOp) return; + CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support " + << "weight gradient calculation with req != write"; + + mshadow::Stream *s = ctx.get_stream(); + const dim_t data_size = static_cast(data.shape_.Size()); + if (data_size == 0) { + FillZerosRspImpl(s, output); + return; + } + + MSHADOW_TYPE_SWITCH(data.type_flag_, IType, { + MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, { + MSHADOW_IDX_TYPE_SWITCH(output.aux_type(rowsparse::kIdx), RType, { + SparseEmbeddingDeterministicKernelLaunch(ctx, ograd, data, + req, output); + }); + }); + }); +} + + +template<> +inline void SparseEmbeddingOpBackwardRspImpl(const bool deterministic, + const OpContext& ctx, + const TBlob& ograd, + const TBlob& data, + const OpReqType req, + const NDArray& output) { + if (deterministic) { + SparseEmbeddingOpBackwardDeterministicRspImpl(ctx, ograd, data, req, output); + return; + } + using namespace mshadow; + using namespace mxnet_op; + using namespace mshadow::expr; + using namespace rowsparse; + using nnvm::dim_t; + if (req == kNullOp) return; + CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support " + << "weight gradient calculation with req != write"; + + // Request temporary storage for marking non-zero rows and prefix sum + Stream *s = ctx.get_stream(); + dim_t num_rows = output.shape()[0]; + dim_t row_length = output.shape()[1]; + dim_t data_size = static_cast(data.shape_.Size()); + dim_t num_threads; + + MSHADOW_TYPE_SWITCH(data.type_flag_, IType, { + MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, { + MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, { + dim_t* prefix_sum = NULL; + void* d_temp_storage = NULL; + size_t temp_storage_bytes = 0; + hipcub::DeviceScan::InclusiveSum(d_temp_storage, + temp_storage_bytes, + prefix_sum, + prefix_sum, + num_rows, + Stream::GetStream(s)); + Tensor workspace = ctx.requested[0] + .get_space_typed(Shape1(num_rows * sizeof(dim_t) + + temp_storage_bytes), s); + prefix_sum = reinterpret_cast(workspace.dptr_); + d_temp_storage = workspace.dptr_ + num_rows*sizeof(dim_t); + num_threads = num_rows; + Fill(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0); + Kernel::Launch(s, data_size, 
prefix_sum, data.dptr()); + + hipcub::DeviceScan::InclusiveSum(d_temp_storage, + temp_storage_bytes, + prefix_sum, + prefix_sum, + num_rows, + mshadow::Stream::GetStream(s)); + dim_t nnr = 0; + CUDA_CALL(hipMemcpy(&nnr, &prefix_sum[num_rows-1], sizeof(dim_t), + hipMemcpyDeviceToHost)); + if (nnr == 0) { + FillZerosRspImpl(s, output); + return; + } + output.CheckAndAlloc({Shape1(nnr)}); + RType* grad_row_idx = output.aux_data(kIdx).dptr(); + // fill row_idx array of output matrix, using the row_flg values + Kernel::Launch(s, num_rows, + grad_row_idx, prefix_sum, num_rows); + // prefill with zeros + DType* grad_data = output.data().dptr(); + Fill(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask), + kWriteTo, 0); + // add the final gradients + num_threads = row_length * data_size; + Kernel::Launch(s, num_threads, grad_data, prefix_sum, + data.dptr(), ograd.dptr(), row_length); + }); + }); + }); +} + +struct backward_gather_nd_gpu { + template + MSHADOW_XINLINE static void Map(index_t i, index_t N, index_t M, index_t K, + const mshadow::Shape<10> strides, + DType* out, const DType* data, + const IType* indices) { + index_t offset = 0; + for (index_t j = 0; j < M; ++j) { + offset += strides[j] * static_cast(indices[j*N + i]); + } + for (index_t j = 0; j < K; ++j) { + atomicAdd(out + (offset + j), data[i * K + j]); + } + } +}; + +template +inline void GatherNDBackwardImpl(index_t N, index_t M, index_t K, + const mshadow::Shape<10> strides, + DType* out, + const DType* data, + const IType* indices, + mshadow::Stream *s) { + mxnet_op::Kernel::Launch(s, N, N, M, K, strides, out, data, indices); +} + +template<> +void TakeOpForward(const nnvm::NodeAttrs& attrs, + const OpContext& ctx, + const std::vector& inputs, + const std::vector& req, + const std::vector& outputs) { + using namespace mxnet_op; + if (req[take_::kOut] == kNullOp) return; + const TakeParam& param = nnvm::get(attrs.parsed); + CHECK_EQ(inputs.size(), 2U); + CHECK_EQ(outputs.size(), 1U); + + const mxnet::TShape& idxshape = inputs[take_::kIdx].shape_; + const mxnet::TShape& arrshape = inputs[take_::kArr].shape_; + const mxnet::TShape& oshape = outputs[take_::kOut].shape_; + + Stream *s = ctx.get_stream(); + const int actual_axis = param.axis + ((param.axis < 0) ? 
arrshape.ndim() : 0); + + MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { // output data type + MSHADOW_TYPE_SWITCH(inputs[1].type_flag_, IType, { // index data type + if (actual_axis == 0) { + if (param.mode == take_::kClip) { + Kernel, gpu>::Launch(s, oshape.Size(), + outputs[take_::kOut].dptr(), + inputs[take_::kArr].dptr(), + inputs[take_::kIdx].dptr(), + oshape.Size()/idxshape.Size(), arrshape[0]); + } else { + Kernel, gpu>::Launch(s, oshape.Size(), + outputs[take_::kOut].dptr(), + inputs[take_::kArr].dptr(), + inputs[take_::kIdx].dptr(), + oshape.Size()/idxshape.Size(), arrshape[0]); + } + } else { + mshadow::Shape<10> in_strides; + int stride = 1; + for (int i = arrshape.ndim() - 1; i >= 0; stride *= arrshape[i], --i) { + in_strides[i] = stride; + } + mshadow::Shape<10> out_strides; + stride = 1; + for (int i = oshape.ndim() - 1; i >= 0; stride *= oshape[i], --i) { + out_strides[i] = stride; + } + if (param.mode == take_::kClip) { + Kernel, gpu>::Launch(s, oshape.Size(), + outputs[take_::kOut].dptr(), + inputs[take_::kArr].dptr(), + inputs[take_::kIdx].dptr(), + in_strides, out_strides, arrshape.ndim(), oshape.ndim(), + idxshape.ndim(), arrshape[actual_axis], actual_axis); + } else if (param.mode == take_::kWrap) { + Kernel, gpu>::Launch(s, oshape.Size(), + outputs[take_::kOut].dptr(), + inputs[take_::kArr].dptr(), + inputs[take_::kIdx].dptr(), + in_strides, out_strides, arrshape.ndim(), oshape.ndim(), + idxshape.ndim(), arrshape[actual_axis], actual_axis); + } + } + }); + }); +} + +NNVM_REGISTER_OP(Embedding) +.set_attr("FCompute", EmbeddingOpForward) +.set_attr("FComputeEx", SparseEmbeddingOpForwardEx); + +NNVM_REGISTER_OP(_contrib_SparseEmbedding) +.set_attr("FComputeEx", SparseEmbeddingOpForwardEx); + +NNVM_REGISTER_OP(_backward_Embedding) +.set_attr("FCompute", EmbeddingOpBackward) +.set_attr("FComputeEx", EmbeddingOpBackwardEx); + +NNVM_REGISTER_OP(_backward_SparseEmbedding) +.set_attr("FComputeEx", SparseEmbeddingOpBackwardEx); + +NNVM_REGISTER_OP(take) +.set_attr("FCompute", TakeOpForward); + +NNVM_REGISTER_OP(_backward_take) +.set_attr("FCompute", TakeOpBackward); + +NNVM_REGISTER_OP(batch_take) +.set_attr("FCompute", BatchTakeOpForward); + +NNVM_REGISTER_OP(one_hot) +.set_attr("FCompute", OneHotOpForward); + +NNVM_REGISTER_OP(gather_nd) +.set_attr("FCompute", GatherNDForward); + +NNVM_REGISTER_OP(scatter_nd) +.set_attr("FCompute", ScatterNDForward); + +NNVM_REGISTER_OP(_backward_gather_nd) +.set_attr("FCompute", GatherNDBackward); + +NNVM_REGISTER_OP(_scatter_set_nd) +.set_attr("FCompute", ScatterSetNDForward); +} // namespace op +} // namespace mxnet diff --git a/cuda_code/infer_1.cu b/cuda_code/infer_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..1700406853c52ea1effbbbce68e9f8acdd65f598 --- /dev/null +++ b/cuda_code/infer_1.cu @@ -0,0 +1,705 @@ +/* + * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include +#include +#include "common.cuh" + +namespace ML { +namespace fil { + +// vec wraps float[N] for cub::BlockReduce +template +struct vec; + +template +struct Vectorized { + BinaryOp op; + __device__ Vectorized(BinaryOp op_) : op(op_) {} + template + constexpr __host__ __device__ __forceinline__ vec operator()( + vec a, vec b) const { + vec c; +#pragma unroll + for (int i = 0; i < NITEMS; i++) c[i] = op(a[i], b[i]); + return c; + } +}; +template +constexpr __host__ __device__ Vectorized vectorized(BinaryOp op) { + return op; +} + +template +struct vec { + static const int NITEMS = N; + T data[N]; + explicit __host__ __device__ vec(T t) { +#pragma unroll + for (int i = 0; i < N; ++i) data[i] = t; + } + __host__ __device__ vec() : vec(T()) {} + __host__ __device__ T& operator[](int i) { return data[i]; } + __host__ __device__ T operator[](int i) const { return data[i]; } + friend __host__ __device__ vec operator+(const vec& a, + const vec& b) { + return vectorized(cub::Sum())(a, b); + } + friend __host__ __device__ void operator+=(vec& a, const vec& b) { + a = a + b; + } + template + friend __host__ __device__ vec operator/(vec& a, const Vec& b) { + return vectorized(thrust::divides())(a, vec(b)); + } + template + friend __host__ __device__ void operator/=(vec& a, const Vec& b) { + a = a / b; + } +}; + +struct best_margin_label : cub::KeyValuePair { + __host__ __device__ best_margin_label(cub::KeyValuePair pair) + : cub::KeyValuePair(pair) {} + __host__ __device__ best_margin_label(int c = 0, float f = -INFINITY) + : cub::KeyValuePair({c, f}) {} +}; + +template +__device__ __forceinline__ vec to_vec( + int c, vec margin) { + vec ret; +#pragma unroll + for (int i = 0; i < NITEMS; ++i) ret[i] = best_margin_label(c, margin[i]); + return ret; +} + +struct ArgMax { + template + __host__ __device__ __forceinline__ vec operator()( + vec a, vec b) const { + vec c; +#pragma unroll + for (int i = 0; i < NITEMS; i++) c[i] = cub::ArgMax()(a[i], b[i]); + return c; + } +}; + +/** tree_leaf_output returns the leaf outputs from the tree with leaf indices + given by leaves for n_rows items. FULL_ITEMS indicates whether n_rows == + NITEMS, to allow the compiler to skip the conditional when unrolling the + loop. */ +template +__device__ __forceinline__ vec tree_leaf_output( + tree_type tree, int n_rows, int (&leaves)[NITEMS]) { + vec out(0); +#pragma unroll + for (int j = 0; j < NITEMS; ++j) { + if (FULL_NITEMS || j < n_rows) { + /** dependent names are not considered templates by default, unless it's a + member of a current [template] instantiation. As output<>() is a + member function inherited from the base class, template + output() is required. */ + out[j] = tree[leaves[j]].template output(); + } + } + return out; +} + +template +__device__ __forceinline__ vec infer_one_tree( + tree_type tree, const float* input, int cols, int n_rows) { + // find the leaf nodes for each row + int curr[NITEMS]; + // the first n_rows are active + int mask = (1 << n_rows) - 1; + for (int j = 0; j < NITEMS; ++j) curr[j] = 0; + do { +#pragma unroll + for (int j = 0; j < NITEMS; ++j) { + auto n = tree[curr[j]]; + mask &= ~(n.is_leaf() << j); + if ((mask & (1 << j)) != 0) { + float val = input[j * cols + n.fid()]; + bool cond = isnan(val) ? 
!n.def_left() : val >= n.thresh(); + curr[j] = n.left(curr[j]) + cond; + } + } + } while (mask != 0); + + // get the output from the leaves + if (n_rows == NITEMS) { + return tree_leaf_output(tree, n_rows, curr); + } else { + return tree_leaf_output(tree, n_rows, curr); + } +} + +template +__device__ __forceinline__ vec<1, output_type> infer_one_tree( + tree_type tree, const float* input, int cols, int rows) { + int curr = 0; + for (;;) { + auto n = tree[curr]; + if (n.is_leaf()) break; + float val = input[n.fid()]; + bool cond = isnan(val) ? !n.def_left() : val >= n.thresh(); + curr = n.left(curr) + cond; + } + vec<1, output_type> out; + out[0] = tree[curr].base_node::output(); + return out; +} + +/** +The shared memory requirements for finalization stage may differ based +on the set of PTX architectures the kernels were compiled for, as well as +the CUDA compute capability of the device chosen for computation. + +TODO (levsnv): run a test kernel during forest init to determine the compute capability +chosen for the inference, for an accurate sizeof(BlockReduce::TempStorage), +which is used in determining max NITEMS or max input data columns. + +600 is the __CUDA_ARCH__ for Pascal (6.0) GPUs, which is not defined in +host code. +6.0 is the earliest compute capability supported by FIL and RAPIDS in general. +See https://rapids.ai/start.html as well as cmake defaults. +*/ +// values below are defaults as of this change. +template +size_t block_reduce_footprint_host() { + return sizeof(typename cub::BlockReduce, FIL_TPB, + cub::BLOCK_REDUCE_WARP_REDUCTIONS, 1, + 1, 600>::TempStorage); +} + +template +size_t block_reduce_best_class_footprint_host() { + return sizeof( + typename cub::BlockReduce, FIL_TPB, + cub::BLOCK_REDUCE_WARP_REDUCTIONS, 1, 1, + 600>::TempStorage); +} + +// the device template should achieve the best performance, using up-to-date +// CUB defaults +template +__device__ __forceinline__ T block_reduce(T value, BinaryOp op, void* storage) { + typedef cub::BlockReduce BlockReduceT; + return BlockReduceT(*(typename BlockReduceT::TempStorage*)storage) + .Reduce(value, op, blockDim.x); +} + +template // = FLOAT_UNARY_BINARY +struct tree_aggregator_t { + vec acc; + void* tmp_storage; + + /** shared memory footprint of the accumulator during + the finalization of forest inference kernel, when infer_k output + value is computed. + num_classes is used for other template parameters */ + static size_t smem_finalize_footprint(size_t data_row_size, int num_classes, + bool predict_proba) { + return block_reduce_footprint_host(); + } + + /** shared memory footprint of the accumulator during + the accumulation of forest inference, when individual trees + are inferred and partial aggregates are accumulated. 
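+ (this FLOAT_UNARY_BINARY aggregator keeps each thread's partial sums in
+ registers, so it needs no shared memory while trees are being accumulated)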
+ num_classes is used for other template parameters */ + static size_t smem_accumulate_footprint(int num_classes) { return 0; } + + /** + num_classes is used for other template parameters */ + __device__ __forceinline__ tree_aggregator_t(predict_params params, + void* accumulate_workspace, + void* finalize_workspace) + : tmp_storage(finalize_workspace) {} + + __device__ __forceinline__ void accumulate( + vec single_tree_prediction, int tree, int num_rows) { + acc += single_tree_prediction; + } + + __device__ __forceinline__ void finalize(float* out, int num_rows, + int output_stride, + output_t transform, int num_trees) { + __syncthreads(); + acc = block_reduce(acc, vectorized(cub::Sum()), tmp_storage); + if (threadIdx.x > 0) return; +#pragma unroll + for (int row = 0; row < NITEMS; ++row) + if (row < num_rows) out[row * output_stride] = acc[row]; + } +}; + +// tmp_storage may overlap shared memory addressed by [begin, end) +// allreduce_shmem ensures no race conditions +template +__device__ __forceinline__ auto allreduce_shmem(Iterator begin, Iterator end, + BinaryOp op, + void* tmp_storage) { + typedef typename std::iterator_traits::value_type value_type; + value_type thread_partial; + for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x) + thread_partial = op(thread_partial, *it); + __syncthreads(); // free shared memory [begin, end) + auto res = block_reduce(thread_partial, op, tmp_storage); + // broadcast sum to all threads + __syncthreads(); // free up tmp_storage + if (threadIdx.x == 0) *(value_type*)tmp_storage = res; + __syncthreads(); + return *(value_type*)tmp_storage; +} + +// *begin and *end shall be struct vec +// tmp_storage may overlap shared memory addressed by [begin, end) +template +__device__ __forceinline__ void write_best_class(Iterator begin, Iterator end, + void* tmp_storage, float* out, + int num_rows) { + // reduce per-class candidate margins to one best class candidate + // per thread (for each of the NITEMS rows) + auto best = vecNITEMS, best_margin_label>(); + for (int c = threadIdx.x; c < end - begin; c += blockDim.x) + best = vectorized(cub::ArgMax())(best, to_vec(c, begin[c])); + // [begin, end) may overlap tmp_storage + __syncthreads(); + // find best class per block (for each of the NITEMS rows) + best = block_reduce(best, vectorized(cub::ArgMax()), tmp_storage); + // write it out to global memory + if (threadIdx.x > 0) return; +#pragma unroll + for (int row = 0; row < best.NITEMS; ++row) + if (row < num_rows) out[row] = best[row].key; +} + +/// needed for softmax +__device__ float shifted_exp(float margin, float max) { + return expf(margin - max); +} + +// *begin and *end shall be struct vec +// tmp_storage may NOT overlap shared memory addressed by [begin, end) +template +__device__ __forceinline__ void block_softmax(Iterator begin, Iterator end, + void* tmp_storage) { + // subtract max before exponentiating for numerical stability + typedef typename std::iterator_traits::value_type value_type; + value_type max = + allreduce_shmem(begin, end, vectorized(cub::Max()), tmp_storage); + for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x) + *it = vectorized(shifted_exp)(*it, max); + // sum of exponents + value_type soe = + allreduce_shmem(begin, end, vectorized(cub::Sum()), tmp_storage); + // softmax phase 2: normalization + for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x) + *it /= soe; +} + +// *begin and *end shall be struct vec +// tmp_storage may NOT overlap shared memory addressed by [begin, end) +template 
+__device__ __forceinline__ void normalize_softmax_and_write( + Iterator begin, Iterator end, output_t transform, int trees_per_class, + void* tmp_storage, float* out, int num_rows) { + if ((transform & output_t::AVG) != 0) { + for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x) + *it /= trees_per_class; + } + if ((transform & output_t::SOFTMAX) != 0) + block_softmax(begin, end, tmp_storage); +// write result to global memory +#pragma unroll + for (int row = 0; row < begin->NITEMS; ++row) { + for (int c = threadIdx.x; c < end - begin; c += blockDim.x) + if (row < num_rows) out[row * (end - begin) + c] = begin[c][row]; + } +} + +// *begin and *end shall be struct vec +// tmp_storage may NOT overlap shared memory addressed by [begin, end) +// in case num_outputs > 1 +template +__device__ __forceinline__ void class_margins_to_global_memory( + Iterator begin, Iterator end, output_t transform, int trees_per_class, + void* tmp_storage, float* out, int num_rows, int num_outputs) { + if (num_outputs == 1) { // will output class + // reduce per-class candidate margins to one best class candidate + // per thread (for each of the NITEMS rows) + write_best_class(begin, end, tmp_storage, out, num_rows); + } else { // output softmax-ed margin + normalize_softmax_and_write(begin, end, transform, trees_per_class, + tmp_storage, out, num_rows); + } +} + +template +struct tree_aggregator_t { + vec acc; + int num_classes; + vec* per_thread; + void* tmp_storage; + + static size_t smem_finalize_footprint(size_t data_row_size, int num_classes, + bool predict_proba) { + size_t phase1 = + (FIL_TPB - FIL_TPB % num_classes) * sizeof(vec); + size_t phase2 = predict_proba + ? block_reduce_footprint_host() + : block_reduce_best_class_footprint_host(); + return predict_proba ? phase1 + phase2 : std::max(phase1, phase2); + } + + static size_t smem_accumulate_footprint(int num_classes) { return 0; } + + __device__ __forceinline__ tree_aggregator_t(predict_params params, + void* accumulate_workspace, + void* finalize_workspace) + : num_classes(params.num_classes), + per_thread((vec*)finalize_workspace), + tmp_storage(params.predict_proba ? per_thread + num_classes + : finalize_workspace) {} + + __device__ __forceinline__ void accumulate( + vec single_tree_prediction, int tree, int num_rows) { + acc += single_tree_prediction; + } + + __device__ __forceinline__ void finalize(float* out, int num_rows, + int num_outputs, output_t transform, + int num_trees) { + __syncthreads(); // free up input row in case it was in shared memory + // load margin into shared memory + per_thread[threadIdx.x] = acc; + __syncthreads(); + acc = multi_sum<6>(per_thread, num_classes, blockDim.x / num_classes); + if (threadIdx.x < num_classes) per_thread[threadIdx.x] = acc; + __syncthreads(); // per_thread needs to be fully populated + + class_margins_to_global_memory(per_thread, per_thread + num_classes, + transform, num_trees / num_classes, + tmp_storage, out, num_rows, num_outputs); + } +}; + +template +struct tree_aggregator_t { + vec acc; + /// at first, per class margin, then, possibly, different softmax partials + vec* per_class_margin; + void* tmp_storage; + int num_classes; + + static size_t smem_finalize_footprint(size_t data_row_size, int num_classes, + bool predict_proba) { + size_t phase1 = data_row_size + smem_accumulate_footprint(num_classes); + size_t phase2 = predict_proba + ? block_reduce_footprint_host() + : block_reduce_best_class_footprint_host(); + return predict_proba ? 
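+               // note: the per-class margins sit after the cached rows in
+               // shared memory and must stay live while the softmax
+               // block-reduce runs (its tmp_storage may not alias them), so
+               // the two footprints add; the best-class reduce may reuse the
+               // margin space, so only the larger phase is needed: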
phase1 + phase2 : std::max(phase1, phase2); + } + + static __host__ __device__ size_t smem_accumulate_footprint(int num_classes) { + return num_classes * sizeof(vec); + } + + __device__ __forceinline__ tree_aggregator_t(predict_params params, + void* accumulate_workspace, + void* finalize_workspace) + : per_class_margin((vec*)accumulate_workspace), + tmp_storage(params.predict_proba ? per_class_margin + num_classes + : finalize_workspace), + num_classes(params.num_classes) { + for (int c = threadIdx.x; c < num_classes; c += blockDim.x) + per_class_margin[c] = vec(0); + // __syncthreads() is called in infer_k + } + + __device__ __forceinline__ void accumulate( + vec single_tree_prediction, int tree, int num_rows) { + // since threads are assigned to consecutive classes, no need for atomics + per_class_margin[tree % num_classes] += single_tree_prediction; + // __syncthreads() is called in infer_k + } + + __device__ __forceinline__ void finalize(float* out, int num_rows, + int num_outputs, output_t transform, + int num_trees) { + class_margins_to_global_memory( + per_class_margin, per_class_margin + num_classes, transform, + num_trees / num_classes, tmp_storage, out, num_rows, num_outputs); + } +}; + +template +struct tree_aggregator_t { + // could switch to unsigned short to save shared memory + // provided raft::myAtomicAdd(short*) simulated with appropriate shifts + int* votes; + int num_classes; + + static size_t smem_finalize_footprint(size_t data_row_size, int num_classes, + bool predict_proba) { + // not accounting for lingering accumulate_footprint during finalize() + return 0; + } + static size_t smem_accumulate_footprint(int num_classes) { + return sizeof(int) * num_classes * NITEMS; + } + + __device__ __forceinline__ tree_aggregator_t(predict_params params, + void* accumulate_workspace, + void* finalize_workspace) + : num_classes(params.num_classes), votes((int*)accumulate_workspace) { + for (int c = threadIdx.x; c < num_classes; c += FIL_TPB * NITEMS) +#pragma unroll + for (int item = 0; item < NITEMS; ++item) votes[c * NITEMS + item] = 0; + // __syncthreads() is called in infer_k + } + __device__ __forceinline__ void accumulate( + vec single_tree_prediction, int tree, int num_rows) { +#pragma unroll + for (int item = 0; item < NITEMS; ++item) + raft::myAtomicAdd(votes + single_tree_prediction[item] * NITEMS + item, + 1); + } + // class probabilities or regression. 
for regression, num_classes + // is just the number of outputs for each data instance + __device__ __forceinline__ void finalize_multiple_outputs(float* out, + int num_rows) { + __syncthreads(); + for (int c = threadIdx.x; c < num_classes; c += blockDim.x) { +#pragma unroll + for (int row = 0; row < num_rows; ++row) + out[row * num_classes + c] = votes[c * NITEMS + row]; + } + } + // using this when predicting a single class label, as opposed to sparse class vector + // or class probabilities or regression + __device__ __forceinline__ void finalize_class_label(float* out, + int num_rows) { + __syncthreads(); + int item = threadIdx.x; + int row = item; + if (item < NITEMS && row < num_rows) { + int max_votes = 0; + int best_class = 0; + for (int c = 0; c < num_classes; ++c) { + if (votes[c * NITEMS + item] > max_votes) { + max_votes = votes[c * NITEMS + item]; + best_class = c; + } + } + out[row] = best_class; + } + } + __device__ __forceinline__ void finalize(float* out, int num_rows, + int num_outputs, output_t transform, + int num_trees) { + if (num_outputs > 1) { + // only supporting num_outputs == num_classes + finalize_multiple_outputs(out, num_rows); + } else { + finalize_class_label(out, num_rows); + } + } +}; + +template +__global__ void infer_k(storage_type forest, predict_params params) { + extern __shared__ char smem[]; + float* sdata = (float*)smem; + int num_cols = params.num_cols; + for (size_t block_row0 = blockIdx.x * NITEMS; block_row0 < params.num_rows; + block_row0 += NITEMS * gridDim.x) { + size_t num_input_rows = min((size_t)NITEMS, params.num_rows - block_row0); + const float* block_input = params.data + block_row0 * num_cols; + if (cols_in_shmem) { + // cache the row for all threads to reuse + size_t feature = 0; +#pragma unroll + for (feature = threadIdx.x; feature < num_input_rows * num_cols; + feature += blockDim.x) + sdata[feature] = block_input[feature]; +#pragma unroll + for (; feature < NITEMS * num_cols; feature += blockDim.x) + sdata[feature] = 0.0f; + } + + tree_aggregator_t acc( + params, (char*)sdata + params.cols_shmem_size(), sdata); + + __syncthreads(); // for both row cache init and acc init + + // one block works on NITEMS rows and the whole forest + for (int j = threadIdx.x; j - threadIdx.x < forest.num_trees(); + j += blockDim.x) { + /* j - threadIdx.x < forest.num_trees() is a necessary but block-uniform + condition for "j < forest.num_trees()". It lets use __syncthreads() + and is made exact below. + */ + if (j < forest.num_trees()) { + acc.accumulate(infer_one_tree::T>( + forest[j], cols_in_shmem ? 
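+                             // read the row from the shared-memory cache when
+                             // it was staged there, otherwise straight from
+                             // global memory: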
sdata : block_input, + num_cols, num_input_rows), + j, num_input_rows); + } + if (leaf_algo == GROVE_PER_CLASS_MANY_CLASSES) __syncthreads(); + } + acc.finalize(params.preds + params.num_outputs * block_row0, num_input_rows, + params.num_outputs, params.transform, forest.num_trees()); + __syncthreads(); // free up acc's shared memory resources for next row set + } +} + +template +size_t shmem_size_params::get_smem_footprint() { + size_t finalize_footprint = + tree_aggregator_t::smem_finalize_footprint( + cols_shmem_size(), num_classes, predict_proba); + size_t accumulate_footprint = + tree_aggregator_t::smem_accumulate_footprint( + num_classes) + + cols_shmem_size(); + + return std::max(accumulate_footprint, finalize_footprint); +} + +template +size_t shmem_size_params::get_smem_footprint() { + switch (leaf_algo) { + case FLOAT_UNARY_BINARY: + return get_smem_footprint(); + case CATEGORICAL_LEAF: + return get_smem_footprint(); + case GROVE_PER_CLASS: + if (num_classes > FIL_TPB) + return get_smem_footprint(); + return get_smem_footprint(); + default: + ASSERT(false, "internal error: unexpected leaf_algo_t"); + } +} + +void shmem_size_params::compute_smem_footprint() { + switch (n_items) { + case 1: + shm_sz = get_smem_footprint<1>(); + break; + case 2: + shm_sz = get_smem_footprint<2>(); + break; + case 3: + shm_sz = get_smem_footprint<3>(); + break; + case 4: + shm_sz = get_smem_footprint<4>(); + break; + default: + ASSERT(false, "internal error: n_items > 4"); + } +} + +template +void infer_k_nitems_launcher(storage_type forest, predict_params params, + cudaStream_t stream, int block_dim_x) { + switch (params.n_items) { + case 1: + infer_k<1, leaf_algo, cols_in_shmem> + <<>>(forest, + params); + break; + case 2: + infer_k<2, leaf_algo, cols_in_shmem> + <<>>(forest, + params); + break; + case 3: + infer_k<3, leaf_algo, cols_in_shmem> + <<>>(forest, + params); + break; + case 4: + infer_k<4, leaf_algo, cols_in_shmem> + <<>>(forest, + params); + break; + default: + ASSERT(false, "internal error: nitems > 4"); + } + CUDA_CHECK(cudaPeekAtLastError()); +} + +template +void infer_k_launcher(storage_type forest, predict_params params, + cudaStream_t stream, int blockdim_x) { + params.num_blocks = params.num_blocks != 0 + ? 
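+                              // honor a caller-supplied block count; otherwise
+                              // launch enough blocks to cover every row in a
+                              // single grid pass (n_items rows per block,
+                              // rounded up):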
params.num_blocks + : raft::ceildiv(int(params.num_rows), params.n_items); + if (params.cols_in_shmem) { + infer_k_nitems_launcher(forest, params, stream, + blockdim_x); + } else { + infer_k_nitems_launcher(forest, params, stream, + blockdim_x); + } +} + +template +void infer(storage_type forest, predict_params params, cudaStream_t stream) { + switch (params.leaf_algo) { + case FLOAT_UNARY_BINARY: + infer_k_launcher(forest, params, stream, FIL_TPB); + break; + case GROVE_PER_CLASS: + if (params.num_classes > FIL_TPB) { + params.leaf_algo = GROVE_PER_CLASS_MANY_CLASSES; + infer_k_launcher(forest, params, stream, + FIL_TPB); + } else { + params.leaf_algo = GROVE_PER_CLASS_FEW_CLASSES; + infer_k_launcher( + forest, params, stream, FIL_TPB - FIL_TPB % params.num_classes); + } + break; + case CATEGORICAL_LEAF: + infer_k_launcher(forest, params, stream, FIL_TPB); + break; + default: + ASSERT(false, "internal error: invalid leaf_algo"); + } +} + +template void infer(dense_storage forest, predict_params params, + cudaStream_t stream); +template void infer(sparse_storage16 forest, + predict_params params, + cudaStream_t stream); +template void infer(sparse_storage8 forest, + predict_params params, + cudaStream_t stream); + +} // namespace fil +} // namespace ML diff --git a/cuda_code/infer_4.cu b/cuda_code/infer_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..427844a57214c79123d1fc41a903ce53728736f6 --- /dev/null +++ b/cuda_code/infer_4.cu @@ -0,0 +1,975 @@ +/* + * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common.cuh" + +#include + +#include + +#include +#include + +#include + +#include +#include + +#ifndef CUDA_PRAGMA_UNROLL +#ifdef __CUDA_ARCH__ +#define CUDA_PRAGMA_UNROLL _Pragma("unroll") +#else +#define CUDA_PRAGMA_UNROLL +#endif // __CUDA_ARCH__ +#endif // CUDA_PRAGMA_UNROLL + +#define INLINE_CONFIG __forceinline__ + +namespace ML { +namespace fil { + +// vec wraps float[N] for cub::BlockReduce +template +struct vec; + +template +struct Vectorized { + BinaryOp op; + __device__ Vectorized(BinaryOp op_) : op(op_) {} + template + constexpr __host__ __device__ __forceinline__ vec operator()(vec a, + vec b) const + { + vec c; + CUDA_PRAGMA_UNROLL + for (int i = 0; i < NITEMS; i++) + c[i] = op(a[i], b[i]); + return c; + } +}; +template +constexpr __host__ __device__ Vectorized vectorized(BinaryOp op) +{ + return op; +} + +template +struct vec { + static const int NITEMS = N; + T data[N]; + explicit __host__ __device__ vec(T t) + { + CUDA_PRAGMA_UNROLL + for (int i = 0; i < N; ++i) + data[i] = t; + } + __host__ __device__ vec() : vec(T()) {} + __host__ __device__ T& operator[](int i) { return data[i]; } + __host__ __device__ T operator[](int i) const { return data[i]; } + friend __host__ __device__ vec operator+(const vec& a, const vec& b) + { + return vectorized(cub::Sum())(a, b); + } + friend __host__ __device__ void operator+=(vec& a, const vec& b) { a = a + b; } + template + friend __host__ __device__ vec operator/(vec& a, const Vec& b) + { + return vectorized(thrust::divides())(a, vec(b)); + } + template + friend __host__ __device__ void operator/=(vec& a, const Vec& b) + { + a = a / b; + } +}; + +struct best_margin_label : cub::KeyValuePair { + __host__ __device__ best_margin_label(cub::KeyValuePair pair) + : cub::KeyValuePair(pair) + { + } + __host__ __device__ best_margin_label(int c = 0, float f = -INFINITY) + : cub::KeyValuePair({c, f}) + { + } +}; + +template +__device__ __forceinline__ vec to_vec(int c, vec margin) +{ + vec ret; + CUDA_PRAGMA_UNROLL + for (int i = 0; i < NITEMS; ++i) + ret[i] = best_margin_label(c, margin[i]); + return ret; +} + +struct ArgMax { + template + __host__ __device__ __forceinline__ vec operator()( + vec a, vec b) const + { + vec c; + CUDA_PRAGMA_UNROLL + for (int i = 0; i < NITEMS; i++) + c[i] = cub::ArgMax()(a[i], b[i]); + return c; + } +}; + +/** tree_leaf_output returns the leaf outputs from the tree with leaf indices + given by leaves for n_rows items. FULL_ITEMS indicates whether n_rows == + NITEMS, to allow the compiler to skip the conditional when unrolling the + loop. */ +template +__device__ __forceinline__ vec tree_leaf_output(tree_type tree, + int n_rows, + int (&leaves)[NITEMS]) +{ + vec out(0); + CUDA_PRAGMA_UNROLL + for (int j = 0; j < NITEMS; ++j) { + if (FULL_NITEMS || j < n_rows) { + /** dependent names are not considered templates by default, unless it's a + member of a current [template] instantiation. As output<>() is a + member function inherited from the base class, template + output() is required. 
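+      For illustration: tree[i].template output<output_type>() parses as a
+      member-template call, whereas tree[i].output<output_type>() would be
+      read as ((tree[i].output) < output_type) > () and fail to compile.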
*/ + out[j] = tree[leaves[j]].template output(); + } + } + return out; +} + +template +__device__ __forceinline__ vec infer_one_tree(tree_type tree, + const float* input, + int cols, + int n_rows) +{ + // find the leaf nodes for each row + int curr[NITEMS]; + // the first n_rows are active + int mask = (1 << n_rows) - 1; + for (int j = 0; j < NITEMS; ++j) + curr[j] = 0; + do { + CUDA_PRAGMA_UNROLL + for (int j = 0; j < NITEMS; ++j) { + auto n = tree[curr[j]]; + mask &= ~(n.is_leaf() << j); + if ((mask & (1 << j)) != 0) { + curr[j] = tree.child_index(n, curr[j], input[j * cols + n.fid()]); + } + } + } while (mask != 0); + + // get the output from the leaves + if (n_rows == NITEMS) { + return tree_leaf_output(tree, n_rows, curr); + } else { + return tree_leaf_output(tree, n_rows, curr); + } +} + +template +__device__ __forceinline__ vec<1, output_type> infer_one_tree(tree_type tree, + const float* input, + int cols, + int rows) +{ + int curr = 0; + for (;;) { + auto n = tree[curr]; + if (n.is_leaf()) break; + bool cond = tree.child_index(n, curr, input[n.fid()]); + curr = n.left(curr) + cond; + } + vec<1, output_type> out; + /** dependent names are not considered templates by default, + unless it's a member of a current [template] instantiation.**/ + out[0] = tree[curr].template output(); + return out; +} + +/** +The shared memory requirements for finalization stage may differ based +on the set of PTX architectures the kernels were compiled for, as well as +the CUDA compute capability of the device chosen for computation. + +TODO (levsnv): run a test kernel during forest init to determine the compute capability +chosen for the inference, for an accurate sizeof(BlockReduce::TempStorage), +which is used in determining max NITEMS or max input data columns. + +600 is the __CUDA_ARCH__ for Pascal (6.0) GPUs, which is not defined in +host code. +6.0 is the earliest compute capability supported by FIL and RAPIDS in general. +See https://rapids.ai/start.html as well as cmake defaults. +*/ +// values below are defaults as of this change. +template +size_t block_reduce_footprint_host() +{ + return sizeof( + typename cub:: + BlockReduce, FIL_TPB, cub::BLOCK_REDUCE_WARP_REDUCTIONS, 1, 1, 600>:: + TempStorage); +} + +template +size_t block_reduce_best_class_footprint_host() +{ + return sizeof(typename cub::BlockReduce, + FIL_TPB, + cub::BLOCK_REDUCE_WARP_REDUCTIONS, + 1, + 1, + 600>::TempStorage); +} + +// the device template should achieve the best performance, using up-to-date +// CUB defaults +template +__device__ __forceinline__ T block_reduce(T value, BinaryOp op, void* storage) +{ + typedef cub::BlockReduce BlockReduceT; + return BlockReduceT(*(typename BlockReduceT::TempStorage*)storage).Reduce(value, op, blockDim.x); +} + +template // = FLOAT_UNARY_BINARY +struct tree_aggregator_t { + vec acc; + void* tmp_storage; + + /** shared memory footprint of the accumulator during + the finalization of forest inference kernel, when infer_k output + value is computed. + num_classes is used for other template parameters */ + static size_t smem_finalize_footprint(size_t data_row_size, + int num_classes, + int log2_threads_per_tree, + bool predict_proba) + { + return log2_threads_per_tree != 0 ? FIL_TPB * NITEMS * sizeof(float) + : block_reduce_footprint_host(); + } + + /** shared memory footprint of the accumulator during + the accumulation of forest inference, when individual trees + are inferred and partial aggregates are accumulated. 
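+      For this default specialization the partial sum is kept in each
+      thread's own acc member, so accumulation needs no shared memory and the
+      footprint below is zero.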
+ num_classes is used for other template parameters */ + static size_t smem_accumulate_footprint(int num_classes) { return 0; } + + /** + num_classes is used for other template parameters */ + __device__ __forceinline__ tree_aggregator_t(predict_params params, + void* accumulate_workspace, + void* finalize_workspace, + float* vector_leaf) + : tmp_storage(finalize_workspace) + { + } + + __device__ __forceinline__ void accumulate(vec single_tree_prediction, + int tree, + int thread_num_rows) + { + acc += single_tree_prediction; + } + + __device__ INLINE_CONFIG void finalize(float* block_out, + int block_num_rows, + int output_stride, + output_t transform, + int num_trees, + int log2_threads_per_tree) + { + if (FIL_TPB != 1 << log2_threads_per_tree) { // anything to reduce? + // ensure input columns can be overwritten (no threads traversing trees) + __syncthreads(); + if (log2_threads_per_tree == 0) { + acc = block_reduce(acc, vectorized(cub::Sum()), tmp_storage); + } else { + auto per_thread = (vec*)tmp_storage; + per_thread[threadIdx.x] = acc; + __syncthreads(); + // We have two pertinent cases for splitting FIL_TPB == 256 values: + // 1. 2000 columns, which fit few threads/tree in shared memory, + // so ~256 groups. These are the models that will run the slowest. + // multi_sum performance is not sensitive to the radix here. + // 2. 50 columns, so ~32 threads/tree, so ~8 groups. These are the most + // popular. + acc = + multi_sum<5>(per_thread, 1 << log2_threads_per_tree, FIL_TPB >> log2_threads_per_tree); + } + } + + if (threadIdx.x * NITEMS >= block_num_rows) return; + CUDA_PRAGMA_UNROLL + for (int row = 0; row < NITEMS; ++row) { + int out_preds_i = threadIdx.x * NITEMS + row; + if (out_preds_i < block_num_rows) block_out[out_preds_i * output_stride] = acc[row]; + } + } +}; + +// tmp_storage may overlap shared memory addressed by [begin, end) +// allreduce_shmem ensures no race conditions +template +__device__ __forceinline__ auto allreduce_shmem(Iterator begin, + Iterator end, + BinaryOp op, + void* tmp_storage) +{ + typedef typename std::iterator_traits::value_type value_type; + value_type thread_partial; + for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x) + thread_partial = op(thread_partial, *it); + __syncthreads(); // free shared memory [begin, end) + auto res = block_reduce(thread_partial, op, tmp_storage); + // broadcast sum to all threads + __syncthreads(); // free up tmp_storage + if (threadIdx.x == 0) *(value_type*)tmp_storage = res; + __syncthreads(); + return *(value_type*)tmp_storage; +} + +// *begin and *end shall be struct vec +// tmp_storage may overlap shared memory addressed by [begin, end) +template +__device__ __forceinline__ void write_best_class( + Iterator begin, Iterator end, void* tmp_storage, float* out, int num_rows) +{ + // reduce per-class candidate margins to one best class candidate + // per thread (for each of the NITEMS rows) + auto best = vecNITEMS, best_margin_label>(); + for (int c = threadIdx.x; c < end - begin; c += blockDim.x) + best = vectorized(cub::ArgMax())(best, to_vec(c, begin[c])); + // [begin, end) may overlap tmp_storage + __syncthreads(); + // find best class per block (for each of the NITEMS rows) + best = block_reduce(best, vectorized(cub::ArgMax()), tmp_storage); + // write it out to global memory + if (threadIdx.x > 0) return; + CUDA_PRAGMA_UNROLL + for (int row = 0; row < best.NITEMS; ++row) + if (row < num_rows) out[row] = best[row].key; +} + +/// needed for softmax +__device__ float shifted_exp(float margin, float 
max) { return expf(margin - max); } + +// *begin and *end shall be struct vec +// tmp_storage may NOT overlap shared memory addressed by [begin, end) +template +__device__ __forceinline__ void block_softmax(Iterator begin, Iterator end, void* tmp_storage) +{ + // subtract max before exponentiating for numerical stability + typedef typename std::iterator_traits::value_type value_type; + value_type max = allreduce_shmem(begin, end, vectorized(cub::Max()), tmp_storage); + for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x) + *it = vectorized(shifted_exp)(*it, max); + // sum of exponents + value_type soe = allreduce_shmem(begin, end, vectorized(cub::Sum()), tmp_storage); + // softmax phase 2: normalization + for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x) + *it /= soe; +} + +// *begin and *end shall be struct vec +// tmp_storage may NOT overlap shared memory addressed by [begin, end) +template +__device__ __forceinline__ void normalize_softmax_and_write(Iterator begin, + Iterator end, + output_t transform, + int trees_per_class, + void* tmp_storage, + float* out, + int num_rows) +{ + if ((transform & output_t::AVG) != 0) { + for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x) + *it /= trees_per_class; + } + if ((transform & output_t::SOFTMAX) != 0) block_softmax(begin, end, tmp_storage); + // write result to global memory + CUDA_PRAGMA_UNROLL + for (int row = 0; row < begin->NITEMS; ++row) { + for (int c = threadIdx.x; c < end - begin; c += blockDim.x) + if (row < num_rows) out[row * (end - begin) + c] = begin[c][row]; + } +} + +// *begin and *end shall be struct vec +// tmp_storage may NOT overlap shared memory addressed by [begin, end) +// in case num_outputs > 1 +template +__device__ __forceinline__ void class_margins_to_global_memory(Iterator begin, + Iterator end, + output_t transform, + int trees_per_class, + void* tmp_storage, + float* out, + int num_rows, + int num_outputs) +{ + if (num_outputs == 1) { // will output class + // reduce per-class candidate margins to one best class candidate + // per thread (for each of the NITEMS rows) + write_best_class(begin, end, tmp_storage, out, num_rows); + } else { // output softmax-ed margin + normalize_softmax_and_write(begin, end, transform, trees_per_class, tmp_storage, out, num_rows); + } +} + +template +struct tree_aggregator_t { + vec acc; + int num_classes; + vec* per_thread; + void* tmp_storage; + + static size_t smem_finalize_footprint(size_t data_row_size, + int num_classes, + int log2_threads_per_tree, + bool predict_proba) + { + size_t phase1 = (FIL_TPB - FIL_TPB % num_classes) * sizeof(vec); + size_t phase2 = predict_proba ? block_reduce_footprint_host() + : block_reduce_best_class_footprint_host(); + return predict_proba ? phase1 + phase2 : std::max(phase1, phase2); + } + + static size_t smem_accumulate_footprint(int num_classes) { return 0; } + + __device__ __forceinline__ tree_aggregator_t(predict_params params, + void* accumulate_workspace, + void* finalize_workspace, + float* vector_leaf) + : num_classes(params.num_classes), + per_thread((vec*)finalize_workspace), + tmp_storage(params.predict_proba ? 
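+        // for probabilities, block_softmax reads the reduced margins while it
+        // reduces, so tmp_storage is placed just past the first num_classes
+        // entries of per_thread; for a single class label it may reuse the
+        // same workspace: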
per_thread + num_classes : finalize_workspace) + { + } + + __device__ __forceinline__ void accumulate(vec single_tree_prediction, + int tree, + int thread_num_rows) + { + acc += single_tree_prediction; + } + + __device__ INLINE_CONFIG void finalize(float* out, + int num_rows, + int num_outputs, + output_t transform, + int num_trees, + int log2_threads_per_tree) + { + __syncthreads(); // free up input row in case it was in shared memory + // load margin into shared memory + per_thread[threadIdx.x] = acc; + __syncthreads(); + acc = multi_sum<6>(per_thread, num_classes, blockDim.x / num_classes); + if (threadIdx.x < num_classes) per_thread[threadIdx.x] = acc; + __syncthreads(); // per_thread needs to be fully populated + + class_margins_to_global_memory(per_thread, + per_thread + num_classes, + transform, + num_trees / num_classes, + tmp_storage, + out, + num_rows, + num_outputs); + } +}; + +template +struct tree_aggregator_t { + vec acc; + /// at first, per class margin, then, possibly, different softmax partials + vec* per_class_margin; + void* tmp_storage; + int num_classes; + + static size_t smem_finalize_footprint(size_t data_row_size, + int num_classes, + int log2_threads_per_tree, + bool predict_proba) + { + size_t phase1 = data_row_size + smem_accumulate_footprint(num_classes); + size_t phase2 = predict_proba ? block_reduce_footprint_host() + : block_reduce_best_class_footprint_host(); + return predict_proba ? phase1 + phase2 : std::max(phase1, phase2); + } + + static __host__ __device__ size_t smem_accumulate_footprint(int num_classes) + { + return num_classes * sizeof(vec); + } + + __device__ __forceinline__ tree_aggregator_t(predict_params params, + void* accumulate_workspace, + void* finalize_workspace, + float* vector_leaf) + : per_class_margin((vec*)accumulate_workspace), + tmp_storage(params.predict_proba ? per_class_margin + num_classes : finalize_workspace), + num_classes(params.num_classes) + { + for (int c = threadIdx.x; c < num_classes; c += blockDim.x) + per_class_margin[c] = vec(0); + // __syncthreads() is called in infer_k + } + + __device__ __forceinline__ void accumulate(vec single_tree_prediction, + int tree, + int thread_num_rows) + { + // since threads are assigned to consecutive classes, no need for atomics + if (thread_num_rows > 0) { per_class_margin[tree % num_classes] += single_tree_prediction; } + __syncthreads(); + } + + __device__ INLINE_CONFIG void finalize(float* out, + int num_rows, + int num_outputs, + output_t transform, + int num_trees, + int log2_threads_per_tree) + { + class_margins_to_global_memory(per_class_margin, + per_class_margin + num_classes, + transform, + num_trees / num_classes, + tmp_storage, + out, + num_rows, + num_outputs); + } +}; + +template +struct tree_aggregator_t { + // per_class_margin is a row-major matrix + // of size num_threads_per_class * num_classes + // used to acccumulate class values + vec* per_class_margin; + vec* vector_leaf_indices; + int* thread_num_rows; + int num_classes; + int num_threads_per_class; + float* vector_leaf; + void* tmp_storage; + + static size_t smem_finalize_footprint(size_t data_row_size, + int num_classes, + int log2_threads_per_tree, + bool predict_proba) + { + size_t phase1 = data_row_size + smem_accumulate_footprint(num_classes); + size_t phase2 = predict_proba ? block_reduce_footprint_host() + : block_reduce_best_class_footprint_host(); + return predict_proba ? 
phase1 + phase2 : std::max(phase1, phase2); + } + static size_t smem_accumulate_footprint(int num_classes) + { + return sizeof(vec) * num_classes * max(1, FIL_TPB / num_classes) + + sizeof(vec) * FIL_TPB + sizeof(int) * FIL_TPB; + } + + __device__ __forceinline__ tree_aggregator_t(predict_params params, + void* accumulate_workspace, + void* finalize_workspace, + float* vector_leaf) + : num_classes(params.num_classes), + num_threads_per_class(max(1, blockDim.x / params.num_classes)), + vector_leaf(vector_leaf), + tmp_storage(finalize_workspace) + { + // Assign workspace + char* ptr = (char*)accumulate_workspace; + per_class_margin = (vec*)ptr; + ptr += sizeof(vec) * num_classes * num_threads_per_class; + vector_leaf_indices = (vec*)ptr; + ptr += sizeof(vec) * blockDim.x; + thread_num_rows = (int*)ptr; + + // Initialise shared memory + for (int i = threadIdx.x; i < num_classes * num_threads_per_class; i += blockDim.x) { + per_class_margin[i] = vec(); + } + vector_leaf_indices[threadIdx.x] = vec(); + thread_num_rows[threadIdx.x] = 0; + // __syncthreads() is called in infer_k + } + + __device__ __forceinline__ void accumulate(vec single_tree_prediction, + int tree, + int num_rows) + { + // Perform a transpose in shared memory + // Assign each thread to a class, so they can accumulate without atomics + __syncthreads(); + // Write indices to shared memory + vector_leaf_indices[threadIdx.x] = single_tree_prediction; + thread_num_rows[threadIdx.x] = num_rows; + __syncthreads(); + // i here refers to each element of the matrix per_class_margin + for (int i = threadIdx.x; i < num_classes * num_threads_per_class; i += blockDim.x) { + // if num_threads_per_class == 1, then c == i + int c = i % num_classes; + // iterate over original thread inputs with stride num_threads_per_class + // j is the original thread input + // we have num_classes threads for each j + for (int j = i / num_classes; j < blockDim.x; j += num_threads_per_class) { + for (int item = 0; item < thread_num_rows[j]; ++item) { + float pred = vector_leaf[vector_leaf_indices[j][item] * num_classes + c]; + per_class_margin[i][item] += pred; + } + } + } + } + __device__ INLINE_CONFIG void finalize(float* out, + int num_rows, + int num_outputs, + output_t transform, + int num_trees, + int log2_threads_per_tree) + { + if (num_classes < blockDim.x) { + __syncthreads(); + // Efficient implementation for small number of classes + auto acc = multi_sum<6>(per_class_margin, num_classes, max(1, blockDim.x / num_classes)); + if (threadIdx.x < num_classes) per_class_margin[threadIdx.x] = acc; + __syncthreads(); + } + class_margins_to_global_memory(per_class_margin, + per_class_margin + num_classes, + transform, + num_trees, + tmp_storage, + out, + num_rows, + num_outputs); + } +}; + +template +struct tree_aggregator_t { + // could switch to uint16_t to save shared memory + // provided raft::myAtomicAdd(short*) simulated with appropriate shifts + int* votes; + int num_classes; + + static size_t smem_finalize_footprint(size_t data_row_size, + int num_classes, + int log2_threads_per_tree, + bool predict_proba) + { + // not accounting for lingering accumulate_footprint during finalize() + return 0; + } + static size_t smem_accumulate_footprint(int num_classes) + { + return sizeof(int) * num_classes * NITEMS; + } + + __device__ __forceinline__ tree_aggregator_t(predict_params params, + void* accumulate_workspace, + void* finalize_workspace, + float* vector_leaf) + : num_classes(params.num_classes), votes((int*)accumulate_workspace) + { + for (int c = 
threadIdx.x; c < num_classes; c += FIL_TPB * NITEMS) + CUDA_PRAGMA_UNROLL + for (int item = 0; item < NITEMS; ++item) + votes[c * NITEMS + item] = 0; + // __syncthreads() is called in infer_k + } + __device__ __forceinline__ void accumulate(vec single_tree_prediction, + int tree, + int thread_num_rows) + { + if (thread_num_rows == 0) return; + CUDA_PRAGMA_UNROLL + for (int item = 0; item < NITEMS; ++item) { + raft::myAtomicAdd(votes + single_tree_prediction[item] * NITEMS + item, 1); + } + } + // class probabilities or regression. for regression, num_classes + // is just the number of outputs for each data instance + __device__ __forceinline__ void finalize_multiple_outputs(float* out, int num_rows) + { + __syncthreads(); + for (int c = threadIdx.x; c < num_classes; c += blockDim.x) { + CUDA_PRAGMA_UNROLL + for (int row = 0; row < num_rows; ++row) + out[row * num_classes + c] = votes[c * NITEMS + row]; + } + } + // using this when predicting a single class label, as opposed to sparse class vector + // or class probabilities or regression + __device__ __forceinline__ void finalize_class_label(float* out, int num_rows) + { + __syncthreads(); // make sure all votes[] are final + int item = threadIdx.x; + int row = item; + if (item < NITEMS && row < num_rows) { + int max_votes = 0; + int best_class = 0; + for (int c = 0; c < num_classes; ++c) { + if (votes[c * NITEMS + item] > max_votes) { + max_votes = votes[c * NITEMS + item]; + best_class = c; + } + } + out[row] = best_class; + } + } + __device__ INLINE_CONFIG void finalize(float* out, + int num_rows, + int num_outputs, + output_t transform, + int num_trees, + int log2_threads_per_tree) + { + if (num_outputs > 1) { + // only supporting num_outputs == num_classes + finalize_multiple_outputs(out, num_rows); + } else { + finalize_class_label(out, num_rows); + } + } +}; + +__device__ INLINE_CONFIG void load_data(float* sdata, + const float* block_input, + predict_params params, + int rows_per_block, + int block_num_rows) +{ + int num_cols = params.num_cols; + int sdata_stride = params.sdata_stride(); + // cache the row for all threads to reuse + // 2021: latest SMs still do not have >256KiB of shared memory/block required to + // exceed the uint16_t + CUDA_PRAGMA_UNROLL + for (uint16_t input_idx = threadIdx.x; input_idx < block_num_rows * num_cols; + input_idx += blockDim.x) { + // for even num_cols, we need to pad sdata_stride to reduce bank conflicts + // assuming here that sdata_stride == num_cols + 1 + // then, idx / num_cols * sdata_stride + idx % num_cols == idx + idx / num_cols + uint16_t sdata_idx = + sdata_stride == num_cols ? 
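+        // e.g. num_cols == 4, sdata_stride == 5: input_idx 6 (row 1, col 2)
+        // maps to 6 + 6/4 == 7 == 1*5 + 2 in the padded layout: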
input_idx : input_idx + input_idx / (uint16_t)num_cols; + sdata[sdata_idx] = block_input[input_idx]; + } + CUDA_PRAGMA_UNROLL + for (int idx = block_num_rows * sdata_stride; idx < rows_per_block * sdata_stride; + idx += blockDim.x) + sdata[idx] = 0.0f; +} + +template +__global__ void infer_k(storage_type forest, predict_params params) +{ + extern __shared__ char smem[]; + float* sdata = (float*)smem; + int sdata_stride = params.sdata_stride(); + int rows_per_block = NITEMS << params.log2_threads_per_tree; + int num_cols = params.num_cols; + int thread_row0 = NITEMS * modpow2(threadIdx.x, params.log2_threads_per_tree); + for (int64_t block_row0 = blockIdx.x * rows_per_block; block_row0 < params.num_rows; + block_row0 += rows_per_block * gridDim.x) { + int block_num_rows = + max(0, (int)min((int64_t)rows_per_block, (int64_t)params.num_rows - block_row0)); + const float* block_input = params.data + block_row0 * num_cols; + if constexpr (cols_in_shmem) + load_data(sdata, block_input, params, rows_per_block, block_num_rows); + + tree_aggregator_t acc( + params, (char*)sdata + params.cols_shmem_size(), sdata, forest.vector_leaf_); + + __syncthreads(); // for both row cache init and acc init + // one block works on NITEMS * threads_per_tree rows and the whole forest + // one thread works on NITEMS rows + + int thread_tree0 = threadIdx.x >> params.log2_threads_per_tree; + int tree_stride = blockDim.x >> params.log2_threads_per_tree; + int thread_num_rows = max(0, min(NITEMS, block_num_rows - thread_row0)); + for (int tree = thread_tree0; tree - thread_tree0 < forest.num_trees(); tree += tree_stride) { + /* tree - thread_tree0 < forest.num_trees() is a necessary but block-uniform + condition for "tree < forest.num_trees()". It lets use __syncthreads() + and is made exact below. + Same with thread_num_rows > 0 + */ + typedef typename leaf_output_t::T pred_t; + vec prediction; + if (tree < forest.num_trees() && thread_num_rows != 0) { + prediction = infer_one_tree( + forest[tree], + cols_in_shmem ? sdata + thread_row0 * sdata_stride : block_input + thread_row0 * num_cols, + cols_in_shmem ? sdata_stride : num_cols, + cols_in_shmem ? NITEMS : thread_num_rows); + } + // All threads must enter accumulate + // Dummy threads can be marked as having 0 rows + acc.accumulate(prediction, tree, tree < forest.num_trees() ? 
thread_num_rows : 0); + } + acc.finalize(params.preds + params.num_outputs * block_row0, + block_num_rows, + params.num_outputs, + params.transform, + forest.num_trees(), + params.log2_threads_per_tree); + __syncthreads(); // free up acc's shared memory resources for next row set + } +} + +template +size_t shmem_size_params::get_smem_footprint() +{ + size_t finalize_footprint = tree_aggregator_t::smem_finalize_footprint( + cols_shmem_size(), num_classes, log2_threads_per_tree, predict_proba); + size_t accumulate_footprint = + tree_aggregator_t::smem_accumulate_footprint(num_classes) + + cols_shmem_size(); + + return std::max(accumulate_footprint, finalize_footprint); +} + +template +size_t shmem_size_params::get_smem_footprint() +{ + switch (leaf_algo) { + case FLOAT_UNARY_BINARY: return get_smem_footprint(); + case CATEGORICAL_LEAF: return get_smem_footprint(); + case GROVE_PER_CLASS: + if (num_classes > FIL_TPB) return get_smem_footprint(); + return get_smem_footprint(); + case VECTOR_LEAF: return get_smem_footprint(); + default: ASSERT(false, "internal error: unexpected leaf_algo_t"); + } +} + +void shmem_size_params::compute_smem_footprint() +{ + switch (n_items) { + case 1: shm_sz = get_smem_footprint<1>(); break; + case 2: shm_sz = get_smem_footprint<2>(); break; + case 3: shm_sz = get_smem_footprint<3>(); break; + case 4: shm_sz = get_smem_footprint<4>(); break; + default: ASSERT(false, "internal error: n_items > 4"); + } +} + +template +void infer_k_nitems_launcher(storage_type forest, + predict_params params, + cudaStream_t stream, + int block_dim_x) +{ + switch (params.n_items) { + case 1: + infer_k<1, leaf_algo, COLS_IN_SHMEM, CATS_SUPPORTED> + <<>>(forest, params); + break; + case 2: + infer_k<2, leaf_algo, COLS_IN_SHMEM, CATS_SUPPORTED> + <<>>(forest, params); + break; + case 3: + infer_k<3, leaf_algo, COLS_IN_SHMEM, CATS_SUPPORTED> + <<>>(forest, params); + break; + case 4: + infer_k<4, leaf_algo, COLS_IN_SHMEM, CATS_SUPPORTED> + <<>>(forest, params); + break; + default: ASSERT(false, "internal error: nitems > 4"); + } + CUDA_CHECK(cudaPeekAtLastError()); +} + +template +void infer_k_categorical_launcher(storage_type forest, + predict_params params, + cudaStream_t stream, + int blockdim_x) +{ + if (forest.cats_present()) { + infer_k_nitems_launcher(forest, params, stream, blockdim_x); + } else { + infer_k_nitems_launcher(forest, params, stream, blockdim_x); + } +} + +template +void infer_k_cols_launcher(storage_type forest, + predict_params params, + cudaStream_t stream, + int blockdim_x) +{ + params.num_blocks = params.num_blocks != 0 ? 
params.num_blocks + : raft::ceildiv(int(params.num_rows), params.n_items); + if (params.cols_in_shmem) { + infer_k_categorical_launcher(forest, params, stream, blockdim_x); + } else { + infer_k_categorical_launcher(forest, params, stream, blockdim_x); + } +} + +template +void infer(storage_type forest, predict_params params, cudaStream_t stream) +{ + switch (params.leaf_algo) { + case FLOAT_UNARY_BINARY: + infer_k_cols_launcher(forest, params, stream, FIL_TPB); + break; + case GROVE_PER_CLASS: + if (params.num_classes > FIL_TPB) { + params.leaf_algo = GROVE_PER_CLASS_MANY_CLASSES; + infer_k_cols_launcher(forest, params, stream, FIL_TPB); + } else { + params.leaf_algo = GROVE_PER_CLASS_FEW_CLASSES; + infer_k_cols_launcher( + forest, params, stream, FIL_TPB - FIL_TPB % params.num_classes); + } + break; + case CATEGORICAL_LEAF: + infer_k_cols_launcher(forest, params, stream, FIL_TPB); + break; + case VECTOR_LEAF: infer_k_cols_launcher(forest, params, stream, FIL_TPB); break; + default: ASSERT(false, "internal error: invalid leaf_algo"); + } +} + +template void infer(dense_storage forest, + predict_params params, + cudaStream_t stream); +template void infer(sparse_storage16 forest, + predict_params params, + cudaStream_t stream); +template void infer(sparse_storage8 forest, + predict_params params, + cudaStream_t stream); + +} // namespace fil +} // namespace ML diff --git a/cuda_code/inicializaParticulas_kernel.cu b/cuda_code/inicializaParticulas_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..36fca3b3c4f98c0c666be320c6861dd6a396c878 --- /dev/null +++ b/cuda_code/inicializaParticulas_kernel.cu @@ -0,0 +1,39 @@ +/* + * inicializa particulas + */ + + +#ifndef _INICIAPARTICULAS_KERNEL_H_ +#define _INICIAPARTICULAS_KERNEL_H_ + +#include "Pso.h" +#include "NumeroRandom.cu" + +__global__ void +inicializaParticulas(float* xx, float* vx, float* pbestx, int* gbest, int dimensoes, int agentes, float IRang_L, float IRang_R, float MAXV) +{ + int bx = blockIdx.x; + int by = blockIdx.y; + int tx = threadIdx.x; + int ty = threadIdx.y; + int index = bx*gridDim.x*blockDim.x*blockDim.y + by*blockDim.x*blockDim.y + ty*blockDim.x + tx; + + if (index < dimensoes*agentes) { + xx[index] = (float) ((IRang_R - IRang_L) * ((float) numeroRandom(index) / (float) INT_MAX) + (float) IRang_L); +// xx[index] = (float) (IRang_L + ((float) numeroRandom(index) / (float) INT_MAX)* + pbestx[index] = xx[index]; + + float rnd = ((float) numeroRandom(index+1) / ((float) INT_MAX)); + vx[index] = (-MAXV + rnd*(MAXV - (-MAXV))); + +// if (rnd > 0.5) +// vx[index] = -vx[index]; + +// vx[index] = 25; + + if (index == 0) *gbest = 0; + + } +} + +#endif diff --git a/cuda_code/initBore_select_kernel.cu b/cuda_code/initBore_select_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..6e516a624cc46164ee9a7ad5b0544e58210c4688 --- /dev/null +++ b/cuda_code/initBore_select_kernel.cu @@ -0,0 +1,183 @@ +// +// auto-generated by op2.py +// + +//user function +__device__ void initBore_select_gpu( float *values, const float *center, + const float *x0, + const float *Hl, + const float *ul, + const float *vl, + const float *Hr, + const float *ur, + const float *vr) { + values[0] = center[0] < *x0 ? *Hl : *Hr; + values[1] = center[0] < *x0 ? *ul : *ur; + values[2] = center[0] < *x0 ? 
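+                                 // cells whose centre lies left of x0 take the
+                                 // left state (Hl, ul, vl), the rest the right
+                                 // state (Hr, ur, vr); values holds (H, u, v):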
*vl : *vr; + +} + +// CUDA kernel function +__global__ void op_cuda_initBore_select( + float *arg0, + const float *__restrict arg1, + const float *arg2, + const float *arg3, + const float *arg4, + const float *arg5, + const float *arg6, + const float *arg7, + const float *arg8, + int set_size ) { + + + //process set elements + for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n2) { + printf(" kernel routine w/o indirection: initBore_select"); + } + + int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2); + if (set_size > 0) { + + //transfer constants to GPU + int consts_bytes = 0; + consts_bytes += ROUND_UP(1*sizeof(float)); + consts_bytes += ROUND_UP(1*sizeof(float)); + consts_bytes += ROUND_UP(1*sizeof(float)); + consts_bytes += ROUND_UP(1*sizeof(float)); + consts_bytes += ROUND_UP(1*sizeof(float)); + consts_bytes += ROUND_UP(1*sizeof(float)); + consts_bytes += ROUND_UP(1*sizeof(float)); + reallocConstArrays(consts_bytes); + consts_bytes = 0; + arg2.data = OP_consts_h + consts_bytes; + arg2.data_d = OP_consts_d + consts_bytes; + for ( int d=0; d<1; d++ ){ + ((float *)arg2.data)[d] = arg2h[d]; + } + consts_bytes += ROUND_UP(1*sizeof(float)); + arg3.data = OP_consts_h + consts_bytes; + arg3.data_d = OP_consts_d + consts_bytes; + for ( int d=0; d<1; d++ ){ + ((float *)arg3.data)[d] = arg3h[d]; + } + consts_bytes += ROUND_UP(1*sizeof(float)); + arg4.data = OP_consts_h + consts_bytes; + arg4.data_d = OP_consts_d + consts_bytes; + for ( int d=0; d<1; d++ ){ + ((float *)arg4.data)[d] = arg4h[d]; + } + consts_bytes += ROUND_UP(1*sizeof(float)); + arg5.data = OP_consts_h + consts_bytes; + arg5.data_d = OP_consts_d + consts_bytes; + for ( int d=0; d<1; d++ ){ + ((float *)arg5.data)[d] = arg5h[d]; + } + consts_bytes += ROUND_UP(1*sizeof(float)); + arg6.data = OP_consts_h + consts_bytes; + arg6.data_d = OP_consts_d + consts_bytes; + for ( int d=0; d<1; d++ ){ + ((float *)arg6.data)[d] = arg6h[d]; + } + consts_bytes += ROUND_UP(1*sizeof(float)); + arg7.data = OP_consts_h + consts_bytes; + arg7.data_d = OP_consts_d + consts_bytes; + for ( int d=0; d<1; d++ ){ + ((float *)arg7.data)[d] = arg7h[d]; + } + consts_bytes += ROUND_UP(1*sizeof(float)); + arg8.data = OP_consts_h + consts_bytes; + arg8.data_d = OP_consts_d + consts_bytes; + for ( int d=0; d<1; d++ ){ + ((float *)arg8.data)[d] = arg8h[d]; + } + consts_bytes += ROUND_UP(1*sizeof(float)); + mvConstArraysToDevice(consts_bytes); + + //set CUDA execution parameters + #ifdef OP_BLOCK_SIZE_14 + int nthread = OP_BLOCK_SIZE_14; + #else + int nthread = OP_block_size; + #endif + + int nblocks = 200; + + op_cuda_initBore_select<<>>( + (float *) arg0.data_d, + (float *) arg1.data_d, + (float *) arg2.data_d, + (float *) arg3.data_d, + (float *) arg4.data_d, + (float *) arg5.data_d, + (float *) arg6.data_d, + (float *) arg7.data_d, + (float *) arg8.data_d, + set->size ); + } + op_mpi_set_dirtybit_cuda(nargs, args); + if (OP_diags>1) { + cutilSafeCall(cudaDeviceSynchronize()); + } + //update kernel record + op_timers_core(&cpu_t2, &wall_t2); + OP_kernels[14].time += wall_t2 - wall_t1; + OP_kernels[14].transfer += (float)set->size * arg0.size * 2.0f; + OP_kernels[14].transfer += (float)set->size * arg1.size; +} diff --git a/cuda_code/init_params_7.cu b/cuda_code/init_params_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..cb467bd1d3b1c53e167fdac8c8b015745fb97b73 --- /dev/null +++ b/cuda_code/init_params_7.cu @@ -0,0 +1,532 @@ +#include "../../data/parameters.h" +#include "../../data/size.h" +#include "init_params.h" +#include 
"option.h" +#include +#include +#include + +FILE *wout = fopen("weight.dat","w"); + +int LoadConnectivityFile(const char *file_name,unsigned int **host_rptr, unsigned int **d_rptr, unsigned int **d_cindices, CTYPE **d_val, CTYPE weight ,int PreSN_num,int PostSN_num){ + PreSN_num = (PreSN_num < 1)? 1: PreSN_num; + PostSN_num = (PostSN_num < 1)? 1000: PostSN_num; + + + FILE *fp; + if((fp = fopen( file_name ,"r")) == NULL ){ + fprintf(stderr, "can't open file : %s\n",file_name); + exit(1); + } + + weight = (weight < 0)?(-1)*weight: weight; + int num_of_data = PostSN_num*10; + unsigned int *rptr = NULL; + unsigned int *cindices = NULL; + CTYPE *val =NULL; + + int max_conv = 0; + + + rptr = (unsigned int *)malloc( (PostSN_num+1)*sizeof(unsigned int) ); + cindices = (unsigned int *)malloc( num_of_data*sizeof(unsigned int) ); + val = (CTYPE *)malloc( num_of_data*sizeof(CTYPE) ); + + if(rptr == NULL || cindices == NULL || val == NULL){ + fprintf(stderr,"malloc error\n"); + exit(1); + } + + char str[256] = {'\0'}; + int i = 0; + int prev_post_id = 0; + int post_id; + rptr[0] = 0; + while( fgets(str, 256, fp) != NULL ){ + + //sscanf(str, "%d %d %f", &cindices[i], &post_id, &val[i] ); + sscanf(str, "%d %d", &cindices[i], &post_id ); + + // val[i] に現状距離が入っているので、weightに置き換える. + val[i] = weight; + + // 本来はpost_id > prev_post_id (ソート済み前提) + if(post_id != prev_post_id) { + for(int j=prev_post_id+1;j num_of_data-1){ + float avg = (post_id != 0)?(float)i/(float)(post_id):i; + num_of_data = (int)(avg*PostSN_num); + + //fprintf(stderr, "realloc phase %d to %d\n", i, num_of_data); + + unsigned int *i_tmp=NULL; + CTYPE *c_tmp=NULL; + if(( i_tmp = (unsigned int *)realloc(cindices, num_of_data*sizeof(unsigned int))) == NULL){ + free(cindices); + exit(1); + }else{ + if(cindices != i_tmp){ + cindices = i_tmp; + } + } + + if(( c_tmp = (CTYPE *)realloc(val, num_of_data*sizeof(CTYPE) )) == NULL){ + free(val); + exit(1); + }else{ + if(val != c_tmp){ + val = c_tmp; + } + } + + } + } + + if(num_of_data != i){ + num_of_data = i; + for(int j = post_id+1; j < PostSN_num+1;j++){ + rptr[j] = num_of_data; + } + + // 縮小 + //fprintf(stderr, "realloc phase :to %d\n", num_of_data); + unsigned int *i_tmp = NULL; + CTYPE *c_tmp = NULL; + if(( i_tmp = (unsigned int *)realloc(cindices, num_of_data*sizeof(unsigned int))) == NULL){ + fprintf(stderr, "can't realloc memory in roading phase: %s\n", file_name); + free(cindices); + exit(1); + }else{ + if(cindices != i_tmp)cindices = i_tmp; + } + + if(( c_tmp = (CTYPE *)realloc(val, num_of_data*sizeof(CTYPE))) == NULL){ + fprintf(stderr, "can't realloc memory in roading phase: %s\n", file_name); + free(val); + exit(1); + }else{ + if(val != c_tmp) val = c_tmp; + } + } + + for(int i = 0; i < PostSN_num; i++) max_conv = (max_conv < rptr[i+1]-rptr[i])?rptr[i+1]-rptr[i]:max_conv; + + CUDA_SAFE_CALL( cudaMalloc( d_rptr, sizeof(unsigned int)*(PostSN_num+1)) ); + CUDA_SAFE_CALL( cudaMalloc( d_cindices, sizeof(unsigned int)*num_of_data) ); + CUDA_SAFE_CALL( cudaMalloc( d_val, sizeof(CTYPE)*num_of_data)); + + CUDA_SAFE_CALL( cudaMemcpy( *d_rptr, rptr, sizeof(unsigned int)*(PostSN_num+1), cudaMemcpyHostToDevice)); + CUDA_SAFE_CALL( cudaMemcpy( *d_cindices, cindices, sizeof(unsigned int)*num_of_data, cudaMemcpyHostToDevice)); + CUDA_SAFE_CALL( cudaMemcpy( *d_val, val, sizeof(CTYPE)*num_of_data, cudaMemcpyHostToDevice)); + + *host_rptr = rptr; + + fclose(fp); + //free(rptr); + free(cindices); + free(val); + + return max_conv; +} +void set_neuron_params(Neuron *n,enum NeuronType type,const char* filename, 
int num, int base_id, CTYPE Cm, CTYPE tau_m, CTYPE El, CTYPE dt_ref, CTYPE Ie, CTYPE Vr, CTYPE Vth, CTYPE tau_exc, CTYPE tau_inh, CTYPE gL ){ + n[type].type = type; + strcpy(n[type].filename, filename); + n[type].num = num; + n[type].base_id = base_id; + n[type].Cm = Cm; + n[type].tau_m = tau_m; + n[type].El = El; + n[type].dt_ref = dt_ref; + n[type].Ie = Ie; + n[type].Vr = Vr; + n[type].Vth = Vth; + n[type].tau_exc = tau_exc; + n[type].tau_inh = tau_inh; + n[type].gL = gL; + return; +} +void set_connectivity_params(Connectivity *c, enum ConnectionType type,const char*filename, int preNum, int postNum, enum NeuronType preType, enum NeuronType postType, CTYPE initial_weight, CTYPE delay, int UseParallelReduction ){ + c[type].type = type; + c[type].preNum = preNum; + c[type].postNum = postNum; + c[type].preType = preType; + c[type].postType = postType; + c[type].initial_weight = initial_weight; + c[type].delay = delay; + c[type].max_conv = LoadConnectivityFile(filename,&c[type].host_rptr, &c[type].rptr, &c[type].cindices, &c[type].val,initial_weight, preNum, postNum ); + c[type].pr = (UseParallelReduction); + + + return; +} + +int set_base_id(Neuron *Neurons){ + int base = 0; + for(int i = 0;i < TotalNumOfCellTypes;i++){ + Neurons[i].base_id = base; + base += Neurons[i].num; + } + return base; +} + +__global__ void InitParams( CTYPE *u, CTYPE *g_exc, CTYPE *dg_exc, CTYPE *g_inh, CTYPE *dg_inh, int *refractory_time_left, char *spike , Neuron *Neurons ,char *type, const int total_nn){ + int i = threadIdx.x + blockIdx.x*blockDim.x; + if( i < total_nn){ + u[i] = Neurons[type[i]].Vr + (Neurons[type[i]].Vth - Neurons[type[i]].Vr)*u[i]; + g_exc[i] = 0.f; + dg_exc[i] = 0.f; + g_inh[i] = 0.f; + dg_inh[i] = 0.f; + refractory_time_left[i] = 0; + spike[i] = 0; + } +}; + + +void init_neurons_params( Neuron *Neurons){ + + set_neuron_params( + Neurons, + GranuleCell, + "GranuleCell.dat", + 206415, + 19578, + 7, + 24.15, + -62, + 1.5, + 0, + -70, + -41, + 5.8, + 13.6, + 1.5 + ); + + set_neuron_params( + Neurons, + PurkinjeCell, + "PurkinjeCell.dat", + 171, + 0, + 334, + 47, + -59, + 0.5, + 800, + -69, + -43, + 1.1, + 2.8, + 7.0 + ); + + set_neuron_params( + Neurons, + GolgiCell, + "GolgiCell.dat", + 486, + 187, + 145, + 44, + -62, + 2, + 36.8, + -75, + -55, + 0.5, + 10, + 3.6 + ); + + set_neuron_params( + Neurons, + StellateCell, + "StellateCell.dat", + 918, + 673, + 14.6, + 9.125, + -68, + 1, + 24.05, + -78, + -53, + 0.64, + 2, + 1.0 + ); + + set_neuron_params( + Neurons, + BasketCell, + "BasketCell.dat", + 1784, + 1591, + 14.6, + 9.125, + -68, + 1, + 24.05, + -78, + -53, + 0.64, + 2, + 1.0 + ); + + set_neuron_params( + Neurons, + DCNCell, + "DCNCell.dat", + 16, + 171, + 142, + 33, + -45, + 1.5, + 180, + -55, + -36, + 1, + 0.7, + 1.56 + ); + + set_neuron_params( + Neurons, + Glomerulus, + "Glomerulus.dat", + 16203, + 3375, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ); + +} + +void init_connectivity_params(Connectivity *connectivities){ + + set_connectivity_params( + connectivities, + GlomerulusGolgi, + "GlomerulusGolgi.dat", + 16203, + 486, + Glomerulus, + GolgiCell, + 2.0, + 4.0, + 0 + ); + + set_connectivity_params( + connectivities, + GlomerulusGranule, + "GlomerulusGranule.dat", + 16203, + 206415, + Glomerulus, + GranuleCell, + 9.0, + 4.0, + 0 + ); + + set_connectivity_params( + connectivities, + GranuleGolgi, + "GranuleGolgi.dat", + 206415, + 486, + GranuleCell, + GolgiCell, + 0.4, + 5.0, + 0 + ); + + set_connectivity_params( + connectivities, + GolgiGranule, + "GolgiGranule.dat", + 486, + 
206415, + GolgiCell, + GranuleCell, + -5.0, + 2.0, + 0 + ); + + set_connectivity_params( + connectivities, + AscAxonPurkinje, + "AscAxonPurkinje.dat", + 206415, + 171, + GranuleCell, + PurkinjeCell, + 75.0, + 2.0, + 0 + ); + + set_connectivity_params( + connectivities, + PFPurkinje, + "PFPurkinje.dat", + 206415, + 171, + GranuleCell, + PurkinjeCell, + 0.02, + 5.0, + 1 + ); + + set_connectivity_params( + connectivities, + PFBasket, + "PFBasket.dat", + 206415, + 1784, + GranuleCell, + BasketCell, + 0.2, + 5.0, + 0 + ); + + set_connectivity_params( + connectivities, + PFStellate, + "PFStellate.dat", + 206415, + 918, + GranuleCell, + StellateCell, + 0.2, + 5.0, + 0 + ); + + set_connectivity_params( + connectivities, + GapJunctionsStellate, + "GapJunctionsStellate.dat", + 918, + 918, + StellateCell, + StellateCell, + -2.0, + 1.0, + 0 + ); + + set_connectivity_params( + connectivities, + GapJunctionsBasket, + "GapJunctionsBasket.dat", + 1784, + 1784, + BasketCell, + BasketCell, + -2.5, + 1.0, + 0 + ); + + set_connectivity_params( + connectivities, + GapJunctionsGolgi, + "GapJunctionsGolgi.dat", + 486, + 486, + GolgiCell, + GolgiCell, + -8.0, + 1.0, + 0 + ); + + set_connectivity_params( + connectivities, + PurkinjeDCN, + "PurkinjeDCN.dat", + 171, + 16, + PurkinjeCell, + DCNCell, + -0.0075, + 4.0, + 0 + ); + + set_connectivity_params( + connectivities, + GlomerulusDCN, + "GlomerulusDCN.dat", + 16203, + 16, + Glomerulus, + DCNCell, + 0.006, + 4.0, + 0 + ); + + set_connectivity_params( + connectivities, + BasketPurkinje, + "BasketPurkinje.dat", + 1784, + 171, + BasketCell, + PurkinjeCell, + -9.0, + 4.0, + 0 + ); + + set_connectivity_params( + connectivities, + StellatePurkinje, + "StellatePurkinje.dat", + 918, + 171, + StellateCell, + PurkinjeCell, + -8.5, + 5.0, + 0 + ); + + set_connectivity_params( + connectivities, + AscendingAxonGolgi, + "AscendingAxonGolgi.dat", + 206415, + 486, + GranuleCell, + GolgiCell, + 20.0, + 2.0, + 0 + ); + +} + diff --git a/cuda_code/initblocks_1.cu b/cuda_code/initblocks_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..b20bdf9072b03ac7641656448f286af09a44f206 --- /dev/null +++ b/cuda_code/initblocks_1.cu @@ -0,0 +1,439 @@ +#include +#include +#include +#include +#include +#include +# +HostError CheckBlocks(double *step, unsigned int M, string path){ + + int local_rank; + string temp; + char *output_name; + + MPISafeCall(MPI_Comm_rank(MPI_COMM_WORLD, &local_rank)); + + if(local_rank == 0){ + ofstream blocks; + temp = path + "Blocks.dat"; + output_name = to_char(temp); + blocks.open(output_name); + + int *bl = new int [35]; + double conto; + + for(int i = 0; i < 30; i++) + bl[i]=0; + + for(unsigned int i = 0; i < M; i++){ + conto = -1.00001*log(step[i])/log(2.0); + if(conto>30) + return HInvalidBlocks; + bl[(int)conto]++; + } + + for(int i = 0; i < 30; i++) + blocks<<-i<<" "<DTMAX) + dt = DTMAX; + + if(dt& devices, double plummer_core, double plummer_mass, double rscale, double mscale, string path){ + + HostSafeCall(CudaInit(GPUMINTHREADS, NGPU, rank, gpu_name, setdev, devices, path)); + + HostSafeCall(Max_dimension(TPB, BFMAX, N, MAXDIM, *GPUMINTHREADS)); +cout< 32){ + threads /= 2; + bfmax *= 2; + BL = ceil((double)nextsize/threads); + } + dim = threads * BL; + + + while(threads*BL < *GPUMINTHREADS && Bfactor < bfmax){ + BL *= 2; + Bfactor *= 2; + } + + unsigned int malloc_size = (*MAXDIM)*sizeof(double4); //it contains a, adot, a2dots sequentially + unsigned int malloc_db4 = nextsize*sizeof(double4); + unsigned int malloc_fl4 = 
nextsize*sizeof(float4); + unsigned int malloc_ui = nextsize*sizeof(unsigned int); + unsigned int malloc_db = nextsize*sizeof(double); + unsigned int malloc_db4_N = N*sizeof(double4); + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + + DeviceSafeCall(cudaMalloc((void **)&a_D[i], malloc_size)); + DeviceSafeCall(cudaMalloc((void **)&a1_D[i], malloc_size)); + DeviceSafeCall(cudaMalloc((void **)&a2_D[i], malloc_size)); + DeviceSafeCall(cudaMalloc((void **)&a_tot_D[i], malloc_db4_N)); + DeviceSafeCall(cudaMalloc((void **)&a1_tot_D[i], malloc_db4_N)); + DeviceSafeCall(cudaMalloc((void **)&a2_tot_D[i], malloc_db4_N)); + + DeviceSafeCall(cudaMalloc((void **)&pos_PD[i], malloc_db4)); + DeviceSafeCall(cudaMalloc((void **)&pos_CD[i], malloc_db4)); + DeviceSafeCall(cudaMalloc((void **)&vel_CD[i], malloc_db4)); + DeviceSafeCall(cudaMalloc((void **)&vel_PD[i], malloc_fl4)); + DeviceSafeCall(cudaMalloc((void **)&acc_PD[i], malloc_fl4)); + DeviceSafeCall(cudaMalloc((void **)&next_D[i], malloc_ui)); + DeviceSafeCall(cudaMalloc((void **)&loc_D[i], malloc_db)); + DeviceSafeCall(cudaMalloc((void **)&a3_D[i], malloc_db4_N)); + DeviceSafeCall(cudaMalloc((void **)&a_temp_Dev[i], 3*malloc_db4_N)); + + DeviceSafeCall(cudaMemcpy( pos_PD[i], pos_PH, malloc_db4, cudaMemcpyHostToDevice )); + DeviceSafeCall(cudaMemcpy( vel_PD[i], vel_PH, malloc_fl4, cudaMemcpyHostToDevice )); + DeviceSafeCall(cudaMemcpy( pos_CD[i], pos_CH, malloc_db4, cudaMemcpyHostToDevice )); + DeviceSafeCall(cudaMemcpy( vel_CD[i], vel_CH, malloc_db4, cudaMemcpyHostToDevice )); + } + + int ppG = N/(NGPU*nodes); + + if(vir && (!warm)){ + double kk, pp, virial_old; + HostSafeCall(Calculate_Energy(pos_CD, vel_CD, N, EPS, TPB, NGPU, rank, ppG, &kk, &pp, plummer_core, plummer_mass, rscale, mscale, devices)); + virial_old = 2. 
* kk / fabs(pp); + + ofstream hlog; + temp = path + "HiGPUslog.dat"; + output_name = to_char(temp); + hlog.open(output_name, ios::app); + hlog<<"==============================================="<>>(a3_D[i], acc_PD[i]); + } + + unsigned int dim2 = ceil((double)nextsize/TPB)*TPB; + + for(unsigned int i = nextsize; i < dim2; i++) + next[i] = -1; + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaMemcpy( next_D[i], next, malloc_ui, cudaMemcpyHostToDevice )); + } + + int SHR = threads * (sizeof(double4) + 2 * sizeof(float4)); + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + int istart = ppG*(i+rank*NGPU); + evaluation<<< BL, threads, SHR >>> ( N, pos_PD[i], vel_PD[i], acc_PD[i], a_D[i], a1_D[i], a2_D[i], + istart, ppG, Bfactor, dim, next_D[i], loc_D[i], 0.0, EPS, plummer_core, plummer_mass, rscale, mscale); + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + update_local_time<<>>(next_D[i], loc_D[i], 0.0); + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaDeviceSynchronize()); + DeviceCheckErrors(); + } + + + int bl = BL; + int bf = Bfactor; + SHR = threads * sizeof(double4); + + while(bf != 1){ + bl>>=1; + bf>>=1; + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + reduce<<< 3*bl, threads, SHR>>>(a_D[i], a1_D[i], a2_D[i], bf, dim); + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaDeviceSynchronize()); + DeviceCheckErrors(); + } + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaDeviceSynchronize()); + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + reposition<<>>(a_D[i], a1_D[i], a2_D[i], a_temp_Dev[i], nextsize); + } + + + + unsigned int cpy_size = 3*nextsize; + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaDeviceSynchronize()); + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceCheckErrors(); + DeviceSafeCall(cudaMemcpy(&a_H[i*cpy_size], a_temp_Dev[i], cpy_size*sizeof(double4), cudaMemcpyDeviceToHost)); + } + + + HostSafeCall(ReduceAll(cpy_size, N, NGPU, nextsize, a_H, a_H0, mpi_red_aux, mpi_red, next)); + + for(unsigned int i = 2*N; i < 3*N; i++) //it puts the snap to 0.0 + a_H0[i].x = a_H0[i].y = a_H0[i].z = a_H0[i].w = 0.0; + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaMemcpy(a_tot_D[i], a_H0, N*sizeof(double4), cudaMemcpyHostToDevice)); + DeviceSafeCall(cudaMemcpy(a1_tot_D[i], &a_H0[N], N*sizeof(double4), cudaMemcpyHostToDevice)); + DeviceSafeCall(cudaMemcpy(a2_tot_D[i], &a_H0[2*N], N*sizeof(double4), cudaMemcpyHostToDevice)); + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + int BLT = ppG/TPB + ceil((double)nextsize/TPB); + int istart = ppG*(i+rank*NGPU); + Predictor <<>> (stp, pos_PD[i], vel_PD[i], acc_PD[i], pos_CD[i], vel_CD[i], loc_D[i], a_tot_D[i], a1_tot_D[i], a2_tot_D[i], a3_D[i], istart, next_D[i], ppG, N); + } + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceCheckErrors(); + } + + + SHR = threads * (sizeof(double4) + 2 * sizeof(float4)); + + + for(unsigned int i = 0; i < NGPU; i++){ + 
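// Second force evaluation: the Predictor kernels above have just filled the predicted
+ // positions and velocities, so 'evaluation' is re-launched on every local GPU with the
+ // same dynamic shared-memory budget (one double4 plus two float4 per thread, as SHR was
+ // set just above). The reduced result becomes a_H1, which DetermineSteps later compares
+ // against a_H0 to assign the initial block time steps. The per-GPU launch pattern used
+ // throughout this routine is, as a sketch with a hypothetical kernel name:
+ //   for (unsigned int g = 0; g < NGPU; ++g) {
+ //     cudaSetDevice(devices[g]);                      // bind the host thread to GPU g
+ //     someKernel<<<BL, threads, SHR>>>(/* istart = ppG*(g + rank*NGPU), ... */);
+ //   }
+ 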
DeviceSafeCall(cudaSetDevice(devices[i])); + int istart = ppG*(i+rank*NGPU); + evaluation<<< BL, threads, SHR >>> ( N, pos_PD[i], vel_PD[i], acc_PD[i], a_D[i], a1_D[i], a2_D[i], + istart, ppG, Bfactor, dim, next_D[i], loc_D[i], 0.0, EPS, plummer_core, plummer_mass, rscale, mscale); + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaDeviceSynchronize()); + DeviceCheckErrors(); + } + + + bl = BL; + bf = Bfactor; + SHR = threads * sizeof(double4); + + while(bf != 1){ + bl>>=1; + bf>>=1; + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + reduce<<< 3*bl, threads, SHR>>>(a_D[i], a1_D[i], a2_D[i], bf, dim); + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaDeviceSynchronize()); + DeviceCheckErrors(); + } + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaDeviceSynchronize()); + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + reposition<<>>(a_D[i], a1_D[i], a2_D[i], a_temp_Dev[i], nextsize); + } + + cpy_size = 3*nextsize; + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaDeviceSynchronize()); + } + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceCheckErrors(); + DeviceSafeCall(cudaMemcpy(&a_H[i*cpy_size], a_temp_Dev[i], cpy_size*sizeof(double4), cudaMemcpyDeviceToHost)); + } + + + HostSafeCall(ReduceAll(cpy_size, N, NGPU, nextsize, a_H, a_H1, mpi_red_aux, mpi_red, next)); + HostSafeCall(DetermineSteps(stp, N, M, a_H0, a_H1, ETA4, DTMIN, DTMAX, step, ACTUAL_TIME, local_time)); + HostSafeCall(CheckBlocks(step, M, path)); + + for(unsigned int i = 0; i < NGPU; i++){ + DeviceSafeCall(cudaSetDevice(devices[i])); + DeviceSafeCall(cudaFree(a_D[i])); + DeviceSafeCall(cudaFree(a1_D[i])); + DeviceSafeCall(cudaFree(a2_D[i])); + + DeviceSafeCall(cudaFree(a_tot_D[i])); + DeviceSafeCall(cudaFree(a1_tot_D[i])); + DeviceSafeCall(cudaFree(a2_tot_D[i])); + + DeviceSafeCall(cudaFree(pos_PD[i])); + DeviceSafeCall(cudaFree(pos_CD[i])); + DeviceSafeCall(cudaFree(vel_CD[i])); + DeviceSafeCall(cudaFree(vel_PD[i])); + DeviceSafeCall(cudaFree(acc_PD[i])); + DeviceSafeCall(cudaFree(next_D[i])); + DeviceSafeCall(cudaFree(loc_D[i])); + DeviceSafeCall(cudaFree(a3_D[i])); + } + + delete [] a_H; + delete [] a_H1; + delete [] mpi_red_aux; + delete [] mpi_red; + + delete [] next; + + HostSafeCall(CPU_memcheck(__FILE__, __LINE__,path)); + + return HNoError; + +} diff --git a/cuda_code/interpolator_mem_usage.cu b/cuda_code/interpolator_mem_usage.cu new file mode 100644 index 0000000000000000000000000000000000000000..137d7e74e94c25c264def6a88836f509f8ff51cb --- /dev/null +++ b/cuda_code/interpolator_mem_usage.cu @@ -0,0 +1,37 @@ +#include +#include +#include "cudaPagani/quad/GPUquad/Interp1D.cuh" +#include + +__global__ void +Evaluate(quad::Interp1D interpolator, + size_t size, + double* results) +{ + double val = 1.5; + results[0] = interpolator(val); +} + +int main(){ + const size_t s = 10000000; + std::vector xs(s); + std::vector ys(s); + + std::iota(xs.begin(), xs.end(), 1.); + std::iota(ys.begin(), ys.end(), 2.); + double* results = quad::cuda_malloc_managed(s); + + for(int i=0; i<1000; i++) + { + quad::Interp1D interpObj(xs.data(), ys.data(), s); + Evaluate<<<1,1>>>(interpObj, s, results); + cudaDeviceSynchronize(); + size_t free_physmem, total_physmem; + 
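// cudaMemGetInfo reports the device's free and total memory in bytes; querying it once
+ // per iteration makes any leak from the repeated quad::Interp1D construction visible as
+ // a steadily shrinking free count. A small, hedged way to turn the two counters into a
+ // human-readable figure (not part of the original code):
+ //   double used_mb = (total_physmem - free_physmem) / (1024.0 * 1024.0);
+ //   std::cout << "iteration " << i << ": " << used_mb << " MB in use\n";
+ 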
cudaMemGetInfo(&free_physmem, &total_physmem); + std::cout< template class and operators: + * Search for roots of a function using an interval Newton method. + * + * Use the command-line argument "--n=" to select which GPU implementation to use, + * otherwise the naive implementation will be used by default. + * 0: the naive implementation + * 1: the optimized implementation + * 2: the recursive implementation + * + */ + +const static char *sSDKsample = "Interval Computing"; + +#include +#include +#include "helper_cuda.h" +#include "interval.h" +#include "cuda_interval.h" +#include "cpu_interval.h" + +int main(int argc,char *argv[]) +{ + int implementation_choice = 0; + + printf("[%s] starting ...\n\n", sSDKsample); + + if (checkCmdLineFlag(argc, (const char **) argv, "n")) + { + implementation_choice = getCmdLineArgumentInt(argc, (const char **) argv, "n"); + } + + // Pick the best GPU available, or if the developer selects one at the command line + int devID = findCudaDevice(argc, (const char **)argv); + cudaDeviceProp deviceProp; + cudaGetDeviceProperties(&deviceProp, devID); + printf("> GPU Device has Compute Capabilities SM %d.%d\n\n", deviceProp.major, deviceProp.minor); + + int version = (deviceProp.major * 0x10 + deviceProp.minor); + + if (version < 0x13) + { + printf("%s: requires minimum of Compute Capability 1.3 or higher, waiving test...\n", sSDKsample); + exit(EXIT_SUCCESS); + } + + switch (implementation_choice) + { + case 0: + printf("GPU naive implementation\n"); + break; + + case 1: + printf("GPU optimized implementation\n"); + break; + + case 2: + if (deviceProp.major >= 2) + { + printf("GPU recursive implementation (requires Compute SM 2.0+)\n"); + } + else + { + printf("GPU naive implementation is used instead of the recursive implementation, which requires a GPU with CUDA capability 2.0+\n"); + implementation_choice = 0; + } + + break; + + default: + printf("GPU naive implementation\n"); + } + + interval_gpu *d_result; + int *d_nresults; + int *h_nresults = new int[THREADS]; + cudaEvent_t start, stop; + + CHECKED_CALL(cudaSetDevice(devID)); + CHECKED_CALL(cudaMalloc((void **)&d_result, THREADS * DEPTH_RESULT * sizeof(*d_result))); + CHECKED_CALL(cudaMalloc((void **)&d_nresults, THREADS * sizeof(*d_nresults))); + CHECKED_CALL(cudaEventCreate(&start)); + CHECKED_CALL(cudaEventCreate(&stop)); + + if (deviceProp.major >= 2) + { + // We need L1 cache to store the stack (only applicable to sm_20 and higher) + CHECKED_CALL(cudaFuncSetCacheConfig(test_interval_newton, cudaFuncCachePreferL1)); + + // Increase the stack size large enough for the non-inlined and recursive function calls (only applicable to sm_20 and higher) +#if CUDART_VERSION >= 4000 + CHECKED_CALL(cudaDeviceSetLimit(cudaLimitStackSize, 8192)); +#else + CHECKED_CALL(cudaThreadSetLimit(cudaLimitStackSize, 8192)); +#endif + } + + interval_gpu i(0.01f, 4.0f); + std::cout << "Searching for roots in [" << i.lower() << ", " << i.upper() << "]...\n"; + + CHECKED_CALL(cudaEventRecord(start, 0)); + + for (int it = 0; it < NUM_RUNS; ++it) + { + test_interval_newton<<>>(d_result, d_nresults, i, implementation_choice); + CHECKED_CALL(cudaGetLastError()); + } + + CHECKED_CALL(cudaEventRecord(stop, 0)); + CHECKED_CALL(cudaDeviceSynchronize()); + + I_CPU *h_result = new I_CPU[THREADS * DEPTH_RESULT]; + CHECKED_CALL(cudaMemcpy(h_result, d_result, THREADS * DEPTH_RESULT * sizeof(*d_result), cudaMemcpyDeviceToHost)); + CHECKED_CALL(cudaMemcpy(h_nresults, d_nresults, THREADS * sizeof(*d_nresults), cudaMemcpyDeviceToHost)); + + 
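// d_result is laid out with a stride of THREADS: the i-th interval found for equation t
+ // sits at h_result[THREADS * i + t], and h_nresults[t] holds how many intervals that
+ // equation produced. The report below only inspects equation 0; a hedged sketch for an
+ // arbitrary equation index would be:
+ //   int t = 0;                                  // any equation index in [0, THREADS)
+ //   for (int k = 0; k < h_nresults[t]; ++k)
+ //     std::cout << "[" << h_result[THREADS * k + t].lower() << ", "
+ //               << h_result[THREADS * k + t].upper() << "]\n";
+ 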
std::cout << "Found " << h_nresults[0] << " intervals that may contain the root(s)\n"; + std::cout.precision(15); + + for (int i = 0; i != h_nresults[0]; ++i) + { + std::cout << " i[" << i << "] =" + << " [" << h_result[THREADS * i + 0].lower() + << ", " << h_result[THREADS * i + 0].upper() << "]\n"; + } + + float time; + CHECKED_CALL(cudaEventElapsedTime(&time, start, stop)); + std::cout << "Number of equations solved: " << THREADS << "\n"; + std::cout << "Time per equation: " << 1000000.0f * (time / (float)(THREADS)) / NUM_RUNS << " us\n"; + + CHECKED_CALL(cudaEventDestroy(start)); + CHECKED_CALL(cudaEventDestroy(stop)); + CHECKED_CALL(cudaFree(d_result)); + CHECKED_CALL(cudaFree(d_nresults)); + + // Compute the results using a CPU implementation based on the Boost library + I_CPU i_cpu(0.01f, 4.0f); + I_CPU *h_result_cpu = new I_CPU[THREADS * DEPTH_RESULT]; + int *h_nresults_cpu = new int[THREADS]; + test_interval_newton_cpu(h_result_cpu, h_nresults_cpu, i_cpu); + + // Compare the CPU and GPU results + bool bTestResult = checkAgainstHost(h_nresults, h_nresults_cpu, h_result, h_result_cpu); + + delete [] h_result_cpu; + delete [] h_nresults_cpu; + delete [] h_result; + delete [] h_nresults; + + exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE); +} diff --git a/cuda_code/inv.cu b/cuda_code/inv.cu new file mode 100644 index 0000000000000000000000000000000000000000..eccd5f244926c9afa3be498e441681ec2ca1198c --- /dev/null +++ b/cuda_code/inv.cu @@ -0,0 +1,389 @@ +//======================================================================================================= +// Copyright 2015 Asgeir Bjorgan, Lise Lyngsnes Randeberg, Norwegian University of Science and Technology +// Distributed under the MIT License. +// (See accompanying file LICENSE or copy at +// http://opensource.org/licenses/MIT) +//======================================================================================================= + + +#include "melanin.h" + +#define div13 1.0f/3.0f +#include "inv.h" +#define A 0.14386f + +//#define A 0.17f +#define d1 0.0001f +#define de 0.0001f +#define NUM_ITERATIONS 15 +__global__ void ReflIsoL2InvertMuae(float *muae, float *muse, float *muad, float *musd, float *gcol, float *lineData, size_t pitch, int startblockInd){ + #ifdef INV_MUAE_BLOCKDIM_64_QUICKFIX + //NB FIXME: indeksuthentingen er bygget for en mindre blokkdimensjon enn 160 + int egBlockInd = (blockIdx.x*64 + threadIdx.x)/160; //indeksen til blokkene slik de egentlig var ment å være i originalgriddet + int ind = ((gridDim.x*(blockIdx.y+startblockInd))*pitch*2)/5 + egBlockInd*pitch + blockIdx.x*64-egBlockInd*32*5 + threadIdx.x; //the pitch is made for the largest possible block size, but this uses too many registers for that block size to be valid and needs something less. 
Therefore some index hackin' + #else + int ind = (gridDim.x*(blockIdx.y+startblockInd) + blockIdx.x)*pitch+ threadIdx.x; + #endif + + float musr2 = gcol[ind]; //ps dette er ikke musr, det er g FIXME + + //reduced scattering coefficients + float musr1 = muse[ind]*(1.0f-musr2); + musr2 = musd[ind]*(1.0f-musr2); + + //move mua into shared memory + float mua1 = muae[ind]; + float mua2 = muad[ind]; + + //diffusion constant + + float D2 = fdividef(1.0f,3.0f*(musr2 + mua2)); + float del2 = sqrtf(fdividef(D2,mua2)); + + float res; + float derivmuae; + float currLineData = lineData[ind]; + float f2 = 1.0f + fdividef(del2 ,(D2 * 3.0f)); + float f6 = D2-del2*fdividef(del2, (D2*9.0f)); + + for (int i=0; i < NUM_ITERATIONS; ++i){ + float D1 = fdividef(1.0f,3.0f*(musr1 + mua1)); + float del1 = sqrtf(fdividef(D1,mua1)); + float div1D1D1 = fdividef(1.0f, D1*D1); + + float coshval = coshf(fdividef(d1, del1)); + float sinhval = sinhf(fdividef(d1, del1)); + + //result + float f1 = (del1*del1 * fdividef(del2 , 3.0f) - del1*del1 * D2) * coshval + (powf(del1, 3.0f) * fdividef(D2 , D1*3.0f) - del1 * del2 * D1) * sinhval; + float f3 = fdividef(musr2, musr1) * del2 * del2 * D1 * (del1*del1 * fdividef(div1D1D1,9.0f) - 1.0f) + del1*del1 * f6; + float f4 = expf(-fdividef(d1 , (D1 *3.0f))); + float f5 = del1*del1 * fdividef(div1D1D1,9.0f) - 1.0f; + float f7 = D1 * del1 * (D2 + del2 * A) * coshval + (del2 * D1*D1 + D2 * del1*del1 * A) * sinhval; + + float fact = fdividef((f1 * f2 + f3 * f4) , (f5 * f2 * f7)); + + res = del1 * musr1 * A * fact; + + + float dD1d1 = -3.0f*D1*D1; + float ddel1d1 = (dD1d1*mua1-D1)*fdividef(1.0f, mua1*mua1)*fdividef(1.0f, 2.0f*del1); + + + float df1d1 = (fdividef(2.0f , 3.0f) * del1 * del2 * ddel1d1 - 2.0f * del1 * D2 * ddel1d1) * coshval - (del1*del1 * fdividef(del2 , 3.0f) - del1*del1 * D2) * sinhval * d1 * fdividef(1.0f, del1*del1) * ddel1d1 + (del1*del1 * fdividef(D2 , D1) * ddel1d1 - powf(del1, 3.0f) * D2 * (div1D1D1) * fdividef(dD1d1 , 3.0f) - ddel1d1 * del2 * D1 - del1 * del2 * dD1d1) * sinhval - (powf(del1, 3.0f) * fdividef(D2 , (D1 * 3.0f)) - del1 * del2 * D1) * coshval * d1 * fdividef(1.0f, del1*del1) * ddel1d1; + float df3d1 = fdividef(musr2 , musr1) * del2 * del2 * dD1d1 * (del1*del1 * fdividef(div1D1D1,9.0f) - 1.0f) + fdividef(musr2 , musr1) * del2 * del2 * D1 * (fdividef(2.0f , 9.0f) * del1 * (div1D1D1) * ddel1d1 - fdividef(2.0f , 9.0f) * del1*del1 * powf(D1, -3.0f) * dD1d1) + 2.0f * del1 * (f6) * ddel1d1; + float df4d1 = d1 * (div1D1D1) * dD1d1 * f4 *fdividef(1.0f, 3.0f); + float df5d1 = fdividef(2.0f , 9.0f) * del1 * (div1D1D1) * ddel1d1 - fdividef(2.0f , 9.0f) * del1*del1 * powf(D1, -3.0f) * dD1d1; + float df7d1 = dD1d1 * del1 * (D2 + del2 * A) * coshval + D1 * ddel1d1 * (D2 + del2 * A) * coshval - fdividef(D1 , del1) * (D2 + del2 * A) * sinhval * d1 * ddel1d1 + (2.0f * del2 * D1 * dD1d1 + 2.0f * D2 * del1 * A * ddel1d1) * sinhval - (del2 * D1*D1 + D2 * del1*del1 * A) * coshval * d1 * fdividef(1.0f, del1*del1) * ddel1d1; + + derivmuae = ddel1d1 * musr1 * A * fact + del1 * musr1 * A * (df1d1 * f2 + df3d1 * f4 + f3 * df4d1) *fdividef(1.0f, (f5 * f2 * f7)) - del1 * musr1 * A * fact * fdividef(1.0f, f5) * df5d1 - del1 * musr1 * A * fact * fdividef(1.0f, f7) * df7d1; + //newton's method + mua1 = mua1 - fdividef(res-currLineData, derivmuae); + + //correction in case muad wants to be negative, which we seriously don't want + mua1 = mua1*(1-signbit(mua1)) + signbit(mua1); + } + muae[ind] = mua1; +} + +//takes in pre-allocated arrays and the arrays containing the bases of the 
different absorption coefficients, fills the skin data arrays with the optical properties +__global__ void calcSkinData(float *wlens, float *oxy_arr, float *Bd_arr, float *muam694_arr, float *melanintype_arr, float *muae, float *muse, float *muad, float *musd, + float *muh_oxy, float *muh_deoxy, float *melanin_base, float *musm, float *musr, float *musb_base, size_t pitch, int startblockind){ + //walk down the lines, walk along the blocks, walk along the threads inside the block + int index = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch + threadIdx.x; + + //absorption properties + float H = 0.41; + float H0 = 0.45; + float Be = 0.002; + + + int chromInd = blockIdx.x*pitch + threadIdx.x; + float oxy = oxy_arr[chromInd]; + float Bd = Bd_arr[chromInd]; + float muam694 = muam694_arr[chromInd]; + int melanintype = melanintype_arr[chromInd]; + + + float mua_other = 25; //FIXME + float muab_blood = (muh_oxy[index]*oxy + muh_deoxy[index]*(1-oxy))*fdividef(H,H0); + + __shared__ float wlen; + if (threadIdx.x == 0){ + wlen = wlens[blockIdx.y+startblockind]; + } + __syncthreads(); + + float mua_melanin = muam694*((melanintype == SVAASAND_MELANIN_GPU)*powf(fdividef(694.0f,wlen), 3.46) + (melanintype == EUMELANIN_GPU)*expf(-kEu*fdividef(wlen-694.0f,694.0f)) + (melanintype == PHEOMELANIN_GPU)*expf(-kPheo*fdividef(wlen-694.0f,694.0f))); + muae[index] = mua_melanin + muab_blood*Be + mua_other*(1-Be); + muad[index] = muab_blood*Bd + mua_other*(1-Bd); + + //scattering properties + float c_ray = 1.05e12; + float c_mie = 105; + float must = musm[index]*c_mie*100 + musr[index]*c_ray*100; + + //float f = 0.64; + float aMie = 18.780, bMie = 0.22, aRay = 17.6; + must = 100*(aMie*pow(wlen/500.0f, -bMie) + aRay*pow(wlen/500, -4)); + float gcol = 0.62 + wlen*29e-5; + must /= (1-gcol); + + //float b = 0.91; + //must = 3000*(f*powf(wlen/500.0f,-4) + (1-f)*powf(wlen/500.0f, -b))/(1-(0.62 + wlen*29e-5)); + + float musb685 = 55.09e-12; + float ve = 1.25e-16; + float musb = musb685*H*(1-H)*(1.4-H)*fdividef(1.0f,ve)*musb_base[index]; + muse[index] = must;//*(1-Be);//+musb*Be; + musd[index] = must;//*(1-Bd);//+musb*Bd; +} + +__global__ void test(float *muae, float *muse, float *muad, float *musd, float *gcol, float *lineData, size_t pitch, int startblockind){ + int ind = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch+ threadIdx.x; + float musr2 = gcol[ind]; //ps dette er ikke musr, det er g FIXME + + //reduced scattering coefficients + float musr1 = muse[ind]*(1.0f-musr2); + musr2 = musd[ind]*(1.0f-musr2); + + //move mua into local memory + float mua1 = muae[ind]; + float mua2 = muad[ind]; + + float currLineData = lineData[ind]; + float res; + float derivmuad; + + //diffusion constant + float D1 = fdividef(1.0f,3.0f*(musr1 + mua1)); + + float musr2dmusr1 = fdividef(musr2,musr1); //musr2 divided by musr1, keep for derivative calc + + + //optical penetration depth + float del1 = sqrtf(fdividef(D1,mua1)); + float sinhval = sinhf(fdividef(de, del1)); + float coshval = coshf(fdividef(de, del1)); + float expval = expf(-fdividef(de,D1)*div13); + + float D2 = fdividef(1.0f,3.0f*(musr2 + mua2)); + float del2 = sqrtf(fdividef(D2,mua2)); + + //from Svaasand 1995 + //calculate the reflectance value + float f1 = (del1*del1*del2*div13-del1*del1*D2)*coshval+(del1*del1*del1*fdividef(D2,D1)*div13 - del1*del2*D1)*sinhval; //keeping for derivative calc + float f2 = 1.0f+fdividef(del2, D2)*div13; //keeping for derivative calc + float f3 = 
(musr2dmusr1*del2*del2*D1*(del1*fdividef(del1,D1*D1)*div13*div13-1.0f)+del1*del1*(D2-del2*fdividef(del2,D2)*div13*div13))*expval; //keeping for derivative calc + float f4 = del1*fdividef(del1,D1*D1)*div13*div13 - 1.0f; //keep for derivative calc, f4 + float f5 = fdividef(del2,D2)*div13+1.0f; //keep for derivative calc, f5 + float f6 = D1*del1*(D2+del2*A)*coshval+(D1*D1*del2 + D2*del1*del1*A)*sinhval; //keep for derivative calc, f6 + float num = del1*musr1*A*(f1*f2+f3); + float denom = f4*f5*f6; + res = fdividef(num, denom); + + //calculate the derivative with respect to muad + float dD2dmuad = -3.0f*D2*D2; + float ddel2dmuad = (dD2dmuad*mua2-D2)*fdividef(1.0f, mua2*mua2)*fdividef(1.0f, 2.0f*del2); + float df2dmuad = div13*(ddel2dmuad*D2 - del2*dD2dmuad)*fdividef(1.0f, D2*D2); + derivmuad = (del1*musr1*A*((coshval*(del1*del1*div13*ddel2dmuad - del1*del1*dD2dmuad) + sinhval*(del1*del1*del1*fdividef(1.0f, D1)*div13*dD2dmuad - del1*D1*ddel2dmuad))*f2 + f1*df2dmuad + expval*(musr2dmusr1*2.0f*del2*ddel2dmuad*D1*(fdividef(del1*del1,9.0f*D1*D1)-1.0f) + del1*del1*(dD2dmuad + fdividef(1.0f, 9.0f*mua2*mua2))))*denom - (f4*(df2dmuad*f6 + D1*del1*(dD2dmuad + ddel2dmuad*A)*coshval + (D1*D1*ddel2dmuad + dD2dmuad*del1*del1*A)*sinhval*f5)*num))*fdividef(1.0f, denom*denom); + + //calculate next mua2 + //newton's method + mua2 = mua2 - fdividef(res-currLineData, derivmuad); + + //correction in case muad wants to be negative, which we seriously don't want + mua2 = mua2*(1-signbit(mua2)) + signbit(mua2); + +} + + +#define BLOCK_DIM 160 +__global__ void ReflIsoL2InvertMuad(float *muae, float *muse, float *muad, float *musd, float *gcol, float *lineData, size_t pitch, int startblockind){ + int ind = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch+ threadIdx.x; + float musr2 = gcol[ind]; //ps dette er ikke musr, det er g FIXME + + //reduced scattering coefficients + float musr1 = muse[ind]*(1.0f-musr2); + musr2 = musd[ind]*(1.0f-musr2); + + //move mua into local memory + float mua1 = muae[ind]; + float mua2 = muad[ind]; + + float currLineData = lineData[ind]; + float res; + float derivmuad; + + //diffusion constant + float D1 = fdividef(1.0f,3.0f*(musr1 + mua1)); + + float musr2dmusr1 = fdividef(musr2,musr1); //musr2 divided by musr1, keep for derivative calc + + + //optical penetration depth + float del1 = sqrtf(fdividef(D1,mua1)); + float sinhval = sinhf(fdividef(de, del1)); + float coshval = coshf(fdividef(de, del1)); + float expval = expf(-fdividef(de,D1)*div13); + + for (int i=0; i < NUM_ITERATIONS; ++i){ + //result + //res = (musr1 * sqrtf(1.0f / (musr1 + mua1) / mua1) * sqrtf(3.0f) * ((-musr1 / 2.0f + mua1) * (3 * musr2 + mua2) * mua2 * sqrtf((1 / (3 * musr2 + mua2) / mua2)) + (3.0f / 2.0f * musr2 - mua2) * mua1 - 3.0f / 2.0f * musr1 * mua2) * sinh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) + 4.0f * (-3.0f / 8.0f * musr2 + mua2) * musr1 * cosh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) + expf(-de * (musr1 + mua1)) * (3.0f * musr2 * mua1 - 4.0f * musr1 * mua2)) * sqrtf(1.0f / (musr1 + mua1) / mua1) * mua1 * sqrtf(3.0f) * (musr1 + mua1) * A / (-musr1 / 2.0f + mua1) / (3.0f + sqrtf((1 / (3 * musr2 + mua2) / mua2)) * (3 * musr2 + mua2)) / ((mua1 * (3 * musr2 + mua2) * sqrtf((1 / (3 * musr2 + mua2) / mua2)) + 3.0f * A * (musr1 + mua1)) * sinh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f / 2.0f)) + sqrtf(1.0f / (musr1 + mua1) / mua1) * mua1 * sqrtf(3.0f) * (musr1 + mua1) * cosh(de * sqrtf(3.0f) * powf(1.0f / (musr1 + mua1) / mua1, -1.0f 
/ 2.0f)) * (1.0f + A * (3 * musr2 + mua2) * sqrtf((1 / (3 * musr2 + mua2) / mua2)))) / mua2; + + //derivative + //derivmuad = -powf((3 * musr2 + mua2), -1.0f / 2.0f) * A * musr2 * (expf(-de * (musr1 + mua1)) * (3.0f * sqrtf(mua2) * (A * (musr1 + mua1) + mua1 / 9.0f + 4.0f / 9.0f * musr1) * mua1 * sqrtf((3 * musr2 + mua2)) + (A * (musr1 + mua1) + mua1) * (2.0f * musr1 * mua2 + (3.0f / 2.0f * musr2 + mua2) * mua1)) * sinh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) + 2.0f * (expf(-de * (musr1 + mua1)) * (2.0f / 3.0f * ((A / 4.0f + 3.0f / 4.0f) * powf(mua1, 3.0f / 2.0f) + musr1 * sqrtf(mua1) * A) * sqrtf(mua2) * sqrtf((3 * musr2 + mua2)) + (A + 1.0f / 3.0f) * ((3.0f / 4.0f * musr2 + mua2 / 2.0f) * powf(mua1, 3.0f / 2.0f) + mua2 * musr1 * sqrtf(mua1))) * cosh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) - 5.0f / 4.0f * (mua2 + 3.0f / 5.0f * sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) + 3.0f / 0.10e2 * musr2) * (A + 1.0f / 3.0f) * musr1 * sqrtf(mua1)) * sqrtf(3.0f) * sqrtf(musr1 + mua1)) * sqrtf(3.0f) * sqrtf(musr1 + mua1) * powf(mua2, 3.0f / 2.0f) * sqrtf(mua1) * powf(mua2 + sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) / 3.0f, -2.0f) / (-musr1 / 2.0f + mua1) * powf((mua1 * sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) / 3.0f + A * mua2 * (musr1 + mua1)) * sinh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) + sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1) * cosh(de * sqrtf(3.0f) * sqrtf(musr1 + mua1) * sqrtf(mua1)) * (mua2 + sqrtf((3 * musr2 + mua2)) * sqrtf(mua2) * A) / 3.0f, -2.0f) / 9.0f; + //res += currLineData*(i+1); + //derivmuad += currLineData*2*(i+1); + //res += gcol[ind]; + float D2 = fdividef(1.0f,3.0f*(musr2 + mua2)); + float del2 = sqrtf(fdividef(D2,mua2)); + + //from Svaasand 1995 + //calculate the reflectance value + float f1 = (del1*del1*del2*div13-del1*del1*D2)*coshval+(del1*del1*del1*fdividef(D2,D1)*div13 - del1*del2*D1)*sinhval; //keeping for derivative calc + float f2 = 1.0f+fdividef(del2, D2)*div13; //keeping for derivative calc + float f3 = (musr2dmusr1*del2*del2*D1*(del1*fdividef(del1,D1*D1)*div13*div13-1.0f)+del1*del1*(D2-del2*fdividef(del2,D2)*div13*div13))*expval; //keeping for derivative calc + float f4 = del1*fdividef(del1,D1*D1)*div13*div13 - 1.0f; //keep for derivative calc, f4 + float f5 = fdividef(del2,D2)*div13+1.0f; //keep for derivative calc, f5 + float f6 = D1*del1*(D2+del2*A)*coshval+(D1*D1*del2 + D2*del1*del1*A)*sinhval; //keep for derivative calc, f6 + float num = del1*musr1*A*(f1*f2+f3); + float denom = f4*f5*f6; + res = fdividef(num, denom); + + //calculate the derivative with respect to muad + float dD2dmuad = -3.0f*D2*D2; + float ddel2dmuad = (dD2dmuad*mua2-D2)*fdividef(1.0f, mua2*mua2)*fdividef(1.0f, 2.0f*del2); + float df2dmuad = div13*(ddel2dmuad*D2 - del2*dD2dmuad)*fdividef(1.0f, D2*D2); + derivmuad = (del1*musr1*A*((coshval*(del1*del1*div13*ddel2dmuad - del1*del1*dD2dmuad) + sinhval*(del1*del1*del1*fdividef(1.0f, D1)*div13*dD2dmuad - del1*D1*ddel2dmuad))*f2 + f1*df2dmuad + expval*(musr2dmusr1*2.0f*del2*ddel2dmuad*D1*(fdividef(del1*del1,9.0f*D1*D1)-1.0f) + del1*del1*(dD2dmuad + fdividef(1.0f, 9.0f*mua2*mua2))))*denom - (f4*(df2dmuad*f6 + D1*del1*(dD2dmuad + ddel2dmuad*A)*coshval + (D1*D1*ddel2dmuad + dD2dmuad*del1*del1*A)*sinhval*f5)*num))*fdividef(1.0f, denom*denom); + + //calculate next mua2 + //newton's method + mua2 = mua2 - fdividef(res-currLineData, derivmuad); + + //correction in case muad wants to be negative, which we seriously don't want + mua2 = mua2*(1-signbit(mua2)) + signbit(mua2); + } + muad[ind] = mua2; +} + +//find 
straight line through input mua, return as the value at wavelength w +__global__ void StraightLine(float *wavelengths, float *mua, float wlen, float *res, int startwlenind, int endwlenind, size_t pitch){ + float xbar = 0; + float ybar = 0; + float xybar = 0; + float x2bar = 0; + int num = 0; + for (int i=startwlenind; i < endwlenind; i++){ + num++; + int ind = (gridDim.x*i + blockIdx.x)*pitch + threadIdx.x; + __shared__ float w; + if (threadIdx.x == 0){ + w = wavelengths[i]; + } + __syncthreads(); + //float w = wavelengths[i]; + float abs = mua[ind]; + xbar += w; + ybar += abs; + xybar += w*abs; + x2bar += w*w; + } + xbar /= num; + ybar /= num; + xybar /= num; + x2bar /= num; + float a = (xybar - xbar*ybar)/(x2bar - xbar*xbar); + float b = ybar - a*xbar; + int ind = blockIdx.x*pitch + threadIdx.x; + res[ind] = a*wlen + b; +} + +__global__ void MultVector(float *multVec, float *mua, float *res, float factor, int startwlenind, int endwlenind, size_t pitch){ + int i=0; + float restemp = 0; + for (i=startwlenind; i < endwlenind; i++){ + int ind = (gridDim.x*i + blockIdx.x)*pitch + threadIdx.x; + restemp += mua[ind]*multVec[ind]; + } + restemp *= factor; + int ind = blockIdx.x*pitch + threadIdx.x; + res[ind] = restemp; +} + + +__global__ void ReflIsoL2(float *muae, float *muse, float *muad, float *musd, float *gcol, float *res, size_t pitch, int startblockind){ + int ind = (gridDim.x*(blockIdx.y+startblockind) + blockIdx.x)*pitch+ threadIdx.x; + float musr2 = gcol[ind]; //ps dette er ikke musr, det er g FIXME + + //reduced scattering coefficients + float musr1 = muse[ind]*(1.0f-musr2); + musr2 = musd[ind]*(1.0f-musr2); + + //move mua into local memory + float mua1 = muae[ind]; + float mua2 = muad[ind]; + + res[ind] = ReflIsoL2Calc(mua1, musr1, mua2, musr2); +} + + +__device__ float ReflIsoL2Calc(float mua1, float musr1, float mua2, float musr2){ + //diffusion constant + float D1 = fdividef(1.0f,3.0f*(musr1 + mua1)); + + float musr2dmusr1 = fdividef(musr2,musr1); //musr2 divided by musr1, keep for derivative calc + + + //optical penetration depth + float del1 = sqrtf(fdividef(D1,mua1)); + float sinhval = sinhf(fdividef(de, del1)); + float coshval = coshf(fdividef(de, del1)); + float fact1 = del1*musr1*A; + float expval = expf(-fdividef(de,D1)*div13); + + float D2 = fdividef(1.0f,3.0f*(musr2 + mua2)); + float del2 = sqrtf(fdividef(D2,mua2)); + + //from Svaasand 1995 + //calculate the reflectance value + float f1 = (del1*del1*del2*div13-del1*del1*D2)*coshval+(del1*del1*del1*fdividef(D2,D1)*div13 - del1*del2*D1)*sinhval; //keeping for derivative calc + float f2 = 1.0f+fdividef(del2, D2)*div13; //keeping for derivative calc + float f3 = (musr2dmusr1*del2*del2*D1*(del1*fdividef(del1,D1*D1)*div13*div13-1.0f)+del1*del1*(D2-del2*fdividef(del2,D2)*div13*div13))*expval; //keeping for derivative calc + float f4 = del1*fdividef(del1,D1*D1)*div13*div13 - 1.0f; //keep for derivative calc, f4 + float f5 = fdividef(del2,D2)*div13+1.0f; //keep for derivative calc, f5 + float f6 = D1*del1*(D2+del2*A)*coshval+(D1*D1*del2 + D2*del1*del1*A)*sinhval; //keep for derivative calc, f6 + float num = fact1*(f1*f2+f3); + float denom = f4*f5*f6; + return fdividef(num,denom); +} + +__global__ void ReflIsoL2ErrorCheck(float *muae, float *muse, float *musd, float *gcol, float *AT, float *x, int endmembers, int numbands, float *inputres, float *outputres, size_t pitch, int startblockind, int diff){ + float error = 0; + for (int i=0; i < numbands; i++){ + int ind = (gridDim.x*(i+startblockind) + blockIdx.x)*pitch+ threadIdx.x; + 
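// Per-band residual: for wavelength band i, the dermal absorption mua2 is rebuilt as a
+ // linear combination of the endmember spectra stored in AT weighted by the abundances in
+ // x, the two-layer reflectance is evaluated with ReflIsoL2Calc, and the squared deviation
+ // from the measured spectrum is accumulated; the square root of that sum is written out
+ // per pixel after the loop. Note that 'gcol' actually holds the anisotropy factor g, not
+ // a reduced scattering coefficient (the Norwegian FIXME below says the same), so the
+ // reduced coefficients are formed as mus*(1 - g).
+ 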
float musr2 = gcol[ind]; //ps dette er ikke musr, det er g FIXME + + //reduced scattering coefficients + float musr1 = muse[ind]*(1.0f-musr2); + musr2 = musd[ind]*(1.0f-musr2); + + //calculate mua + float mua1 = muae[ind]; + float mua2 = 0; + float temp; + for (int j=0; j < endmembers; j++){ + __shared__ float Aval; + if (threadIdx.x == 0){ + Aval = AT[j*numbands + i + diff]; + } + __syncthreads(); + mua2 += Aval*x[(gridDim.x*j + blockIdx.x)*pitch + threadIdx.x]; + } + float res = ReflIsoL2Calc(mua1, musr1, mua2, musr2); + float measval = inputres[ind]; + error += (res-measval)*(res-measval); + } + outputres[blockIdx.x*pitch + threadIdx.x] = sqrtf(error); + +} diff --git a/cuda_code/inversion.cu b/cuda_code/inversion.cu new file mode 100644 index 0000000000000000000000000000000000000000..a7384612dc0316263a6e648565957f37559008f9 --- /dev/null +++ b/cuda_code/inversion.cu @@ -0,0 +1,64 @@ +#include +#include +#include +#include +#include + +#include + +// CUDAカーネル +__global__ void inversionGpu +( + const cv::cuda::PtrStepSz src, + cv::cuda::PtrStepSz dst +) +{ + const int x = blockDim.x * blockIdx.x + threadIdx.x; + const int y = blockDim.y * blockIdx.y + threadIdx.y; + if((y >= 0) && (y < src.rows)) + { + if((x >= 0) && (x < src.cols)) + { + dst.ptr(y)[x] = (255 - src.ptr(y)[x]); + } + } +} + +void launchInversionGpu +( + cv::cuda::GpuMat& src, + cv::cuda::GpuMat& dst +) +{ + const dim3 block(32, 32); + const dim3 grid(cv::cudev::divUp(dst.cols, block.x), cv::cudev::divUp(dst.rows, block.y)); + + // CUDAカーネル起動 + inversionGpu<<>>(src, dst); + + CV_CUDEV_SAFE_CALL(cudaGetLastError()); + CV_CUDEV_SAFE_CALL(cudaDeviceSynchronize()); +} + +int main(int argc, char *argv[]) +{ + cv::Mat src = cv::imread("lena.jpg", cv::IMREAD_GRAYSCALE); + if (src.empty()) + { + std::cerr << "could not load image." << std::endl; + return -1; + } + + cv::cuda::GpuMat d_src(src); + cv::cuda::GpuMat d_dst(d_src.size(), d_src.type()); + launchInversionGpu(d_src, d_dst); + + cv::Mat dst; + d_dst.download(dst); + + cv::imshow("dst", dst); + cv::waitKey(0); + cv::destroyAllWindows(); + + return 0; +} diff --git a/cuda_code/io_4.cu b/cuda_code/io_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..855594bfa3b9980dc111d76633a1ce697d13210d --- /dev/null +++ b/cuda_code/io_4.cu @@ -0,0 +1,525 @@ +/***************************************************************************//** + * \file io.cu + * \author Anush Krishnan (anush@bu.edu) + * \brief Implementation of the functions of the namespace \c io. + */ + + +#include +#include "io.h" +#include +#include "preconditioner.h" +#include + +using std::string; +using std::ios; + + +/** + * \brief Converts a string to a number. + * + * \param str a string + * + * \return a number (\c double or \c integer) + */ +template +T toNumber(string str) +{ + T num; + std::stringstream ss(str); //turn the string into a stream + ss >> num; //convert + return num; +} + +/** + * \namespace io + * \brief Contains functions related to I/O tasks. + */ +namespace io +{ + +/** + * \brief Splits a string given a delimiter. 
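+ * For example, split("a/b/c", '/', elems) appends "a", "b" and "c" to \a elems and returns it.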
+ * + * \param s the string to split + * \param delim the delimiter + * \param elems the vector that contains the different elements of the string + * + * \return a vector that contains the different elements of the string + */ +std::vector &split(const std::string &s, char delim, std::vector &elems) { + std::stringstream ss(s); + std::string item; + while (std::getline(ss, item, delim)) { + elems.push_back(item); + } + return elems; +} + +/** + * \brief Splits a string given a delimiter. + * + * \param s the string to split + * \param delim the delimiter + * + * \return a vector that contains the different elements of the string + */ +std::vector split(const std::string &s, char delim) { + std::vector elems; + split(s, delim, elems); + return elems; +} + +/** + * \brief Creates a directory. + * + * If the parent directory does not exist, it will be created. + * + * \param folderPath the path of the directory to create + */ +void makeDirectory(const std::string folderPath) +{ + std::vector x = split(folderPath, '/'); + int n = x.size(); + int i = 0; + std::string folder = x[i]; + mkdir(folder.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); + i++; + while(i(); + std::cout<<"Parsing...\n"; + // read the simulation file + string fname = folder + "/simParams.yaml"; + parseSimulationFile(fname, DB); + + // read the flow file + fname = folder + "/flow.yaml"; + parseFlowFile(fname, DB); + + // read the domain file + fname = folder + "/domain.yaml"; + parseDomainFile(fname, D); + + // read the body file + fname = folder + "/bodies.yaml"; + parseBodiesFile(fname, DB); + std::cout<<"...Finished Parsing\n"; + + // second pass of command line -- overwrite values in DB + commandLineParse2(argc, argv, DB); +} + +/** + * \brief Initializes the database with default values. + * + * \param DB database that contains all the simulation parameters + */ +void initialiseDefaultDB(parameterDB &DB) +{ + DB["inputs"] = componentParameter(); + DB["flow"] = componentParameter(); + DB["simulation"] = componentParameter(); + DB["velocitySolve"] = componentParameter(); + DB["PoissonSolve"] = componentParameter(); + + // default input files + string inputs = "inputs"; + DB[inputs]["caseFolder"].set("/scratch/cases/cuIBM/cases/cylinder/Re40"); + DB[inputs]["deviceNumber"].set(0); + + // flow parameters + string flow = "flow"; + DB[flow]["nu"].set(0.01); + DB[flow]["uInitial"].set(1.0); + DB[flow]["vInitial"].set(0.0); + DB[flow]["numBodies"].set(0); + std::vector *bodyVec = new std::vector; + DB[flow]["bodies"].set *>(bodyVec); + + // boundary conditions + boundaryCondition **bc = new boundaryCondition*[4]; + for (int i=0; i<4; i++) + bc[i] = new boundaryCondition[2]; + DB[flow]["boundaryConditions"].set(bc); + + // simulation parameters + string sim = "simulation"; + DB[sim]["dt"].set(0.02); + DB[sim]["nt"].set(100); + DB[sim]["nsave"].set(100); + DB[sim]["restart"].set(false); + DB[sim]["startStep"].set(0); + DB[sim]["Ured"].set(3); + DB[sim]["VIV"].set(0); + DB[sim]["SolverType"].set(NAVIERSTOKES); + + // velocity solver + string solver = "velocitySolve"; + DB[solver]["solver"].set("CG"); + DB[solver]["preconditioner"].set(DIAGONAL); + DB[solver]["tolerance"].set(1e-5); + DB[solver]["maxIterations"].set(10000); + + // Poisson solver + solver = "PoissonSolve"; + DB[solver]["solver"].set("CG"); + DB[solver]["preconditioner"].set(DIAGONAL); + DB[solver]["tolerance"].set(1e-5); + DB[solver]["maxIterations"].set(20000); +} + +/** + * \brief Parses the command-line to get the case folder name + * and the device number. 
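+ * Only the flags "-caseFolder <path>" and "-deviceNumber <n>" are handled here; the latter
+ * also calls cudaSetDevice to make that device current for the calling host thread.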
+ * + * \param argc number of arguments in the command-line + * \param argv arguments of the command-line + * \param DB database that contains all the simulation parameters + */ +void commandLineParse1(int argc, char **argv, parameterDB &DB) +{ + for (int i=1; i(string(argv[i])); + } + else if (strcmp(argv[i],"-deviceNumber")==0) + { + i++; + int devNum = toNumber(string(argv[i])); + DB["inputs"]["deviceNumber"].set(devNum); + // sets devNum as the current device for the calling host thread + cudaSetDevice(devNum); + } + } +} + +/** + * \brief Overwrites parameters with additional arguments of the command-line. + * + * \param argc number of arguments in the command-line + * \param argv arguments of the command-line + * \param DB database that contains all the simulation parameters + */ +void commandLineParse2(int argc, char **argv, parameterDB &DB) +{ + for (int i=1; i(toNumber(string(argv[i]))); + } + //// angle of attack + //if ( strcmp(argv[i],"-alpha")==0 ) + //{ + // i++; + // DB["flow"]["nu"].set(toNumber(string(argv[i]))); + //} + // perturbation in the x-velocity + if ( strcmp(argv[i],"-uPerturb")==0 ) + { + i++; + DB["flow"]["uPerturb"].set(toNumber(string(argv[i]))); + } + // perturbation in the y-velocity + if ( strcmp(argv[i],"-vPerturb")==0 ) + { + i++; + DB["flow"]["vPerturb"].set(toNumber(string(argv[i]))); + } + // scale the CV with respect to the body + if ( strcmp(argv[i],"-scaleCV")==0 ) + { + i++; + DB["simulation"]["scaleCV"].set(toNumber(string(argv[i]))); + } + // frequency of saving the data + if ( strcmp(argv[i],"-nsave")==0 ) + { + i++; + DB["simulation"]["nsave"].set(toNumber(string(argv[i]))); + } + // total number of time steps + if ( strcmp(argv[i],"-nt")==0 ) + { + i++; + DB["simulation"]["nt"].set(toNumber(string(argv[i]))); + } + // size of time increment + if ( strcmp(argv[i],"-dt")==0 ) + { + i++; + DB["simulation"]["dt"].set(toNumber(string(argv[i]))); + } + // tolerance for the velocity solve + if ( strcmp(argv[i],"-velocityTol")==0 ) + { + i++; + DB["velocitySolve"]["tolerance"].set(toNumber(string(argv[i]))); + } + // tolerance for the Poisson solve + if ( strcmp(argv[i],"-poissonTol")==0 ) + { + i++; + DB["PoissonSolve"]["tolerance"].set(toNumber(string(argv[i]))); + } + } +} + +//############################################################################## +// OUTPUT +//############################################################################## + +/** + * \brief Converts a \c preconditionerType to a \c std::string. + * + * \param s a preconditioner + * + * \return a string + */ +/* +string stringFromPreconditionerType(preconditionerType s) +{ + if (s == NONE) + return "None"; + else if (s == DIAGONAL) + return "Diagonal"; + else if (s == SMOOTHED_AGGREGATION) + return "Smoothed Aggregation"; + else if (s == AINV) + return "Approximate Inverse"; + else + return "Unrecognised preconditioner"; +}*/ + +/** + * \brief Prints the parameters of the simulation. 
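+ * Everything goes to standard output, grouped into labelled sections: flow, domain,
+ * simulation, the velocity and Poisson solvers, output settings and the selected CUDA device.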
+ * + * \param DB database that contains all the simulation parameters + * \param D information about the computational grid + */ +void printSimulationInfo(parameterDB &DB, domain &D) +{ + double dt = DB["simulation"]["dt"].get(), + scaleCV = DB["simulation"]["scaleCV"].get(); + int nt = DB["simulation"]["nt"].get(), + nsave = DB["simulation"]["nsave"].get(), + startStep = DB["simulation"]["startStep"].get(); + + std::cout << '\n'; + + std::cout << "\nFlow parameters" << '\n'; + std::cout << "---------------" << '\n'; + std::cout << "nu = " << DB["flow"]["nu"].get() << '\n'; + std::cout << "Re(1/nu) = " << 1/DB["flow"]["nu"].get() << '\n'; + + std::cout << "\nDomain" << '\n'; + std::cout << "------" << '\n'; + std::cout << D.nx << " x " << D.ny << '\n'; + + std::cout << "\nSimulation parameters" << '\n'; + std::cout << "---------------------" << '\n'; + std::cout << "dt = " << dt << '\n'; + std::cout << "scaleCV = " << scaleCV << '\n'; + std::cout << "startStep = " << startStep << '\n'; + std::cout << "nt = " << nt << '\n'; + std::cout << "nsave = " << nsave << '\n'; + + std::cout << "\nVelocity Solve" << '\n'; + std::cout << "--------------" << '\n'; + std::cout << "Solver = " << DB["velocitySolve"]["solver"].get() << '\n'; + //std::cout << "Preconditioner = " << stringFromPreconditionerType(DB["velocitySolve"]["preconditioner"].get()) << '\n'; + std::cout << "Tolerance = " << DB["velocitySolve"]["tolerance"].get() << '\n'; + + std::cout << "\nPoisson Solve" << '\n'; + std::cout << "-------------" << '\n'; + std::cout << "Solver = " << DB["PoissonSolve"]["solver"].get() << '\n'; + //std::cout << "Preconditioner = " << stringFromPreconditionerType(DB["PoissonSolve"]["preconditioner"].get()) << '\n'; + std::cout << "Tolerance = " << DB["PoissonSolve"]["tolerance"].get() << '\n'; + + std::cout << "\nOutput parameters" << '\n'; + std::cout << "-----------------" << '\n'; + std::cout << "Output folder = " << DB["inputs"]["caseFolder"].get() << '\n'; + std::cout << "nsave = " << DB["simulation"]["nsave"].get() << '\n'; + + cudaDeviceProp deviceProp; + int gpu = DB["inputs"]["deviceNumber"].get(); + cudaGetDeviceProperties(&deviceProp, gpu); + std::cout << "\nDevice Properties" << '\n'; + std::cout << "-----------------" << '\n'; + std::cout << "Name = " << deviceProp.name << '\n'; + std::cout << "Number = " << gpu << '\n'; + std::string ecc = deviceProp.ECCEnabled ? "yes" : "no"; + std::cout << "Compute capability = " << deviceProp.major << "." << deviceProp.minor << '\n'; + std::cout << "ECC Enabled = " << ecc << std::endl; +} + +/** + * \brief Prints the time spent to execute tasks. + * + * \param logger object that contains the name and time spent of tasks + */ +void printTimingInfo(Logger &logger) +{ + logger.printAllTime(); + std::cout << std::endl; +} + +/** + * \brief Writes information about the run into the file \a run.info. 
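+ * One "--flag value" pair is written per line (nx, ny, startStep, nt, nsave, dt, vortlim,
+ * the case folder and nu).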
+ * + * \param DB database that contains all the simulation parameters + * \param D information about the computational grid + */ +void writeInfoFile(parameterDB &DB, domain &D) +{ + std::string folder = DB["inputs"]["caseFolder"].get(); + std::ofstream infofile((folder+"/run.info").c_str()); + infofile << std::setw(20) << "--nx" << "\t" << D.nx << '\n'; + infofile << std::setw(20) << "--ny" << "\t" << D.ny << '\n'; + infofile << std::setw(20) << "--startStep" << "\t" << DB["simulation"]["startStep"].get() << '\n'; + infofile << std::setw(20) << "--nt" << "\t" << DB["simulation"]["nt"].get() << '\n'; + infofile << std::setw(20) << "--nsave" << "\t" << DB["simulation"]["nsave"].get() << '\n'; + infofile << std::setw(20) << "--dt" << "\t" << DB["simulation"]["dt"].get() << '\n'; + infofile << std::setw(20) << "--vortlim"<< "\t" << 15 << '\n'; + infofile << std::setw(20) << "--folder" << "\t" << folder << '\n'; + infofile << std::setw(20) << "--nu" << "\t" << DB["flow"]["nu"].get() << '\n'; + infofile.close(); +} + +/** + * \brief Writes grid-points coordinates into the file \a grid. + * + * \param caseFolder the directory of the simulation + * \param D information about the computational grid + */ +void writeGrid(std::string &caseFolder, domain &D) +{ + std::stringstream out; + out << caseFolder << "/grid"; + std::ofstream file(out.str().c_str(), ios::binary); + double x[D.nx], y[D.ny]; + for (int i=0; i < D.nx; i++) + x[i] = D.x[i]; + for (int i=0; i < D.ny; i++) + y[i] = D.y[i]; + + file.write((char*)(&D.nx), sizeof(int)); + file.write((char*)(&x[0]), (D.nx+1)*sizeof(double));//flag, wrong size? + file.write((char*)(&D.ny), sizeof(int)); + file.write((char*)(&y[0]), (D.ny+1)*sizeof(double)); + file.close(); +} + +/** + * \brief Writes numerical data at a given time-step (on the device). + * + * It creates a directory whose name is the time-step number + * and writes the flux, the pressure (and eventually the body forces) + * into the files \a q, \a lambda, respectively. + * + * \param caseFolder directory of the simulation + * \param n the time-step number + * \param q array that contains the fluxes + * \param lambda array that contains the pressures (and eventually the body forces) + * \param D information about the computational grid + */ +template <> +void writeData< cusp::array1d >(std::string &caseFolder, int n, cusp::array1d &u, cusp::array1d &p, domain &D)//, bodies &B) +{ + std::string path; + std::stringstream out; + int N; + + out << caseFolder << '/' << std::setfill('0') << std::setw(7) << n; + path = out.str(); + + mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); + + out.str(""); + out << path << "/q"; + std::ofstream file(out.str().c_str(), ios::binary); + N = u.size(); + file.write((char*)(&N), sizeof(int)); + file.write((char*)(&u[0]), N*sizeof(double)); + file.close(); + + out.str(""); + out << path << "/lambda"; + file.open(out.str().c_str(), ios::binary); + N = p.size(); + file.write((char*)(&N), sizeof(int)); + file.write((char*)(&p[0]), N*sizeof(double)); + file.close(); + + std::cout << "Data saved to folder " << path << std::endl; +} + +/** + * \brief Writes numerical data at a given time-step (on the device). + * + * It creates a directory whose name is the time-step number + * and writes the flux, the pressure (and eventually the body forces) + * into the files \a q, \a lambda, respectively. 
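+ * This specialization simply copies \a u and \a p into the host-side vectors \c uH and
+ * \c pH and delegates to the specialization above, which performs the actual binary writes
+ * (each file stores the vector length as an int followed by that many doubles).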
+ * + * \param caseFolder directory of the simulation + * \param n the time-step number + * \param q array that contains the fluxes + * \param lambda array that contains the pressures (and eventually the body forces) + * \param D information about the computational grid + */ +template <> +void writeData< cusp::array1d >(std::string &caseFolder, int n, cusp::array1d &u, cusp::array1d &p, domain &D)//, bodies &B) +{ + cusp::array1d uH = u, + pH = p; + + writeData(caseFolder, n, uH, pH, D); +} + +/** + * \brief Prints device memory usage. + * + * \param label the label of the device + */ +void printDeviceMemoryUsage() +{ + size_t _free, _total; + cudaMemGetInfo(&_free, &_total); + std::cout << '\n' << "Initialisation complete\nFlux capacitors charged" << ": Memory Usage " << std::setprecision(3) << (_total-_free)/(1024.0*1024*1024) \ + << " / " << std::setprecision(3) << _total/(1024.0*1024*1024) << " GB" << std::setprecision(6) << '\n' << std::endl; +} + +} // end namespace io diff --git a/cuda_code/iou3d_nms_kernel_5.cu b/cuda_code/iou3d_nms_kernel_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..89d99c44238dce731698b7d2659630b5fe78140b --- /dev/null +++ b/cuda_code/iou3d_nms_kernel_5.cu @@ -0,0 +1,710 @@ +#include +#include +#include +#include +#include "cuda_nms.h" +#define MIN_THREADS_PER_BLOCK 128 + +using namespace std; + +namespace NAMESPACE +{ + +inline int GetMaxOccupacy(int SMs, int processNum) +{ + int threshold = processNum / SMs; + int thread = MIN_THREADS_PER_BLOCK; + + while(thread < threshold) + thread = thread << 1; + + thread = thread >> 1; + + thread = thread > 512 ? 512 : thread; + + return thread; + +} + +// #define DEBUG +const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; +const float EPS = 1e-8; + +struct Point { + float x, y; + __device__ Point() {} + __device__ Point(double _x, double _y){ + x = _x, y = _y; + } + + __device__ void set(float _x, float _y){ + x = _x; y = _y; + } + + __device__ Point operator +(const Point &b)const{ + return Point(x + b.x, y + b.y); + } + + __device__ Point operator -(const Point &b)const{ + return Point(x - b.x, y - b.y); + } +}; + +__device__ inline float cross(const Point &a, const Point &b){ + return a.x * b.y - a.y * b.x; +} + +__device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ + return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); +} + +__device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ + int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && + min(q1.x,q2.x) <= max(p1.x,p2.x) && + min(p1.y,p2.y) <= max(q1.y,q2.y) && + min(q1.y,q2.y) <= max(p1.y,p2.y); + return ret; +} + +__device__ inline int check_in_box2d(const float *box, const Point &p){ + //params: (7) [x, y, z, dx, dy, dz, heading] + const float MARGIN = 1e-2; + + float center_x = box[0], center_y = box[1]; + float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box + float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); + float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; + + return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); +} + +__device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ + // fast exclusion + if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; + + // check cross standing + float s1 = cross(q0, p1, p0); + float s2 = cross(p1, q1, p0); + float 
s3 = cross(p0, q1, q0); + float s4 = cross(q1, p1, q0); + + if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; + + // calculate intersection of two lines + float s5 = cross(q1, p1, p0); + if(fabs(s5 - s1) > EPS){ + ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); + ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); + + } + else{ + float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; + float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; + float D = a0 * b1 - a1 * b0; + + ans.x = (b0 * c1 - b1 * c0) / D; + ans.y = (a1 * c0 - a0 * c1) / D; + } + + return 1; +} + +__device__ inline void rotate_around_center(const Point ¢er, const float angle_cos, const float angle_sin, Point &p){ + float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; + float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; + p.set(new_x, new_y); +} + +__device__ inline int point_cmp(const Point &a, const Point &b, const Point ¢er){ + return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); +} + +__device__ inline float box_overlap(const float *box_a, const float *box_b){ + // params box_a: [x, y, z, dx, dy, dz, heading] + // params box_b: [x, y, z, dx, dy, dz, heading] + + float a_angle = box_a[6], b_angle = box_b[6]; + float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; + float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; + float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; + float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; + float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; + + Point center_a(box_a[0], box_a[1]); + Point center_b(box_b[0], box_b[1]); + + Point box_a_corners[5]; + box_a_corners[0].set(a_x1, a_y1); + box_a_corners[1].set(a_x2, a_y1); + box_a_corners[2].set(a_x2, a_y2); + box_a_corners[3].set(a_x1, a_y2); + + Point box_b_corners[5]; + box_b_corners[0].set(b_x1, b_y1); + box_b_corners[1].set(b_x2, b_y1); + box_b_corners[2].set(b_x2, b_y2); + box_b_corners[3].set(b_x1, b_y2); + + // get oriented corners + float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); + float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); + + for (int k = 0; k < 4; k++){ + rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); + rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); + } + + box_a_corners[4] = box_a_corners[0]; + box_b_corners[4] = box_b_corners[0]; + + // get intersection of lines + Point cross_points[16]; + Point poly_center; + int cnt = 0, flag = 0; + + poly_center.set(0, 0); + for (int i = 0; i < 4; i++){ + for (int j = 0; j < 4; j++){ + flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); + if (flag){ + poly_center = poly_center + cross_points[cnt]; + cnt++; + } + } + } + + // check corners + for (int k = 0; k < 4; k++){ + if (check_in_box2d(box_a, box_b_corners[k])){ + poly_center = poly_center + box_b_corners[k]; + cross_points[cnt] = box_b_corners[k]; + cnt++; + } + if (check_in_box2d(box_b, box_a_corners[k])){ + poly_center = poly_center + box_a_corners[k]; + cross_points[cnt] = box_a_corners[k]; + cnt++; + } + } + + poly_center.x /= cnt; + poly_center.y /= cnt; + + // sort the points of polygon + Point temp; + for (int j = 0; j < cnt - 1; j++){ + for (int i = 0; i < cnt - j - 1; i++){ + if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ + temp 
= cross_points[i]; + cross_points[i] = cross_points[i + 1]; + cross_points[i + 1] = temp; + } + } + } + + // get the overlap areas + float area = 0; + for (int k = 0; k < cnt - 1; k++){ + area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); + } + + return fabs(area) / 2.0; +} + +__device__ inline float iou_bev(const float *box_a, const float *box_b){ + // params box_a: [x, y, z, dx, dy, dz, heading] + // params box_b: [x, y, z, dx, dy, dz, heading] + float sa = box_a[3] * box_a[4]; + float sb = box_b[3] * box_b[4]; + float s_overlap = box_overlap(box_a, box_b); + float union_area = fmaxf(sa + sb - s_overlap, 0); + if(union_area<=0) return 0.0; + return s_overlap / union_area; +} + +__device__ inline float iou_normal(float const * const a, float const * const b) { + //params: a: [x, y, dx, dy] + //params: b: [x, y, dx, dy] + + float left = fmaxf(a[0] - a[2] / 2, b[0] - b[2] / 2), right = fminf(a[0] + a[2] / 2, b[0] + b[2] / 2); + float top = fmaxf(a[1] - a[3] / 2, b[1] - b[3] / 2), bottom = fminf(a[1] + a[3] / 2, b[1] + b[3] / 2); + float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); + float interS = width * height; + float Sa = a[3] * a[2]; + float Sb = b[3] * b[2]; + float union_area = fmaxf(Sa + Sb - interS, 0); + if(union_area<=0) return 0.0; + return interS / union_area; +} + + +extern "C" +__global__ +void squeeze_for_score_kernel(const float *cls_rw, float *score, int *cls_index, int *range_index_rw, int* counter, int num_cls, int num_box, float score_thresh) +{ + int i = threadIdx.x + blockIdx.x * blockDim.x; + + __shared__ int dCounter; + if(threadIdx.x == 0) dCounter = 0; + __syncthreads(); + + bool isValid = i >= num_box ? false : true; + + int pos, cls; + float scoreTmp; + if(isValid) + { + cls = 0; + scoreTmp = cls_rw[i * num_cls]; + + for(int j = 1; j < num_cls; ++j) + { + float scoreNow = cls_rw[i * num_cls + j]; + bool isSmaller = (scoreTmp < scoreNow); + + scoreTmp = isSmaller ? scoreNow : scoreTmp; + cls = isSmaller ? j : cls; + } + isValid = (score_thresh < scoreTmp); + } + + if(isValid) pos = atomicAdd(&dCounter, 1); + __syncthreads(); + if(threadIdx.x == 0) dCounter = atomicAdd(counter, dCounter); + __syncthreads(); + + if(isValid) + { + pos += dCounter; + cls_index [i] = cls; + range_index_rw[pos] = i; + score [pos] = -scoreTmp; + } + + +} + + +extern "C" +__global__ +void squeeze_for_score_half_kernel(const __half *cls_rw, float *score, int *cls_index, int *range_index_rw, int* counter, int num_cls, int num_box, float score_thresh) +{ + int i = threadIdx.x + blockIdx.x * blockDim.x; + + __shared__ int dCounter; + if(threadIdx.x == 0) dCounter = 0; + __syncthreads(); + + bool isValid = i >= num_box ? false : true; + + int pos, cls; + float scoreTmp; + if(isValid) + { + cls = 0; + scoreTmp = __half2float(cls_rw[i * num_cls]); + + for(int j = 1; j < num_cls; ++j) + { + float scoreNow = __half2float(cls_rw[i * num_cls + j]); + bool isSmaller = (scoreTmp < scoreNow); + + scoreTmp = isSmaller ? scoreNow : scoreTmp; + cls = isSmaller ? 
j : cls; + } + isValid = (score_thresh < scoreTmp); + } + + if(isValid) pos = atomicAdd(&dCounter, 1); + __syncthreads(); + if(threadIdx.x == 0) dCounter = atomicAdd(counter, dCounter); + __syncthreads(); + + if(isValid) + { + pos += dCounter; + cls_index [i] = cls; + range_index_rw[pos] = i; + score [pos] = -scoreTmp; + } + + +} + +__global__ void copy_to_temp_kernel_bev(int *range_index_rw, float *score, int *cls_index, const float *box_s_rw, int *cls_temp, float *box_temp, + int num, int original_num, int num_box_info) +{ + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for(int i = idx; i < num; i += stride) + { + int index = range_index_rw[i]; + cls_temp[i] = cls_index[index]; + #pragma unroll 7 + for (int j = 0; j < 7; ++j) + box_temp[i * 7 + j] = box_s_rw[index * num_box_info + j]; + } + +} + +__global__ void copy_to_temp_kernel_nor(int *range_index_rw, float *score, int *cls_index, const float *box_s_rw, int *cls_temp, float *box_temp, + int num, int original_num, int num_box_info) +{ + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for(int i = idx; i < num; i += stride) + { + int index = range_index_rw[i]; + cls_temp[i] = cls_index[index]; + + #pragma unroll 4 + for (int j = 0; j < 4; ++j) + box_temp[i * 4 + j] = box_s_rw[index * num_box_info + j]; + } + +} + +__global__ void copy_to_temp_half_kernel_bev(int *range_index_rw, float *score, int *cls_index, const __half *box_s_rw, int *cls_temp, float *box_temp, + int num, int original_num, int num_box_info) +{ + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for(int i = idx; i < num; i += stride) + { + int index = range_index_rw[i]; + cls_temp[i] = cls_index[index]; + #pragma unroll 7 + for (int j = 0; j < 7; ++j) + box_temp[i * 7 + j] = __half2float(box_s_rw[index * num_box_info + j]); + } + +} + +__global__ void copy_to_temp_half_kernel_nor(int *range_index_rw, float *score, int *cls_index, const __half *box_s_rw, int *cls_temp, float *box_temp, + int num, int original_num, int num_box_info) +{ + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for(int i = idx; i < num; i += stride) + { + int index = range_index_rw[i]; + cls_temp[i] = cls_index[index]; + + #pragma unroll 4 + for (int j = 0; j < 4; ++j) + box_temp[i * 4 + j] = __half2float(box_s_rw[index * num_box_info + j]); + } + +} + + + +__global__ void iou_self_bev_kernel(int nms_pre_maxsize, const float *boxes, int *index, float *ans_iou) +{ + // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for(int i = idx; i < nms_pre_maxsize * nms_pre_maxsize; i += stride) + { + int row = i / nms_pre_maxsize; //rows + int col = i - (row * nms_pre_maxsize); //cols + + int a_idx = row; + int b_idx = col; + + if(a_idx!=b_idx){ + float cur_iou_bev = iou_bev(boxes + a_idx * 7, boxes + b_idx * 7); + ans_iou[row * nms_pre_maxsize + col] = cur_iou_bev; + ans_iou[col * nms_pre_maxsize + row] = cur_iou_bev; + }else{ + ans_iou[row * nms_pre_maxsize + col] = 1.0; + } + } +} + +__global__ void iou_self_kernel(int nms_pre_maxsize, const float *boxes, int *index, float *ans_iou){ + // params boxes: (N, 4) [x, y, dx, dy] + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for(int i = idx; i < nms_pre_maxsize * nms_pre_maxsize; i += stride) + { + int row = i / nms_pre_maxsize; 
//rows + int col = i - (row * nms_pre_maxsize); //cols + + int a_idx = index[row]; + int b_idx = index[col]; + + if(a_idx!=b_idx){ + float cur_iou = iou_normal(boxes + a_idx * 4, boxes + b_idx * 4); + ans_iou[row * nms_pre_maxsize + col] = cur_iou; + ans_iou[col * nms_pre_maxsize + row] = cur_iou; + } + else{ + ans_iou[row * nms_pre_maxsize + col] = 1.0; + } + } + +} + + +__global__ +void nms_kernel(int nms_pre_maxsize, int *range_index, float *ans_iou, float nms_thresh, int box_idx) +{ + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for(int i = idx; i < nms_pre_maxsize; i += stride) + { + if(range_index[box_idx]<0) continue; + if(i<=box_idx) continue; + if(ans_iou[box_idx * nms_pre_maxsize + i] > nms_thresh) range_index[i] = -1; + + } +} + +void nms_func(int nms_pre_maxsize, int *range_index, float *ans_iou, float nms_thresh) +{ + + int blockSize; // The launch configurator returned block size + int minGridSize; // The minimum grid size needed to achieve the + // maximum occupancy for a full device launch + checkCudaErrors(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, nms_kernel, 0, nms_pre_maxsize)); + minGridSize = std::min(minGridSize, DivUp(nms_pre_maxsize, blockSize)); + for (int i = 0; i < nms_pre_maxsize; ++i) + nms_kernel<<>>(nms_pre_maxsize, range_index, ans_iou, nms_thresh, i); + //cudaDeviceSynchronize(); + +} + +__global__ +void concat_outputs_kernel_bev(float *box_temp, float *score_temp, int *cls_temp, int *range_index_rw, + int total_box, int nms_post_maxsize, float *dst_s_rw, int orign_num) +{ + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for (int i = idx; i < total_box; i += stride) + { + int index = range_index_rw[i]; + #pragma unroll 7 + for (int j = 0; j < 7; ++j) + dst_s_rw[i * 9 + j] = box_temp[index * 7 + j]; + + dst_s_rw[i * 9 + 7] = -score_temp[index];// score was stored negated for the earlier ascending sort + dst_s_rw[i * 9 + 8] = (float)cls_temp[index]; + + } + +} + +__global__ +void concat_outputs_kernel_nor(float *box_temp, float *score_temp, int *cls_temp, int *range_index_rw, + int total_box, int nms_post_maxsize, float *dst_s_rw, int orign_num) +{ + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for (int i = idx; i < total_box; i += stride) + { + int index = range_index_rw[i]; + #pragma unroll 4 + for (int j = 0; j < 4; ++j) + dst_s_rw[i * 6 + j] = box_temp[index * 4 + j]; + + dst_s_rw[i * 6 + 4] = -score_temp[index];// score was stored negated for the earlier ascending sort + dst_s_rw[i * 6 + 5] = (float)cls_temp[index]; + + } + +} + +__global__ +void concat_outputs_half_kernel_bev(float *box_temp, float *score_temp, int *cls_temp, int *range_index_rw, + int total_box, int nms_post_maxsize, __half *dst_s_rw, int orign_num) +{ + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for (int i = idx; i < total_box; i += stride) + { + int index = range_index_rw[i]; + #pragma unroll 7 + for (int j = 0; j < 7; ++j) + dst_s_rw[i * 9 + j] = __float2half(box_temp[index * 7 + j]); + + dst_s_rw[i * 9 + 7] = __float2half(-score_temp[index]);// score was stored negated for the earlier ascending sort + dst_s_rw[i * 9 + 8] = __float2half((float)cls_temp[index]); + + } + +} + +__global__ +void concat_outputs_half_kernel_nor(float *box_temp, float *score_temp, int *cls_temp, int *range_index_rw, + int total_box, int nms_post_maxsize, __half *dst_s_rw, int orign_num) +{ + + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for (int i = idx; i < total_box; i += stride) + { + 
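+ // Gather the i-th surviving box (indices kept by NMS in range_index_rw) into the packed
+ // half-precision output row [x, y, dx, dy, score, class]; the score is negated back because
+ // it was stored as a negative value so the ascending sort ran by descending score.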
int index = range_index_rw[i]; + #pragma unroll 4 + for (int j = 0; j < 4; ++j) + dst_s_rw[i * 6 + j] = __float2half(box_temp[index * 4 + j]); + + dst_s_rw[i * 6 + 4] = __float2half(-score_temp[index]);// score was stored negated for the earlier ascending sort + dst_s_rw[i * 6 + 5] = __float2half((float)cls_temp[index]); + + } + +} + +__global__ +void range_kernel(int *index, int num) +{ + int idx = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + + for (int i = idx; i < num; i += stride) + { + index[i] = i; + } + +} + + +struct is_neg +{ + __host__ __device__ + bool operator()(const int x) + { + return x < 0; + } +}; + +void cuda_nms(const float *batch_box, const float *batch_cls_rw, float *score, int *cls_index, int *range_index_rw, int* pos_rw, int *cls_temp, float *box_temp, + float *ious_rw, float *dst, int num_box, int num_cls, int nms_pre_maxsize, int nms_post_maxsize, float nms_thresh, int batch_size, + float score_thresh, int use_bev, int* validboxes_rw) +{ + std::vector<int> host_validboxes(batch_size, 0); + int SMs = 0; + checkCudaErrors(cudaDeviceGetAttribute(&SMs, cudaDevAttrMultiProcessorCount, 0)); + int num_box_info = use_bev == 0 ? 4 : 7; + + for (int i = 0; i < batch_size; ++i){ + const float *cls_s_rw = batch_cls_rw + i * num_box * num_cls; + const float *box_s_rw = batch_box + i * num_box * num_box_info; + + float *dst_s_rw = dst + i * nms_post_maxsize * (num_box_info + 2); + int *pos_s_rw = pos_rw + i; + + // pick the highest-scoring class for each box and record the class index and score; record the box index, and mark the index of boxes below the score threshold as -1 + squeeze_for_score_kernel<<>>(cls_s_rw, score, cls_index, range_index_rw, pos_s_rw, num_cls, num_box, score_thresh);  // extract class and score information + + int num; + checkCudaErrors(cudaMemcpy(&num, pos_s_rw, sizeof(int), cudaMemcpyDeviceToHost)); + if(num < 1) continue; + + // sort the valid boxes by confidence; note that score and range_index_rw now hold the sorted results + thrust::stable_sort_by_key(thrust::device, score, score + num, range_index_rw); + + if(num > nms_pre_maxsize) num = nms_pre_maxsize; + + if(use_bev != 0) copy_to_temp_kernel_bev<<>>(range_index_rw, score, cls_index, box_s_rw, cls_temp, box_temp, num, num_box, num_box_info); + else copy_to_temp_kernel_nor<<>>(range_index_rw, score, cls_index, box_s_rw, cls_temp, box_temp, num, num_box, num_box_info); + + if(use_bev != 0) iou_self_bev_kernel<<>>(num, box_temp, range_index_rw, ious_rw); + else iou_self_kernel <<>>(num, box_temp, range_index_rw, ious_rw); + + range_kernel<<>>(range_index_rw, num);// mark temp index + nms_func(num, range_index_rw, ious_rw, nms_thresh); + + // compact the boxes whose index is greater than -1; these are the valid boxes + int *new_end = thrust::remove_if(thrust::device, range_index_rw, range_index_rw + num, is_neg()); + int valid_num = new_end - range_index_rw; + + if(valid_num < 1) continue; + valid_num = valid_num > nms_post_maxsize ? 
nms_post_maxsize : valid_num; + + if(use_bev != 0) concat_outputs_kernel_bev<<>>(box_temp, score, cls_temp, range_index_rw, valid_num, nms_post_maxsize, dst_s_rw, num); + else concat_outputs_kernel_nor<<>>(box_temp, score, cls_temp, range_index_rw, valid_num, nms_post_maxsize, dst_s_rw, num); + + host_validboxes[i] = valid_num; + } + + checkCudaErrors(cudaMemcpy(validboxes_rw, host_validboxes.data(), sizeof(int) * batch_size, cudaMemcpyHostToDevice)); + +} + +void cuda_nms_fp16(const __half *batch_box, const __half *batch_cls_rw, float *score, int *cls_index, int *range_index_rw, int* pos_rw, int *cls_temp, float *box_temp, + float *ious_rw, __half *dst, int num_box, int num_cls, int nms_pre_maxsize, int nms_post_maxsize, float nms_thresh, int batch_size, + float score_thresh, int use_bev, int* validboxes_rw) +{ + std::vector<int> host_validboxes(batch_size, 0); + int SMs = 0; + checkCudaErrors(cudaDeviceGetAttribute(&SMs, cudaDevAttrMultiProcessorCount, 0)); + int num_box_info = use_bev == 0 ? 4 : 7; + + for (int i = 0; i < batch_size; ++i){ + const __half *cls_s_rw = batch_cls_rw + i * num_box * num_cls; + const __half *box_s_rw = batch_box + i * num_box * num_box_info; + + __half *dst_s_rw = dst + i * nms_post_maxsize * (num_box_info + 2); + int *pos_s_rw = pos_rw + i; + + // pick the highest-scoring class for each box and record the class index and score; record the box index, and mark the index of boxes below the score threshold as -1 + squeeze_for_score_half_kernel<<>>(cls_s_rw, score, cls_index, range_index_rw, pos_s_rw, num_cls, num_box, score_thresh);  // extract class and score information + + int num; + checkCudaErrors(cudaMemcpy(&num, pos_s_rw, sizeof(int), cudaMemcpyDeviceToHost)); + if(num < 1) continue; + + // sort the valid boxes by confidence; note that score and range_index_rw now hold the sorted results + thrust::stable_sort_by_key(thrust::device, score, score + num, range_index_rw); + + if(num > nms_pre_maxsize) num = nms_pre_maxsize; + + if(use_bev != 0) copy_to_temp_half_kernel_bev<<>>(range_index_rw, score, cls_index, box_s_rw, cls_temp, box_temp, num, num_box, num_box_info); + else copy_to_temp_half_kernel_nor<<>>(range_index_rw, score, cls_index, box_s_rw, cls_temp, box_temp, num, num_box, num_box_info); + + if(use_bev != 0) iou_self_bev_kernel<<>>(num, box_temp, range_index_rw, ious_rw); + else iou_self_kernel <<>>(num, box_temp, range_index_rw, ious_rw); + + range_kernel<<>>(range_index_rw, num);// mark temp index + nms_func(num, range_index_rw, ious_rw, nms_thresh); + + // compact the boxes whose index is greater than -1; these are the valid boxes + int *new_end = thrust::remove_if(thrust::device, range_index_rw, range_index_rw + num, is_neg()); + int valid_num = new_end - range_index_rw; + + if(valid_num < 1) continue; + valid_num = valid_num > nms_post_maxsize ? 
nms_post_maxsize : valid_num; + + if(use_bev != 0) concat_outputs_half_kernel_bev<<>>(box_temp, score, cls_temp, range_index_rw, valid_num, nms_post_maxsize, dst_s_rw, num); + else concat_outputs_half_kernel_nor<<>>(box_temp, score, cls_temp, range_index_rw, valid_num, nms_post_maxsize, dst_s_rw, num); + + host_validboxes[i] = valid_num; + } + + checkCudaErrors(cudaMemcpy(validboxes_rw, host_validboxes.data(), sizeof(int) * batch_size, cudaMemcpyHostToDevice)); + +} + +}//namespace \ No newline at end of file diff --git a/cuda_code/ivfpq_topk.cu b/cuda_code/ivfpq_topk.cu new file mode 100644 index 0000000000000000000000000000000000000000..b33257ad5a87e9a6fd451557c262a83119fdb32d --- /dev/null +++ b/cuda_code/ivfpq_topk.cu @@ -0,0 +1,1586 @@ +#define _VOLATILE_ +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#define load(x) __ldcg(x) +#define store(x, value) __stcs(x, value) +#ifndef INFINITY +#define INFINITY __int_as_float(0x7f800000) +#endif + +typedef unsigned char uint8_t; +typedef long long ll_t; + +typedef struct __builtin_align__(8) +{ + float value; + float index; +} pair; + +typedef struct __device_builtin__ __builtin_align__(_NCS_) +{ + uint8_t _VARNAMES_; +} _uint8n_t; + +typedef union { + _uint8n_t u8n; + uint8_t val[_NCS_]; +} uint8n_t; + +__device__ __forceinline__ float atomicMax(float *address, float val) +{ + int ret = __float_as_int(*address); + while(val > __int_as_float(ret)) + { + int old = ret; + if((ret = atomicCAS((int *)address, old, __float_as_int(val))) == old) + break; + } + return __int_as_float(ret); +} + +__device__ __forceinline__ unsigned int bfe( + unsigned int source, + unsigned int bitIndex +) { + unsigned int bit; + asm volatile("bfe.u32 %0, %1, %2, %3;" : "=r"(bit) : "r"((unsigned int) source), "r"(bitIndex), "r"(1)); + return bit; +} + +__device__ __forceinline__ void warp_comparator( + float &value, + float &index, + const int stride, + const int direction +){ + const float otherValue = __shfl_xor_sync(0xFFFFFFFF, value, stride); + const float otherIndex = __shfl_xor_sync(0xFFFFFFFF, index, stride); + bool condition = value < otherValue == direction; + index = condition ? otherIndex : index; + value = condition ? otherValue : value; +} + +__device__ __forceinline__ void block_comparator( + float &value, + float &index, + const int stride, + const int direction, + const int laneID, + _VOLATILE_ float sMem[] +){ + float tempPrecomputed1 = sMem[laneID]; + float tempPrecomputed2 = sMem[_TPB_ + laneID]; + __syncthreads(); + + sMem[laneID] = value; + sMem[_TPB_ + laneID] = index; + __syncthreads(); + + float otherValue = sMem[laneID ^ stride]; + float otherIndex = sMem[_TPB_ + laneID ^ stride]; + __syncthreads(); + + sMem[laneID] = tempPrecomputed1; + sMem[_TPB_ + laneID] = tempPrecomputed2; + __syncthreads(); + + bool condition = value < otherValue == direction; + value = condition ? otherValue : value; + index = condition ? 
otherIndex : index; + /* + */ +} + +__device__ __forceinline__ void block_comparator_noop( +){ + __syncthreads(); + __syncthreads(); + __syncthreads(); + __syncthreads(); +} + +__device__ __forceinline__ void thread_comparator( + float &value, + float &index, + float otherValue, + float otherIndex, + const int direction +){ + bool condition = value > otherValue == direction; + if (condition){ + value = otherValue; + index = otherIndex; + } +} + +__device__ void bitonic_sort_2( + float &value, + float &index, + int laneID +){ + warp_comparator(value, index, 1, bfe(laneID, 1) ^ bfe(laneID, 0)); +} + +__device__ void bitonic_sort_4( + float &value, + float &index, + int laneID +){ + bitonic_sort_2(value, index, laneID); + warp_comparator(value, index, 2, bfe(laneID, 2) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 2) ^ bfe(laneID, 0)); +} + +__device__ void bitonic_sort_8( + float &value, + float &index, + int laneID +){ + bitonic_sort_4(value, index, laneID); + warp_comparator(value, index, 4, bfe(laneID, 3) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 3) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 3) ^ bfe(laneID, 0)); +} + +__device__ void bitonic_sort_16( + float &value, + float &index, + int laneID +){ + bitonic_sort_8(value, index, laneID); + warp_comparator(value, index, 8, bfe(laneID, 4) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 4) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 4) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 4) ^ bfe(laneID, 0)); +} + +__device__ void bitonic_sort_32( + float &value, + float &index, + int laneID +){ + bitonic_sort_16(value, index, laneID); + warp_comparator(value, index, 16, bfe(laneID, 5) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 5) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 5) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 5) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 5) ^ bfe(laneID, 0)); +} + +__device__ void bitonic_sort_global_2( + float &value, + float &index, + float otherValue, + float otherIndex, + int laneID +) { + if (_TPB_ - 32 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } +} + +__device__ void bitonic_sort_global_4( + float &value, + float &index, + float otherValue, + float otherIndex, + int laneID +) { + if (_TPB_ - 32 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } +} + +__device__ void bitonic_sort_global_8( + float &value, + float &index, + float otherValue, + float otherIndex, + int laneID +) { + if (_TPB_ - 32 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } +} + +__device__ void bitonic_sort_global_16( + float &value, + float &index, + float otherValue, + float otherIndex, + int laneID +) { + if (_TPB_ - 32 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } +} + 
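+// Note on the comparator helpers: bfe(laneID, k) extracts bit k of the lane index, and XOR-ing two
+// such bits selects the compare direction, which is what makes each power-of-two sub-block of the
+// bitonic network alternate between ascending and descending runs. The bitonic_sort_global_N
+// variants run only in the last N threads of the block: each of those threads merges its kept
+// top-N entry (finalValue/finalIndex in the kernels below) with a freshly sorted candidate,
+// keeping the larger value, and then re-sorts so the running top-N list stays ordered.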
+__device__ void bitonic_sort_global_32( + float &value, + float &index, + float otherValue, + float otherIndex, + int laneID +) { + if (_TPB_ - 32 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } +} + +#if _TPB_ >= 64 +__device__ void bitonic_sort_64( + float &value, + float &index, + _VOLATILE_ float sMem[], + int laneID +){ + bitonic_sort_32(value, index, laneID); + block_comparator(value, index, 32, bfe(laneID, 6) ^ bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, bfe(laneID, 6) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 6) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 6) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 6) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 6) ^ bfe(laneID, 0)); +} +#endif +__device__ void bitonic_sort_global_64( + float &value, + float &index, + float otherValue, + float otherIndex, + _VOLATILE_ float sMem[], + int laneID +) { + if (_TPB_ - 64 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } else { + block_comparator_noop(); + } +} + +#if _TPB_ >= 128 +__device__ void bitonic_sort_128( + float &value, + float &index, + _VOLATILE_ float sMem[], + int laneID +){ + bitonic_sort_64(value, index, sMem, laneID); + block_comparator(value, index, 64, bfe(laneID, 7) ^ bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, bfe(laneID, 7) ^ bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, bfe(laneID, 7) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 7) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 7) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 7) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 7) ^ bfe(laneID, 0)); +} +#endif +__device__ void bitonic_sort_global_128( + float &value, + float &index, + float otherValue, + float otherIndex, + _VOLATILE_ float sMem[], + int laneID +) { + if (_TPB_ - 128 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } else { + block_comparator_noop(); + block_comparator_noop(); + } +} + +#if _TPB_ >= 256 +__device__ void bitonic_sort_256( + float &value, + float &index, + _VOLATILE_ float sMem[], + int laneID +){ + bitonic_sort_128(value, index, sMem, laneID); + block_comparator(value, index, 128, bfe(laneID, 8) ^ bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, bfe(laneID, 8) ^ bfe(laneID, 6), laneID, sMem); + 
block_comparator(value, index, 32, bfe(laneID, 8) ^ bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, bfe(laneID, 8) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 8) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 8) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 8) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 8) ^ bfe(laneID, 0)); +} +#endif +__device__ void bitonic_sort_global_256( + float &value, + float &index, + float otherValue, + float otherIndex, + _VOLATILE_ float sMem[], + int laneID +) { + if (_TPB_ - 256 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + block_comparator(value, index, 128, !bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } else { + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + } +} + +#if _TPB_ >= 512 +__device__ void bitonic_sort_512( + float &value, + float &index, + _VOLATILE_ float sMem[], + int laneID +){ + bitonic_sort_256(value, index, sMem, laneID); + block_comparator(value, index, 256, bfe(laneID, 9) ^ bfe(laneID, 8), laneID, sMem); + block_comparator(value, index, 128, bfe(laneID, 9) ^ bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, bfe(laneID, 9) ^ bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, bfe(laneID, 9) ^ bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, bfe(laneID, 9) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 9) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 9) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 9) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 9) ^ bfe(laneID, 0)); +} +#endif +__device__ void bitonic_sort_global_512( + float &value, + float &index, + float otherValue, + float otherIndex, + _VOLATILE_ float sMem[], + int laneID +) { + if (_TPB_ - 512 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + block_comparator(value, index, 256, !bfe(laneID, 8), laneID, sMem); + block_comparator(value, index, 128, !bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } else { + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + } +} + + +#if _TPB_ >= 1024 +__device__ void bitonic_sort_1024( + float &value, + float &index, + _VOLATILE_ float sMem[], + int laneID +){ + bitonic_sort_512(value, index, sMem, laneID); + block_comparator(value, index, 512, bfe(laneID, 10) ^ bfe(laneID, 9), laneID, sMem); + block_comparator(value, index, 256, bfe(laneID, 10) ^ bfe(laneID, 8), laneID, sMem); + block_comparator(value, index, 128, bfe(laneID, 10) ^ bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, 
bfe(laneID, 10) ^ bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, bfe(laneID, 10) ^ bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, bfe(laneID, 10) ^ bfe(laneID, 4)); + warp_comparator(value, index, 8, bfe(laneID, 10) ^ bfe(laneID, 3)); + warp_comparator(value, index, 4, bfe(laneID, 10) ^ bfe(laneID, 2)); + warp_comparator(value, index, 2, bfe(laneID, 10) ^ bfe(laneID, 1)); + warp_comparator(value, index, 1, bfe(laneID, 10) ^ bfe(laneID, 0)); +} +#endif +__device__ void bitonic_sort_global_1024( + float &value, + float &index, + float otherValue, + float otherIndex, + _VOLATILE_ float sMem[], + int laneID +) { + if (_TPB_ - 1024 <= threadIdx.x){ + thread_comparator(value, index, otherValue, otherIndex, 0); + block_comparator(value, index, 512, !bfe(laneID, 9), laneID, sMem); + block_comparator(value, index, 256, !bfe(laneID, 8), laneID, sMem); + block_comparator(value, index, 128, !bfe(laneID, 7), laneID, sMem); + block_comparator(value, index, 64, !bfe(laneID, 6), laneID, sMem); + block_comparator(value, index, 32, !bfe(laneID, 5), laneID, sMem); + warp_comparator(value, index, 16, !bfe(laneID, 4)); + warp_comparator(value, index, 8, !bfe(laneID, 3)); + warp_comparator(value, index, 4, !bfe(laneID, 2)); + warp_comparator(value, index, 2, !bfe(laneID, 1)); + warp_comparator(value, index, 1, !bfe(laneID, 0)); + } else { + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + block_comparator_noop(); + } +} + +__device__ void load_precomputed_v1( + const float *precomputed, + _VOLATILE_ float *sMem, + int nQuery +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + int adr = (i * nQuery * _K_) + (qid * _K_) + (tid); + sMem[i * _K_ + tid] = precomputed[adr]; + + #else + #pragma unroll + for (int j = 0; j < _K_ / _TPB_; j++){ + int adr = (i * nQuery * _K_) + (qid * _K_) + (j * _TPB_ + tid); + sMem[i * _K_ + j * _TPB_ + tid] = precomputed[adr]; + } + #endif + } + } + __syncthreads(); +} + +__device__ void load_precomputed_v2( + const float *precomputed, + _VOLATILE_ float *sMem, + int iProbe, int nProbe +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + // int adr = (i * nQuery * _K_) + (qid * _K_) + (tid); + int adr = + (qid) * nProbe * _M_ * _K_ +\ + (iProbe) * _M_ * _K_ +\ + (i) * _K_ +\ + (tid); + sMem[i * _K_ + tid] = precomputed[adr]; + + #else + #pragma unroll + for (int j = 0; j < _K_ / _TPB_; j++){ + int adr = (qid) * nProbe * _M_ * _K_ +\ + (iProbe) * _M_ * _K_ +\ + (i) * _K_ +\ + (j * _TPB_ + tid); + sMem[i * _K_ + j * _TPB_ + tid] = precomputed[adr]; + } + #endif + } + } + __syncthreads(); +} + +__device__ void load_precomputed_v3( + const float* part1, + const float* part2, + _VOLATILE_ float *sMem, + int iCell +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + // int adr = (i * nQuery * _K_) + (qid * _K_) + (tid); + int adr1 =\ + (qid) * _M_ * _K_ +\ + (i) * _K_ +\ + (tid); + float precomputedValue = part1[adr1]; + + int adr2 =\ + (iCell) * _M_ * _K_ +\ + (i) * _K_ +\ + (tid); + sMem[i * _K_ + tid] = precomputedValue + part2[adr2]; + + #else + #pragma unroll + for (int j = 0; j < _K_ / _TPB_; j++){ + int adr1 =\ + (qid) * _M_ * _K_ +\ + (i) * _K_ +\ + (j * _TPB_ + tid); + float 
precomputedValue = part1[adr1]; + + int adr2 =\ + (iCell) * _M_ * _K_ +\ + (i) * _K_ +\ + (j * _TPB_ + tid); + sMem[i * _K_ + j * _TPB_ + tid] = precomputedValue + part2[adr2]; + } + #endif + } + } + __syncthreads(); +} + +__device__ void load_part1_to_cache( + const float* part1, + float part1Cache[_M_] +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + int adr1 =\ + (qid) * _M_ * _K_ +\ + (i) * _K_ +\ + (tid); + part1Cache[i] = part1[adr1]; + #endif + } + } +} + +__device__ void load_part2_to_cache( + const float* part2, + float part2Cache[_M_], + int iCell +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + int adr2 =\ + (iCell) * _M_ * _K_ +\ + (i) * _K_ +\ + (tid); + part2Cache[i] = part2[adr2]; + #endif + } + } +} + +__device__ void store_precomputed_to_smem( + float part1Cache[_M_], + float part2Cache[_M_], + _VOLATILE_ float *sMem +){ + const int tid = threadIdx.x; + const int qid = blockIdx.x; + __syncthreads(); + if (tid < 256){ + #pragma unroll + for (int i = 0; i < _M_; i++){ + #if _TPB_ >= 256 + float part1Value = part1Cache[i]; + float part2Value = part2Cache[i]; + sMem[i * _K_ + tid] = part1Value + part2Value; + #endif + } + } + __syncthreads(); +} + +__device__ void load_consume_data( + const uint8n_t* data, + _VOLATILE_ float sMem[], + float &value, + int iN, int nData +){ + #pragma unroll + for (int i = 0; i < _M_ / _NCS_; i++){ + uint8n_t threadData = data[(i * nData) + iN]; + float pre0 = sMem[(i * _NCS_ + 0) * _K_ + int(threadData.val[0]) ]; + float pre1 = sMem[(i * _NCS_ + 1) * _K_ + int(threadData.val[1]) ]; + float pre2 = sMem[(i * _NCS_ + 2) * _K_ + int(threadData.val[2]) ]; + float pre3 = sMem[(i * _NCS_ + 3) * _K_ + int(threadData.val[3]) ]; + value += pre0; + value += pre1; + value += pre2; + value += pre3; + } +} + +__device__ void load_data( + const uint8n_t* data, + uint8n_t dataCache[_M_ / _NCS_], + int iN, int nData +){ + #pragma unroll + for (int i = 0; i < _M_ / _NCS_; i++){ + uint8n_t threadData = data[(i * nData) + iN]; + dataCache[i] = threadData; + } +} + +__device__ void consume_data( + _VOLATILE_ float sMem[], + uint8n_t dataCache[_M_ / _NCS_], + float &value +){ + #pragma unroll + for (int i = 0; i < _M_ / _NCS_; i++){ + uint8n_t threadData = dataCache[i]; + float pre0 = sMem[(i * _NCS_ + 0) * _K_ + int(threadData.val[0]) ]; + float pre1 = sMem[(i * _NCS_ + 1) * _K_ + int(threadData.val[1]) ]; + float pre2 = sMem[(i * _NCS_ + 2) * _K_ + int(threadData.val[2]) ]; + float pre3 = sMem[(i * _NCS_ + 3) * _K_ + int(threadData.val[3]) ]; + value += pre0; + value += pre1; + value += pre2; + value += pre3; + } +} + +__device__ void sort( + float &finalValue, + float &finalIndex, + float value, + float index, + _VOLATILE_ float sMem[], + int nCandidates +){ + const int tid = threadIdx.x; + #if _TPB_ == 32 + bitonic_sort_32(value, index, tid); + + #elif _TPB_ == 64 + bitonic_sort_64(value, index, sMem, tid); + + #elif _TPB_ == 128 + bitonic_sort_128(value, index, sMem, tid); + + #elif _TPB_ == 256 + bitonic_sort_256(value, index, sMem, tid); + + #elif _TPB_ == 512 + bitonic_sort_512(value, index, sMem, tid); + + #elif _TPB_ == 1024 + bitonic_sort_1024(value, index, sMem, tid); + #endif + + switch (nCandidates){ + case 2: + bitonic_sort_global_2( + finalValue, finalIndex, value, index, + tid); + break; + case 4: + bitonic_sort_global_4( + finalValue, 
finalIndex, value, index, + tid); + break; + case 8: + bitonic_sort_global_8( + finalValue, finalIndex, value, index, + tid); + break; + case 16: + bitonic_sort_global_16( + finalValue, finalIndex, value, index, + tid); + break; + case 32: + bitonic_sort_global_32( + finalValue, finalIndex, value, index, + tid); + break; + case 64: + bitonic_sort_global_64( + finalValue, finalIndex, value, index, + sMem, tid); + break; + case 128: + bitonic_sort_global_128( + finalValue, finalIndex, value, index, + sMem, tid); + break; + case 256: + bitonic_sort_global_256( + finalValue, finalIndex, value, index, + sMem, tid); + break; + case 512: + bitonic_sort_global_512( + finalValue, finalIndex, value, index, + sMem, tid); + break; + case 1024: + bitonic_sort_global_1024( + finalValue, finalIndex, value, index, + sMem, tid); + break; + } +} + +__device__ bool is_stack_empty( + int stackSize +){ + return stackSize <= 0; +} + +__device__ bool is_stack_full( + int stackSize +){ + return stackSize >= _STACKCAP_ - 1; +} + +__device__ void push_stack( + pair stack[_STACKCAP_], + pair newPair, + int &stackSize +) { + if (is_stack_full(stackSize)){ + return; + } else { + #pragma unroll + for (int i = _STACKCAP_ - 1; i >= 1; i--){ + stack[i] = stack[i - 1]; + } + stack[0] = newPair; + stackSize ++; + } +} + +__device__ void pop_stack( + pair stack[_STACKCAP_], + pair &outPair, + int &stackSize +) { + if (is_stack_empty(stackSize)){ + return; + } else { + outPair = stack[0]; + #pragma unroll + for (int i=0; i<_STACKCAP_-1; i++){ + stack[i] = stack[i+1]; + } + stackSize--; + } +} + +__device__ void init_stack( + pair stack[_STACKCAP_] +){ + pair emptyPair; + emptyPair.value = -INFINITY; + emptyPair.index = -1; + #pragma unroll + for (int i=0; i < _STACKCAP_; i++){ + stack[i] = emptyPair; + } +} + +extern "C" +__global__ void ivfpq_topk( + const uint8n_t* __restrict__ data, + const float* __restrict__ precomputed, + const uint8_t* __restrict__ isEmpty, + const ll_t* __restrict__ cellStart, + const ll_t* __restrict__ cellSize, + const ll_t* __restrict__ totalSize, + float* __restrict__ gValue, + ll_t* __restrict__ gIndex, + int nData, int nQuery, int nProbe, int nCandidates +) { + const int tid = threadIdx.x; // thread ID + const int qid = blockIdx.x; // query ID + + pair stack[_STACKCAP_]; + int stackSize = 0; + init_stack(stack); + + extern __shared__ _VOLATILE_ float sMem[]; // M * K + load_precomputed_v1(precomputed, sMem, nQuery); + float finalValue = -INFINITY; + float finalIndex = -1; + float minValue = -INFINITY; + + const ll_t threadTotalSize = totalSize[qid]; + const int nIter = (threadTotalSize + _TPB_ - 1) / _TPB_; + int cCell = 0; + int cCellStart = cellStart[qid * nProbe + cCell]; + int cCellSize = cellSize[qid * nProbe + cCell]; + int cCellEnd = cCellStart + cCellSize; + int iN = cCellStart + tid; + + for (int i = 0; i < nIter; i++){ + while (iN >= cCellEnd){ + cCell ++; // increment cell index by 1 + if (unlikely(cCell >= nProbe)) + break; + int pCellEnd = cCellEnd; + cCellStart = cellStart[qid * nProbe + cCell]; + cCellSize = cellSize[qid * nProbe + cCell]; + cCellEnd = cCellStart + cCellSize; + iN = iN - pCellEnd + cCellStart; + } + pair newPair; + newPair.value = -INFINITY; + newPair.index = -1; + int cIsEmpty = 0; + if (likely(iN < cCellEnd)){ + newPair.value = 0.f; + newPair.index = iN; + cIsEmpty = isEmpty[iN]; + uint8n_t dataCache[_M_ / _NCS_]; + load_data(data, dataCache, iN, nData); + consume_data(sMem, dataCache, newPair.value); + } + newPair.value = cIsEmpty == 0 ? 
newPair.value : -INFINITY; + newPair.index = cIsEmpty == 0 ? newPair.index : -1; + + __syncthreads(); + float temp1, temp2; + if (tid == 0){ + temp1 = sMem[0]; + temp2 = sMem[1]; + sMem[0] = 0; + } + __syncthreads(); + + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (is_stack_full(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + + if (newPair.value > minValue){ + push_stack(stack, newPair, stackSize); + } + __syncthreads(); + + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + if (tid == 0){ + sMem[0] = temp1; + sMem[1] = temp2; + } + __syncthreads(); + + iN += _TPB_; + } + + sMem[0] = 0; + __syncthreads(); + for (int i=0; i<_STACKCAP_; i++){ + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (!is_stack_empty(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + __syncthreads(); + + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + sMem[0] = 0; + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + } + + if (_TPB_ - nCandidates <= tid){ + const int writeAddress = (qid * nCandidates) + tid - ( _TPB_ - nCandidates); + gValue[writeAddress] = finalValue; + gIndex[writeAddress] = finalIndex; + } +} + +extern "C" +__global__ void ivfpq_topk_residual( + const uint8n_t* __restrict__ data, + const float* __restrict__ precomputed, + const float* __restrict__ baseSims, + const uint8_t* __restrict__ isEmpty, + const ll_t* __restrict__ cellStart, + const ll_t* __restrict__ cellSize, + const ll_t* __restrict__ totalSize, + float* __restrict__ gValue, + ll_t* __restrict__ gIndex, + int nData, int nQuery, int nProbe, int nCandidates +) { + const int tid = threadIdx.x; // thread ID + const int qid = blockIdx.x; // query ID + + extern __shared__ _VOLATILE_ float sMem[]; // M * K + const ll_t threadTotalSize = totalSize[qid]; + float finalValue = -INFINITY; + float finalIndex = -1; + int cCellStart = -1; + for (int cCell = 0; cCell < nProbe; cCell++){ + int pCellStart = cCellStart; + cCellStart = cellStart[qid * nProbe + cCell]; + if (cCellStart == pCellStart){ + continue; + } + int cCellSize = cellSize[qid * nProbe + cCell]; + load_precomputed_v2(precomputed, sMem, cCell, nProbe); + float cBaseSim = baseSims[qid * nProbe + cCell]; + int cCellEnd = cCellStart + cCellSize; + int nIter = (cCellSize + _TPB_ - 1) / _TPB_; + for (int iter = 0; iter < nIter; iter++ ){ + int iN = cCellStart + iter * _TPB_ + tid; + float value; + float index = iN; + int cIsEmpty = 0; + if (cCellStart <= iN && iN < cCellEnd){ + value = cBaseSim; + cIsEmpty = isEmpty[iN]; + uint8n_t dataCache[_M_ / _NCS_]; + load_data(data, dataCache, iN, nData); + consume_data(sMem, dataCache, value); + } else { + value = -INFINITY; + } + value = cIsEmpty == 0 ? value : -INFINITY; + index = cIsEmpty == 0 ? 
index : -1; + + sort( + finalValue, finalIndex, + value, index, + sMem, nCandidates + ); + } + } + + if (_TPB_ - nCandidates <= tid){ + const int writeAddress = (qid * nCandidates) + tid - ( _TPB_ - nCandidates); + gValue[writeAddress] = finalValue; + gIndex[writeAddress] = finalIndex; + } +} + +extern "C" +__global__ void ivfpq_topk_residual_precomputed( + const uint8n_t* __restrict__ data, + const float* __restrict__ part1, + const float* __restrict__ part2, + const ll_t* __restrict__ cells, + const float* __restrict__ baseSims, + const uint8_t* __restrict__ isEmpty, + const ll_t* __restrict__ cellStart, + const ll_t* __restrict__ cellSize, + const ll_t* __restrict__ totalSize, + float* __restrict__ gValue, + ll_t* __restrict__ gIndex, + int nData, int nQuery, int nProbe, int nCandidates +) { + const int tid = threadIdx.x; // thread ID + const int qid = blockIdx.x; // query ID + + pair stack[_STACKCAP_]; + int stackSize = 0; + init_stack(stack); + + extern __shared__ _VOLATILE_ float sMem[]; // M * K + const ll_t threadTotalSize = totalSize[qid]; + float finalValue = -INFINITY; + float finalIndex = -1; + float minValue = -INFINITY; + float part1Cache[_M_]; + float part2Cache[_M_]; + load_part1_to_cache(part1, part1Cache); + + int nCellStart = cellStart[qid * nProbe]; + int nCellSize = cellSize[qid * nProbe]; + int nCellEnd = nCellStart + nCellSize; + int iCell = cells[qid * nProbe]; + bool nCellRepeated = false; + bool cCellRepeated = false; + load_part2_to_cache(part2, part2Cache, iCell); + + for (int cCell = 0; cCell < nProbe; cCell++){ + int cCellStart = nCellStart; + int cCellSize = nCellSize; + int cCellEnd = nCellEnd; + if (!cCellRepeated){ + store_precomputed_to_smem(part1Cache, part2Cache, sMem); + } + + if (cCell < nProbe - 1){ + int tCellStart = cellStart[qid * nProbe + cCell + 1]; + if (tCellStart != cCellStart){ + nCellStart = tCellStart; + nCellSize = cellSize[qid * nProbe + cCell + 1]; + nCellEnd = nCellStart + nCellSize; + iCell = cells[qid * nProbe + cCell + 1]; + load_part2_to_cache(part2, part2Cache, iCell); + nCellRepeated = false; + } else { + nCellRepeated = true; + } + } + if (cCellRepeated){ + cCellRepeated = nCellRepeated; + continue; + } + cCellRepeated = nCellRepeated; + float cBaseSim = baseSims[qid * nProbe + cCell]; + int nIter = (cCellSize + _TPB_ - 1) / _TPB_; + for (int iter = 0; iter < nIter; iter++ ){ + int iN = cCellStart + iter * _TPB_ + tid; + pair newPair; + newPair.value = -INFINITY; + newPair.index = -1; + int cIsEmpty = 0; + if (iN < cCellEnd){ + newPair.value = cBaseSim; + newPair.index = iN; + cIsEmpty = isEmpty[iN]; + uint8n_t dataCache[_M_ / _NCS_]; + load_data(data, dataCache, iN, nData); + consume_data(sMem, dataCache, newPair.value); + } + + newPair.value = cIsEmpty == 0 ? newPair.value : -INFINITY; + newPair.index = cIsEmpty == 0 ? 
newPair.index : -1; + + __syncthreads(); + float temp1, temp2; + if (tid == 0){ + temp1 = sMem[0]; + temp2 = sMem[1]; + sMem[0] = 0; + } + __syncthreads(); + + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (is_stack_full(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + if (newPair.value > minValue){ + push_stack(stack, newPair, stackSize); + } + __syncthreads(); + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + if (tid == 0){ + sMem[0] = temp1; + sMem[1] = temp2; + } + __syncthreads(); + } + } + // __syncthreads(); + sMem[0] = 0; + __syncthreads(); + for (int i=0; i < _STACKCAP_; i++){ + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (!is_stack_empty(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + __syncthreads(); + + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + sMem[0] = 0; + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + } + + if (_TPB_ - nCandidates <= tid){ + const int writeAddress = (qid * nCandidates) + tid - ( _TPB_ - nCandidates); + gValue[writeAddress] = finalValue; + gIndex[writeAddress] = finalIndex; + } +} + +extern "C" +__global__ void ivfpq_topk_smart_probing( + const uint8n_t* __restrict__ data, + const float* __restrict__ precomputed, + const uint8_t* __restrict__ isEmpty, + const ll_t* __restrict__ cellStart, + const ll_t* __restrict__ cellSize, + const ll_t* __restrict__ totalSize, + const ll_t* __restrict__ nProbeList, + float* __restrict__ gValue, + ll_t* __restrict__ gIndex, + int nData, int nQuery, int maxNProbe, int nCandidates +) { + const int tid = threadIdx.x; // thread ID + const int qid = blockIdx.x; // query ID + const int nProbe = nProbeList[qid]; + + pair stack[_STACKCAP_]; + int stackSize = 0; + init_stack(stack); + + extern __shared__ _VOLATILE_ float sMem[]; // M * K + load_precomputed_v1(precomputed, sMem, nQuery); + float finalValue = -INFINITY; + float finalIndex = -1; + float minValue = -INFINITY; + const ll_t threadTotalSize = totalSize[qid]; + const int nIter = (threadTotalSize + _TPB_ - 1) / _TPB_; + int cCell = 0; + int cCellStart = cellStart[qid * maxNProbe + cCell]; + int cCellSize = cellSize[qid * maxNProbe + cCell]; + int cCellEnd = cCellStart + cCellSize; + int iN = cCellStart + tid; + + for (int i = 0; i < nIter; i++){ + while (iN >= cCellEnd){ + cCell ++; // increment cell index by 1 + if (cCell >= nProbe) + break; + int pCellEnd = cCellEnd; + int pCellStart = cCellStart; + cCellStart = cellStart[qid * maxNProbe + cCell]; + if (cCellStart == pCellStart){ + continue; + } + cCellSize = cellSize[qid * maxNProbe + cCell]; + cCellEnd = cCellStart + cCellSize; + iN = iN - pCellEnd + cCellStart; + } + pair newPair; + newPair.value = -INFINITY; + newPair.index = -1; + int cIsEmpty = 0; + if (likely(iN < cCellEnd)){ + newPair.value = 0.f; + newPair.index = iN; + cIsEmpty = isEmpty[iN]; + uint8n_t dataCache[_M_ / _NCS_]; + load_data(data, dataCache, iN, nData); + consume_data(sMem, dataCache, newPair.value); + } + newPair.value = cIsEmpty == 0 ? 
newPair.value : -INFINITY; + newPair.index = cIsEmpty == 0 ? newPair.index : -1; + + __syncthreads(); + float temp1, temp2; + if (tid == 0){ + temp1 = sMem[0]; + temp2 = sMem[1]; + sMem[0] = 0; + } + __syncthreads(); + + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (is_stack_full(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + + if (newPair.value > minValue){ + push_stack(stack, newPair, stackSize); + } + __syncthreads(); + + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + if (tid == 0){ + sMem[0] = temp1; + sMem[1] = temp2; + } + __syncthreads(); + iN += _TPB_; + } + + sMem[0] = 0; + __syncthreads(); + #pragma unroll + for (int i=0; i<_STACKCAP_; i++){ + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (!is_stack_empty(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + __syncthreads(); + + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + sMem[0] = 0; + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + } + + if (_TPB_ - nCandidates <= tid){ + const int writeAddress = (qid * nCandidates) + tid - ( _TPB_ - nCandidates); + gValue[writeAddress] = finalValue; + gIndex[writeAddress] = finalIndex; + } +} + +extern "C" +__global__ void ivfpq_topk_residual_smart_probing( + const uint8n_t* __restrict__ data, + const float* __restrict__ precomputed, + const float* __restrict__ baseSims, + const uint8_t* __restrict__ isEmpty, + const ll_t* __restrict__ cellStart, + const ll_t* __restrict__ cellSize, + const ll_t* __restrict__ totalSize, + const ll_t* __restrict__ nProbeList, + float* __restrict__ gValue, + ll_t* __restrict__ gIndex, + int nData, int nQuery, int maxNProbe, int nCandidates +) { + const int tid = threadIdx.x; // thread ID + const int qid = blockIdx.x; // query ID + const int nProbe = nProbeList[qid]; + + extern __shared__ _VOLATILE_ float sMem[]; // M * K + const ll_t threadTotalSize = totalSize[qid]; + float finalValue = -INFINITY; + float finalIndex = -1; + int cCellStart = -1; + for (int cCell = 0; cCell < nProbe; cCell++){ + int pCellStart = cCellStart; + cCellStart = cellStart[qid * maxNProbe + cCell]; + if (cCellStart == pCellStart){ + continue; + } + int cCellSize = cellSize[qid * maxNProbe + cCell]; + load_precomputed_v2(precomputed, sMem, cCell, maxNProbe); + float cBaseSim = baseSims[qid * maxNProbe + cCell]; + int cCellEnd = cCellStart + cCellSize; + int nIter = (cCellSize + _TPB_ - 1) / _TPB_; + for (int iter = 0; iter < nIter; iter++ ){ + int iN = cCellStart + iter * _TPB_ + tid; + float value; + float index = iN; + int cIsEmpty = 0; + if (cCellStart <= iN && iN < cCellEnd){ + value = cBaseSim; + cIsEmpty = isEmpty[iN]; + uint8n_t dataCache[_M_ / _NCS_]; + load_data(data, dataCache, iN, nData); + consume_data(sMem, dataCache, value); + } else { + value = -INFINITY; + } + value = cIsEmpty == 0 ? value : -INFINITY; + index = cIsEmpty == 0 ? 
index : -1; + + sort( + finalValue, finalIndex, + value, index, + sMem, nCandidates + ); + } + } + + if (_TPB_ - nCandidates <= tid){ + const int writeAddress = (qid * nCandidates) + tid - ( _TPB_ - nCandidates); + gValue[writeAddress] = finalValue; + gIndex[writeAddress] = finalIndex; + } +} + +extern "C" +__global__ void ivfpq_topk_residual_precomputed_smart_probing( + const uint8n_t* __restrict__ data, + const float* __restrict__ part1, + const float* __restrict__ part2, + const ll_t* __restrict__ cells, + const float* __restrict__ baseSims, + const uint8_t* __restrict__ isEmpty, + const ll_t* __restrict__ cellStart, + const ll_t* __restrict__ cellSize, + const ll_t* __restrict__ totalSize, + const ll_t* __restrict__ nProbeList, + float* __restrict__ gValue, + ll_t* __restrict__ gIndex, + int nData, int nQuery, int maxNProbe, int nCandidates +) { + const int tid = threadIdx.x; // thread ID + const int qid = blockIdx.x; // query ID + const int nProbe = nProbeList[qid]; + + pair stack[_STACKCAP_]; + int stackSize = 0; + init_stack(stack); + + extern __shared__ _VOLATILE_ float sMem[]; // M * K + const ll_t threadTotalSize = totalSize[qid]; + float finalValue = -INFINITY; + float finalIndex = -1; + float minValue = -INFINITY; + float part1Cache[_M_]; + float part2Cache[_M_]; + load_part1_to_cache(part1, part1Cache); + + int nCellStart = cellStart[qid * maxNProbe]; + int nCellSize = cellSize[qid * maxNProbe]; + int nCellEnd = nCellStart + nCellSize; + int iCell = cells[qid * maxNProbe]; + bool nCellRepeated = false; + bool cCellRepeated = false; + load_part2_to_cache(part2, part2Cache, iCell); + + for (int cCell = 0; cCell < nProbe; cCell++){ + int cCellStart = nCellStart; + int cCellSize = nCellSize; + int cCellEnd = nCellEnd; + if (!cCellRepeated){ + store_precomputed_to_smem(part1Cache, part2Cache, sMem); + } + + if (cCell < nProbe - 1){ + int tCellStart = cellStart[qid * maxNProbe + cCell + 1]; + if (tCellStart != cCellStart){ + nCellStart = tCellStart; + nCellSize = cellSize[qid * maxNProbe + cCell + 1]; + nCellEnd = nCellStart + nCellSize; + iCell = cells[qid * maxNProbe + cCell + 1]; + load_part2_to_cache(part2, part2Cache, iCell); + nCellRepeated = false; + } else { + nCellRepeated = true; + } + } + if (cCellRepeated){ + cCellRepeated = nCellRepeated; + continue; + } + cCellRepeated = nCellRepeated; + float cBaseSim = baseSims[qid * maxNProbe + cCell]; + int nIter = (cCellSize + _TPB_ - 1) / _TPB_; + for (int iter = 0; iter < nIter; iter++ ){ + int iN = cCellStart + iter * _TPB_ + tid; + pair newPair; + newPair.value = -INFINITY; + newPair.index = -1; + int cIsEmpty = 0; + if (iN < cCellEnd){ + newPair.value = cBaseSim; + newPair.index = iN; + cIsEmpty = isEmpty[iN]; + uint8n_t dataCache[_M_ / _NCS_]; + load_data(data, dataCache, iN, nData); + consume_data(sMem, dataCache, newPair.value); + } + + newPair.value = cIsEmpty == 0 ? newPair.value : -INFINITY; + newPair.index = cIsEmpty == 0 ? 
newPair.index : -1; + + __syncthreads(); + float temp1, temp2; + if (tid == 0){ + temp1 = sMem[0]; + temp2 = sMem[1]; + sMem[0] = 0; + } + __syncthreads(); + + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (is_stack_full(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + if (newPair.value > minValue){ + push_stack(stack, newPair, stackSize); + } + __syncthreads(); + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + if (tid == 0){ + sMem[0] = temp1; + sMem[1] = temp2; + } + __syncthreads(); + } + } + + sMem[0] = 0; + __syncthreads(); + for (int i=0; i < _STACKCAP_; i++){ + pair oldPair; + oldPair.value = -INFINITY; + oldPair.index = -1; + if (!is_stack_empty(stackSize)){ + pop_stack(stack, oldPair, stackSize); + if (oldPair.value > minValue){ + sMem[0] = 1; + } + } + __syncthreads(); + + if (sMem[0] > 0){ + __syncthreads(); + sort( + finalValue, finalIndex, + oldPair.value, oldPair.index, + sMem, nCandidates + ); + __syncthreads(); + sMem[0] = 0; + if (tid == _TPB_ - 1){ + sMem[1] = finalValue; + } + __syncthreads(); + minValue = sMem[1]; + } + __syncthreads(); + } + + if (_TPB_ - nCandidates <= tid){ + const int writeAddress = (qid * nCandidates) + tid - ( _TPB_ - nCandidates); + gValue[writeAddress] = finalValue; + gIndex[writeAddress] = finalIndex; + } +} \ No newline at end of file diff --git a/cuda_code/j3d27pt-64x16-3-256_kernel_1.cu b/cuda_code/j3d27pt-64x16-3-256_kernel_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..4ac4e6e6924cd536190a9e0c788c8b224f6c0b6d --- /dev/null +++ b/cuda_code/j3d27pt-64x16-3-256_kernel_1.cu @@ -0,0 +1,689 @@ +#include "j3d27pt-64x16-3-256_kernel.hu" +__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } + +__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) +{ +#ifndef AN5D_TYPE +#define AN5D_TYPE unsigned +#endif + const AN5D_TYPE __c0Len = (timestep - 0); + const AN5D_TYPE __c0Pad = (0); + #define __c0 c0 + const AN5D_TYPE __c1Len = (dimsize - 1 - 1); + const AN5D_TYPE __c1Pad = (1); + #define __c1 c1 + const AN5D_TYPE __c2Len = (dimsize - 1 - 1); + const AN5D_TYPE __c2Pad = (1); + #define __c2 c2 + const AN5D_TYPE __c3Len = (dimsize - 1 - 1); + const AN5D_TYPE __c3Pad = (1); + #define __c3 c3 + const AN5D_TYPE __halo1 = 1; + const AN5D_TYPE __halo2 = 1; + const AN5D_TYPE __halo3 = 1; + const AN5D_TYPE __side0Len = 3; + const AN5D_TYPE __side1Len = 256; + const AN5D_TYPE __side2Len = 10; + const AN5D_TYPE __side3Len = 58; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); + const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; + const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; + const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; + const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; + const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; + const AN5D_TYPE __local_c2 = __tid / __side3LenOl; + const AN5D_TYPE __local_c3 = __tid % 
__side3LenOl; + const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; + const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; + const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; + float __reg_0; + float __reg_1_0; + float __reg_1_1; + float __reg_1_2; + float __reg_2_0; + float __reg_2_1; + float __reg_2_2; + float __reg_3_0; + float __reg_3_1; + float __reg_3_2; + __shared__ float __a_sb_double[__blockSize * 2]; + float *__a_sb = __a_sb_double; + const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; + const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; + const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); + const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); + const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); + const AN5D_TYPE __storeValid = __writeValid3; + AN5D_TYPE __c1; + AN5D_TYPE __h; + const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; + #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) + #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) + #define __REGREF(reg, i2, i3) reg + #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) + #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); + #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) + #define __REGREF(reg, i2, i3) reg + #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) + #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); + #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) + #define __REGREF(reg, i2, i3) reg + #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) + #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); + #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); + #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) + #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) + #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) + #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) + if (__c1Id == 0) + { + __LOAD(__reg_0, 0); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); + __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); + __LOAD(__reg_0, 1); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __LOAD(__reg_0, 2); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __LOAD(__reg_0, 3); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); + __LOAD(__reg_0, 4); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); + __STORE(1, __reg_3_1); + __LOAD(__reg_0, 5); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); + __STORE(2, __reg_3_2); + __LOAD(__reg_0, 6); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); + __STORE(3, __reg_3_0); + } + else + { + __LOAD(__reg_0, 0); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __LOAD(__reg_0, 1); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __LOAD(__reg_0, 2); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __LOAD(__reg_0, 3); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __LOAD(__reg_0, 4); + 
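+ // Warm-up of the fused time-step pipeline: each __LOAD reads one c1-plane of A
+ // into __reg_0, __CALCk applies the k-th of the three fused 27-point stencil
+ // steps (in-plane neighbours come from the shared tile __a_sb, and __writeValidk
+ // shrinks the valid region by one halo per step), and __STORE emits a plane only
+ // after it has passed all three steps, so stores trail loads by __side0Len = 3 planes.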
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); + __LOAD(__reg_0, 5); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); + __LOAD(__reg_0, 6); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); + __STORE(3, __reg_3_0); + __DB_SWITCH(); __syncthreads(); + } + __a_sb = __a_sb_double + __blockSize * 0; + if (__c1Id == __side1Num - 1) + { + for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) + { + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); + __STORE(__h - 3, __reg_3_1); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); + __STORE(__h - 3, __reg_3_2); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); + __STORE(__h - 3, __reg_3_0); + __h++; + __DB_SWITCH(); __syncthreads(); + } + if (0) {} + else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); + __STORE(__h - 3, __reg_3_1); + __reg_1_1 = __reg_0; + __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); + __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); + __STORE(__h - 2, __reg_3_2); + __reg_2_1 = __reg_1_1; + __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); + __STORE(__h - 1, __reg_3_0); + } + else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); + __STORE(__h - 3, __reg_3_1); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); + __STORE(__h - 2, __reg_3_2); + __reg_1_2 = __reg_0; + __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); + __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); + __STORE(__h - 1, __reg_3_0); + __reg_2_2 = __reg_1_2; + __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); + __STORE(__h + 0, __reg_3_1); + } + else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); + __STORE(__h - 3, __reg_3_1); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); + __STORE(__h - 2, __reg_3_2); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); 
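+ // Pipeline drain: no further planes are loaded in this branch; the registers
+ // already in flight are pushed through the remaining stencil steps and stored.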
+ __STORE(__h - 1, __reg_3_0); + __reg_1_0 = __reg_0; + __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); + __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); + __STORE(__h + 0, __reg_3_1); + __reg_2_0 = __reg_1_0; + __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); + __STORE(__h + 1, __reg_3_2); + } + } + else + { + for (__h = 7; __h <= __side1LenOl - 3;) + { + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); + __STORE(__h - 3, __reg_3_1); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); + __STORE(__h - 3, __reg_3_2); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); + __STORE(__h - 3, __reg_3_0); + __h++; + __DB_SWITCH(); __syncthreads(); + } + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); + __STORE(__h - 3, __reg_3_1); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); + __STORE(__h - 3, __reg_3_2); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); + __STORE(__h - 3, __reg_3_0); + __h++; + } +} +__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) +{ +#ifndef AN5D_TYPE +#define AN5D_TYPE unsigned +#endif + const AN5D_TYPE __c0Len = (timestep - 0); + const AN5D_TYPE __c0Pad = (0); + #define __c0 c0 + const AN5D_TYPE __c1Len = (dimsize - 1 - 1); + const AN5D_TYPE __c1Pad = (1); + #define __c1 c1 + const AN5D_TYPE __c2Len = (dimsize - 1 - 1); + const AN5D_TYPE __c2Pad = (1); + #define __c2 c2 + const AN5D_TYPE __c3Len = (dimsize - 1 - 1); + const AN5D_TYPE __c3Pad = (1); + #define __c3 c3 + const AN5D_TYPE __halo1 = 1; + const AN5D_TYPE __halo2 = 1; + const AN5D_TYPE __halo3 = 1; + const AN5D_TYPE __side0Len = 2; + const AN5D_TYPE __side1Len = 256; + const AN5D_TYPE __side2Len = 12; + const AN5D_TYPE __side3Len = 60; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); + const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; + const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; + const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; + const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; + const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; + const AN5D_TYPE __local_c2 = __tid / __side3LenOl; + const AN5D_TYPE __local_c3 = __tid % __side3LenOl; + const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; + const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % 
__side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; + const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; + float __reg_0; + float __reg_1_0; + float __reg_1_1; + float __reg_1_2; + float __reg_2_0; + float __reg_2_1; + float __reg_2_2; + __shared__ float __a_sb_double[__blockSize * 2]; + float *__a_sb = __a_sb_double; + const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; + const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; + const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); + const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); + const AN5D_TYPE __storeValid = __writeValid2; + AN5D_TYPE __c1; + AN5D_TYPE __h; + const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; + #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) + #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) + #define __REGREF(reg, i2, i3) reg + #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) + #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); + #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) + #define __REGREF(reg, i2, i3) reg + #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) + #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); + #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) + #define __REGREF(reg, i2, i3) reg + #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) + #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); + #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); + #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) + #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) + #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) + if (__c1Id == 0) + { + __LOAD(__reg_0, 0); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); + __LOAD(__reg_0, 1); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __LOAD(__reg_0, 2); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __LOAD(__reg_0, 3); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __STORE(1, __reg_2_1); + __LOAD(__reg_0, 4); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __STORE(2, __reg_2_2); + } + else + { + __LOAD(__reg_0, 0); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __LOAD(__reg_0, 1); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __LOAD(__reg_0, 2); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __LOAD(__reg_0, 3); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __LOAD(__reg_0, 4); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __STORE(2, __reg_2_2); + __DB_SWITCH(); __syncthreads(); + } + __a_sb = __a_sb_double + __blockSize * 1; + if (__c1Id == __side1Num - 1) + { + for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) + { + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __STORE(__h - 2, __reg_2_0); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __STORE(__h - 2, __reg_2_1); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, 
__reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __STORE(__h - 2, __reg_2_2); + __h++; + } + if (0) {} + else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __STORE(__h - 2, __reg_2_0); + __reg_1_2 = __reg_0; + __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); + __STORE(__h - 1, __reg_2_1); + } + else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __STORE(__h - 2, __reg_2_0); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __STORE(__h - 1, __reg_2_1); + __reg_1_0 = __reg_0; + __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); + __STORE(__h + 0, __reg_2_2); + } + else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __STORE(__h - 2, __reg_2_0); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __STORE(__h - 1, __reg_2_1); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __STORE(__h + 0, __reg_2_2); + __reg_1_1 = __reg_0; + __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); + __STORE(__h + 1, __reg_2_0); + } + } + else + { + for (__h = 5; __h <= __side1LenOl - 3;) + { + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __STORE(__h - 2, __reg_2_0); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __STORE(__h - 2, __reg_2_1); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __STORE(__h - 2, __reg_2_2); + __h++; + } + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); + __STORE(__h - 2, __reg_2_0); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); + __STORE(__h - 2, __reg_2_1); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); + __STORE(__h - 2, __reg_2_2); + __h++; + } +} +__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) +{ +#ifndef AN5D_TYPE +#define AN5D_TYPE unsigned +#endif + const AN5D_TYPE __c0Len = (timestep - 0); + const AN5D_TYPE __c0Pad = (0); + #define __c0 c0 + const AN5D_TYPE __c1Len = (dimsize - 1 - 1); + const AN5D_TYPE __c1Pad = (1); + #define __c1 c1 + const AN5D_TYPE __c2Len = (dimsize - 1 - 1); + const AN5D_TYPE __c2Pad = (1); + #define __c2 c2 + const AN5D_TYPE __c3Len = (dimsize - 1 - 1); + const AN5D_TYPE __c3Pad = (1); + #define __c3 c3 + const AN5D_TYPE __halo1 = 1; + const AN5D_TYPE __halo2 = 1; + const AN5D_TYPE __halo3 = 1; + const AN5D_TYPE __side0Len = 1; + 
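+ // Tiling constants for the single-time-step variant: with __halo = 1 and
+ // __side0Len = 1, each 14x62 output tile is padded by one ghost cell per side,
+ // giving __side2LenOl x __side3LenOl = 16 x 64 = 1024 threads per block, the
+ // same block shape used by the 3-step and 2-step kernels above.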
const AN5D_TYPE __side1Len = 256; + const AN5D_TYPE __side2Len = 14; + const AN5D_TYPE __side3Len = 62; + const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); + const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); + const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); + const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); + const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); + const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); + const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; + const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; + const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; + const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; + const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; + const AN5D_TYPE __local_c2 = __tid / __side3LenOl; + const AN5D_TYPE __local_c3 = __tid % __side3LenOl; + const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; + const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; + const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; + float __reg_0; + float __reg_1_0; + float __reg_1_1; + float __reg_1_2; + __shared__ float __a_sb_double[__blockSize * 2]; + float *__a_sb = __a_sb_double; + const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; + const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; + const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); + const AN5D_TYPE __storeValid = __writeValid1; + AN5D_TYPE __c1; + AN5D_TYPE __h; + const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; + #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) + #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) + #define __REGREF(reg, i2, i3) reg + #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) + #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); + #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) + #define __REGREF(reg, i2, i3) reg + #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) + #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); + #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) + #define __REGREF(reg, i2, i3) reg + #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) + #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0) + #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) + #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) + #define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); + #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); + #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) + #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) + if (__c1Id == 0) + { + __LOAD(__reg_0, 0); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __LOAD(__reg_0, 1); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __LOAD(__reg_0, 2); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __STORE(1, __reg_1_1); + } + else + { + __LOAD(__reg_0, 0); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __LOAD(__reg_0, 1); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __LOAD(__reg_0, 2); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __STORE(1, __reg_1_1); + } + __a_sb = __a_sb_double + __blockSize * 1; + if (__c1Id == __side1Num - 1) + { + for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) + { + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __STORE(__h - 1, __reg_1_2); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h - 1, __reg_1_0); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h - 1, __reg_1_1); + __h++; + __DB_SWITCH(); __syncthreads(); + } + if (0) {} + else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); + __STORE(__h - 1, __reg_1_2); + } + else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __STORE(__h - 1, __reg_1_2); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); + __STORE(__h + 0, __reg_1_0); + } + else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) + { + __LOAD(__reg_0, __h + 0); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __STORE(__h - 1, __reg_1_2); + __LOAD(__reg_0, __h + 1); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h + 0, __reg_1_0); + __LOAD(__reg_0, __h + 2); + __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); + __STORE(__h + 1, __reg_1_1); + } + } + else + { + for (__h = 3; __h <= __side1LenOl - 3;) + { + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __STORE(__h - 1, __reg_1_2); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h - 1, __reg_1_0); + __h++; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h - 1, __reg_1_1); + __h++; + __DB_SWITCH(); __syncthreads(); + } + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); + __STORE(__h - 1, __reg_1_2); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); + __STORE(__h - 1, __reg_1_0); + __h++; + if (__h == __side1LenOl) return; + __LOAD(__reg_0, __h); + __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); + __STORE(__h - 1, __reg_1_1); + __h++; + } +} diff --git 
a/cuda_code/jac_mpi_hybkernels.cu b/cuda_code/jac_mpi_hybkernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..34569a2a85bb3c975599644f19cc7b558e30b0dc --- /dev/null +++ b/cuda_code/jac_mpi_hybkernels.cu @@ -0,0 +1,118 @@ +// +// auto-generated by op2.py +// + +//header +#ifdef GPUPASS +#define op_par_loop_res op_par_loop_res_gpu +#define op_par_loop_update op_par_loop_update_gpu +#include "jac_mpi_kernels.cu" +#undef op_par_loop_res +#undef op_par_loop_update +#else +#define op_par_loop_res op_par_loop_res_cpu +#define op_par_loop_update op_par_loop_update_cpu +#include "jac_mpi_kernels.cpp" +#undef op_par_loop_res +#undef op_par_loop_update + +//user kernel files + +void op_par_loop_res_gpu(char const *name, op_set set, + op_arg arg0, + op_arg arg1, + op_arg arg2, + op_arg arg3); + +//GPU host stub function +#if OP_HYBRID_GPU +void op_par_loop_res(char const *name, op_set set, + op_arg arg0, + op_arg arg1, + op_arg arg2, + op_arg arg3){ + + if (OP_hybrid_gpu) { + op_par_loop_res_gpu(name, set, + arg0, + arg1, + arg2, + arg3); + + }else{ + op_par_loop_res_cpu(name, set, + arg0, + arg1, + arg2, + arg3); + + } +} +#else +void op_par_loop_res(char const *name, op_set set, + op_arg arg0, + op_arg arg1, + op_arg arg2, + op_arg arg3){ + + op_par_loop_res_gpu(name, set, + arg0, + arg1, + arg2, + arg3); + + } +#endif //OP_HYBRID_GPU + +void op_par_loop_update_gpu(char const *name, op_set set, + op_arg arg0, + op_arg arg1, + op_arg arg2, + op_arg arg3, + op_arg arg4); + +//GPU host stub function +#if OP_HYBRID_GPU +void op_par_loop_update(char const *name, op_set set, + op_arg arg0, + op_arg arg1, + op_arg arg2, + op_arg arg3, + op_arg arg4){ + + if (OP_hybrid_gpu) { + op_par_loop_update_gpu(name, set, + arg0, + arg1, + arg2, + arg3, + arg4); + + }else{ + op_par_loop_update_cpu(name, set, + arg0, + arg1, + arg2, + arg3, + arg4); + + } +} +#else +void op_par_loop_update(char const *name, op_set set, + op_arg arg0, + op_arg arg1, + op_arg arg2, + op_arg arg3, + op_arg arg4){ + + op_par_loop_update_gpu(name, set, + arg0, + arg1, + arg2, + arg3, + arg4); + + } +#endif //OP_HYBRID_GPU +#endif diff --git a/cuda_code/json_gpu_11.cu b/cuda_code/json_gpu_11.cu new file mode 100644 index 0000000000000000000000000000000000000000..b693484b5f84db0dd41243fefeef1d39c38f8633 --- /dev/null +++ b/cuda_code/json_gpu_11.cu @@ -0,0 +1,630 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "json_common.h" +#include "json_gpu.h" + +#include + +#include + +#include +#include + +#include + +#include +#include + +namespace cudf { +namespace experimental { +namespace io { +namespace json { +namespace gpu { + +using namespace::cudf; + +using string_pair = std::pair; + +namespace { + +/** + * @brief CUDA Kernel that modifies the start and stop offsets to exclude + * the sections outside of the top level brackets. + * + * The top level brackets characters are excluded from the resulting range. 
+ * Parameter stop has the same semantics as end() in STL containers + * (one past the last element) + * + * @param[in] data Pointer to the device buffer containing the data to process + * @param[in,out] start Offset of the first character in the range + * @param[in,out] stop Offset of the first character after the range + * + * @return void + **/ +__device__ void limit_range_to_brackets(const char *data, long &start, long &stop) { + while (start < stop && data[start] != '[' && data[start] != '{') { + start++; + } + start++; + + while (start < stop && data[stop - 1] != ']' && data[stop - 1] != '}') { + stop--; + } + stop--; +} + +/** + * @brief CUDA kernel that finds the end position of the next field name, + * including the colon that separates the name from the field value. + * + * Returns the position after the colon that preceeds the value token. + * + * @param[in] data Pointer to the device buffer containing the data to process + * @param[in] opts Parsing options (e.g. delimiter and quotation character) + * @param[in] start Offset of the first character in the range + * @param[in] stop Offset of the first character after the range + * + * @return long Position of the first character after the field name. + **/ +__device__ long seek_field_name_end(const char *data, const ParseOptions opts, long start, long stop) { + bool quotation = false; + for (auto pos = start; pos < stop; ++pos) { + // Ignore escaped quotes + if (data[pos] == opts.quotechar && data[pos - 1] != '\\') { + quotation = !quotation; + } else if (!quotation && data[pos] == ':') { + return pos + 1; + } + } + return stop; +} + +/** + * @brief Decodes a numeric value base on templated cudf type T with specified + * base. + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed numeric value + **/ +template +__inline__ __device__ T decode_value(const char *data, long start, long end, + ParseOptions const &opts) { + return cudf::experimental::io::gpu::parse_numeric(data, start, end, opts, base); +} + +/** + * @brief Decodes a numeric value base on templated cudf type T + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed numeric value + **/ +template +__inline__ __device__ T decode_value(const char *data, long start, long end, + ParseOptions const &opts) { + return cudf::experimental::io::gpu::parse_numeric(data, start, end, opts); +} + +/** + * @brief Decodes a timestamp_D + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed timestamp_D + **/ +template <> +__inline__ __device__ cudf::timestamp_D decode_value(const char *data, + long start, long end, + ParseOptions const &opts) { + return parseDateFormat(data, start, end, opts.dayfirst); +} + +/** + * @brief Decodes a timestamp_s + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed timestamp_s + **/ +template <> 
+__inline__ __device__ cudf::timestamp_s decode_value(const char *data, + long start, long end, + ParseOptions const &opts) { + auto milli = parseDateTimeFormat(data, start, end, opts.dayfirst); + return milli / 1000; +} + +/** + * @brief Decodes a timestamp_ms + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed timestamp_ms + **/ +template <> +__inline__ __device__ cudf::timestamp_ms decode_value( + const char *data, long start, long end, ParseOptions const &opts) { + + auto milli = parseDateTimeFormat(data, start, end, opts.dayfirst); + return milli; +} + +/** + * @brief Decodes a timestamp_us + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed timestamp_us + **/ +template <> +__inline__ __device__ cudf::timestamp_us decode_value( + const char *data, long start, long end, ParseOptions const &opts) { + auto milli = parseDateTimeFormat(data, start, end, opts.dayfirst); + return milli * 1000; +} + +/** + * @brief Decodes a timestamp_ns + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed timestamp_ns + **/ +template <> +__inline__ __device__ cudf::timestamp_ns decode_value( + const char *data, long start, long end, ParseOptions const &opts) { + auto milli = parseDateTimeFormat(data, start, end, opts.dayfirst); + return milli * 1000000; +} + +// The purpose of these is merely to allow compilation ONLY +template <> +__inline__ __device__ cudf::string_view decode_value(const char *data, + long start, long end, + ParseOptions const &opts) { + return cudf::string_view{}; +} +template <> +__inline__ __device__ cudf::dictionary32 decode_value(const char *data, + long start, long end, + ParseOptions const &opts) { + return cudf::dictionary32{}; +} + +/** + * @brief Functor for converting plain text data to cuDF data type value. + **/ +struct ConvertFunctor { + /** + * @brief Template specialization for operator() for types whose values can be + * convertible to a 0 or 1 to represent false/true. The converting is done by + * checking against the default and user-specified true/false values list. + * + * It is handled here rather than within convertStrToValue() as that function + * is used by other types (ex. timestamp) that aren't 'booleable'. + **/ + template ::value> * = nullptr> + __host__ __device__ __forceinline__ bool operator()(const char *data, void *output_columns, long row, long start, + long end, const ParseOptions &opts) { + T &value{static_cast(output_columns)[row]}; + + // Check for user-specified true/false values first, where the output is + // replaced with 1/0 respectively + const size_t field_len = end - start + 1; + if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len)) { + value = 1; + } else if (serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) { + value = 0; + } else { + value = decode_value(data, start, end, opts); + } + + return true; + } + + /** + * @brief Dispatch for floating points, which are set to NaN if the input + * is not valid. 
In such case, the validity mask is set to zero too. + */ + template ::value> * = nullptr> + __host__ __device__ __forceinline__ bool operator()( + const char *data, void *out_buffer, size_t row, long start, long end, + ParseOptions const &opts) { + auto &value{static_cast(out_buffer)[row]}; + value = decode_value(data, start, end, opts); + return !std::isnan(value); + } + + /** + * @brief Default template operator() dispatch specialization all data types + * (including wrapper types) that is not covered by above. + **/ + template ::value and + !std::is_integral::value> * = nullptr> + __host__ __device__ __forceinline__ bool operator()(const char *data, void *output_columns, long row, long start, + long end, const ParseOptions &opts) { + T &value{static_cast(output_columns)[row]}; + value = decode_value(data, start, end, opts); + + return true; + } +}; + +/** + * @brief Checks whether the given character is a whitespace character. + * + * @param[in] ch The character to check + * + * @return True if the input is whitespace, False otherwise + **/ +__inline__ __device__ bool is_whitespace(char ch) { + return ch == '\t' || ch == ' '; +} + +/** + * @brief Scans a character stream within a range, and adjusts the start and end + * indices of the range to ignore whitespace and quotation characters. + * + * @param[in] data The character stream to scan + * @param[in,out] start The start index to adjust + * @param[in,out] end The end index to adjust + * @param[in] quotechar The character used to denote quotes + * + * @return Adjusted or unchanged start_idx and end_idx + **/ +__inline__ __device__ void trim_field_start_end(const char* data, long* start, + long* end, char quotechar = '\0') { + while ((*start < *end) && is_whitespace(data[*start])) { + (*start)++; + } + if ((*start < *end) && data[*start] == quotechar) { + (*start)++; + } + while ((*start <= *end) && is_whitespace(data[*end])) { + (*end)--; + } + if ((*start <= *end) && data[*end] == quotechar) { + (*end)--; + } +} + +/** + * @brief Returns true is the input character is a valid digit. + * Supports both decimal and hexadecimal digits (uppercase and lowercase). + * + * @param c Chracter to check + * @param is_hex Whether to check as a hexadecimal + * + * @return `true` if it is digit-like, `false` otherwise + */ +__device__ __inline__ bool is_digit(char c, bool is_hex = false) { + if (c >= '0' && c <= '9') return true; + + if (is_hex) { + if (c >= 'A' && c <= 'F') return true; + if (c >= 'a' && c <= 'f') return true; + } + + return false; +} + +/** + * @brief Returns true if the counters indicate a potentially valid float. + * False positives are possible because positions are not taken into account. + * For example, field "e.123-" would match the pattern. 
+ */ +__device__ __inline__ bool is_like_float(long len, long digit_cnt, long decimal_cnt, long dash_cnt, long exponent_cnt) { + // Can't have more than one exponent and one decimal point + if (decimal_cnt > 1) + return false; + if (exponent_cnt > 1) + return false; + // Without the exponent or a decimal point, this is an integer, not a float + if (decimal_cnt == 0 && exponent_cnt == 0) + return false; + + // Can only have one '-' per component + if (dash_cnt > 1 + exponent_cnt) + return false; + + // If anything other than these characters is present, it's not a float + if (digit_cnt + decimal_cnt + dash_cnt + exponent_cnt != len) + return false; + + // Needs at least 1 digit, 2 if exponent is present + if (digit_cnt < 1 + exponent_cnt) + return false; + + return true; +} + +/** + * @brief CUDA kernel that parses and converts plain text data into cuDF column data. + * + * Data is processed one record at a time + * + * @param[in] data The entire data to read + * @param[in] data_size Size of the data buffer, in bytes + * @param[in] rec_starts The start of each data record + * @param[in] num_records The number of lines/rows + * @param[in] dtypes The data type of each column + * @param[in] opts A set of parsing options + * @param[out] output_columns The output column data + * @param[in] num_columns The number of columns + * @param[out] valid_fields The bitmaps indicating whether column fields are valid + * @param[out] num_valid_fields The numbers of valid fields in columns + * + * @return void + **/ +__global__ void convert_json_to_columns_kernel(const char *data, size_t data_size, const uint64_t *rec_starts, + cudf::size_type num_records, const data_type *dtypes, ParseOptions opts, + void *const *output_columns, int num_columns, bitmask_type *const *valid_fields, + cudf::size_type *num_valid_fields) { + const long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); + if (rec_id >= num_records) + return; + + long start = rec_starts[rec_id]; + // has the same semantics as end() in STL containers (one past last element) + long stop = ((rec_id < num_records - 1) ? 
rec_starts[rec_id + 1] : data_size); + + limit_range_to_brackets(data, start, stop); + const bool is_object = (data[start - 1] == '{'); + + for (int col = 0; col < num_columns && start < stop; col++) { + if (is_object) { + start = seek_field_name_end(data, opts, start, stop); + } + // field_end is at the next delimiter/newline + const long field_end = cudf::experimental::io::gpu::seek_field_end(data, opts, start, stop); + long field_data_last = field_end - 1; + // Modify start & end to ignore whitespace and quotechars + trim_field_start_end(data, &start, &field_data_last, opts.quotechar); + // Empty fields are not legal values + if (start <= field_data_last && !serializedTrieContains(opts.naValuesTrie, data + start, field_end - start)) { + // Type dispatcher does not handle strings + if (dtypes[col].id() == STRING) { + auto str_list = static_cast(output_columns[col]); + str_list[rec_id].first = data + start; + str_list[rec_id].second = field_data_last - start + 1; + + // set the valid bitmap - all bits were set to 0 to start + set_bit(valid_fields[col], rec_id); + atomicAdd(&num_valid_fields[col], 1); + } else { + if(cudf::experimental::type_dispatcher(dtypes[col], ConvertFunctor{}, data, output_columns[col], rec_id, start, field_data_last, opts)){ + // set the valid bitmap - all bits were set to 0 to start + set_bit(valid_fields[col], rec_id); + atomicAdd(&num_valid_fields[col], 1); + } + } + } else if (dtypes[col].id() == STRING) { + auto str_list = static_cast(output_columns[col]); + str_list[rec_id].first = nullptr; + str_list[rec_id].second = 0; + } + start = field_end + 1; + } +} + +/** + * @brief CUDA kernel that processes a buffer of data and determines information about the + * column types within. + * + * Data is processed in one row/record at a time, so the number of total + * threads (tid) is equal to the number of rows. + * + * @param[in] data Input data buffer + * @param[in] data_size Size of the data buffer, in bytes + * @param[in] opts A set of parsing options + * @param[in] num_columns The number of columns of input data + * @param[in] rec_starts The start the input data of interest + * @param[in] num_records The number of lines/rows of input data + * @param[out] column_infos The count for each column data type + * + * @returns void + **/ +__global__ void detect_json_data_types(const char *data, size_t data_size, const ParseOptions opts, int num_columns, + const uint64_t *rec_starts, cudf::size_type num_records, ColumnInfo *column_infos) { + long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); + if (rec_id >= num_records) + return; + + long start = rec_starts[rec_id]; + // has the same semantics as end() in STL containers (one past last element) + long stop = ((rec_id < num_records - 1) ? 
rec_starts[rec_id + 1] : data_size); + + limit_range_to_brackets(data, start, stop); + const bool is_object = (data[start - 1] == '{'); + + for (int col = 0; col < num_columns; col++) { + if (is_object) { + start = seek_field_name_end(data, opts, start, stop); + } + const long field_end = cudf::experimental::io::gpu::seek_field_end(data, opts, start, stop); + long field_data_last = field_end - 1; + trim_field_start_end(data, &start, &field_data_last); + const int field_len = field_data_last - start + 1; + + // Checking if the field is empty + if (start > field_data_last || serializedTrieContains(opts.naValuesTrie, data + start, field_len)) { + atomicAdd(&column_infos[col].null_count, 1); + start = field_end + 1; + continue; + } + + int digit_count = 0; + int decimal_count = 0; + int slash_count = 0; + int dash_count = 0; + int colon_count = 0; + int exponent_count = 0; + int other_count = 0; + + const bool maybe_hex = ((field_len > 2 && data[start] == '0' && data[start + 1] == 'x') || + (field_len > 3 && data[start] == '-' && data[start + 1] == '0' && data[start + 2] == 'x')); + for (long pos = start; pos <= field_data_last; pos++) { + if (is_digit(data[pos], maybe_hex)) { + digit_count++; + continue; + } + // Looking for unique characters that will help identify column types + switch (data[pos]) { + case '.': + decimal_count++; + break; + case '-': + dash_count++; + break; + case '/': + slash_count++; + break; + case ':': + colon_count++; + break; + case 'e': + case 'E': + if (!maybe_hex && pos > start && pos < field_data_last) + exponent_count++; + break; + default: + other_count++; + break; + } + } + + // Integers have to have the length of the string + int int_req_number_cnt = field_len; + // Off by one if they start with a minus sign + if (data[start] == '-' && field_len > 1) { + --int_req_number_cnt; + } + // Off by one if they are a hexadecimal number + if (maybe_hex) { + --int_req_number_cnt; + } + if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len) || + serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) { + atomicAdd(&column_infos[col].bool_count, 1); + } else if (digit_count == int_req_number_cnt) { + atomicAdd(&column_infos[col].int_count, 1); + } else if (is_like_float(field_len, digit_count, decimal_count, dash_count, exponent_count)) { + atomicAdd(&column_infos[col].float_count, 1); + } + // A date-time field cannot have more than 3 non-special characters + // A number field cannot have more than one decimal point + else if (other_count > 3 || decimal_count > 1) { + atomicAdd(&column_infos[col].string_count, 1); + } else { + // A date field can have either one or two '-' or '\'; A legal combination will only have one of them + // To simplify the process of auto column detection, we are not covering all the date-time formation permutations + if ((dash_count > 0 && dash_count <= 2 && slash_count == 0) || + (dash_count == 0 && slash_count > 0 && slash_count <= 2)) { + if (colon_count <= 2) { + atomicAdd(&column_infos[col].datetime_count, 1); + } else { + atomicAdd(&column_infos[col].string_count, 1); + } + } else { + // Default field type is string + atomicAdd(&column_infos[col].string_count, 1); + } + } + start = field_end + 1; + } +} + +} // namespace anonymous + +/** + * @copydoc cudf::io::json::gpu::convert_json_to_columns + * + **/ +void convert_json_to_columns(rmm::device_buffer const& input_data, + data_type *const dtypes, void *const *output_columns, + cudf::size_type num_records, + cudf::size_type num_columns, + const uint64_t 
*rec_starts, + bitmask_type *const *valid_fields, cudf::size_type *num_valid_fields, + ParseOptions const& opts, + cudaStream_t stream) { + int block_size; + int min_grid_size; + CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, convert_json_to_columns_kernel)); + + const int grid_size = (num_records + block_size - 1) / block_size; + + convert_json_to_columns_kernel <<< grid_size, block_size, 0, stream >>> ( + static_cast(input_data.data()), input_data.size(), + rec_starts, num_records, dtypes, opts, output_columns, + num_columns, valid_fields, num_valid_fields); + + CUDA_TRY(cudaGetLastError()); +} + +/** + * @copydoc cudf::io::json::gpu::detect_data_types + * + **/ +void detect_data_types( + ColumnInfo *column_infos, + const char *data, size_t data_size, + const ParseOptions &options, int num_columns, + const uint64_t *rec_starts, cudf::size_type num_records, + cudaStream_t stream) { + int block_size; + int min_grid_size; + CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, detect_json_data_types)); + + // Calculate actual block count to use based on records count + const int grid_size = (num_records + block_size - 1) / block_size; + + detect_json_data_types <<< grid_size, block_size, 0, stream >>> ( + data, data_size, options, num_columns, + rec_starts, num_records, column_infos); + + CUDA_TRY(cudaGetLastError()); +} + +} // namespace gpu +} // namespace json +} // namespace io +} // namespace experimental +} // namespace cudf diff --git a/cuda_code/json_gpu_4.cu b/cuda_code/json_gpu_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..119d7a3e1046b2da6e949a2a0ad7a0d4f46d6c2e --- /dev/null +++ b/cuda_code/json_gpu_4.cu @@ -0,0 +1,671 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "json_common.h" +#include "json_gpu.h" + +#include + +#include + +#include +#include + +#include + +#include +#include + +namespace cudf { +namespace experimental { +namespace io { +namespace json { +namespace gpu { +using namespace ::cudf; + +using string_pair = std::pair; + +namespace { +/** + * @brief CUDA Kernel that modifies the start and stop offsets to exclude + * the sections outside of the top level brackets. + * + * The top level brackets characters are excluded from the resulting range. 
+ * Parameter stop has the same semantics as end() in STL containers + * (one past the last element) + * + * @param[in] data Pointer to the device buffer containing the data to process + * @param[in,out] start Offset of the first character in the range + * @param[in,out] stop Offset of the first character after the range + * + * @return void + **/ +__device__ void limit_range_to_brackets(const char *data, long &start, long &stop) +{ + while (start < stop && data[start] != '[' && data[start] != '{') { start++; } + start++; + + while (start < stop && data[stop - 1] != ']' && data[stop - 1] != '}') { stop--; } + stop--; +} + +/** + * @brief CUDA kernel that finds the end position of the next field name, + * including the colon that separates the name from the field value. + * + * Returns the position after the colon that preceeds the value token. + * + * @param[in] data Pointer to the device buffer containing the data to process + * @param[in] opts Parsing options (e.g. delimiter and quotation character) + * @param[in] start Offset of the first character in the range + * @param[in] stop Offset of the first character after the range + * + * @return long Position of the first character after the field name. + **/ +__device__ long seek_field_name_end(const char *data, + const ParseOptions opts, + long start, + long stop) +{ + bool quotation = false; + for (auto pos = start; pos < stop; ++pos) { + // Ignore escaped quotes + if (data[pos] == opts.quotechar && data[pos - 1] != '\\') { + quotation = !quotation; + } else if (!quotation && data[pos] == ':') { + return pos + 1; + } + } + return stop; +} + +/** + * @brief Decodes a numeric value base on templated cudf type T with specified + * base. + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed numeric value + **/ +template +__inline__ __device__ T +decode_value(const char *data, long start, long end, ParseOptions const &opts) +{ + return cudf::experimental::io::gpu::parse_numeric(data, start, end, opts); +} + +/** + * @brief Decodes a numeric value base on templated cudf type T + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed numeric value + **/ +template +__inline__ __device__ T +decode_value(const char *data, long start, long end, ParseOptions const &opts) +{ + return cudf::experimental::io::gpu::parse_numeric(data, start, end, opts); +} + +/** + * @brief Decodes a timestamp_D + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed timestamp_D + **/ +template <> +__inline__ __device__ cudf::timestamp_D decode_value(const char *data, + long start, + long end, + ParseOptions const &opts) +{ + return parseDateFormat(data, start, end, opts.dayfirst); +} + +/** + * @brief Decodes a timestamp_s + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed timestamp_s + **/ +template <> 
+__inline__ __device__ cudf::timestamp_s decode_value(const char *data, + long start, + long end, + ParseOptions const &opts) +{ + auto milli = parseDateTimeFormat(data, start, end, opts.dayfirst); + return milli / 1000; +} + +/** + * @brief Decodes a timestamp_ms + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed timestamp_ms + **/ +template <> +__inline__ __device__ cudf::timestamp_ms decode_value(const char *data, + long start, + long end, + ParseOptions const &opts) +{ + auto milli = parseDateTimeFormat(data, start, end, opts.dayfirst); + return milli; +} + +/** + * @brief Decodes a timestamp_us + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed timestamp_us + **/ +template <> +__inline__ __device__ cudf::timestamp_us decode_value(const char *data, + long start, + long end, + ParseOptions const &opts) +{ + auto milli = parseDateTimeFormat(data, start, end, opts.dayfirst); + return milli * 1000; +} + +/** + * @brief Decodes a timestamp_ns + * + * @param data The character string for parse + * @param start The index within data to start parsing from + * @param end The end index within data to end parsing + * @param opts The global parsing behavior options + * + * @return The parsed timestamp_ns + **/ +template <> +__inline__ __device__ cudf::timestamp_ns decode_value(const char *data, + long start, + long end, + ParseOptions const &opts) +{ + auto milli = parseDateTimeFormat(data, start, end, opts.dayfirst); + return milli * 1000000; +} + +// The purpose of these is merely to allow compilation ONLY +template <> +__inline__ __device__ cudf::string_view decode_value(const char *data, + long start, + long end, + ParseOptions const &opts) +{ + return cudf::string_view{}; +} +template <> +__inline__ __device__ cudf::dictionary32 decode_value(const char *data, + long start, + long end, + ParseOptions const &opts) +{ + return cudf::dictionary32{}; +} + +/** + * @brief Functor for converting plain text data to cuDF data type value. + **/ +struct ConvertFunctor { + /** + * @brief Template specialization for operator() for types whose values can be + * convertible to a 0 or 1 to represent false/true. The converting is done by + * checking against the default and user-specified true/false values list. + * + * It is handled here rather than within convertStrToValue() as that function + * is used by other types (ex. timestamp) that aren't 'booleable'. 
+ **/ + template ::value> * = nullptr> + __host__ __device__ __forceinline__ bool operator()(const char *data, + void *output_columns, + long row, + long start, + long end, + const ParseOptions &opts) + { + T &value{static_cast(output_columns)[row]}; + + // Check for user-specified true/false values first, where the output is + // replaced with 1/0 respectively + const size_t field_len = end - start + 1; + if (serializedTrieContains(opts.trueValuesTrie, data + start, field_len)) { + value = 1; + } else if (serializedTrieContains(opts.falseValuesTrie, data + start, field_len)) { + value = 0; + } else { + value = decode_value(data, start, end, opts); + } + + return true; + } + + /** + * @brief Dispatch for floating points, which are set to NaN if the input + * is not valid. In such case, the validity mask is set to zero too. + */ + template ::value> * = nullptr> + __host__ __device__ __forceinline__ bool operator()( + const char *data, void *out_buffer, size_t row, long start, long end, ParseOptions const &opts) + { + auto &value{static_cast(out_buffer)[row]}; + value = decode_value(data, start, end, opts); + return !std::isnan(value); + } + + /** + * @brief Default template operator() dispatch specialization all data types + * (including wrapper types) that is not covered by above. + **/ + template ::value and + !std::is_integral::value> * = nullptr> + __host__ __device__ __forceinline__ bool operator()(const char *data, + void *output_columns, + long row, + long start, + long end, + const ParseOptions &opts) + { + T &value{static_cast(output_columns)[row]}; + value = decode_value(data, start, end, opts); + + return true; + } +}; + +/** + * @brief Checks whether the given character is a whitespace character. + * + * @param[in] ch The character to check + * + * @return True if the input is whitespace, False otherwise + **/ +__inline__ __device__ bool is_whitespace(char ch) { return ch == '\t' || ch == ' '; } + +/** + * @brief Scans a character stream within a range, and adjusts the start and end + * indices of the range to ignore whitespace and quotation characters. + * + * @param[in] data The character stream to scan + * @param[in,out] start The start index to adjust + * @param[in,out] end The end index to adjust + * @param[in] quotechar The character used to denote quotes + * + * @return Adjusted or unchanged start_idx and end_idx + **/ +__inline__ __device__ void trim_field_start_end(const char *data, + long *start, + long *end, + char quotechar = '\0') +{ + while ((*start < *end) && is_whitespace(data[*start])) { (*start)++; } + if ((*start < *end) && data[*start] == quotechar) { (*start)++; } + while ((*start <= *end) && is_whitespace(data[*end])) { (*end)--; } + if ((*start <= *end) && data[*end] == quotechar) { (*end)--; } +} + +/** + * @brief Returns true is the input character is a valid digit. + * Supports both decimal and hexadecimal digits (uppercase and lowercase). + * + * @param c Chracter to check + * @param is_hex Whether to check as a hexadecimal + * + * @return `true` if it is digit-like, `false` otherwise + */ +__device__ __inline__ bool is_digit(char c, bool is_hex = false) +{ + if (c >= '0' && c <= '9') return true; + + if (is_hex) { + if (c >= 'A' && c <= 'F') return true; + if (c >= 'a' && c <= 'f') return true; + } + + return false; +} + +/** + * @brief Returns true if the counters indicate a potentially valid float. + * False positives are possible because positions are not taken into account. + * For example, field "e.123-" would match the pattern. 
+ */ +__device__ __inline__ bool is_like_float( + long len, long digit_cnt, long decimal_cnt, long dash_cnt, long exponent_cnt) +{ + // Can't have more than one exponent and one decimal point + if (decimal_cnt > 1) return false; + if (exponent_cnt > 1) return false; + // Without the exponent or a decimal point, this is an integer, not a float + if (decimal_cnt == 0 && exponent_cnt == 0) return false; + + // Can only have one '-' per component + if (dash_cnt > 1 + exponent_cnt) return false; + + // If anything other than these characters is present, it's not a float + if (digit_cnt + decimal_cnt + dash_cnt + exponent_cnt != len) return false; + + // Needs at least 1 digit, 2 if exponent is present + if (digit_cnt < 1 + exponent_cnt) return false; + + return true; +} + +/** + * @brief CUDA kernel that parses and converts plain text data into cuDF column data. + * + * Data is processed one record at a time + * + * @param[in] data The entire data to read + * @param[in] data_size Size of the data buffer, in bytes + * @param[in] rec_starts The start of each data record + * @param[in] num_records The number of lines/rows + * @param[in] dtypes The data type of each column + * @param[in] opts A set of parsing options + * @param[out] output_columns The output column data + * @param[in] num_columns The number of columns + * @param[out] valid_fields The bitmaps indicating whether column fields are valid + * @param[out] num_valid_fields The numbers of valid fields in columns + * + * @return void + **/ +__global__ void convert_json_to_columns_kernel(const char *data, + size_t data_size, + const uint64_t *rec_starts, + cudf::size_type num_records, + const data_type *dtypes, + ParseOptions opts, + void *const *output_columns, + int num_columns, + bitmask_type *const *valid_fields, + cudf::size_type *num_valid_fields) +{ + const long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); + if (rec_id >= num_records) return; + + long start = rec_starts[rec_id]; + // has the same semantics as end() in STL containers (one past last element) + long stop = ((rec_id < num_records - 1) ? 
rec_starts[rec_id + 1] : data_size); + + limit_range_to_brackets(data, start, stop); + const bool is_object = (data[start - 1] == '{'); + + for (int col = 0; col < num_columns && start < stop; col++) { + if (is_object) { start = seek_field_name_end(data, opts, start, stop); } + // field_end is at the next delimiter/newline + const long field_end = cudf::experimental::io::gpu::seek_field_end(data, opts, start, stop); + long field_data_last = field_end - 1; + // Modify start & end to ignore whitespace and quotechars + trim_field_start_end(data, &start, &field_data_last, opts.quotechar); + // Empty fields are not legal values + if (start <= field_data_last && + !serializedTrieContains(opts.naValuesTrie, data + start, field_end - start)) { + // Type dispatcher does not handle strings + if (dtypes[col].id() == STRING) { + auto str_list = static_cast(output_columns[col]); + str_list[rec_id].first = data + start; + str_list[rec_id].second = field_data_last - start + 1; + + // set the valid bitmap - all bits were set to 0 to start + set_bit(valid_fields[col], rec_id); + atomicAdd(&num_valid_fields[col], 1); + } else { + if (cudf::experimental::type_dispatcher(dtypes[col], + ConvertFunctor{}, + data, + output_columns[col], + rec_id, + start, + field_data_last, + opts)) { + // set the valid bitmap - all bits were set to 0 to start + set_bit(valid_fields[col], rec_id); + atomicAdd(&num_valid_fields[col], 1); + } + } + } else if (dtypes[col].id() == STRING) { + auto str_list = static_cast(output_columns[col]); + str_list[rec_id].first = nullptr; + str_list[rec_id].second = 0; + } + start = field_end + 1; + } +} + +/** + * @brief CUDA kernel that processes a buffer of data and determines information about the + * column types within. + * + * Data is processed in one row/record at a time, so the number of total + * threads (tid) is equal to the number of rows. + * + * @param[in] data Input data buffer + * @param[in] data_size Size of the data buffer, in bytes + * @param[in] opts A set of parsing options + * @param[in] num_columns The number of columns of input data + * @param[in] rec_starts The start the input data of interest + * @param[in] num_records The number of lines/rows of input data + * @param[out] column_infos The count for each column data type + * + * @returns void + **/ +__global__ void detect_json_data_types(const char *data, + size_t data_size, + const ParseOptions opts, + int num_columns, + const uint64_t *rec_starts, + cudf::size_type num_records, + ColumnInfo *column_infos) +{ + long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); + if (rec_id >= num_records) return; + + long start = rec_starts[rec_id]; + // has the same semantics as end() in STL containers (one past last element) + long stop = ((rec_id < num_records - 1) ? 
rec_starts[rec_id + 1] : data_size); + + limit_range_to_brackets(data, start, stop); + const bool is_object = (data[start - 1] == '{'); + + for (int col = 0; col < num_columns; col++) { + if (is_object) { start = seek_field_name_end(data, opts, start, stop); } + auto field_start = start; + const long field_end = + cudf::experimental::io::gpu::seek_field_end(data, opts, field_start, stop); + long field_data_last = field_end - 1; + trim_field_start_end(data, &field_start, &field_data_last); + const int field_len = field_data_last - field_start + 1; + // Advance the start offset + start = field_end + 1; + + // Checking if the field is empty + if (field_start > field_data_last || + serializedTrieContains(opts.naValuesTrie, data + field_start, field_len)) { + atomicAdd(&column_infos[col].null_count, 1); + continue; + } + // Don't need counts to detect strings, any field in quotes is deduced to be a string + if (data[field_start] == opts.quotechar && data[field_data_last] == opts.quotechar) { + atomicAdd(&column_infos[col].string_count, 1); + continue; + } + + int digit_count = 0; + int decimal_count = 0; + int slash_count = 0; + int dash_count = 0; + int colon_count = 0; + int exponent_count = 0; + int other_count = 0; + + const bool maybe_hex = + ((field_len > 2 && data[field_start] == '0' && data[field_start + 1] == 'x') || + (field_len > 3 && data[field_start] == '-' && data[field_start + 1] == '0' && + data[field_start + 2] == 'x')); + for (long pos = field_start; pos <= field_data_last; pos++) { + if (is_digit(data[pos], maybe_hex)) { + digit_count++; + continue; + } + // Looking for unique characters that will help identify column types + switch (data[pos]) { + case '.': decimal_count++; break; + case '-': dash_count++; break; + case '/': slash_count++; break; + case ':': colon_count++; break; + case 'e': + case 'E': + if (!maybe_hex && pos > field_start && pos < field_data_last) exponent_count++; + break; + default: other_count++; break; + } + } + + // Integers have to have the length of the string + int int_req_number_cnt = field_len; + // Off by one if they start with a minus sign + if (data[field_start] == '-' && field_len > 1) { --int_req_number_cnt; } + // Off by one if they are a hexadecimal number + if (maybe_hex) { --int_req_number_cnt; } + if (serializedTrieContains(opts.trueValuesTrie, data + field_start, field_len) || + serializedTrieContains(opts.falseValuesTrie, data + field_start, field_len)) { + atomicAdd(&column_infos[col].bool_count, 1); + } else if (digit_count == int_req_number_cnt) { + atomicAdd(&column_infos[col].int_count, 1); + } else if (is_like_float(field_len, digit_count, decimal_count, dash_count, exponent_count)) { + atomicAdd(&column_infos[col].float_count, 1); + } + // A date-time field cannot have more than 3 non-special characters + // A number field cannot have more than one decimal point + else if (other_count > 3 || decimal_count > 1) { + atomicAdd(&column_infos[col].string_count, 1); + } else { + // A date field can have either one or two '-' or '\'; A legal combination will only have one + // of them To simplify the process of auto column detection, we are not covering all the + // date-time formation permutations + if ((dash_count > 0 && dash_count <= 2 && slash_count == 0) || + (dash_count == 0 && slash_count > 0 && slash_count <= 2)) { + if (colon_count <= 2) { + atomicAdd(&column_infos[col].datetime_count, 1); + } else { + atomicAdd(&column_infos[col].string_count, 1); + } + } else { + // Default field type is string + 
atomicAdd(&column_infos[col].string_count, 1); + } + } + } +} + +} // namespace + +/** + * @copydoc cudf::io::json::gpu::convert_json_to_columns + * + **/ +void convert_json_to_columns(rmm::device_buffer const &input_data, + data_type *const dtypes, + void *const *output_columns, + cudf::size_type num_records, + cudf::size_type num_columns, + const uint64_t *rec_starts, + bitmask_type *const *valid_fields, + cudf::size_type *num_valid_fields, + ParseOptions const &opts, + cudaStream_t stream) +{ + int block_size; + int min_grid_size; + CUDA_TRY(cudaOccupancyMaxPotentialBlockSize( + &min_grid_size, &block_size, convert_json_to_columns_kernel)); + + const int grid_size = (num_records + block_size - 1) / block_size; + + convert_json_to_columns_kernel<<>>( + static_cast(input_data.data()), + input_data.size(), + rec_starts, + num_records, + dtypes, + opts, + output_columns, + num_columns, + valid_fields, + num_valid_fields); + + CUDA_TRY(cudaGetLastError()); +} + +/** + * @copydoc cudf::io::json::gpu::detect_data_types + * + **/ +void detect_data_types(ColumnInfo *column_infos, + const char *data, + size_t data_size, + const ParseOptions &options, + int num_columns, + const uint64_t *rec_starts, + cudf::size_type num_records, + cudaStream_t stream) +{ + int block_size; + int min_grid_size; + CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, detect_json_data_types)); + + // Calculate actual block count to use based on records count + const int grid_size = (num_records + block_size - 1) / block_size; + + detect_json_data_types<<>>( + data, data_size, options, num_columns, rec_starts, num_records, column_infos); + + CUDA_TRY(cudaGetLastError()); +} + +} // namespace gpu +} // namespace json +} // namespace io +} // namespace experimental +} // namespace cudf diff --git a/cuda_code/kcRampLikelihoodLog1PMultBias.cu b/cuda_code/kcRampLikelihoodLog1PMultBias.cu new file mode 100644 index 0000000000000000000000000000000000000000..40c90f3b5def78e107eb010276f1ef5857d9cb37 --- /dev/null +++ b/cuda_code/kcRampLikelihoodLog1PMultBias.cu @@ -0,0 +1,234 @@ +#include + +#include +#include +#include + +#include +#include +#include "cublas_v2.h" +#include + +#include +#include + + +#include "mex.h" + +#include "kcDefs.h" //see for info on anything starting with KC_ +#include "kcArrayFunctions.h" + +//poison log likelihood for one observation +__device__ KC_FP_TYPE lh(KC_FP_TYPE y, KC_FP_TYPE x, KC_FP_TYPE g, KC_FP_TYPE dt, KC_FP_TYPE sh, KC_FP_TYPE bias) { + KC_FP_TYPE fr = KC_MAX(KC_MIN(log1p(KC_EXP(g*x))*exp(sh)+bias,KC_MAXN),KC_MINN); + return y*(KC_LOG(fr)+KC_LOG(dt)) - dt*fr - KC_GAMMALN(y+1.0); +} + +//sums up log likelihood of each trial given model parameters +__global__ void kcSumGBfinal(const KC_FP_TYPE * log_p_tr, KC_FP_TYPE * log_p, const int NT) { + int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx < 1) { + log_p[0] = 0; + for(int ii = 0; ii < NT; ii++) { + log_p[0] += log_p_tr[ii]; + } + } +} + +//averages log likelihood of each simulated path +// (one thread for each trial) +__global__ void kcSumGBlogpTr(const KC_FP_TYPE * log_p, KC_FP_TYPE * log_p_tr, const int NT, const int nSims) { + int idx = blockIdx.x*blockDim.x+threadIdx.x; + if(idx < NT) { + + log_p_tr[idx] = 0; + KC_FP_TYPE trSum = 0; + KC_FP_TYPE log_x = 0; + log_p_tr[idx] = KC_SQRT(-1.0); + + //computes log( 1/nSims * \sum exp( log p(y | sim paths)) ) for a single trial + // does the sum in a slightly more numerical stable way than just blindly exponentiating all the log likleihoods + + for(int ii = 0; ii 
< nSims && isnan(log_p_tr[idx]);ii++) { + + trSum = 1 ; + log_x = log_p[ii*NT+idx]; + for(int kk = 0; kk < ii; kk++) { + trSum += KC_EXP(log_p[kk*NT+idx] - log_x); + } + for(int kk = ii+1; kk < nSims; kk++) { + trSum += KC_EXP(log_p[kk*NT+idx] - log_x); + } + if(trSum > 1e-25 && !isnan(trSum) && !isinf(trSum)) { + log_p_tr[idx] = log_x-KC_LOG((double)nSims)+KC_LOG(trSum); + break; + } + } + + } +} + +//simulates a ramping (diffusion-to-bound) path for each trial and computes likelihood +__global__ void kcSimGBPaths(const KC_FP_TYPE * y, const int * trIdx, const int * betaIdx, KC_FP_TYPE * xx, const KC_FP_TYPE * b,const KC_FP_TYPE w2,const KC_FP_TYPE l_0, const KC_FP_TYPE g, const KC_FP_TYPE dt, KC_FP_TYPE * log_p, const int NT, const int TT, const int sim, KC_FP_TYPE * spe, KC_FP_TYPE bias) { + int idx = blockIdx.x*blockDim.x+threadIdx.x; + if(idx < NT ) { + int trNum = idx; + int T1 = trIdx[trNum]; + //xx contains zero mean Gaussian noise of variance \omega^2 + + xx[T1] += l_0; //xx[T1] now contains initial point for simulated diffusion trajectory for this trial + + int currIdx = sim*(NT)+idx; + log_p[currIdx] = lh(y[T1],xx[T1],g,dt,spe[T1],bias); + for(int ii = T1+1; ii < trIdx[trNum+1];ii++) { + //progates particle forward in time + xx[ii] = (xx[ii-1] >= 1.0)?1.0:KC_MIN(xx[ii] + xx[ii-1]+b[betaIdx[ii]],1.0); + //log likelihood of single observation (bin) y[ii] given diffusion path is at x[ii] + log_p[currIdx] += lh(y[ii],xx[ii],g,dt,spe[ii],bias); + } + } +} + +//Estimates the log probability of a set of spike trains under the ramping model given a set of fixed parameters +// This estimation is made by Monte Carlo simulations from the model to integrate out latent variable +//args +// 0 = y (observations) +// 1 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y) +// 2 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. 
values begin at 0 instead of 1 to be consistent with C, unlike MATLAB) +// 3 = spike history effect (same size as y) +// 4 = beta values +// 5 = w (variance of diffusion process) +// 6 = l_0 (starting lambda value) +// 7 = g (absorbing boundary effective height) +// 8 = dt (bin size in seconds) +// 9 = number of samples to use to estimate log probability of observations (I recommend using at least 1000) +// 10 = bias +//outputs (left-hand side) +// 0 = log p(y|\theta) +// 1 = log p(y|\theta) for each individual trial +void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { + cudaError_t ce; + + //load up trial data + unsigned int TT = kcGetArrayNumEl(prhs[0]); + KC_FP_TYPE * y = kcGetArrayData(prhs[0]); + int * trIdx = kcGetArrayDataInt(prhs[1]); + unsigned int NT = kcGetArrayNumEl(prhs[1])-1; + int * betaIdx = kcGetArrayDataInt(prhs[2],TT); + + // load spike history effect + KC_FP_TYPE * spe = kcGetArrayData(prhs[3]); + + //how many simulations to use to estimate log p(y|\theta) + int trialsToSim = (int)mxGetScalar(prhs[9]); + + //load up parameters to simulate model + if(mxGetClassID(prhs[4]) != KC_FP_TYPE_MATLAB) { + mexErrMsgTxt("Beta input wrong floating point type (kcSimGaussianBound)!"); + } + KC_FP_TYPE * b = (KC_FP_TYPE *)mxGetPr(prhs[4]); + int numBetas = mxGetNumberOfElements(prhs[4]); + KC_FP_TYPE * b_gpu; + + ce = cudaMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas); + if(ce != cudaSuccess) { + mexPrintf("Error allocating space for betas on device - first allocation in function (kcSimGaussianBound) "); + mexPrintf(cudaGetErrorString(ce)); + mexPrintf(" (%d)\n", (int)ce); + } + checkCudaErrors(cudaMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,cudaMemcpyHostToDevice)); + + KC_FP_TYPE w = mxGetScalar(prhs[5]); + KC_FP_TYPE l_0 = mxGetScalar(prhs[6]); + KC_FP_TYPE g = mxGetScalar(prhs[7]); + KC_FP_TYPE dt = mxGetScalar(prhs[8]); + KC_FP_TYPE bias = mxGetScalar(prhs[10]); + + + + //setup CUDA variables + random number generator + int randSize = TT + (((TT)%2==0)?0:1); + KC_FP_TYPE * xx; + checkCudaErrors(cudaMalloc((void**)&xx,randSize*sizeof(KC_FP_TYPE))); + curandGenerator_t curandGen = 0; + curandStatus_t curandStatus; + curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT); + if(curandStatus != CURAND_STATUS_SUCCESS ) { + mexPrintf("CURAND-1 error %d\n",(int)curandStatus); + mexErrMsgTxt("CUDA errors"); + } + + struct timeval now; + gettimeofday(&now,NULL); + unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); + curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed); + if(curandStatus != CURAND_STATUS_SUCCESS ) { + mexPrintf("CURAND-2 error %d\n",(int)curandStatus); + mexErrMsgTxt("CUDA errors"); + } + + int blockSize = 2; + int nBlocks = NT/blockSize + ((NT%blockSize==0)?0:1); + + int blockSizeT = 2; + int nBlocksT = NT/blockSizeT + ((NT%blockSizeT==0)?0:1); + + //allocates sspace on GPU for simulating the likelihood + KC_FP_TYPE * log_p; + //KC_FP_TYPE * log_p_2; + KC_FP_TYPE * log_p_tr; + KC_FP_TYPE * sum_log_p; + checkCudaErrors(cudaMalloc((void**)&log_p,sizeof(KC_FP_TYPE)*NT*trialsToSim)); + //checkCudaErrors(cudaMalloc((void**)&log_p_2,sizeof(KC_FP_TYPE)*NT*trialsToSim)); + checkCudaErrors(cudaMalloc((void**)&log_p_tr,sizeof(KC_FP_TYPE)*NT)); + checkCudaErrors(cudaMalloc((void**)&sum_log_p,sizeof(KC_FP_TYPE)*1)); + + // generate AR1 noise + for(int kk = 0; kk < trialsToSim; kk++) { + //generates zero mean Gaussian noise with correct variance + 
curandStatus = KC_RANDOM_NORMAL_FUNCTION(curandGen,xx,randSize,0,KC_SQRT(w)); + if(curandStatus != CURAND_STATUS_SUCCESS ) { + mexPrintf("CURAND gen error %d\n",(int)curandStatus); + mexErrMsgTxt("CUDA errors"); + } + //checkCudaErrors(cudaDeviceSynchronize()); + + //calculate path + logP + kcSimGBPaths<<>>(y,trIdx,betaIdx,xx,b_gpu,w,l_0,g,dt,log_p,NT,TT,kk,spe,bias); + ce = cudaDeviceSynchronize(); + if(ce != cudaSuccess) { + mexPrintf("Error in simulating of kcSimGaussianBound.cu "); + mexPrintf(cudaGetErrorString(ce)); + mexPrintf(" (%d)\n", (int)ce); + mexErrMsgTxt("CUDA errors"); + } + } + + // log_p_2 = log_p; + + //average likelihood of each sampled path to get log p(y|\theta) for each trial + kcSumGBlogpTr<<>>(log_p,log_p_tr,NT,trialsToSim); + checkCudaErrors(cudaDeviceSynchronize()); + + //sums up log likelihood of each trial + kcSumGBfinal<<<1,1>>>(log_p_tr,sum_log_p,NT); + checkCudaErrors(cudaDeviceSynchronize()); + + //copy back to host + if(nlhs > 0) { + plhs[0] = mxCreateNumericMatrix(1,1,KC_FP_TYPE_MATLAB,mxREAL); + checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[0]),sum_log_p,1*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost)); + } + if(nlhs > 1) { + plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); + checkCudaErrors(cudaMemcpy((KC_FP_TYPE *)mxGetPr(plhs[1]),log_p_tr,NT*sizeof(KC_FP_TYPE),cudaMemcpyDeviceToHost)); + } + + //free up CUDA variables + checkCudaErrors(curandDestroyGenerator(curandGen)); + checkCudaErrors(cudaFree(xx)); + checkCudaErrors(cudaFree(b_gpu)); + checkCudaErrors(cudaFree(log_p)); + checkCudaErrors(cudaFree(log_p_tr)); + checkCudaErrors(cudaFree(sum_log_p)); +} diff --git a/cuda_code/kern_33.cu b/cuda_code/kern_33.cu new file mode 100644 index 0000000000000000000000000000000000000000..d64ba074c1fed370659ec3d6d825ace2478a038d --- /dev/null +++ b/cuda_code/kern_33.cu @@ -0,0 +1,105 @@ +/** + * \file dnn/src/cuda/elemwise_multi_type/kern.cu + * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ */ + +#include "src/cuda/elemwise_helper.cuh" +#include "src/cuda/elemwise_multi_type/kern.cuh" +#include "src/cuda/elemwise_multi_type/kern_ops.cuh" + +using namespace megdnn; +using namespace cuda; +using namespace elemwise_multi_type; +using namespace elemwise_intl; +using namespace kern_ops; + +void elemwise_multi_type::fma3_int16x32x32x32_1c1( + const ElemwiseOpParamN<3>& param, dt_int32* dst, cudaStream_t stream) { + typedef Fma3Int16x32x32x32Bcast101Op Caller; + void (*fptr)(Caller, uint32_t) = cuda_kern; + int grid_size, block_size; + get_launch_spec(reinterpret_cast(fptr), param.size, &grid_size, + &block_size); + + Caller caller; + caller.a.host_init(param[0], grid_size, block_size); + caller.b.host_init(param[1], grid_size, block_size); + caller.c.host_init(param[2], grid_size, block_size); + caller.dst = dst; + + (*fptr)<<>>(caller, param.size); + after_kernel_launch(); +} + +template +void elemwise_multi_type::round_shr_saturate_iXxi8xiX_scalar( + const ElemwiseOpParamN<2>& param, dst_type* dst, cudaStream_t stream) { + typedef RoundShrSaturateIXxBcastScalarOp Caller; + void (*fptr)(Caller, uint32_t) = cuda_kern; + int grid_size, block_size; + get_launch_spec(reinterpret_cast(fptr), param.size, &grid_size, + &block_size); + + Caller caller; + caller.a.host_init(param[0], grid_size, block_size); + caller.b.host_init(param[1], grid_size, block_size); + caller.dst = dst; + + (*fptr)<<>>(caller, param.size); + after_kernel_launch(); +} + +#define INST(stype) \ + template void \ + elemwise_multi_type::round_shr_saturate_iXxi8xiX_scalar( \ + const ElemwiseOpParamN<2>& param, dt_int8*, cudaStream_t) +INST(int32_t); +INST(int16_t); +INST(int8_t); +#undef INST + +#define INST(stype) \ + template void \ + elemwise_multi_type::round_shr_saturate_iXxi8xiX_scalar( \ + const ElemwiseOpParamN<2>& param, dt_int16*, cudaStream_t) +INST(int32_t); +INST(int16_t); +#undef INST + +template +void elemwise_multi_type::fuse_add_rmulh_round_shr_saturate_bcast_1c11( + const ElemwiseOpParamN<6>& param, dt_int8* dst, cudaStream_t stream) { + typedef FuseAddRmulhRoundingShrBcastScalarOp Caller; + void (*fptr)(Caller, uint32_t) = cuda_kern; + int grid_size, block_size; + get_launch_spec(reinterpret_cast(fptr), param.size, &grid_size, + &block_size); + + Caller caller; + caller.x.host_init(param[0], grid_size, block_size); + caller.b.host_init(param[1], grid_size, block_size); + caller.M.host_init(param[2], grid_size, block_size); + caller.k.host_init(param[3], grid_size, block_size); + caller.minv.host_init(param[4], grid_size, block_size); + caller.maxv.host_init(param[5], grid_size, block_size); + caller.dst = dst; + + (*fptr)<<>>(caller, param.size); + after_kernel_launch(); +} + +#define INST(stype) \ + template void \ + elemwise_multi_type::fuse_add_rmulh_round_shr_saturate_bcast_1c11( \ + const ElemwiseOpParamN<6>& param, dt_int8*, cudaStream_t) +INST(int32_t); +INST(int16_t); +#undef INST + +// vim: ft=cuda syntax=cuda.doxygen diff --git a/cuda_code/kern_43.cu b/cuda_code/kern_43.cu new file mode 100644 index 0000000000000000000000000000000000000000..5631ea2945e63b21490f367cf7e695e456df9dc0 --- /dev/null +++ b/cuda_code/kern_43.cu @@ -0,0 +1,31 @@ +/** + * \file dnn/src/cuda/sleep/kern.cu + * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") + * + * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. 
+ * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + */ + +#include "./kern.cuh" + +namespace { + +static __global__ void kern(uint64_t cycles) { + uint64_t start = clock64(); + for (;;) { + if (clock64() - start > cycles) + return; + } +} + +} // namespace + +void megdnn::cuda::sleep(cudaStream_t stream, uint64_t cycles) { + kern<<<1, 1, 0, stream>>>(cycles); + after_kernel_launch(); +} + +// vim: syntax=cpp.doxygen diff --git a/cuda_code/kernel-dbg-info.cu b/cuda_code/kernel-dbg-info.cu new file mode 100644 index 0000000000000000000000000000000000000000..9cfad165e9ce626cee332fc5be28d99d9a01702f --- /dev/null +++ b/cuda_code/kernel-dbg-info.cu @@ -0,0 +1,50 @@ +// RUN: echo "GPU binary would be here" > %t + +// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -O0 \ +// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \ +// RUN: -o - -x hip | FileCheck -check-prefixes=CHECK,O0 %s +// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm %s -O0 \ +// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \ +// RUN: -o - -x hip -fcuda-is-device | FileCheck -check-prefix=DEV %s + +// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -O0 \ +// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \ +// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 \ +// RUN: | FileCheck -check-prefixes=CHECK,O0 %s +// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm %s -O0 \ +// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \ +// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 \ +// RUN: -fcuda-is-device | FileCheck -check-prefix=DEV %s + +// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -O3 \ +// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \ +// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 | FileCheck %s +// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm %s -O3 \ +// RUN: -fcuda-include-gpubinary %t -debug-info-kind=limited \ +// RUN: -o - -x hip -debugger-tuning=gdb -dwarf-version=4 \ +// RUN: -fcuda-is-device | FileCheck -check-prefix=DEV %s + +#include "Inputs/cuda.h" + +extern "C" __global__ void ckernel(int *a) { + *a = 1; +} + +// Device side kernel names +// CHECK: @[[CKERN:[0-9]*]] = {{.*}} c"ckernel\00" + +// DEV: define {{.*}}@ckernel{{.*}}!dbg +// DEV: store {{.*}}!dbg +// DEV: ret {{.*}}!dbg + +// Make sure there is no !dbg between function attributes and '{' +// CHECK: define void @[[CSTUB:__device_stub__ckernel]]{{.*}} #{{[0-9]+}} { +// CHECK-NOT: call {{.*}}@hipLaunchByPtr{{.*}}!dbg +// CHECK: call {{.*}}@hipLaunchByPtr{{.*}}@[[CSTUB]] +// CHECK-NOT: ret {{.*}}!dbg + +// CHECK-LABEL: define {{.*}}@_Z8hostfuncPi{{.*}}!dbg +// O0: call void @[[CSTUB]]{{.*}}!dbg +void hostfunc(int *a) { + ckernel<<<1, 1>>>(a); +} diff --git a/cuda_code/kernel-stub-name.cu b/cuda_code/kernel-stub-name.cu new file mode 100644 index 0000000000000000000000000000000000000000..a16592602dd8df65e321f9b0ab2166cfa291707e --- /dev/null +++ b/cuda_code/kernel-stub-name.cu @@ -0,0 +1,20 @@ +// RUN: echo "GPU binary would be here" > %t + +// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ +// RUN: -fcuda-include-gpubinary %t -o - -x hip\ +// RUN: | FileCheck -allow-deprecated-dag-overlap %s --check-prefixes=CHECK + +#include "Inputs/cuda.h" + +template +__global__ void kernelfunc() {} + +// CHECK-LABEL: define{{.*}}@_Z8hostfuncv() +// CHECK: call void 
@[[STUB:__device_stub__Z10kernelfuncIiEvv]]() +void hostfunc(void) { kernelfunc<<<1, 1>>>(); } + +// CHECK: define{{.*}}@[[STUB]] +// CHECK: call{{.*}}@hipLaunchByPtr{{.*}}@[[STUB]] + +// CHECK-LABEL: define{{.*}}@__hip_register_globals +// CHECK: call{{.*}}@__hipRegisterFunction{{.*}}@[[STUB]] diff --git a/cuda_code/kernelDpFreeEnergyCompressibleParticles.cu b/cuda_code/kernelDpFreeEnergyCompressibleParticles.cu new file mode 100644 index 0000000000000000000000000000000000000000..37f4efd9219e601dd011b982743c86e1bf3b0831 --- /dev/null +++ b/cuda_code/kernelDpFreeEnergyCompressibleParticles.cu @@ -0,0 +1,865 @@ +// Filename: kernelDpFreeEnergyCompressibleParticles.cu +// +// Copyright (c) 2010-2013, Florencio Balboa Usabiaga +// +// This file is part of Fluam +// +// Fluam is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Fluam is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Fluam. If not, see . + + +__global__ void kernelDpFreeEnergyCompressibleParticles(double* densityGPU, + double* densityGPU2, + double* vxGPU, + double* vyGPU, + double* vzGPU, + double* dmGPU, + double* dpxGPU, + double* dpyGPU, + double* dpzGPU, + double* d_rand, + double* fxboundaryGPU, + double* fyboundaryGPU, + double* fzboundaryGPU, + double* omegaGPU, + long long step, + double RK1, double RK2, double RK3){ + + int i = blockDim.x * blockIdx.x + threadIdx.x; + if(i>=ncellsGPU) return; + + //Particle force + int vecino, particle; + double fx = 0.f; + double fy = 0.f; + double fz = 0.f; + + + //Particle contribution + + //FORCE IN THE X DIRECTION + //Particles in Cell i + int np = tex1Dfetch(texCountParticlesInCellX,i); + for(int j=0;j0.7){ + time += dtGPU; + } + else if(RK3==0){ + time += 0.5*dtGPU; + } + if(kz==0){ + pressure += dp0 * sin(frequency*time); + pressure3 += dp0 * sin(frequency*time); + pressure4 += dp0 * sin(frequency*time); + } + else if(kz==(mzGPU-1)){ + pressure5 += dp0 * sin(frequency*time); + }*/ + + + dm = + invdxGPU * ((density3 + density) * vx - (density + density2) * vx2) + + invdyGPU * ((density4 + density) * vy - (density + density1) * vy1) + + invdzGPU * ((density5 + density) * vz - (density + density0) * vz0); + + dm = -0.5 * dm; + + double sXX, sXY, sXZ; + double sYX, sYY, sYZ; + double sZX, sZY, sZZ; + + //Particle extra pressure + pressure += omegaGPU[i]; + pressure3 += omegaGPU[vecino3]; + pressure4 += omegaGPU[vecino4]; + pressure5 += omegaGPU[vecino5]; + + sXX = pressure3 - pressure + fx * dxGPU ;//+ 0.00048828125 ; + sYY = pressure4 - pressure + fy * dyGPU ; + sZZ = pressure5 - pressure + fz * dzGPU ; + + + sXX += 0.125 * ((density3+density)*vx + (densityGPU[vecinopxpx]+density3)*vx3)*(vx+vx3); + sXX -= 0.125 * ((density+density2)*vx2 + (density3+density)*vx)*(vx2+vx); + + sYY += 0.125 * ((density4+density)*vy + (densityGPU[vecinopypy]+density4)*vy4)*(vy+vy4); + sYY -= 0.125 * ((density+density1)*vy1 + (density4+density)*vy)*(vy1+vy); + + sZZ += 0.125 * ((density5+density)*vz + (densityGPU[vecinopzpz]+density5)*vz5)*(vz+vz5); + sZZ -= 0.125 * ((density+density0)*vz0 + (density5+density)*vz)*(vz0+vz); + + double 
densitypxpy, densitypxmy, densitypxpz, densitypxmz; + double densitymxpy, densitymxpz; + double densitypypz, densitypymz, densitymypz; + + densitypxpy = densityGPU[vecinopxpy]; + densitypxmy = densityGPU[vecinopxmy]; + densitypxpz = densityGPU[vecinopxpz]; + densitypxmz = densityGPU[vecinopxmz]; + densitymxpy = densityGPU[vecinomxpy]; + densitymxpz = densityGPU[vecinomxpz]; + densitypypz = densityGPU[vecinopypz]; + densitypymz = densityGPU[vecinopymz]; + densitymypz = densityGPU[vecinomypz]; + + + sXY = 0.125 * ((density4 + density) * vy + (densitypxpy + density3) * vy3) * (vx + vx4); + sXY -= 0.125 * ((density + density1) * vy1 + (density3 + densitypxmy) * vypxmy) * (vx1 + vx); + sXZ = 0.125 * ((density5 + density) * vz + (densitypxpz + density3) * vz3) * (vx + vx5); + sXZ -= 0.125 * ((density + density0) * vz0 + (density3 + densitypxmz) * vzpxmz) * (vx0 + vx); + sYX = 0.125 * ((density3 + density) * vx + (densitypxpy + density4) * vx4) * (vy + vy3); + sYX -= 0.125 * ((density + density2) * vx2 + (density4 + densitymxpy) * vxmxpy) * (vy2 + vy); + sYZ = 0.125 * ((density5 + density) * vz + (densitypypz + density4) * vz4) * (vy + vy5); + sYZ -= 0.125 * ((density + density0) * vz0 + (density4 + densitypymz) * vzpymz) * (vy0 + vy); + sZX = 0.125 * ((density3 + density) * vx + (densitypxpz + density5) * vx5) * (vz + vz3); + sZX -= 0.125 * ((density + density2) * vx2 + (density5 + densitymxpz) * vxmxpz) * (vz2 + vz); + sZY = 0.125 * ((density4 + density) * vy + (densitypypz + density5) * vy5) * (vz + vz4); + sZY -= 0.125 * ((density + density1) * vy1 + (density5 + densitymypz) * vymypz) * (vz1 + vz); + + + //STRESS TENSOR CONTRIBUTION + sXX -= 2. * shearviscosityGPU * invdxGPU * (vx3 - vx) + fact3GPU * + (invdxGPU * (vx3 - vx) + invdyGPU * (vy3 - vypxmy) + invdzGPU * (vz3 - vzpxmz)); + sXX += 2. * shearviscosityGPU * invdxGPU * (vx - vx2) + fact3GPU * + (invdxGPU * (vx - vx2) + invdyGPU * (vy - vy1) + invdzGPU * (vz - vz0)); + sYY -= 2. * shearviscosityGPU * invdyGPU * (vy4 - vy) + fact3GPU * + (invdxGPU * (vx4 - vxmxpy) + invdyGPU * (vy4 - vy) + invdzGPU * (vz4 - vzpymz)); + sYY += 2. * shearviscosityGPU * invdyGPU * (vy - vy1) + fact3GPU * + (invdxGPU * (vx - vx2) + invdyGPU * (vy - vy1) + invdzGPU * (vz - vz0)); + sZZ -= 2. * shearviscosityGPU * invdzGPU * (vz5 - vz) + fact3GPU * + (invdxGPU * (vx5 - vxmxpz) + invdyGPU * (vy5 - vymypz) + invdzGPU * (vz5 -vz)); + sZZ += 2. 
* shearviscosityGPU * invdzGPU * (vz - vz0) + fact3GPU * + (invdxGPU * (vx - vx2) + invdyGPU * (vy - vy1) + invdzGPU * (vz - vz0)); + + sXY -= shearviscosityGPU * (invdyGPU * (vx4 - vx) + invdxGPU * (vy3 - vy)); + sXY += shearviscosityGPU * (invdyGPU * (vx - vx1) + invdxGPU * (vypxmy - vy1)); + sXZ -= shearviscosityGPU * (invdzGPU * (vx5 - vx) + invdxGPU * (vz3 - vz)); + sXZ += shearviscosityGPU * (invdzGPU * (vx - vx0) + invdxGPU * (vzpxmz - vz0)); + sYX -= shearviscosityGPU * (invdxGPU * (vy3 - vy) + invdyGPU * (vx4 - vx)); + sYX += shearviscosityGPU * (invdxGPU * (vy - vy2) + invdyGPU * (vxmxpy - vx2)); + sYZ -= shearviscosityGPU * (invdzGPU * (vy5 - vy) + invdyGPU * (vz4 - vz)); + sYZ += shearviscosityGPU * (invdzGPU * (vy - vy0) + invdyGPU * (vzpymz - vz0)); + sZX -= shearviscosityGPU * (invdxGPU * (vz3 - vz) + invdzGPU * (vx5 - vx)); + sZX += shearviscosityGPU * (invdxGPU * (vz - vz2) + invdzGPU * (vxmxpz - vx2)); + sZY -= shearviscosityGPU * (invdyGPU * (vz4 - vz) + invdzGPU * (vy5 - vy)); + sZY += shearviscosityGPU * (invdyGPU * (vz - vz1) + invdzGPU * (vymypz - vy1)); + + //Stress noise contribution + double dnoise_sXX, dnoise_sXY, dnoise_sXZ; + double dnoise_sYY, dnoise_sYZ; + double dnoise_sZZ; + double dnoise_tr; + int n0; + double fact1, fact2, fact4; + fact1 = fact1GPU; + fact2 = fact2GPU; + fact4 = fact4GPU; + + //n0 = substep * ncellsGPU * 12; + n0 = 0; + + fact1 = fact1GPU; + fact2 = fact2GPU; + fact4 = fact4GPU; + + dnoise_tr = d_rand[n0 + vecino3] + d_rand[n0 + vecino3 + 3*ncellsGPU] + d_rand[n0 + vecino3 + 5*ncellsGPU]; + dnoise_sXX = d_rand[n0 + vecino3] - dnoise_tr/3.; + sXX += fact1 * dnoise_sXX + fact2 * dnoise_tr; + + dnoise_tr = d_rand[n0 + vecino4] + d_rand[n0 + vecino4 + 3*ncellsGPU] + d_rand[n0 + vecino4 + 5*ncellsGPU]; + dnoise_sYY = d_rand[n0 + vecino4 + 3*ncellsGPU] - dnoise_tr/3.; + sYY += fact1 * dnoise_sYY + fact2 * dnoise_tr; + + dnoise_tr = d_rand[n0 + vecino5] + d_rand[n0 + vecino5 + 3*ncellsGPU] + d_rand[n0 + vecino5 + 5*ncellsGPU]; + dnoise_sZZ = d_rand[n0 + vecino5 + 5*ncellsGPU] - dnoise_tr/3.; + sZZ += fact1 * dnoise_sZZ + fact2 * dnoise_tr; + + dnoise_sXY = d_rand[n0 + i + ncellsGPU]; + sXY += fact4 * dnoise_sXY; + sYX += fact4 * dnoise_sXY; + + dnoise_sXZ = d_rand[n0 + i + 2*ncellsGPU]; + sXZ += fact4 * dnoise_sXZ; + sZX += fact4 * dnoise_sXZ; + + dnoise_sYZ = d_rand[n0 + i + 4*ncellsGPU]; + sYZ += fact4 * dnoise_sYZ; + sZY += fact4 * dnoise_sYZ; + + dnoise_tr = d_rand[n0 + i] + d_rand[n0 + i + 3*ncellsGPU] + d_rand[n0 + i + 5*ncellsGPU]; + dnoise_sXX = d_rand[n0 + i] - dnoise_tr/3.; + sXX -= fact1 * dnoise_sXX + fact2 * dnoise_tr; + + dnoise_sYY = d_rand[n0 + i + 3*ncellsGPU] - dnoise_tr/3.; + sYY -= fact1 * dnoise_sYY + fact2 * dnoise_tr; + + dnoise_sZZ = d_rand[n0 + i + 5*ncellsGPU] - dnoise_tr/3.; + sZZ -= fact1 * dnoise_sZZ + fact2 * dnoise_tr; + + dnoise_sXY = d_rand[n0 + vecino1 + ncellsGPU]; + sXY -= fact4 * dnoise_sXY; + + dnoise_sXZ = d_rand[n0 + vecino0 + 2*ncellsGPU]; + sXZ -= fact4 * dnoise_sXZ; + + dnoise_sXY = d_rand[n0 + vecino2 + ncellsGPU]; + sYX -= fact4 * dnoise_sXY; + + dnoise_sYZ = d_rand[n0 + vecino0 + 4*ncellsGPU]; + sYZ -= fact4 * dnoise_sYZ; + + dnoise_sXZ = d_rand[n0 + vecino2 + 2*ncellsGPU]; + sZX -= fact4 * dnoise_sXZ; + + dnoise_sYZ = d_rand[n0 + vecino1 + 4*ncellsGPU]; + sZY -= fact4 * dnoise_sYZ; + + if(RK3 !=0 ){ + + n0 += ncellsGPU * 6; + fact1 = RK3 * fact1GPU; + fact2 = RK3 * fact2GPU; + fact4 = RK3 * fact4GPU; + dnoise_tr = d_rand[n0 + vecino3] + d_rand[n0 + vecino3 + 3*ncellsGPU] + d_rand[n0 + vecino3 + 
5*ncellsGPU]; + dnoise_sXX = d_rand[n0 + vecino3] - dnoise_tr/3.; + sXX += fact1 * dnoise_sXX + fact2 * dnoise_tr; + + dnoise_tr = d_rand[n0 + vecino4] + d_rand[n0 + vecino4 + 3*ncellsGPU] + d_rand[n0 + vecino4 + 5*ncellsGPU]; + dnoise_sYY = d_rand[n0 + vecino4 + 3*ncellsGPU] - dnoise_tr/3.; + sYY += fact1 * dnoise_sYY + fact2 * dnoise_tr; + + dnoise_tr = d_rand[n0 + vecino5] + d_rand[n0 + vecino5 + 3*ncellsGPU] + d_rand[n0 + vecino5 + 5*ncellsGPU]; + dnoise_sZZ = d_rand[n0 + vecino5 + 5*ncellsGPU] - dnoise_tr/3.; + sZZ += fact1 * dnoise_sZZ + fact2 * dnoise_tr; + + dnoise_sXY = d_rand[n0 + i + ncellsGPU]; + sXY += fact4 * dnoise_sXY; + sYX += fact4 * dnoise_sXY; + + dnoise_sXZ = d_rand[n0 + i + 2*ncellsGPU]; + sXZ += fact4 * dnoise_sXZ; + sZX += fact4 * dnoise_sXZ; + + dnoise_sYZ = d_rand[n0 + i + 4*ncellsGPU]; + sYZ += fact4 * dnoise_sYZ; + sZY += fact4 * dnoise_sYZ; + + dnoise_tr = d_rand[n0 + i] + d_rand[n0 + i + 3*ncellsGPU] + d_rand[n0 + i + 5*ncellsGPU]; + dnoise_sXX = d_rand[n0 + i] - dnoise_tr/3.; + sXX -= fact1 * dnoise_sXX + fact2 * dnoise_tr; + + dnoise_sYY = d_rand[n0 + i + 3*ncellsGPU] - dnoise_tr/3.; + sYY -= fact1 * dnoise_sYY + fact2 * dnoise_tr; + + dnoise_sZZ = d_rand[n0 + i + 5*ncellsGPU] - dnoise_tr/3.; + sZZ -= fact1 * dnoise_sZZ + fact2 * dnoise_tr; + + dnoise_sXY = d_rand[n0 + vecino1 + ncellsGPU]; + sXY -= fact4 * dnoise_sXY; + + dnoise_sXZ = d_rand[n0 + vecino0 + 2*ncellsGPU]; + sXZ -= fact4 * dnoise_sXZ; + + dnoise_sXY = d_rand[n0 + vecino2 + ncellsGPU]; + sYX -= fact4 * dnoise_sXY; + + dnoise_sYZ = d_rand[n0 + vecino0 + 4*ncellsGPU]; + sYZ -= fact4 * dnoise_sYZ; + + dnoise_sXZ = d_rand[n0 + vecino2 + 2*ncellsGPU]; + sZX -= fact4 * dnoise_sXZ; + + dnoise_sYZ = d_rand[n0 + vecino1 + 4*ncellsGPU]; + sZY -= fact4 * dnoise_sYZ; + + + } + + + + double px = vxGPU[i] * 0.5 * (densityGPU2[i] + densityGPU2[vecino3]) * RK1; + double py = vyGPU[i] * 0.5 * (densityGPU2[i] + densityGPU2[vecino4]) * RK1; + double pz = vzGPU[i] * 0.5 * (densityGPU2[i] + densityGPU2[vecino5]) * RK1; + + px += vx * 0.5 * (density + density3) * RK2; + py += vy * 0.5 * (density + density4) * RK2; + pz += vz * 0.5 * (density + density5) * RK2; + + + + px += -(invdxGPU * sXX + invdyGPU * sXY + invdzGPU * sXZ)*dtGPU*RK2; + py += -(invdxGPU * sYX + invdyGPU * sYY + invdzGPU * sYZ)*dtGPU*RK2; + pz += -(invdxGPU * sZX + invdyGPU * sZY + invdzGPU * sZZ)*dtGPU*RK2; + + dmGPU[i] = RK1 * densityGPU2[i] + RK2 * (density + dm * dtGPU); + + dpxGPU[i] = px; + dpyGPU[i] = py; + dpzGPU[i] = pz; + +} + diff --git a/cuda_code/kernel_1014.cu b/cuda_code/kernel_1014.cu new file mode 100644 index 0000000000000000000000000000000000000000..b3207a005f753b22cd9d1cc1f257e6ca14ed808d --- /dev/null +++ b/cuda_code/kernel_1014.cu @@ -0,0 +1,63 @@ +/****************************************************************************** + *cr + *cr (C) Copyright 2010 The Board of Trustees of the + *cr University of Illinois + *cr All Rights Reserved + *cr + ******************************************************************************/ + +#include +#include + +#define TILE_SIZE 16 +#define MAX_ITER 10000 +#define SIZE 1024 +#define WIDTH SIZE +#define HEIGHT SIZE + +__global__ void +generate_mandelbrot_kern(unsigned int* mandelbrot_picture) +{ + int row = blockIdx.y * blockDim.y + threadIdx.y; // WIDTH + int col = blockIdx.x * blockDim.x + threadIdx.x; // HEIGHT + int idx = row * WIDTH + col; + + if (col >= WIDTH || row >= HEIGHT) { + return; + } + + float x0 = ((float)col / WIDTH) * 3.5f - 2.5f; + float y0 = ((float)row / HEIGHT) * 3.5f 
- 1.75f; + + float x = 0.0f; + float y = 0.0f; + int iter = 0; + float xtemp; + while ((x * x + y * y <= 4.0f) && (iter < MAX_ITER)) { + xtemp = x * x - y * y + x0; + y = 2.0f * x * y + y0; + x = xtemp; + iter++; + } + + int color = iter * 5; + if (color >= 256) { + color = 0; + } + mandelbrot_picture[idx] = color; +} + +void +generate_mandelbrot(unsigned int* mandelbrot_picture) +{ + // Initialize thread block and kernel grid dimensions --------------------- + + const unsigned int BLOCK_SIZE = TILE_SIZE; + + dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); + dim3 dimGrid(WIDTH / dimBlock.x, HEIGHT / dimBlock.y); + + // Invoke CUDA kernel ----------------------------------------------------- + + generate_mandelbrot_kern<<>>(mandelbrot_picture); +} diff --git a/cuda_code/kernel_104.cu b/cuda_code/kernel_104.cu new file mode 100644 index 0000000000000000000000000000000000000000..29a0c15760b6587dbf7b1e8e616f048213846cfa --- /dev/null +++ b/cuda_code/kernel_104.cu @@ -0,0 +1,53 @@ +#include "cuda_runtime.h" +#include "device_launch_parameters.h" +#include + +void check(cudaError_t e) +{ + if (e != cudaSuccess) + { + printf("%s\n", cudaGetErrorString(e)); + } +} + +__global__ void addArraysGPU(int* a, int* b, int* c) +{ + int i = threadIdx.x; + c[i] = a[i] + b[i]; +} + +int main() +{ + const int count = 5; + int ha[] = { 1, 2, 3, 4, 5 }; + int hb[] = { 10, 20, 30, 40, 50 }; + int hc[count]; + + int *da, *db, *dc; + int size = sizeof(int)*count; + + cudaMalloc(&da, size); + cudaMalloc(&db, size); + cudaMalloc(&dc, size); + + cudaMemcpy(da,ha,size,cudaMemcpyHostToDevice); + cudaMemcpy(db,hb,size,cudaMemcpyHostToDevice); + + addArraysGPU<<<1,count>>>(da,db,dc); + + cudaMemcpy(hc,dc,size,cudaMemcpyDeviceToHost); + + printf("%d %d %d %d %d\n", + hc[0], + hc[1], + hc[2], + hc[3], + hc[4]); + + cudaFree(da); + cudaFree(db); + cudaFree(dc); + + //getchar(); + +} diff --git a/cuda_code/kernel_107.cu b/cuda_code/kernel_107.cu new file mode 100644 index 0000000000000000000000000000000000000000..4c4d39f5580fff1fd05dbcce0a2961751628cbfc --- /dev/null +++ b/cuda_code/kernel_107.cu @@ -0,0 +1,78 @@ +#include "cuda_runtime.h" +#include "device_launch_parameters.h" + +#include +#include +using namespace std; + +__global__ void sumSingleBlock(int* d) +{ + int tid = threadIdx.x; + + // number of participating threads halves on each iteration + for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1) + { + // thread must be allowed to write + if (tid < tc) + { + int pa = tid * stepSize * 2; + int pb = pa + stepSize; + d[pa] += d[pb]; + } + } +} + +__global__ void sumSingleBlock2(int* d) +{ + extern __shared__ int dcopy[]; + int tid = threadIdx.x; + dcopy[tid*2] = d[tid*2]; + dcopy[tid*2+1] = d[tid*2+1]; + + // number of participating threads halves on each iteration + for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1) + { + // thread must be allowed to write + if (tid < tc) + { + int pa = tid * stepSize * 2; + int pb = pa + stepSize; + dcopy[pa] += dcopy[pb]; + } + } + + if (tid == 0) + { + d[0] = dcopy[0]; + } +} + +int main() +{ + cudaError_t status; + + const int count = 256; + const int size = count * sizeof(int); + int* h = new int[count]; + for (int i = 0; i < count; ++i) + h[i] = i+1; + + int* d; + status = cudaMalloc(&d, size); + + status = cudaMemcpy(d,h,size, cudaMemcpyHostToDevice); + + sumSingleBlock2<<<1,count/2,size>>>(d); + + int result; + status = cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost); + + cout << "Sum is " << result << endl; + + getchar(); + + 
cudaFree(d); + delete [] h; + + return 0; +} \ No newline at end of file diff --git a/cuda_code/kernel_1109.cu b/cuda_code/kernel_1109.cu new file mode 100644 index 0000000000000000000000000000000000000000..0a4e1ce1f48c16034da9ff1b725c50a2d94d2674 --- /dev/null +++ b/cuda_code/kernel_1109.cu @@ -0,0 +1,1605 @@ + +#include "cuda_runtime.h" +#include "device_launch_parameters.h" + +/*This program calculates pi using a Simpson's Rule estimation of the +integral of arctangent from 0 to 1. When inputting the number of +iterations to perform, more iterations = more precision. The number of +iterations is given as a command line argument. If no argument is +provided, a default value of 20,000 is used. At 20,000 iterations, the +value of pi is guaranteed to be accurate up to 8 decimal places. This +version uses NVIDIA CUDA to perform parallel computation of the +partial sums on a GPU. + +The amount of work each core does is given by the two #defines below. +These values will need to be tuned for each device this code runs on in +order to get maximum performance. For example, on my own personal machine, +which has a GeForce GT 650M discrete graphics card, there are now only +12 streaming multiprocessors (SM's), with 32 cores each, for a total of +384 cores. Thus, 384 threads will be created, with each thread performing +multiple iterations (total_iterations / (NUM_BLOCKS * THREADS_PER_BLOCK) +to be precise). Thus, the more iterations given, the more work each thread +does. The number of threads is kept constant in order to make clean-up +easier and to not exceed the capabilities (max number of threads or blocks) +of any particular GPU device. The last thread might have a few extra +iterations if that number doesn't divide evenly. + +The number of decimal digits to use as the precision of the calculations is +also given as a command-line argument. Obviously, the higher the number, the +more digits you can successfully calculate. Accuracy still relies on the number +of iterations, though: a high number of digits but low number of iterations +will still result in a low number of digits of precision. Thus, you should +only increase the number of digits when your iterations get too high and +you find that your calculations are no longer precise due to internal +rounding error. You'll probably find that increasing the digits will decrease +performance severely. It is recommended, though, that since error accumulates, +the more digits you want to find, the more padding you'll need to add to the +end of the word to absorb that error. As a general rule of thumb, if you +want to calculate x digits, make your words 2x long. Of course, this also +increases the runtime by 2x. + +Compilation on my own machine actually makes OSC's steps look like child's +play. The best way to do this is to download Visual Studio. Then, go to +http://developer.nvidia.com/nsight-visual-studio-edition-downloads +and follow the steps to install the proper graphics driver, then the CUDA +toolkit, then the Nsight Visual Studio Plugin. So, the project you create +will be a new CUDA project, which will link to the proper headers. Compile +it, then go to NSIGHT in the menu > Start CUDA Debugging. Eventually, the +proper result will pop up in the console window that opens. Note that if +you want to change the number of iterations/precision, you'll need to +edit the VS project settings under PROJECT > "Name" Properties... 
and +then under Configuration Properties > Debugging, add the values you want +to the Command Arguments field, since the default is to build the +project for Debug, not Release. Also, the sleep function is added just +because the debug console window disappears in Visual Studio. Grr. + +If you want to run this separately... you can't. I tried to compile this +from the command line. It worked, even though I had to add extra system +variables for NVCC to Window's Environment Variables, and then pass the +path to VS's C compiler to NVCC when I ran it. Running it, however, did +not. It crashed the graphics driver and returned that an error occurred +upon trying to copy the results back from the GPU. Even debugging using +DEBUG > Start Debugging in VS crashed it that way. Only debugging via +NSIGHT actually worked. But, whatever, we got results. Remember that +this solution uses dynamic memory allocation on the device, so only +CUDA 2.0+ devices will run this code. +*/ + +// Includes. Optimum values for my computer are: +// NUM_BLOCKS 12 +// THREADS_PER_BLOCK 32 +#include +#include +#include +#include + +#define NUM_BLOCKS 12 +#define THREADS_PER_BLOCK 32 + +// A bignum is stored as all its decimal digits, separated into an array. +// Really, it's quite terrible for performance, but it allows infinite digits. +// Or at least as many as we can store in memory. The power tells us where to +// put the decimal point, and the number of significant digits tells us how +// many of the digits in the number are actually used. The precision tells us +// the maximum number of digits possible for this particular instance. +typedef struct { + signed long int power; + unsigned long int sig_digs; + char * digits; + unsigned long int precision; +} bignum; + +// Function pointers, mostly for bignum operations. Note that in our use +// below, we assume most of the arithmetic functions don't fail and thus +// don't check their return values. Hope they're tested well... Notice +// now that we have mirrored versions for the GPU, most of which just +// have to call the GPU memory allocation functions. 
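The GPU-side mirrors mentioned above rely on dynamic memory allocation from device code, which is why main() later raises cudaLimitMallocHeapSize before the launch and why the file notes that only CUDA 2.0+ devices can run it. As a minimal, hedged sketch of that in-kernel allocation pattern (the kernel name and sizes here are illustrative and not part of this file):

__global__ void device_heap_demo(int bytes_per_thread) {
    // malloc/free are callable from device code on compute capability 2.0+;
    // allocations come from the device heap sized earlier via
    // cudaDeviceSetLimit(cudaLimitMallocHeapSize, ...)
    char *scratch = (char *)malloc(bytes_per_thread);
    if (scratch != NULL) {
        for (int k = 0; k < bytes_per_thread; ++k) scratch[k] = 0;
        free(scratch);  // device-heap memory must also be freed on the device
    }
}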
+__global__ void calculate(long *, long *, char *, long *, long *, char *, long, long); +__host__ bignum * bignum_init(long int); +__host__ void bignum_reset(bignum *); +__host__ void bignum_clear(bignum *); +__host__ int bignum_set_int(bignum *, long int); +__host__ void bignum_set(bignum *, bignum *); +__host__ void bignum_print(bignum *, long int); +__host__ int bignum_add(bignum *, bignum *, bignum *); +__host__ int bignum_add_int(bignum *, bignum *, long int); +__host__ int bignum_mult(bignum *, bignum *, bignum *); +__host__ int bignum_mult_int(bignum *, bignum *, long int); +__host__ int bignum_divide(bignum *, bignum *, bignum *); +__host__ int bignum_int_divide(bignum *, long int, bignum *); +__host__ int bignum_divide_int(bignum *, bignum *, long int); +__device__ bignum * bignum_init_gpu(long int); +__device__ void bignum_reset_gpu(bignum *); +__device__ void bignum_clear_gpu(bignum *); +__device__ int bignum_set_int_gpu(bignum *, long int); +__device__ void bignum_set_gpu(bignum *, bignum *); +__device__ int bignum_add_gpu(bignum *, bignum *, bignum *); +__device__ int bignum_add_int_gpu(bignum *, bignum *, long int); +__device__ int bignum_mult_gpu(bignum *, bignum *, bignum *); +__device__ int bignum_mult_int_gpu(bignum *, bignum *, long int); +__device__ int bignum_divide_gpu(bignum *, bignum *, bignum *); +__device__ int bignum_int_divide_gpu(bignum *, long int, bignum *); +__device__ int bignum_divide_int_gpu(bignum *, bignum *, long int); + +// Main function +int main(int argc, char * argv[]) +{ + // Obtain command line arguments + long iterations = 20000L; + if (argc > 1) { + iterations = atol(argv[1]); + if (iterations < 1L) { + iterations = 20000L; + } + } + long max_digits = 25L; + if (argc > 2) { + max_digits = atoi(argv[2]); + if (max_digits < 1L) { + max_digits = 25L; + } + } + + // Initialize global storage. Notice that we now need extra arrays for data + // transfer between the GPU and regular RAM. These will hold the partial + // sums that each of the threads calculate. Unfortunately, due to the way + // bignums are structured, each of their arguments has to be transferred + // separately. Luckily, this only happens once. 
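Because each bignum field has to travel separately, the transfer buffers allocated just below are flat arrays with one slot (or one max_digits-wide digit slice) per thread. A hedged sketch of how one thread's partial sum could be reassembled on the host afterwards; the helper name and the assumption that thread t owns the contiguous slice digits[t*max_digits .. (t+1)*max_digits - 1] are mine, not the file's:

void unpack_partial_sum(bignum *dst, const long *powers, const long *sig_digs,
                        const char *digits, long t, long max_digits) {
    // dst is assumed to come from bignum_init(max_digits), so its digit array
    // can hold max_digits entries
    dst->power = powers[t];
    dst->sig_digs = (unsigned long int)sig_digs[t];
    for (long k = 0; k < max_digits; ++k) {
        dst->digits[k] = digits[t * max_digits + k];  // copy this thread's digit slice
    }
}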
+ long clock_start = (long)clock(); + long int i, j; + if (cudaDeviceSetLimit(cudaLimitMallocHeapSize, (NUM_BLOCKS * THREADS_PER_BLOCK * 16384)) + != cudaSuccess) { + printf("\nError setting GPU heap size.\n"); return 1; + } + cudaDeviceSynchronize(); + long * hosttrappower = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); + long * hosttrapsig_digs = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); + char * hosttrapdigits = (char *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits), sizeof(char)); + long * hostmidpower = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); + long * hostmidsig_digs = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); + char * hostmiddigits = (char *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits), sizeof(char)); + if ((hosttrappower == 0) || (hosttrapsig_digs == 0) || (hosttrapdigits == 0) || + (hostmidpower == 0) || (hostmidsig_digs == 0) || (hostmiddigits == 0)) { + printf("\nError allocating memory on the CPU.\n"); + return 1; + } + long * devicetrappower; + long * devicetrapsig_digs; + char * devicetrapdigits; + long * devicemidpower; + long * devicemidsig_digs; + char * devicemiddigits; + if (cudaMalloc((void**)&devicetrappower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) + != cudaSuccess) { + printf("\nError allocating memory on GPU.\n"); return 1; + } + if (cudaMalloc((void**)&devicetrapsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) + != cudaSuccess) { + printf("\nError allocating memory on GPU.\n"); return 1; + } + if (cudaMalloc((void**)&devicetrapdigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char))) + != cudaSuccess) { + printf("\nError allocating memory on GPU.\n"); return 1; + } + if (cudaMalloc((void**)&devicemidpower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) + != cudaSuccess) { + printf("\nError allocating memory on GPU.\n"); return 1; + } + if (cudaMalloc((void**)&devicemidsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) + != cudaSuccess) { + printf("\nError allocating memory on GPU.\n"); return 1; + } + if (cudaMalloc((void**)&devicemiddigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char))) + != cudaSuccess) { + printf("\nError allocating memory on GPU.\n"); return 1; + } + cudaDeviceSynchronize(); + char * accepted_pi = "3.14159265358979323846264338327950288419716939937510" + "58209749445923078164062862089986280348253421170679\0"; + char pi_printer[2]; + pi_printer[0] = '0'; + pi_printer[1] = '\0'; + + // Split off worker threads. When dividing the work, if the number of + // threads does not evenly divide into the desired number of iterations, + // give any extra iterations to the final thread. This gives the final + // thread at most (num_threads - 1) extra iterations. Notice that this + // is a 1D-grid of work, and we use function arguments this time. Also, + // remember the number of threads is held constant, thanks to #defines, + // at NUM_BLOCKS * THREADS_PER_BLOCK. 
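As a worked example of the split described above, with the defaults of 20000 iterations and NUM_BLOCKS * THREADS_PER_BLOCK = 12 * 32 = 384 threads: 20000 / 384 = 52 iterations per thread, and the remainder of 32 goes to the last thread, which therefore runs 84 iterations (within the num_threads - 1 bound noted above).

/* Worked example (defaults assumed: iterations = 20000, 384 threads total):
 *   thread 0:   lowlimit = 0,     highlimit = 52
 *   thread 382: lowlimit = 19864, highlimit = 19916
 *   thread 383: lowlimit = 19916, highlimit = 20000   (52 + 32 remainder = 84)
 */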
+    dim3 numBlocks(NUM_BLOCKS);
+    dim3 threadsPerBlock(THREADS_PER_BLOCK);
+    calculate <<<numBlocks, threadsPerBlock>>>(devicetrappower, devicetrapsig_digs,
+        devicetrapdigits, devicemidpower, devicemidsig_digs, devicemiddigits, iterations, max_digits);
+    cudaDeviceSynchronize();
+
+    // Copy results back from GPU
+    if (cudaMemcpy(hosttrappower, devicetrappower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
+        cudaMemcpyDeviceToHost) != cudaSuccess) {
+        printf("\nError copying memory from GPU.\n"); return 3;
+    }
+    if (cudaMemcpy(hosttrapsig_digs, devicetrapsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
+        cudaMemcpyDeviceToHost) != cudaSuccess) {
+        printf("\nError copying memory from GPU.\n"); return 3;
+    }
+    if (cudaMemcpy(hosttrapdigits, devicetrapdigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)),
+        cudaMemcpyDeviceToHost) != cudaSuccess) {
+        printf("\nError copying memory from GPU.\n"); return 3;
+    }
+    if (cudaMemcpy(hostmidpower, devicemidpower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
+        cudaMemcpyDeviceToHost) != cudaSuccess) {
+        printf("\nError copying memory from GPU.\n"); return 3;
+    }
+    if (cudaMemcpy(hostmidsig_digs, devicemidsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)),
+        cudaMemcpyDeviceToHost) != cudaSuccess) {
+        printf("\nError copying memory from GPU.\n"); return 3;
+    }
+    if (cudaMemcpy(hostmiddigits, devicemiddigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)),
+        cudaMemcpyDeviceToHost) != cudaSuccess) {
+        printf("\nError copying memory from GPU.\n"); return 3;
+    }
+    cudaDeviceSynchronize();
+    if (cudaFree(devicetrappower) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
+    if (cudaFree(devicetrapsig_digs) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
+    if (cudaFree(devicetrapdigits) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
+    if (cudaFree(devicemidpower) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
+    if (cudaFree(devicemidsig_digs) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
+    if (cudaFree(devicemiddigits) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; }
+
+    // After worker threads end, clean up each of the partial sums
+    bignum * trap = bignum_init(max_digits);
+    bignum * mid = bignum_init(max_digits);
+    bignum * temp = bignum_init(max_digits);
+    bignum * simp = bignum_init(max_digits);
+    if (trap == 0 || mid == 0 || temp == 0 || simp == 0) {
+        printf("Error allocating memory. 
Now exiting.\n"); + return -1; + } + for (i = 0L; i < (NUM_BLOCKS * THREADS_PER_BLOCK); i++) { + simp->power = hosttrappower[i]; + simp->sig_digs = hosttrapsig_digs[i]; + for (j = 0L; j < max_digits; j++) { + simp->digits[(int)j] = hosttrapdigits[(int)((i * max_digits) + j)]; + } + bignum_add(temp, trap, simp); + bignum_reset(trap); + bignum_reset(simp); + bignum_set(trap, temp); + bignum_reset(temp); + simp->power = hostmidpower[i]; + simp->sig_digs = hostmidsig_digs[i]; + for (j = 0L; j < max_digits; j++) { + simp->digits[(int)j] = hostmiddigits[(int)((i * max_digits) + j)]; + } + bignum_add(temp, mid, simp); + bignum_reset(mid); + bignum_reset(simp); + bignum_set(mid, temp); + bignum_reset(temp); + } + + // Finally, Simpson's Rule is applied + bignum_mult_int(temp, mid, 2L); + bignum_reset(mid); + bignum_set(mid, temp); + bignum_reset(temp); + bignum_add(temp, trap, mid); + bignum_reset(trap); + bignum_set(trap, temp); + bignum_reset(temp); + bignum_divide_int(temp, trap, 3L); + bignum_reset(trap); + bignum_set(trap, temp); + bignum_reset(temp); + bignum_mult_int(simp, trap, 4L); + long clock_end = (long)clock(); + printf("The calculated value of pi is "); + bignum_print(simp, 0L); + printf("\nThe actual value of pi is 3."); + for (i = 0L; i < (max_digits - 1L); i++) { + // This may print an extra digit or two because, somewhere down in the + // code, we're losing our last sig dig during normal math, but it's + // bubbling back up, and causing the final result to lose a place or + // two. It's not a big deal, and I don't want to do anything about it, + // so we'll just have the ends of the numbers not line up. Whatever. + pi_printer[0] = accepted_pi[(int)(i + 2L)]; + printf("%s", pi_printer); + } + printf("\nThe time taken to calculate this was %.2f seconds\n", + ((float)(clock_end - clock_start)) / (float)CLOCKS_PER_SEC); + printf("The number of iterations performed was %ld\n", iterations); + Sleep(5000); + + // Free global storage + free(hosttrappower); + free(hosttrapsig_digs); + free(hosttrapdigits); + free(hostmidpower); + free(hostmidsig_digs); + free(hostmiddigits); + bignum_clear(trap); + bignum_clear(mid); + bignum_clear(simp); + bignum_clear(temp); + return 0; +} + +// Function executed by each thread to incrementally calculate the overall value +__global__ void calculate(long * devicetrappower, long * devicetrapsig_digs, + char * devicetrapdigits, long * devicemidpower, long * devicemidsig_digs, + char * devicemiddigits, long iterations, long max_digits) +{ + // Initialize needed variables and check for errors + long threadid = threadIdx.x + (blockIdx.x * THREADS_PER_BLOCK); + long lowlimit = threadid * (iterations / (NUM_BLOCKS * THREADS_PER_BLOCK)); + long highlimit = (((threadid + 1L) == (NUM_BLOCKS * THREADS_PER_BLOCK)) ? 
iterations : + ((threadid + 1L) * (iterations / (NUM_BLOCKS * THREADS_PER_BLOCK)))); + bignum * trap = bignum_init_gpu(max_digits); + bignum * mid = bignum_init_gpu(max_digits); + bignum * inverseiterations = bignum_init_gpu(max_digits); + bignum * temp_holder = bignum_init_gpu(max_digits); + bignum * temp_holder2 = bignum_init_gpu(max_digits); + bignum * inc = bignum_init_gpu(max_digits); + bignum * leftrect = bignum_init_gpu(max_digits); + bignum * rightrect = bignum_init_gpu(max_digits); + if (trap == 0 || mid == 0 || inverseiterations == 0 || temp_holder == 0 || + temp_holder2 == 0 || inc == 0 || leftrect == 0 || rightrect == 0) { + return; + } + + // Initialize values of needed variables + bignum_set_int_gpu(temp_holder, iterations); + bignum_int_divide_gpu(inverseiterations, 1L, temp_holder); + bignum_reset_gpu(temp_holder); + long i; + long k = lowlimit; + bignum_divide_int_gpu(temp_holder, inverseiterations, 2L); + bignum_set_int_gpu(inc, k); + bignum_mult_gpu(temp_holder2, inc, inverseiterations); + bignum_reset_gpu(inc); + bignum_set_gpu(inc, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_add_gpu(temp_holder2, inc, temp_holder); + bignum_reset_gpu(inc); + bignum_set_gpu(inc, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_reset_gpu(temp_holder); + + // Main iteration loop. Note that the values of inverseiterations, inc, + // mid, and trap are preserved across loop iterations, as is counter k. + // inverseiterations is a constant that is stored for simplicity. Man, + // this is looking more and more like assembly... + for (i = lowlimit; i < highlimit; i++) { + // First, the trapezoid rule is used to estimate pi + bignum_reset_gpu(leftrect); + bignum_set_int_gpu(leftrect, k); + bignum_mult_gpu(temp_holder2, leftrect, inverseiterations); + bignum_reset_gpu(leftrect); + bignum_set_gpu(leftrect, temp_holder2); + bignum_reset_gpu(temp_holder2); + k++; + bignum_reset_gpu(rightrect); + bignum_set_int_gpu(rightrect, k); + bignum_mult_gpu(temp_holder2, rightrect, inverseiterations); + bignum_reset_gpu(rightrect); + bignum_set_gpu(rightrect, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_add_gpu(temp_holder, leftrect, rightrect); + bignum_divide_int_gpu(temp_holder2, temp_holder, 2L); + bignum_reset_gpu(temp_holder); + bignum_set_gpu(temp_holder, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_mult_gpu(temp_holder2, temp_holder, temp_holder); + bignum_reset_gpu(temp_holder); + bignum_set_gpu(temp_holder, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_add_int_gpu(temp_holder2, temp_holder, 1L); + bignum_reset_gpu(temp_holder); + bignum_set_gpu(temp_holder, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_int_divide_gpu(temp_holder2, 1L, temp_holder); + bignum_reset_gpu(temp_holder); + bignum_set_gpu(temp_holder, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_mult_gpu(temp_holder2, temp_holder, inverseiterations); + bignum_reset_gpu(temp_holder); + bignum_set_gpu(temp_holder, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_add_gpu(temp_holder2, trap, temp_holder); + bignum_reset_gpu(trap); + bignum_set_gpu(trap, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_reset_gpu(temp_holder); + + // Next, the midpoint rule is also used to estimate pi + bignum_set_gpu(temp_holder, inc); + bignum_add_gpu(temp_holder2, inc, inverseiterations); + bignum_reset_gpu(inc); + bignum_set_gpu(inc, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_mult_gpu(temp_holder2, temp_holder, temp_holder); + 
bignum_reset_gpu(temp_holder); + bignum_set_gpu(temp_holder, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_add_int_gpu(temp_holder2, temp_holder, 1L); + bignum_reset_gpu(temp_holder); + bignum_set_gpu(temp_holder, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_int_divide_gpu(temp_holder2, 1L, temp_holder); + bignum_reset_gpu(temp_holder); + bignum_set_gpu(temp_holder, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_mult_gpu(temp_holder2, temp_holder, inverseiterations); + bignum_reset_gpu(temp_holder); + bignum_set_gpu(temp_holder, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_add_gpu(temp_holder2, mid, temp_holder); + bignum_reset_gpu(mid); + bignum_set_gpu(mid, temp_holder2); + bignum_reset_gpu(temp_holder2); + bignum_reset_gpu(temp_holder); + } + + // Save partial result, clear memory, and exit + devicetrappower[threadid] = trap->power; + devicetrapsig_digs[threadid] = trap->sig_digs; + for (i = 0; i < max_digits; i++) { + devicetrapdigits[(threadid * max_digits) + i] = trap->digits[i]; + } + devicemidpower[threadid] = mid->power; + devicemidsig_digs[threadid] = mid->sig_digs; + for (i = 0; i < max_digits; i++) { + devicemiddigits[(threadid * max_digits) + i] = mid->digits[i]; + } + bignum_clear_gpu(trap); + bignum_clear_gpu(mid); + bignum_clear_gpu(inverseiterations); + bignum_clear_gpu(temp_holder); + bignum_clear_gpu(temp_holder2); + bignum_clear_gpu(inc); + bignum_clear_gpu(leftrect); + bignum_clear_gpu(rightrect); +} + +// Create space for a bignum with the specified precision. +// Technically, it's also initialized if we interpret having zero +// significant digits as the number having a value of zero. +__host__ bignum * bignum_init(long int precision) { + bignum * temp_ptr = (bignum *)calloc(1, sizeof(bignum)); + temp_ptr->digits = (char *)calloc((int)precision, sizeof(char)); + if ((temp_ptr->digits) == 0) { temp_ptr = 0; } + temp_ptr->precision = precision; + return temp_ptr; +} + +// Resets a bignum's value to zero. memcpy isn't used because +// why bring the string library into this just for this use? +__host__ void bignum_reset(bignum * numval) { + if ((numval->sig_digs) > 0L) { + long int i; + for (i = 0L; i < numval->precision; i++) { numval->digits[(int)i] = '\0'; } + numval->power = 0L; + numval->sig_digs = 0L; + } + return; +} + +// Free memory used by a bignum when we're done with it +__host__ void bignum_clear(bignum * oldnum) { + free(oldnum->digits); + free(oldnum); + return; +} + +// Set an instance of a bignum to an integer value. Note that if we can't +// initialize the temp word we need for copying, we return false (value = 0). +// We also assume that the number is non-negative since we only store +// unsigned numbers. We assume the result is initialized/reset. Finally, +// we handle zero specially by just resetting (again?) the result. Note that +// we explicitly assume the number to convert fits within the max number of +// digits. If we try to convert a number bigger than we can store, it won't work. 
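Before the definition below, a quick trace with an assumed input value:

/* Trace of bignum_set_int for intval = 9050 (example value only):
 *   temp_word, least-significant digit first = {0, 5, 0, 9}, counter = 4, power = 3
 *   the zero at the front of temp_word is a trailing zero of 9050, so it is dropped:
 *   digits = {9, 0, 5}, sig_digs = 3; bignum_print later re-pads it and prints "9050"
 */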
+__host__ int bignum_set_int(bignum * numval, long int intval) { + if (intval > 0L) { + // Separate out the individual digits (stored backwards) + char * temp_word = (char *)calloc((int)(numval->precision), sizeof(char)); + if (temp_word == 0) { return 0; } + long int temp_int = intval; + long int counter = 0L; + while (temp_int > 0L) { + temp_word[(int)counter] = (char)(temp_int % 10L); + temp_int = temp_int / 10L; + counter++; + } + + // Detect any trailing zeros that we don't need to store + numval->power = counter - 1L; + long int leadingzeros = 0L; + int hasleading = 1; + while (hasleading == 1) { + if (temp_word[(int)leadingzeros] != 0) { hasleading = 0; } + else { leadingzeros++; } + } + + // Store final result into actual bignum variable + for (temp_int = 0L; temp_int < (counter - leadingzeros); temp_int++) { + numval->digits[(int)temp_int] = temp_word[(int)(counter - temp_int - 1L)]; + } + numval->sig_digs = counter - leadingzeros; + free(temp_word); + return 1; + } + else { bignum_reset(numval); return 1; } +} + +// Set an instance of a bignum to the value of another bignum. We don't assume +// they're both the same precision; just use the precision of the new number. +// We do assume that the new number has already been initialized, though. +// strncpy is not used since it quits after seeing the first zero. +__host__ void bignum_set(bignum * newnum, bignum * oldnum) { + if ((oldnum->sig_digs) > 0L) { + newnum->power = oldnum->power; + newnum->sig_digs = ((oldnum->sig_digs > newnum->precision) ? + (newnum->precision) : (oldnum->sig_digs)); + long int i; + for (i = 0L; i < newnum->sig_digs; i++) { + newnum->digits[(int)i] = oldnum->digits[(int)i]; + } + } + else { bignum_reset(newnum); } + return; +} + +// Use printf to print the number one digit at a time. There are a few cases: +// power > significant digits: pad end with zeros +// significant digits > power: fractional digit (non-integer) +// power is negative: total value less than 1 +// The second argument is the maximum number of significant digits to print. +// If it's zero, then all available digits will be printed, maxing out at +// the precision of the number (the total amount is could possibly store). +// Note that this is different from total digits printed: zeroes after a +// decimal point but before the first significant digit don't count, and we +// make sure we print at least the integral part of the number (we only +// chop off fractional portions). 
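The cases listed above, shown with example field values (assumed for illustration; the definition follows):

/* Example outputs of bignum_print:
 *   digits = {5},     power = 2,  sig_digs = 1  ->  "500"    (pad end with zeros)
 *   digits = {3,1,4}, power = 0,  sig_digs = 3  ->  "3.14"   (fractional digits)
 *   digits = {2,5},   power = -2, sig_digs = 2  ->  "0.025"  (value less than 1)
 */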
+__host__ void bignum_print(bignum * numval, long int maxdigits) { + long int i; + long int limit = numval->sig_digs; + if (numval->sig_digs == 0L) { printf("0"); } + else { + if ((maxdigits > 0L) && (maxdigits < numval->sig_digs)) { + limit = maxdigits; + } + if (numval->power < 0L) { + printf("0."); + for (i = 1L; i < (-1L * (numval->power)); i++) { printf("0"); } + for (i = 0L; i < limit; i++) { + printf("%d", (int)(numval->digits[(int)i])); + } + } + else if (numval->sig_digs >(numval->power + 1L)) { + for (i = 0L; i <= numval->power; i++) { + printf("%d", (int)(numval->digits[(int)i])); + } + if (limit >(numval->power + 1L)) { printf("."); } + for (i = (numval->power + 1L); i < limit; i++) { + printf("%d", (int)(numval->digits[(int)i])); + } + } + else { + for (i = 0L; i < numval->sig_digs; i++) { + printf("%d", (int)(numval->digits[(int)i])); + } + } + if ((numval->power > 0L) && ((numval->power + 1L) > numval->sig_digs)) { + for (i = 0L; i < ((numval->power + 1L) - numval->sig_digs); i++) { + printf("0"); + } + } + } + fflush(stdout); + return; +} + +// Adds two bignums together and stores the result. Uses the functions to +// reset and set the location of the result internally, so current contents of +// result operand will be overwritten. Like bignum_set_int, returns 1 if +// addition was successful or 0 if an error occurred. A special shortcut is +// taken if either (or both) of the operands are zero. Note that it is possible +// for large additions to cause underflow to zero. In that case, special care is +// taken to make sure the proper input operand is used. Note that we assume the +// precision of all three operands is the same. If it's not, something terrible +// like a seg fault or incorrect answer will probably occur. Most importantly, +// the result operand CANNOT be the same as one of the input operands, since +// the result is clobbered immediately and used as a scratchpad. Note that this +// is also unsigned addition: not only does it not accept negative numbers, it +// also doesn't do subtraction (which, for that matter, isn't commutative). +__host__ int bignum_add(bignum * resultnum, bignum * leftnum, bignum * rightnum) { + bignum_reset(resultnum); + if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs > 0L)) { + bignum_set(resultnum, rightnum); + return 1; + } + else if ((rightnum->sig_digs == 0L) && (leftnum->sig_digs > 0L)) { + bignum_set(resultnum, leftnum); + return 1; + } + else if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs == 0L)) { return 1; } + else { + // First check for overshift: if the larger number's power is too much + // bigger than the smaller number's, the smaller will be completely lost, + // and we'll just end up with the large number as the result. + if ((((leftnum->power - rightnum->power) > 0) && + ((leftnum->power - rightnum->power) > resultnum->precision))) { + bignum_set(resultnum, leftnum); + return 1; + } + if ((((rightnum->power - leftnum->power) > 0) && + ((rightnum->power - leftnum->power) > resultnum->precision))) { + bignum_set(resultnum, rightnum); + return 1; + } + + // Next, shift the smaller operand to match the larger one by copying + // it into the result operand as a partial sum. Also copy over the + // power and total significant digits into the result. 
+ bignum * bigger; + bignum * smaller; + if ((leftnum->power - rightnum->power) >= 0L) { + bigger = leftnum; + smaller = rightnum; + } + else { + bigger = rightnum; + smaller = leftnum; + } + long int difference = bigger->power - smaller->power; + long int startdigit = smaller->sig_digs + difference; + long int transfertotal = smaller->sig_digs; + if (startdigit > resultnum->precision) { + startdigit = resultnum->precision - difference; + transfertotal = startdigit; + } + long int startdigitcopy = startdigit; + startdigit--; + long int i; + for (i = 0L; i < transfertotal; i++) { + if ((startdigit - difference) >= 0L) { + resultnum->digits[(int)startdigit] = + smaller->digits[(int)(startdigit - difference)]; + } + startdigit--; + } + + // Now the main addition loop: loop through each digit and add it. + // The carry from the previous digit will add to the current one. + // Note that we detect any trailing zeros to take from the sig_digs. + // Also, copy over the power and significant digits + resultnum->power = bigger->power; + resultnum->sig_digs = startdigitcopy; + if (bigger->sig_digs > resultnum->sig_digs) { + resultnum->sig_digs = bigger->sig_digs; + startdigitcopy = resultnum->sig_digs; + } + int trailingzeros = 1; + long int zerocount = 0L; + char carry = 0; + for (i = 0L; i < resultnum->sig_digs; i++) { + resultnum->digits[(int)(startdigitcopy - i - 1L)] += + (bigger->digits[(int)(startdigitcopy - i - 1L)] + carry); + if (resultnum->digits[(int)(startdigitcopy - i - 1L)] >= 10) { + resultnum->digits[(int)(startdigitcopy - i - 1L)] -= 10; + carry = 1; + } + else { carry = 0; } + if (trailingzeros == 1) { + if (resultnum->digits[(int)(startdigitcopy - i - 1L)] == '\0') { + zerocount++; + } + else { trailingzeros = 0; } + } + } + + // If we've got trailing zeros, subtract them from the final count of + // sig_digs. Also, if we have a carry, we need to shift everything... + resultnum->sig_digs -= zerocount; + if (carry > 0) { + transfertotal = resultnum->sig_digs; + if (transfertotal == resultnum->precision) { transfertotal--; } + startdigitcopy = transfertotal - 1L; + for (i = 0L; i < transfertotal; i++) { + if (startdigitcopy >= 0L) { + resultnum->digits[(int)(startdigitcopy + 1L)] = + resultnum->digits[(int)startdigitcopy]; + } + else if ((startdigitcopy + 1L) >= 0L) { + resultnum->digits[(int)(startdigitcopy + 1L)] = 0; + } + startdigitcopy--; + } + resultnum->digits[0] = carry; + resultnum->power++; + resultnum->sig_digs++; + } + if (resultnum->sig_digs > resultnum->precision) { + resultnum->sig_digs = resultnum->precision; + } + return 1; + } +} + +// A convenience wrapper that temporarily creates a new bignum out of the +// given integer, calls bignum_add with it and the other operand, and deletes +// the temporary bignum before exiting. Any problems that bignum_add encounters +// are passed back up through this function and returned to the caller. 
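Before that wrapper, a short trace of the alignment step in bignum_add above (operands assumed for illustration):

/* Example: adding 123 and 4.5 with precision 25.
 *   left  = 123 -> digits {1,2,3}, power 2, sig_digs 3
 *   right = 4.5 -> digits {4,5},   power 0, sig_digs 2
 *   difference = 2, so the smaller operand is first copied into the result
 *   shifted two places right ({0,0,4,5}); the digit loop then adds the larger
 *   operand on top, giving digits {1,2,7,5}, power 2, sig_digs 4, i.e. 127.5.
 */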
+__host__ int bignum_add_int(bignum * resultnum, bignum * leftnum, long int rightint) { + bignum_reset(resultnum); + if ((rightint == 0L) && (leftnum->sig_digs > 0L)) { + bignum_set(resultnum, leftnum); + return 1; + } + else if ((leftnum->sig_digs == 0L) && (rightint > 0L)) { + return bignum_set_int(resultnum, rightint); + } + else if ((leftnum->sig_digs == 0L) && (rightint == 0L)) { return 1; } + else { + bignum * tempnum = bignum_init(resultnum->precision); + if (tempnum == 0) { return 0; } + if (bignum_set_int(tempnum, rightint) == 0) { + bignum_clear(tempnum); + return 0; + } + int retval = bignum_add(resultnum, leftnum, tempnum); + bignum_clear(tempnum); + return retval; + } +} + +// Multiplies two bignums together and stores the result. Like add, uses +// functions to reset and set the location of the result, and returns 1 upon +// success or 0 if an error occurred. A special shortcut is taken if either +// operand is zero, since the result will thus also be zero. Note that we assume +// the precision of all three operands is the same. If it's not, something +// terrible like a seg fault or incorrect answer will probably occur. Most +// importantly, the result operand CANNOT be the same as one of the input +// operands, since the result is clobbered immediately and used as a scratchpad. +// Also, note that this is unsigned: it assumes both operands are positive. +__host__ int bignum_mult(bignum * resultnum, bignum * leftnum, bignum * rightnum) { + bignum_reset(resultnum); + if ((leftnum->sig_digs == 0L) || (rightnum->sig_digs == 0L)) { return 1; } + else { + // Initialize the scratchpad and find the digit limits + char * temp_word = (char *)calloc((int)(2L * (resultnum->precision)), sizeof(char)); + if (temp_word == 0) { return 0; } + bignum * bigger; + bignum * smaller; + if (((signed long int)leftnum->sig_digs - (signed long int)rightnum->sig_digs) >= 0L) { + bigger = leftnum; + smaller = rightnum; + } + else if ((rightnum->sig_digs - leftnum->sig_digs) > 0L) { + bigger = rightnum; + smaller = leftnum; + } + long int bigstart = (bigger->sig_digs) - 1L; + long int smallstart = (smaller->sig_digs) - 1L; + long int bigcounter, smallcounter; + char carry = 0; + + // Perform the shift-addition loop. We choose to loop over each + // digit of the smaller number for fewer overall iterations. If + // the current bigloop has a zero, we can just skip that iteration. + // Also, record the final carry, power, and sig_digs values. + for (bigcounter = 0L; bigcounter < (smaller->sig_digs); bigcounter++) { + if (smaller->digits[(int)(smallstart - bigcounter)] != '\0') { + carry = 0; + for (smallcounter = 0L; smallcounter < (bigger->sig_digs); smallcounter++) { + temp_word[(int)((2L * (resultnum->precision)) - smallcounter - + bigcounter - 1L)] += (carry + (smaller->digits[(int)(smallstart - + bigcounter)] * bigger->digits[(int)(bigstart - smallcounter)])); + carry = temp_word[(int)((2L * (resultnum->precision)) - + smallcounter - bigcounter - 1L)] / 10; + temp_word[(int)((2L * (resultnum->precision)) - smallcounter - + bigcounter - 1L)] %= 10; + } + temp_word[(int)((2L * (resultnum->precision)) - bigcounter - + (bigger->sig_digs) - 1L)] = carry; + } + } + resultnum->power = ((bigger->power) + (smaller->power)); + resultnum->sig_digs = ((bigger->sig_digs) + (smaller->sig_digs)); + + // Adjust for lack of a final carry or trailing zeros. 
+ if (carry < 1) { + (resultnum->sig_digs)--; + (resultnum->power)--; + } + (resultnum->power)++; + int trailingzeros = 1; + long int zerocount = 0L; + long int i = (2L * (resultnum->precision) - 1L); + while (trailingzeros == 1) { + if (temp_word[(int)i] == '\0') { + zerocount++; + } + else { trailingzeros = 0; } + i--; + } + resultnum->sig_digs -= zerocount; + if ((resultnum->sig_digs) > (resultnum->precision)) { + resultnum->sig_digs = (resultnum->precision); + } + + // Finally, copy from the temp word into the result, taking into + // account any digits we may lose due to precision. + long int tempstart = (2L * (resultnum->precision)) - ((bigger->sig_digs) + + (smaller->sig_digs)); + if (carry < 1) { tempstart++; } + for (i = 0L; i < (resultnum->sig_digs); i++) { + resultnum->digits[(int)i] = temp_word[(int)(tempstart + i)]; + } + free(temp_word); + return 1; + } +} + +// Like bignum_add_int, a convenience wrapper that creates a temporary bignum +// out of the integer and passes it to bignum_mult. Any problems encountered +// in client functions are passed back up to the original caller. +__host__ int bignum_mult_int(bignum * resultnum, bignum * leftnum, long int rightint) { + bignum_reset(resultnum); + if ((leftnum->sig_digs == 0L) || (rightint == 0L)) { return 1; } + else { + bignum * tempnum = bignum_init(resultnum->precision); + if (tempnum == 0) { return 0; } + if (bignum_set_int(tempnum, rightint) == 0) { + bignum_clear(tempnum); + return 0; + } + int retval = bignum_mult(resultnum, leftnum, tempnum); + bignum_clear(tempnum); + return retval; + } +} + +// Divides two bignums. Taken in terms of a fraction, leftnum is the numerator +// and rightnum is the denominator. Performs an explicit check to make sure +// the denominator is not zero, and returns 0 (an error) if it is. Returns 1 upon +// success or 0 if an error occurs. A special shortcut is taken if the numerator is +// zero. Note that we assume the precision of all three operands is the same. If it's +// not, something terrible like a seg fault or incorrect answer will probably occur. +// Most importantly, the result operand CANNOT be the same as one of the input +// operands, since the result is clobbered immediately and used as a scratchpad. +// Also, note that this is unsigned: it assumes both operands are positive. +__host__ int bignum_divide(bignum * resultnum, bignum * numerator, bignum * denominator) { + bignum_reset(resultnum); + if (denominator->sig_digs == 0L) { return 0; } + else if (numerator->sig_digs == 0L) { return 1; } + else { + // Initialize the scratchpad and initially copy the numerator into it. + // Also initialize the result's power. + char * temp_word = (char *)calloc((int)(2L * + (resultnum->precision) + 2L), sizeof(char)); // May only need to be + 1L + if (temp_word == 0) { return 0; } + long int i; + for (i = 0L; i < numerator->sig_digs; i++) { + temp_word[(int)(i + 1L)] = numerator->digits[(int)i]; + } + resultnum->power = (numerator->power - denominator->power); + long int sigdigctr = 0L; + long int numeratorindex = 0L; + + // First see if we need to "shift" the numerator by comparing it. 
+ i = ((denominator->sig_digs) - 1L); + int denom_bigger = 1; + while ((i >= 0L) && (denom_bigger == 1)) { + if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) > + (temp_word[(int)((denominator->sig_digs) - i)])) { + i = 0L; + } + else if ((denominator->digits[(int)((denominator->sig_digs) - + i - 1L)]) < (temp_word[(int)((denominator->sig_digs) - i)])) { + denom_bigger = 0; + } + else if (((denominator->digits[(int)((denominator->sig_digs) - i - + 1L)]) == (temp_word[(int)((denominator->sig_digs) - i)])) && (i == 0L)) { + denom_bigger = 0; + } + i--; + } + if (denom_bigger == 1) { + numeratorindex++; + (resultnum->power)--; + } + + // Now the main division loop. Note that there's two ways to terminate: + // either we've filled the entire precision of the result word and are + // forced to truncate our result, or our answer divides exactly. In the + // second case, once we've exhausted the numerator's significant digits + // and our temp word contains nothing but zeros, we can end early since + // all subsequent iterations would contribute only zeros as well. Note + // that special care will be taken to detect extra zeros at the end of + // the result so that the sig_digs is recorded correctly. Also, we don't + // round, we truncate, which doesn't minimize error. + int nonzero = 1; + while ((sigdigctr < (resultnum->precision)) && (nonzero == 1)) { + // First run the subtraction loop. + char current_digit = 0; + int numer_bigger = 1; + while (numer_bigger == 1) { + // To subtract, first run a comparison to see if the numerator + // is bigger. If it is, increment the counter and subtract. + i = ((denominator->sig_digs) - 1L); + denom_bigger = 1; + if (temp_word[(int)numeratorindex] > 0) { denom_bigger = 0; } + while ((i >= 0L) && (denom_bigger == 1)) { + if ((denominator->digits[(int)((denominator->sig_digs) - + i - 1L)]) > (temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)])) { + i = 0L; + } + else if ((denominator->digits[(int)((denominator->sig_digs) - + i - 1L)]) < (temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)])) { + denom_bigger = 0; + } + else if (((denominator->digits[(int)((denominator->sig_digs) - + i - 1L)]) == (temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)])) && (i == 0L)) { + denom_bigger = 0; + } + i--; + } + if (denom_bigger == 1) { + numer_bigger = 0; + } + + // Increment counter and perform subtraction loop. + if (numer_bigger == 1) { + current_digit++; + for (i = 0L; i < (denominator->sig_digs); i++) { + temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)] -= (denominator->digits[ + (int)((denominator->sig_digs) - i - 1L)]); + if ((temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)]) < 0) { + temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)] += 10L; + (temp_word[(int)((denominator->sig_digs) + + numeratorindex - i - 1L)]) -= 1L; + } + } + } + } + + // If we're past all of the numerator's significant digits, run + // zero detection on it to see if we can end early. + if (sigdigctr > (numerator->sig_digs)) { // May only need to be >= + long int zerocounter = 0L; + i = 0L; + while ((i == zerocounter) && (i <= (denominator->sig_digs))) { + if ((temp_word[(int)(numeratorindex + i)]) < 1) { zerocounter++; } + i++; + } + if (zerocounter == ((denominator->sig_digs) + 1L)) { nonzero = 0; } + } + + // Once we have obtained the proper digit in the result, save it. 
+ if (sigdigctr < resultnum->precision) { + resultnum->digits[(int)sigdigctr] = current_digit; + } + sigdigctr++; + numeratorindex++; + } + + // Record the result's sig digs, taking care to detect trailing zeros. + resultnum->sig_digs = sigdigctr; + int trailingzeros = 1; + long int zerocount = 0L; + i = sigdigctr - 1L; + while (trailingzeros == 1) { + if (resultnum->digits[(int)i] == '\0') { + zerocount++; + } + else { trailingzeros = 0; } + i--; + } + (resultnum->sig_digs) -= zerocount; + free(temp_word); + return 1; + } +} + +// A convenience wrapper that creates a temporary bignum out of the integer. +// Since division is not commutative, two wrappers are given. Any problems +// encountered in client functions are passed back up to the original caller. +__host__ int bignum_int_divide(bignum * resultnum, long int leftint, bignum * rightnum) { + bignum_reset(resultnum); + if (rightnum->sig_digs == 0L) { return 0; } + else if (leftint == 0L) { return 1; } + else { + bignum * tempnum = bignum_init(resultnum->precision); + if (tempnum == 0) { return 0; } + if (bignum_set_int(tempnum, leftint) == 0) { + bignum_clear(tempnum); + return 0; + } + int retval = bignum_divide(resultnum, tempnum, rightnum); + bignum_clear(tempnum); + return retval; + } +} + +// A convenience wrapper that creates a temporary bignum out of the integer. +// Since division is not commutative, two wrappers are given. Any problems +// encountered in client functions are passed back up to the original caller. +__host__ int bignum_divide_int(bignum * resultnum, bignum * leftnum, long int rightint) { + bignum_reset(resultnum); + if (rightint == 0L) { return 0; } + else if (leftnum->sig_digs == 0L) { return 1; } + else { + bignum * tempnum = bignum_init(resultnum->precision); + if (tempnum == 0) { return 0; } + if (bignum_set_int(tempnum, rightint) == 0) { + bignum_clear(tempnum); + return 0; + } + int retval = bignum_divide(resultnum, leftnum, tempnum); + bignum_clear(tempnum); + return retval; + } +} + +// Create space for a bignum with the specified precision. +// Technically, it's also initialized if we interpret having zero +// significant digits as the number having a value of zero. +__device__ bignum * bignum_init_gpu(long int precision) { + bignum * temp_ptr = (bignum *)malloc(sizeof(bignum)); + if (temp_ptr == 0) { return temp_ptr; } + temp_ptr->digits = (char *)malloc((int)(precision * sizeof(char))); + if ((temp_ptr->digits) == 0) { temp_ptr = 0; return temp_ptr; } + int i; + for (i = 0; i < precision; i++) { temp_ptr->digits[i] = '\0'; } + temp_ptr->power = 0L; + temp_ptr->sig_digs = 0L; + temp_ptr->precision = precision; + return temp_ptr; +} + +// Resets a bignum's value to zero. memcpy isn't used because +// why bring the string library into this just for this use? +__device__ void bignum_reset_gpu(bignum * numval) { + if ((numval->sig_digs) > 0L) { + long int i; + for (i = 0L; i < numval->precision; i++) { numval->digits[(int)i] = '\0'; } + numval->power = 0L; + numval->sig_digs = 0L; + } + return; +} + +// Free memory used by a bignum when we're done with it +__device__ void bignum_clear_gpu(bignum * oldnum) { + free(oldnum->digits); + free(oldnum); + return; +} + +// Set an instance of a bignum to an integer value. Note that if we can't +// initialize the temp word we need for copying, we return false (value = 0). +// We also assume that the number is non-negative since we only store +// unsigned numbers. We assume the result is initialized/reset. 
Finally, +// we handle zero specially by just resetting (again?) the result. Note that +// we explicitly assume the number to convert fits within the max number of +// digits. If we try to convert a number bigger than we can store, it won't work. +__device__ int bignum_set_int_gpu(bignum * numval, long int intval) { + if (intval > 0L) { + + // Separate out the individual digits (stored backwards) + char * temp_word = (char *)malloc((int)(numval->precision * sizeof(char))); + if (temp_word == 0) { return 0; } + long int i; + for (i = 0; i < numval->precision; i++) { temp_word[(int)i] = '\0'; } + long int temp_int = intval; + long int counter = 0L; + while (temp_int > 0L) { + temp_word[(int)counter] = (char)(temp_int % 10L); + temp_int = temp_int / 10L; + counter++; + } + + // Detect any trailing zeros that we don't need to store + numval->power = counter - 1L; + long int leadingzeros = 0L; + int hasleading = 1; + while (hasleading == 1) { + if (temp_word[(int)leadingzeros] != 0) { hasleading = 0; } + else { leadingzeros++; } + } + + // Store final result into actual bignum variable + for (temp_int = 0L; temp_int < (counter - leadingzeros); temp_int++) { + numval->digits[(int)temp_int] = temp_word[(int)(counter - temp_int - 1L)]; + } + numval->sig_digs = counter - leadingzeros; + free(temp_word); + return 1; + } + else { bignum_reset_gpu(numval); return 1; } +} + +// Set an instance of a bignum to the value of another bignum. We don't assume +// they're both the same precision; just use the precision of the new number. +// We do assume that the new number has already been initialized, though. +// strncpy is not used since it quits after seeing the first zero. +__device__ void bignum_set_gpu(bignum * newnum, bignum * oldnum) { + if ((oldnum->sig_digs) > 0L) { + newnum->power = oldnum->power; + newnum->sig_digs = ((oldnum->sig_digs > newnum->precision) ? + (newnum->precision) : (oldnum->sig_digs)); + long int i; + for (i = 0L; i < newnum->sig_digs; i++) { + newnum->digits[(int)i] = oldnum->digits[(int)i]; + } + } + else { bignum_reset_gpu(newnum); } + return; +} + +// Adds two bignums together and stores the result. Uses the functions to +// reset and set the location of the result internally, so current contents of +// result operand will be overwritten. Like bignum_set_int, returns 1 if +// addition was successful or 0 if an error occurred. A special shortcut is +// taken if either (or both) of the operands are zero. Note that it is possible +// for large additions to cause underflow to zero. In that case, special care is +// taken to make sure the proper input operand is used. Note that we assume the +// precision of all three operands is the same. If it's not, something terrible +// like a seg fault or incorrect answer will probably occur. Most importantly, +// the result operand CANNOT be the same as one of the input operands, since +// the result is clobbered immediately and used as a scratchpad. Note that this +// is also unsigned addition: not only does it not accept negative numbers, it +// also doesn't do subtraction (which, for that matter, isn't commutative). 
+__device__ int bignum_add_gpu(bignum * resultnum, bignum * leftnum, bignum * rightnum) { + bignum_reset_gpu(resultnum); + if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs > 0L)) { + bignum_set_gpu(resultnum, rightnum); + return 1; + } + else if ((rightnum->sig_digs == 0L) && (leftnum->sig_digs > 0L)) { + bignum_set_gpu(resultnum, leftnum); + return 1; + } + else if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs == 0L)) { return 1; } + else { + + // First check for overshift: if the larger number's power is too much + // bigger than the smaller number's, the smaller will be completely lost, + // and we'll just end up with the large number as the result. + if ((((leftnum->power - rightnum->power) > 0) && + ((leftnum->power - rightnum->power) > resultnum->precision))) { + bignum_set_gpu(resultnum, leftnum); + return 1; + } + if ((((rightnum->power - leftnum->power) > 0) && + ((rightnum->power - leftnum->power) > resultnum->precision))) { + bignum_set_gpu(resultnum, rightnum); + return 1; + } + + // Next, shift the smaller operand to match the larger one by copying + // it into the result operand as a partial sum. Also copy over the + // power and total significant digits into the result. + bignum * bigger; + bignum * smaller; + if ((leftnum->power - rightnum->power) >= 0L) { + bigger = leftnum; + smaller = rightnum; + } + else { + bigger = rightnum; + smaller = leftnum; + } + long int difference = bigger->power - smaller->power; + long int startdigit = smaller->sig_digs + difference; + long int transfertotal = smaller->sig_digs; + if (startdigit > resultnum->precision) { + startdigit = resultnum->precision - difference; + transfertotal = startdigit; + } + long int startdigitcopy = startdigit; + startdigit--; + long int i; + for (i = 0L; i < transfertotal; i++) { + if ((startdigit - difference) >= 0L) { + resultnum->digits[(int)startdigit] = + smaller->digits[(int)(startdigit - difference)]; + } + startdigit--; + } + + // Now the main addition loop: loop through each digit and add it. + // The carry from the previous digit will add to the current one. + // Note that we detect any trailing zeros to take from the sig_digs. + // Also, copy over the power and significant digits + resultnum->power = bigger->power; + resultnum->sig_digs = startdigitcopy; + if (bigger->sig_digs > resultnum->sig_digs) { + resultnum->sig_digs = bigger->sig_digs; + startdigitcopy = resultnum->sig_digs; + } + int trailingzeros = 1; + long int zerocount = 0L; + char carry = 0; + for (i = 0L; i < resultnum->sig_digs; i++) { + resultnum->digits[(int)(startdigitcopy - i - 1L)] += + (bigger->digits[(int)(startdigitcopy - i - 1L)] + carry); + if (resultnum->digits[(int)(startdigitcopy - i - 1L)] >= 10) { + resultnum->digits[(int)(startdigitcopy - i - 1L)] -= 10; + carry = 1; + } + else { carry = 0; } + if (trailingzeros == 1) { + if (resultnum->digits[(int)(startdigitcopy - i - 1L)] == '\0') { + zerocount++; + } + else { trailingzeros = 0; } + } + } + + // If we've got trailing zeros, subtract them from the final count of + // sig_digs. Also, if we have a carry, we need to shift everything... 
+ resultnum->sig_digs -= zerocount; + if (carry > 0) { + transfertotal = resultnum->sig_digs; + if (transfertotal == resultnum->precision) { transfertotal--; } + startdigitcopy = transfertotal - 1L; + for (i = 0L; i < transfertotal; i++) { + if (startdigitcopy >= 0L) { + resultnum->digits[(int)(startdigitcopy + 1L)] = + resultnum->digits[(int)startdigitcopy]; + } + else if ((startdigitcopy + 1L) >= 0L) { + resultnum->digits[(int)(startdigitcopy + 1L)] = '\0'; + } + startdigitcopy--; + } + resultnum->digits[0] = carry; + resultnum->power++; + resultnum->sig_digs++; + } + if (resultnum->sig_digs > resultnum->precision) { + resultnum->sig_digs = resultnum->precision; + } + return 1; + } +} + +// A convenience wrapper that temporarily creates a new bignum out of the +// given integer, calls bignum_add with it and the other operand, and deletes +// the temporary bignum before exiting. Any problems that bignum_add encounters +// are passed back up through this function and returned to the caller. +__device__ int bignum_add_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) { + bignum_reset_gpu(resultnum); + if ((rightint == 0L) && (leftnum->sig_digs > 0L)) { + bignum_set_gpu(resultnum, leftnum); + return 1; + } + else if ((leftnum->sig_digs == 0L) && (rightint > 0L)) { + return bignum_set_int_gpu(resultnum, rightint); + } + else if ((leftnum->sig_digs == 0L) && (rightint == 0L)) { return 1; } + else { + bignum * tempnum = bignum_init_gpu(resultnum->precision); + if (tempnum == 0) { return 0; } + if (bignum_set_int_gpu(tempnum, rightint) == 0) { + bignum_clear_gpu(tempnum); + return 0; + } + int retval = bignum_add_gpu(resultnum, leftnum, tempnum); + bignum_clear_gpu(tempnum); + return retval; + } +} + +// Multiplies two bignums together and stores the result. Like add, uses +// functions to reset and set the location of the result, and returns 1 upon +// success or 0 if an error occurred. A special shortcut is taken if either +// operand is zero, since the result will thus also be zero. Note that we assume +// the precision of all three operands is the same. If it's not, something +// terrible like a seg fault or incorrect answer will probably occur. Most +// importantly, the result operand CANNOT be the same as one of the input +// operands, since the result is clobbered immediately and used as a scratchpad. +// Also, note that this is unsigned: it assumes both operands are positive. +__device__ int bignum_mult_gpu(bignum * resultnum, bignum * leftnum, bignum * rightnum) { + bignum_reset_gpu(resultnum); + if ((leftnum->sig_digs == 0L) || (rightnum->sig_digs == 0L)) { return 1; } + else { + + // Initialize the scratchpad and find the digit limits + char * temp_word = (char *)malloc((int)(2L * (resultnum->precision) * sizeof(char))); + if (temp_word == 0) { return 0; } + long int i; + for (i = 0; i < (2L * resultnum->precision); i++) { temp_word[(int)i] = '\0'; } + bignum * bigger; + bignum * smaller; + if (((signed long int)leftnum->sig_digs - (signed long int)rightnum->sig_digs) >= 0L) { + bigger = leftnum; + smaller = rightnum; + } + else if ((rightnum->sig_digs - leftnum->sig_digs) > 0L) { + bigger = rightnum; + smaller = leftnum; + } + long int bigstart = (bigger->sig_digs) - 1L; + long int smallstart = (smaller->sig_digs) - 1L; + long int bigcounter, smallcounter; + char carry = 0; + + // Perform the shift-addition loop. We choose to loop over each + // digit of the smaller number for fewer overall iterations. 
If + // the current bigloop has a zero, we can just skip that iteration. + // Also, record the final carry, power, and sig_digs values. + for (bigcounter = 0L; bigcounter < (smaller->sig_digs); bigcounter++) { + if (smaller->digits[(int)(smallstart - bigcounter)] != '\0') { + carry = 0; + for (smallcounter = 0L; smallcounter < (bigger->sig_digs); smallcounter++) { + temp_word[(int)((2L * (resultnum->precision)) - smallcounter - + bigcounter - 1L)] += (carry + (smaller->digits[(int)(smallstart - + bigcounter)] * bigger->digits[(int)(bigstart - smallcounter)])); + carry = temp_word[(int)((2L * (resultnum->precision)) - + smallcounter - bigcounter - 1L)] / 10; + temp_word[(int)((2L * (resultnum->precision)) - smallcounter - + bigcounter - 1L)] %= 10; + } + temp_word[(int)((2L * (resultnum->precision)) - bigcounter - + (bigger->sig_digs) - 1L)] = carry; + } + } + resultnum->power = ((bigger->power) + (smaller->power)); + resultnum->sig_digs = ((bigger->sig_digs) + (smaller->sig_digs)); + + // Adjust for lack of a final carry or trailing zeros. + if (carry < 1) { + (resultnum->sig_digs)--; + (resultnum->power)--; + } + (resultnum->power)++; + int trailingzeros = 1; + long int zerocount = 0L; + i = (2L * (resultnum->precision) - 1L); + while (trailingzeros == 1) { + if (temp_word[(int)i] == '\0') { + zerocount++; + } + else { trailingzeros = 0; } + i--; + } + resultnum->sig_digs -= zerocount; + if ((resultnum->sig_digs) > (resultnum->precision)) { + resultnum->sig_digs = (resultnum->precision); + } + + // Finally, copy from the temp word into the result, taking into + // account any digits we may lose due to precision. + long int tempstart = (2L * (resultnum->precision)) - ((bigger->sig_digs) + + (smaller->sig_digs)); + if (carry < 1) { tempstart++; } + for (i = 0L; i < (resultnum->sig_digs); i++) { + resultnum->digits[(int)i] = temp_word[(int)(tempstart + i)]; + } + free(temp_word); + return 1; + } +} + +// Like bignum_add_int, a convenience wrapper that creates a temporary bignum +// out of the integer and passes it to bignum_mult. Any problems encountered +// in client functions are passed back up to the original caller. +__device__ int bignum_mult_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) { + bignum_reset_gpu(resultnum); + if ((leftnum->sig_digs == 0L) || (rightint == 0L)) { return 1; } + else { + bignum * tempnum = bignum_init_gpu(resultnum->precision); + if (tempnum == 0) { return 0; } + if (bignum_set_int_gpu(tempnum, rightint) == 0) { + bignum_clear_gpu(tempnum); + return 0; + } + int retval = bignum_mult_gpu(resultnum, leftnum, tempnum); + bignum_clear_gpu(tempnum); + return retval; + } +} + +// Divides two bignums. Taken in terms of a fraction, leftnum is the numerator +// and rightnum is the denominator. Performs an explicit check to make sure +// the denominator is not zero, and returns 0 (an error) if it is. Returns 1 upon +// success or 0 if an error occurs. A special shortcut is taken if the numerator is +// zero. Note that we assume the precision of all three operands is the same. If it's +// not, something terrible like a seg fault or incorrect answer will probably occur. +// Most importantly, the result operand CANNOT be the same as one of the input +// operands, since the result is clobbered immediately and used as a scratchpad. +// Also, note that this is unsigned: it assumes both operands are positive. 
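Before the definition below, a compact trace of the repeated-subtraction long division that bignum_divide and bignum_divide_gpu share (operands assumed for illustration):

/* Example: 1 / 3 with max_digits = 5.
 *   The numerator is copied into temp_word and compared against the denominator;
 *   it is smaller, so numeratorindex advances and the result's power drops to -1.
 *   At each output position the denominator (3) is subtracted from the running
 *   remainder, borrowing as needed, until it no longer fits -- three times per
 *   position -- so every stored digit is 3: digits = {3,3,3,3,3}, power = -1,
 *   which bignum_print renders as "0.33333".
 */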
+__device__ int bignum_divide_gpu(bignum * resultnum, bignum * numerator, bignum * denominator) { + bignum_reset_gpu(resultnum); + if (denominator->sig_digs == 0L) { return 0; } + else if (numerator->sig_digs == 0L) { return 1; } + else { + + // Initialize the scratchpad and initially copy the numerator into it. + // Also initialize the result's power. + char * temp_word = (char *)malloc((int)(2L * + (resultnum->precision) + 2L * sizeof(char))); // May only need to be + 1L + if (temp_word == 0) { return 0; } + long int i; + temp_word[0] = '\0'; + for (i = 0L; i < numerator->sig_digs; i++) { + temp_word[(int)(i + 1L)] = numerator->digits[(int)i]; + } + for (i = (1L + numerator->sig_digs); i < + (2L * resultnum->precision + 2L); i++) { + temp_word[(int)i] = '\0'; + } + + resultnum->power = (numerator->power - denominator->power); + long int sigdigctr = 0L; + long int numeratorindex = 0L; + + // First see if we need to "shift" the numerator by comparing it. + i = ((denominator->sig_digs) - 1L); + int denom_bigger = 1; + while ((i >= 0L) && (denom_bigger == 1)) { + if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) > + (temp_word[(int)((denominator->sig_digs) - i)])) { + i = 0L; + } + else if ((denominator->digits[(int)((denominator->sig_digs) - + i - 1L)]) < (temp_word[(int)((denominator->sig_digs) - i)])) { + denom_bigger = 0; + } + else if (((denominator->digits[(int)((denominator->sig_digs) - i - + 1L)]) == (temp_word[(int)((denominator->sig_digs) - i)])) && (i == 0L)) { + denom_bigger = 0; + } + i--; + } + if (denom_bigger == 1) { + numeratorindex++; + (resultnum->power)--; + } + + // Now the main division loop. Note that there's two ways to terminate: + // either we've filled the entire precision of the result word and are + // forced to truncate our result, or our answer divides exactly. In the + // second case, once we've exhausted the numerator's significant digits + // and our temp word contains nothing but zeros, we can end early since + // all subsequent iterations would contribute only zeros as well. Note + // that special care will be taken to detect extra zeros at the end of + // the result so that the sig_digs is recorded correctly. Also, we don't + // round, we truncate, which doesn't minimize error. + int nonzero = 1; + while ((sigdigctr < (resultnum->precision)) && (nonzero == 1)) { + // First run the subtraction loop. + char current_digit = 0; + int numer_bigger = 1; + while (numer_bigger == 1) { + // To subtract, first run a comparison to see if the numerator + // is bigger. If it is, increment the counter and subtract. + i = ((denominator->sig_digs) - 1L); + denom_bigger = 1; + if (temp_word[(int)numeratorindex] > 0) { denom_bigger = 0; } + while ((i >= 0L) && (denom_bigger == 1)) { + if ((denominator->digits[(int)((denominator->sig_digs) - + i - 1L)]) > (temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)])) { + i = 0L; + } + else if ((denominator->digits[(int)((denominator->sig_digs) - + i - 1L)]) < (temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)])) { + denom_bigger = 0; + } + else if (((denominator->digits[(int)((denominator->sig_digs) - + i - 1L)]) == (temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)])) && (i == 0L)) { + denom_bigger = 0; + } + i--; + } + if (denom_bigger == 1) { + numer_bigger = 0; + } + + // Increment counter and perform subtraction loop. 
+ if (numer_bigger == 1) { + current_digit++; + for (i = 0L; i < (denominator->sig_digs); i++) { + temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)] -= (denominator->digits[ + (int)((denominator->sig_digs) - i - 1L)]); + if ((temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)]) < 0) { + temp_word[(int)((denominator->sig_digs) + + numeratorindex - i)] += 10L; + (temp_word[(int)((denominator->sig_digs) + + numeratorindex - i - 1L)]) -= 1L; + } + } + } + } + + // If we're past all of the numerator's significant digits, run + // zero detection on it to see if we can end early. + if (sigdigctr > (numerator->sig_digs)) { // May only need to be >= + long int zerocounter = 0L; + i = 0L; + while ((i == zerocounter) && (i <= (denominator->sig_digs))) { + if ((temp_word[(int)(numeratorindex + i)]) < 1) { zerocounter++; } + i++; + } + if (zerocounter == ((denominator->sig_digs) + 1L)) { nonzero = 0; } + } + + // Once we have obtained the proper digit in the result, save it. + if (sigdigctr < resultnum->precision) { + resultnum->digits[(int)sigdigctr] = current_digit; + } + sigdigctr++; + numeratorindex++; + } + + // Record the result's sig digs, taking care to detect trailing zeros. + resultnum->sig_digs = sigdigctr; + int trailingzeros = 1; + long int zerocount = 0L; + i = sigdigctr - 1L; + while (trailingzeros == 1) { + if (resultnum->digits[(int)i] == '\0') { + zerocount++; + } + else { trailingzeros = 0; } + i--; + } + (resultnum->sig_digs) -= zerocount; + free(temp_word); + return 1; + } +} + +// A convenience wrapper that creates a temporary bignum out of the integer. +// Since division is not commutative, two wrappers are given. Any problems +// encountered in client functions are passed back up to the original caller. +__device__ int bignum_int_divide_gpu(bignum * resultnum, long int leftint, bignum * rightnum) { + bignum_reset_gpu(resultnum); + if (rightnum->sig_digs == 0L) { return 0; } + else if (leftint == 0L) { return 1; } + else { + bignum * tempnum = bignum_init_gpu(resultnum->precision); + if (tempnum == 0) { return 0; } + if (bignum_set_int_gpu(tempnum, leftint) == 0) { + bignum_clear_gpu(tempnum); + return 0; + } + int retval = bignum_divide_gpu(resultnum, tempnum, rightnum); + bignum_clear_gpu(tempnum); + return retval; + } +} + +// A convenience wrapper that creates a temporary bignum out of the integer. +// Since division is not commutative, two wrappers are given. Any problems +// encountered in client functions are passed back up to the original caller. 
+__device__ int bignum_divide_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) { + bignum_reset_gpu(resultnum); + if (rightint == 0L) { return 0; } + else if (leftnum->sig_digs == 0L) { return 1; } + else { + bignum * tempnum = bignum_init_gpu(resultnum->precision); + if (tempnum == 0) { return 0; } + if (bignum_set_int_gpu(tempnum, rightint) == 0) { + bignum_clear_gpu(tempnum); + return 0; + } + int retval = bignum_divide_gpu(resultnum, leftnum, tempnum); + bignum_clear_gpu(tempnum); + return retval; + } +} diff --git a/cuda_code/kernel_123.cu b/cuda_code/kernel_123.cu new file mode 100644 index 0000000000000000000000000000000000000000..f137cf25e2972b1c58a668f4a2ff9603853e8578 --- /dev/null +++ b/cuda_code/kernel_123.cu @@ -0,0 +1,297 @@ +// Includes +#include +#include + + +// includes CUDA +#include + +#define THREADS_PER_BLOCK 256 +#define NUM_OF_BLOCKS 640 + + +// Variables + +texture texmem1; +texture texmem2; +texture texmem3; +texture texmem4; +__constant__ float ConstArray1[THREADS_PER_BLOCK]; +__constant__ float ConstArray2[THREADS_PER_BLOCK]; + +// Functions +void CleanupResources(void); +void RandomInit_int(unsigned*, int); +void RandomInit_fp(float*, int); + +//////////////////////////////////////////////////////////////////////////////// +// These are CUDA Helper functions + +// This will output the proper CUDA error strings in the event that a CUDA host call returns an error +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + +inline void __checkCudaErrors(cudaError err, const char *file, const int line ) +{ + if(cudaSuccess != err){ + fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// This will output the proper error string when calling cudaGetLastError +#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) + +inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) +{ + cudaError_t err = cudaGetLastError(); + if (cudaSuccess != err){ + fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// end of CUDA Helper Functions + + + + +// Device code +__global__ void PowerKernal1(float *A, float *B, int N, int iterations) +{ + int tid = blockIdx.x*blockIdx.x + threadIdx.x; + float Value1=0; + float Value2=0; + __device__ __shared__ float I1[THREADS_PER_BLOCK]; + __device__ __shared__ float I2[THREADS_PER_BLOCK]; + + I1[tid%THREADS_PER_BLOCK] = A[tid]; + I2[tid%THREADS_PER_BLOCK] = B[tid]; + __syncthreads(); + + float sum = 0.0; + + if(tid < N){ + for(unsigned i=0; i>>(d_A1, d_A2, N, iterations); + checkCudaErrors(cudaEventRecord(stop)); + + checkCudaErrors(cudaEventSynchronize(stop)); + checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); + printf("gpu execution time = %.2f s\n", elapsedTime/1000); + getLastCudaError("kernel launch failure"); + cudaThreadSynchronize(); + + checkCudaErrors(cudaEventDestroy(start)); + checkCudaErrors(cudaEventDestroy(stop)); + CleanupResources(); + return 0; +} + +void CleanupResources(void) +{ + // Free device memory + if (d_A1) + cudaFree(d_A1); + if (d_A2) + cudaFree(d_A2); + if (d_A3) + cudaFree(d_A3); + // Free host memory + if (h_A1) + free(h_A1); + if (h_A2) + free(h_A2); + if (h_A3) + free(h_A3); +} + +// Allocates an array with random float entries. 
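One aside on kernel_123.cu above before the helper definitions: PowerKernal1 forms its global index as blockIdx.x*blockIdx.x + threadIdx.x. The conventional 1-D global thread index is sketched below, on the assumption that squaring the block index was not intended.

/* Sketch of the usual 1-D global index (assumption: the squared block index
   in PowerKernal1 is a typo rather than a deliberate access pattern). */
int tid = blockIdx.x * blockDim.x + threadIdx.x;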
+void RandomInit_int(float* data, int n) +{ + for (int i = 0; i < n; ++i){ + srand((unsigned)time(0)); + data[i] = rand() / RAND_MAX; + } +} + +void RandomInit_fp(float* data, int n) +{ + for (int i = 0; i < n; ++i){ + data[i] = rand() / RAND_MAX; + } +} \ No newline at end of file diff --git a/cuda_code/kernel_290.cu b/cuda_code/kernel_290.cu new file mode 100644 index 0000000000000000000000000000000000000000..5b8e1cf967dbbc430b6ccb97039e0c85c7faec8d --- /dev/null +++ b/cuda_code/kernel_290.cu @@ -0,0 +1,85 @@ +//pass +//--gridDim=195 --blockDim=128 + +template __global__ void computeValue(unsigned int *const results, curandState *const rngStates, const unsigned int numSims); +template __global__ void computeValue(unsigned int *const results, curandState *const rngStates, const unsigned int numSims); + +__device__ static __attribute__((always_inline)) unsigned int reduce_sum(unsigned int in); +__device__ static __attribute__((always_inline)) void getPoint(float &x, float &y, curandState &state); +__device__ static __attribute__((always_inline)) void getPoint(double &x, double &y, curandState &state); + +__device__ static __attribute__((always_inline)) unsigned int reduce_sum(unsigned int in) +{ + extern __shared__ unsigned int sdata[]; + + // Perform first level of reduction: + // - Write to shared memory + unsigned int ltid = threadIdx.x; + + sdata[ltid] = in; + __syncthreads(); + + // Do reduction in shared mem + for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1) + { + if (ltid < s) + { + sdata[ltid] += sdata[ltid + s]; + } + + __syncthreads(); + } + + return sdata[0]; +} + +__device__ static __attribute__((always_inline)) void getPoint(float &x, float &y, curandState &state) +{ + x = curand_uniform(&state); + y = curand_uniform(&state); +} +__device__ static __attribute__((always_inline)) void getPoint(double &x, double &y, curandState &state) +{ + x = curand_uniform_double(&state); + y = curand_uniform_double(&state); +} + +// Estimator kernel +template +__global__ void computeValue(unsigned int *const results, + curandState *const rngStates, + const unsigned int numSims) +{ + // Determine thread ID + unsigned int bid = blockIdx.x; + unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int step = gridDim.x * blockDim.x; + + // Initialise the RNG + curandState localState = rngStates[tid]; + + // Count the number of points which lie inside the unit quarter-circle + unsigned int pointsInside = 0; + + for (unsigned int i = tid ; i < numSims ; i += step) + { + Real x; + Real y; + getPoint(x, y, localState); + Real l2norm2 = x * x + y * y; + + if (l2norm2 < static_cast(1)) + { + pointsInside++; + } + } + + // Reduce within the block + pointsInside = reduce_sum(pointsInside); + + // Store the result + if (threadIdx.x == 0) + { + results[bid] = pointsInside; + } +} diff --git a/cuda_code/kernel_307.cu b/cuda_code/kernel_307.cu new file mode 100644 index 0000000000000000000000000000000000000000..c03366c77b836efed2a472ed7c9bf7bc968145eb --- /dev/null +++ b/cuda_code/kernel_307.cu @@ -0,0 +1,101 @@ +//pass +//--gridDim=[1,100,1] --blockDim=[64,1,1] + +#define n_directions 32 +#define k_2powneg32 2.3283064E-10F + +__global__ void sobolGPU_kernel(unsigned n_vectors, unsigned n_dimensions, unsigned *d_directions, float *d_output) +{ + __requires(n_vectors == 100000); + __requires(n_dimensions == 100); + + __shared__ unsigned int v[n_directions]; + + // Offset into the correct dimension as specified by the + // block y coordinate + d_directions = d_directions + n_directions * 
blockIdx.y; + d_output = d_output + n_vectors * blockIdx.y; + + // Copy the direction numbers for this dimension into shared + // memory - there are only 32 direction numbers so only the + // first 32 (n_directions) threads need participate. + if (threadIdx.x < n_directions) + { + v[threadIdx.x] = d_directions[threadIdx.x]; + } + + // __syncthreads(); + + // Set initial index (i.e. which vector this thread is + // computing first) and stride (i.e. step to the next vector + // for this thread) + int i0 = threadIdx.x + blockIdx.x * blockDim.x; + int stride = gridDim.x * blockDim.x; + + // Get the gray code of the index + // c.f. Numerical Recipes in C, chapter 20 + // http://www.nrbook.com/a/bookcpdf/c20-2.pdf + unsigned int g = i0 ^ (i0 >> 1); + + // Initialisation for first point x[i0] + // In the Bratley and Fox paper this is equation (*), where + // we are computing the value for x[n] without knowing the + // value of x[n-1]. + unsigned int X = 0; + unsigned int mask; + + for (unsigned int k = 0 ; k < __ffs(stride) - 1 ; k++) + { + // We want X ^= g_k * v[k], where g_k is one or zero. + // We do this by setting a mask with all bits equal to + // g_k. In reality we keep shifting g so that g_k is the + // LSB of g. This way we avoid multiplication. + mask = - (g & 1); + X ^= mask & v[k]; + g = g >> 1; + } + + if (i0 < n_vectors) + { + d_output[i0] = (float)X * k_2powneg32; + } + + // Now do rest of points, using the stride + // Here we want to generate x[i] from x[i-stride] where we + // don't have any of the x in between, therefore we have to + // revisit the equation (**), this is easiest with an example + // so assume stride is 16. + // From x[n] to x[n+16] there will be: + // 8 changes in the first bit + // 4 changes in the second bit + // 2 changes in the third bit + // 1 change in the fourth + // 1 change in one of the remaining bits + // + // What this means is that in the equation: + // x[n+1] = x[n] ^ v[p] + // x[n+2] = x[n+1] ^ v[q] = x[n] ^ v[p] ^ v[q] + // ... + // We will apply xor with v[1] eight times, v[2] four times, + // v[3] twice, v[4] once and one other direction number once. + // Since two xors cancel out, we can skip even applications + // and just apply xor with v[4] (i.e. log2(16)) and with + // the current applicable direction number. + // Note that all these indices count from 1, so we need to + // subtract 1 from them all to account for C arrays counting + // from zero. 
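+    // Worked example (illustration only, using the stride = 16 case from the
+    // comment above): for i = 48, i - stride = 32 and (32 | 15) = 0b101111,
+    // whose lowest zero bit is bit 5 (value 16), so
+    // __ffs(~((i - stride) | v_stridemask)) - 1 = 4 and we xor with v[4];
+    // v_log2stridem1 below is v[__ffs(16) - 2] = v[3], i.e. the direction
+    // number for log2(stride), both indexed from zero.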
+ unsigned int v_log2stridem1 = v[__ffs(stride) - 2]; + unsigned int v_stridemask = stride - 1; + + for (unsigned int i = i0 + stride ; i < n_vectors ; i += stride) + { + // x[i] = x[i-stride] ^ v[b] ^ v[c] + // where b is log2(stride) minus 1 for C array indexing + // where c is the index of the rightmost zero bit in i, + // not including the bottom log2(stride) bits, minus 1 + // for C array indexing + // In the Bratley and Fox paper this is equation (**) + X ^= v_log2stridem1 ^ v[__ffs(~((i - stride) | v_stridemask)) - 1]; + d_output[i] = (float)X * k_2powneg32; + } +} diff --git a/cuda_code/kernel_415.cu b/cuda_code/kernel_415.cu new file mode 100644 index 0000000000000000000000000000000000000000..04cb41beea153e3b15faaa432db56dca7443df3e --- /dev/null +++ b/cuda_code/kernel_415.cu @@ -0,0 +1,14 @@ +//pass +//--blockDim=256 --gridDim=128 + +struct s { + int a; +}; + +__device__ void bar(s x); + +__global__ void foo() +{ + __shared__ s y[4]; + bar(y[3]); +} diff --git a/cuda_code/kernel_416.cu b/cuda_code/kernel_416.cu new file mode 100644 index 0000000000000000000000000000000000000000..4bd6b932d4a837558334e2aa95bb5d64aa1e32c9 --- /dev/null +++ b/cuda_code/kernel_416.cu @@ -0,0 +1,13 @@ +//pass +//--blockDim=256 --gridDim=128 + +struct s { + char a; +}; + +__device__ void bar(s x); + +__global__ void foo(s x) +{ + bar(x); +} diff --git a/cuda_code/kernel_57.cu b/cuda_code/kernel_57.cu new file mode 100644 index 0000000000000000000000000000000000000000..44a767cb169c14fdf4ac96895d617724da0cbefe --- /dev/null +++ b/cuda_code/kernel_57.cu @@ -0,0 +1,320 @@ +#include +#include +#include +// Includes +#include + +// includes, project +#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples +//#include +//#include + +// includes CUDA +#include + +//NI DAQ +#include "../include/ContAcq-IntClk.h" + +#define THREADS_PER_BLOCK 256 +#define NUM_OF_BLOCKS 60 +#define ITERATIONS REPLACE_ITERATIONS + +// Variables + +bool noprompt = false; +unsigned int my_timer; + + +texture texmem1; +texture texmem2; +texture texmem3; +texture texmem4; +__constant__ float ConstArray1[THREADS_PER_BLOCK]; +__constant__ float ConstArray2[THREADS_PER_BLOCK]; + +// Functions +void CleanupResources(void); +void RandomInit_int(unsigned*, int); +void RandomInit_fp(float*, int); +void ParseArguments(int, char**); + +//////////////////////////////////////////////////////////////////////////////// +// These are CUDA Helper functions + +// This will output the proper CUDA error strings in the event that a CUDA host call returns an error +#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) + +inline void __checkCudaErrors(cudaError err, const char *file, const int line ) +{ + if(cudaSuccess != err){ + fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// This will output the proper error string when calling cudaGetLastError +#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) + +inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) +{ + cudaError_t err = cudaGetLastError(); + if (cudaSuccess != err){ + fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); + exit(-1); + } +} + +// end of CUDA Helper Functions + + + + +// Device code +__global__ void PowerKernal1(float *A, float *B, int N) +{ + int tid = blockIdx.x*blockIdx.x + threadIdx.x; + float 
Value1=0; + float Value2=0; + __device__ __shared__ float I1[THREADS_PER_BLOCK]; + __device__ __shared__ float I2[THREADS_PER_BLOCK]; + + I1[tid%THREADS_PER_BLOCK] = A[tid]; + I2[tid%THREADS_PER_BLOCK] = B[tid]; + __syncthreads(); + + float sum = 0.0; + + if(tid < N){ + for(unsigned i=0; i>>(d_A3, N); + CUDA_SAFE_CALL( cudaThreadSynchronize() ); + printf("execution time = %f\n", cutGetTimerValue(my_timer)); + + PowerKernal1<<>>(d_A1, d_A2, N); + CUDA_SAFE_CALL( cudaThreadSynchronize() ); + printf("execution time = %f\n", cutGetTimerValue(my_timer)); + + //PowerKernalEmpty<<>>(d_A3, N); + CUDA_SAFE_CALL( cudaThreadSynchronize() ); + printf("execution time = %f\n", cutGetTimerValue(my_timer)); + getLastCudaError("kernel launch failure"); + + CUDA_SAFE_CALL( cudaThreadSynchronize() ); + CUT_SAFE_CALL(cutStopTimer(my_timer)); + TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); + printf("execution time = %f\n", cutGetTimerValue(my_timer)); + CUT_SAFE_CALL(cutDeleteTimer(my_timer)); + + #ifdef _DEBUG + checkCudaErrors( cudaDeviceSynchronize() ); + #endif + + // Copy result from device memory to host memory + + CleanupResources(); + + return 0; +} + +void CleanupResources(void) +{ + // Free device memory + if (d_A1) + cudaFree(d_A1); + if (d_A2) + cudaFree(d_A2); + if (d_A3) + cudaFree(d_A3); + // Free host memory + if (h_A1) + free(h_A1); + if (h_A2) + free(h_A2); + if (h_A3) + free(h_A3); +} + +// Allocates an array with random float entries. +void RandomInit_int(float* data, int n) +{ + for (int i = 0; i < n; ++i){ + srand((unsigned)time(0)); + data[i] = rand() / RAND_MAX; + } +} + +void RandomInit_fp(float* data, int n) +{ + for (int i = 0; i < n; ++i){ + data[i] = rand() / RAND_MAX; + } +} + + + + + + diff --git a/cuda_code/kernel_597.cu b/cuda_code/kernel_597.cu new file mode 100644 index 0000000000000000000000000000000000000000..6ff90fa6907689332f007a3584a9f720b1175e31 --- /dev/null +++ b/cuda_code/kernel_597.cu @@ -0,0 +1,941 @@ +#define _USE_MATH_DEFINES + +#include "kernel.h" + +__constant__ double SENSOR2; +__constant__ int NAHO; + +__device__ int selectedCounts[MACRO_NMAX]; +__device__ double tmpPhero_d[MACRO_MAX][MACRO_MAX]; +__device__ curandState rnd_state[MACRO_NMAX]; + +//Misc +__device__ bool isGotFood(Food& food); +__device__ double atomicAddDouble(double* address, double val); +__device__ enum Direction genDirRand(int id); +__device__ double genProbRand(int id); +__device__ int genAntNumRand(int id); +__device__ double degToRad(double a); +__device__ double dist(Cell a,Cell b); +__device__ double distCandP(Cell a,double x,double y); +__device__ bool isOppositeDir(enum Direction nestDir,enum Direction dir); +__device__ bool isOppositeDir(Cell& cell, enum Direction dir); +__device__ enum Direction selectNextDir(Cell& cell, enum Direction dir); +__device__ double hilFunc(double x,double alpha); + +//Initializer +__host__ void getDevicePtrs(); +__global__ void randInit(); +__global__ void antsInit(); +__global__ void cellsInit(); +__global__ void setNest(); +__global__ void setDistFromNest(); +__global__ void setNestDirs(); +__global__ void setNearestDirFromNest(); +__global__ void setFoodsDir(); + +//Calculation functions +__global__ void selectAnts(); +__global__ void naturalFoodDecrease(); +__global__ void evapolation(); +__global__ void chemotaxis(); +__global__ void diffusion(); +__global__ void pheroUpdate(); + + +__host__ void calculation(){ + naturalFoodDecrease<<<1,MACRO_NUM_FOODS>>>(); + evapolation<<>>(); + + //sortKeyInit<<<1,MACRO_NMAX>>>(); + 
//thrust::sort_by_key(sort_key_d_ptr, sort_key_d_ptr + MACRO_NMAX, ants_d_ptr); + + selectAnts<<<1,MACRO_NMAX>>>(); + chemotaxis<<<1,MACRO_NMAX>>>(); + //cudaMemcpyFromSymbol(cells,cells_d,MACRO_MAX*MACRO_MAX*sizeof(Cell),0); + //chemotaxis(); + //cudaMemcpyToSymbol(cells_d,cells,MACRO_MAX*MACRO_MAX*sizeof(Cell),0); + diffusion<<>>(); + pheroUpdate<<>>(); +} + +//Initialize + +__host__ void getDevicePtrs(){ + cudaGetSymbolAddress((void**)&sort_key_d_ptr_raw, sort_key_d); + sort_key_d_ptr = thrust::device_ptr(sort_key_d_ptr_raw); + + cudaGetSymbolAddress((void**)&seeds_d_ptr_raw, seeds_d); + seeds_d_ptr = thrust::device_ptr(seeds_d_ptr_raw); + + cudaGetSymbolAddress((void**)&ants_d_ptr_raw, ants_d); + ants_d_ptr = thrust::device_ptr(ants_d_ptr_raw); + + cudaGetSymbolAddress((void**)&cells_d_ptr_raw, cells_d); + cells_d_ptr = thrust::device_ptr(cells_d_ptr_raw); + + cudaGetSymbolAddress((void**)&foods_d_ptr_raw, foods_d); + foods_d_ptr = thrust::device_ptr(foods_d_ptr_raw); +} + +__global__ void randInit(){ + const int id = threadIdx.x + blockIdx.x * blockDim.x; + curand_init(seeds_d[id],0,0,&rnd_state[id]); +} + +__global__ void antsReset(){ + const int id = threadIdx.x + blockIdx.x * blockDim.x; + ants_d[id].status = FORAGE; + ants_d[id].i = MACRO_NEST_Y; + ants_d[id].j = MACRO_NEST_X; + ants_d[id].searchTime = 0; + ants_d[id].dir = genDirRand(id); + for (int i=0; istatus |= NEST_NEIGHBOUR_CELL; + } + } +} + +__global__ void setDistFromNest(){ + const int i = threadIdx.x; + const int j = blockIdx.x; + + Cell *nest_c; + nest_c = &cells_d[MACRO_NEST_Y][MACRO_NEST_X]; + double d = dist(cells_d[i][j],*nest_c); + cells_d[i][j].distFromNest = d; +} + +__device__ double dot(Cartesian a, Cartesian b) { + return (a.x * b.x + a.y * b.y); +} + +__device__ double cross(Cartesian a, Cartesian b) { + return (a.x * b.y - a.y * b.x); +} + +__global__ void setCriticalAngle() { + const int i = threadIdx.x; + const int j = blockIdx.x; + + + cells_d[i][j].criticalAngle = NONE; + + if( (cells_d[i][j].status&NEAR_NEST)!=NORMAL_CELL ){ + return; + } + + Cartesian c = cells_d[i][j].cart; + c.x = -c.x/cells_d[i][j].distFromNest; + c.y = -c.y/cells_d[i][j].distFromNest; + + for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ) { + Cartesian d; + + switch (dir) { + case UP: + d.x = 0; + d.y = 1; + break; + case UPRIGHT: + d.x = 1; + d.y = tan(M_PI/4.0); + break; + case LOWRIGHT: + d.x = 1; + d.y = -tan(M_PI/4.0); + break; + case LOW: + d.x = 0; + d.y = -1; + break; + case LOWLEFT: + d.x = -1; + d.y = -tan(M_PI/4.0); + break; + case UPLEFT: + d.x = -1; + d.y = tan(M_PI/4.0); + break; + default: + break; + } + + d.x = d.x/sqrt(dot(d,d)); + d.y = d.y/sqrt(dot(d,d)); + + double dotVal = dot(c,d); + if (dotVal<=0.3){ + cells_d[i][j].criticalAngle |= dir; + } + } +} + + +__global__ void setNearestDirFromNest(){ + const int i = threadIdx.x; + const int j = blockIdx.x; + Cell& c = cells_d[i][j]; + + for (int itr=0; itr<6; itr++){ + c.nearestDirFromNestList[itr] = NONE; + } + + enum Direction dir = UP; + for(int itr=0; dir<=UPLEFT; itr++) { + if ( c.criticalAngle&dir == NONE ){ + continue; + } + + c.nearestDirFromNestList[itr] = selectNextDir(c, dir); + dir<<=1; + } +} + +__global__ void setNestDirs(){ + const int i = threadIdx.x; + const int j = blockIdx.x; + + Cell *c; + + double d = cells_d[i][j].distFromNest; + double tmp; + for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){ + + c = getCell(cells_d,i,j,dir); + + + tmp=c->distFromNest; + if( fabs(tmp-d)i; + int k = nearCell->j; + + for(enum Direction dir = UP; 
dir<=UPLEFT; (dir<<=1) ){ + c = getCell(cells_d,j,k,dir); + if( distCandP(*c,x,y)i; + foods_d[i].j = nearCell->j; + + nearCell->foodNo = i; + nearCell->status |= FOOD_CELL; + + + for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){ + c = getCell(cells_d,foods_d[i].i,foods_d[i].j,dir); + c->foodNo = i; + c->status |= FOOD_NEIGHBOUR_CELL; + } + } + +} + + +//Calculation + +__global__ void selectAnts(){ + const int id = threadIdx.x + blockIdx.x * blockDim.x; + int rnd = genAntNumRand(id); + atomicAdd(&selectedCounts[rnd], 1); +} + +__global__ void sortKeyInit(){ + const int id = threadIdx.x + blockIdx.x * blockDim.x; + sort_key_d[id] = curand(&rnd_state[id]); + //printf("id:%d,%u\n",id,sort_key_d[id]); +} + +__global__ void diffusion(){ + const int i = blockIdx.x; + const int j = threadIdx.x; + + double tmp = 0.0; + for (enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){ + tmp += getCell(cells_d,i,j,dir)->phero; + } + tmpPhero_d[i][j] = cells_d[i][j].phero+MACRO_DIFFE*(tmp/6.0-cells_d[i][j].phero); +} + +__global__ void pheroUpdate(){ + const int i = blockIdx.x; + const int j = threadIdx.x; + + cells_d[i][j].phero = tmpPhero_d[i][j]; +} + +__global__ void naturalFoodDecrease(){ + const int id = threadIdx.x + blockIdx.x * blockDim.x; + foods_d[id].vol=foods_d[id].vol+MACRO_REC-foods_d[id].vol*(MACRO_REC/100.0); +} + +__global__ void evapolation(){ + const int i = blockIdx.x; + const int j = threadIdx.x; + cells_d[i][j].phero *= (1.0-MACRO_EVAPOLATION_CONST); +} + + +__global__ void chemotaxis(){ + const int id = threadIdx.x + blockIdx.x * blockDim.x; + Ant *ant = &(ants_d[id]); + + for(int dummy=0; dummysearchTime++; + + int i = ant->i; + int j = ant->j; + enum Direction dir = ant->dir; + enum Direction nestDir = cells_d[i][j].nestDir; + + double leftPhero, frontPhero, rightPhero; + + Cell *leftCell = getCell(cells_d,i,j,left(dir)); + Cell *frontCell = getCell(cells_d,i,j,dir); + Cell *rightCell = getCell(cells_d,i,j,right(dir)); + + if( + ant->searchTime>=MACRO_MAX_SEARCH_TIME + && ant->status!=EMERGENCY + ){ + ant->status = EMERGENCY; + } + + if(ant->status==GOHOME){ + atomicAddDouble(&(cells_d[i][j].phero),MACRO_EMI*MACRO_ENEST); + } + __threadfence(); + if(ant->status==RANDOM_SEARCH){ + leftPhero = 1.0; + frontPhero = 1.0; + rightPhero = 1.0; + } + else { + leftPhero = leftCell->phero; + frontPhero = frontCell->phero; + rightPhero = rightCell->phero; + } + + if( (ant->status==GOHOME || ant->status==EMERGENCY) && isOppositeDir(cells_d[i][j], dir)){ + + enum Direction nextDir = cells_d[i][j].nearestDirFromNestList[dirToNum(dir)]; + + if( nextDir == left(dir) ){ + ant->dir = left(dir); + frontCell = leftCell; + } + else if( nextDir == right(dir) ){ + ant->dir = right(dir); + frontCell = rightCell; + } + else{ + if(genProbRand(id)<=0.5){ + ant->dir = right(dir); + frontCell = rightCell; + } + else{ + ant->dir = left(dir); + frontCell = leftCell; + } + } + ant->i = frontCell->i; + ant->j = frontCell->j; + } + else{ + double s1,s2,s3,s12,t,tot,rand; + if(ant->ch == NORMAL_CH){ + t = MACRO_HIL_CONST; + } + else{ + t = SENSOR2*MACRO_HIL_CONST; + } + + s1=hilFunc(leftPhero,t); + s2=hilFunc(frontPhero,t); + s3=hilFunc(rightPhero,t); + /* + if(s1dir = left(dir); + ant->i = leftCell->i; + ant->j = leftCell->j; + } + else if(rand<=s12){ + ant->i = frontCell->i; + ant->j = frontCell->j; + } + else{ + ant->dir = right(dir); + ant->i = rightCell->i; + ant->j = rightCell->j; + } + + } + + if( (cells_d[ant->i][ant->j].status&NEAR_FOOD)!=NORMAL_CELL + && foods_d[ cells_d[ant->i][ant->j].foodNo ].vol>=0.1 + 
&& (ant->status != GOHOME && ant->status != EMERGENCY) ){ + //atomicAddDouble(&(foods_d[ cells_d[ant->i][ant->j].foodNo ].vol),-MACRO_UNIT); + //ant->status = GOHOME; + //ant->searchTime = 0; + int fNo = cells_d[ant->i][ant->j].foodNo; + + if(isGotFood(foods_d[fNo])){ + ant->status = GOHOME; + ant->searchTime = 0; + ant->_foodNo = fNo; + ant->dir = left(left(left(dir))); + } + } + __threadfence(); + + if( (cells_d[ant->i][ant->j].status&NEAR_NEST)!=NORMAL_CELL + && (ant->status == GOHOME || ant->status == EMERGENCY)){ + if(ant->status == GOHOME){ + ant->homing[ant->_foodNo]++; + //atomicAddDouble(&(cells_d[i][j].phero),MACRO_EMI*MACRO_ENEST); + } + ant->status = FORAGE; + ant->searchTime = 0; + ant->dir = genDirRand(id); + ant->i = MACRO_NEST_Y; + ant->j = MACRO_NEST_X; + } + } + selectedCounts[id] = 0; +} + + +//DataHandler +__device__ __host__ enum Direction operator<<(enum Direction d, int i){ + return static_cast(static_cast(d)<>(enum Direction d, int i){ + return static_cast(static_cast(d)>>i); +} + +__device__ __host__ enum Direction operator|(enum Direction d1, enum Direction d2){ + return static_cast(static_cast(d1)|static_cast(d2)); +} +__device__ __host__ enum Direction operator&(enum Direction d1, enum Direction d2){ + return static_cast(static_cast(d1)&static_cast(d2)); +} + +__device__ __host__ enum Direction& operator|=(enum Direction& d1, enum Direction d2){ + d1 = (d1 | d2); + return d1; +} + +__device__ __host__ enum Direction& operator&=(enum Direction& d1, enum Direction d2){ + d1 = (d1 & d2); + return d1; +} + +__device__ __host__ enum Direction& operator<<=(enum Direction& d1, int i){ + d1 = (d1 << i); + return d1; +} + +__device__ __host__ enum Direction& operator>>=(enum Direction& d1, int i){ + d1 = (d1 >> i); + return d1; +} + +__device__ __host__ bool operator<=(enum Direction d1, enum Direction d2){ + return (static_cast(d1) <= static_cast(d2)); +} + + + + + + + +__device__ __host__ enum CELLStatus operator<<(enum CELLStatus d, int i){ + return static_cast(static_cast(d)<>(enum CELLStatus d, int i){ + return static_cast(static_cast(d)>>i); +} + +__device__ __host__ enum CELLStatus operator|(enum CELLStatus d1, enum CELLStatus d2){ + return static_cast(static_cast(d1)|static_cast(d2)); +} +__device__ __host__ enum CELLStatus operator&(enum CELLStatus d1, enum CELLStatus d2){ + return static_cast(static_cast(d1)&static_cast(d2)); +} + +__device__ __host__ enum CELLStatus& operator|=(enum CELLStatus& d1, enum CELLStatus d2){ + d1 = (d1 | d2); + return d1; +} + +__device__ __host__ enum CELLStatus& operator&=(enum CELLStatus& d1, enum CELLStatus d2){ + d1 = (d1 & d2); + return d1; +} + + + +__device__ __host__ __forceinline__ enum Direction left(enum Direction dir){ + if(dir == UP){ + return UPLEFT; + } + else{ + return (dir >> 1)&ALL_DIR; + } +} + +__device__ __host__ __forceinline__ enum Direction right(enum Direction dir){ + if(dir == UPLEFT){ + return UP; + } + else{ + return (dir << 1)&ALL_DIR; + } +} + +__device__ __host__ __forceinline__ Cell* up(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){ + if( (cells[i][j].edge&UP)!=NONE ){ + return &cells[0][j]; + } + else{ + return &cells[i+1][j]; + } +} + +__device__ __host__ __forceinline__ Cell* upright(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){ + int ii,jj; + if( (cells[i][j].edge&UPRIGHT)!=NONE ){ + jj = 0; + if(abs(j-MACRO_CART_X_ZERO)%2==0){ + ii = i; + } + else{ + ii = i+1; + if(ii==MACRO_MAX){ + ii = 0; + } + } + } + else{ + jj = j+1; + if(abs(j-MACRO_CART_X_ZERO)%2==0){ + ii = i; + } + else{ + ii = i+1; + 
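+            // Column parity decides the row offset on this hexagonal grid:
+            // when abs(j - MACRO_CART_X_ZERO) is even the up-right neighbour
+            // stays on row i, otherwise it moves to row i + 1 (the other
+            // neighbour helpers below follow the same even/odd convention).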
} + } + return &cells[ii][jj]; +} + +__device__ __host__ __forceinline__ Cell* lowright(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){ + + int ii,jj; + + if( (cells[i][j].edge&LOWRIGHT)!=NONE ){ + jj = 0; + if(abs(j-MACRO_CART_X_ZERO)%2==0){ + ii = i-1; + if(ii<0){ + ii=MACRO_MAX-1; + } + } + else{ + ii = i; + } + } + else{ + jj = j+1; + if(abs(j-MACRO_CART_X_ZERO)%2==0){ + ii = i-1; + } + else{ + ii = i; + } + } + return &cells[ii][jj]; +} + +__device__ __host__ __forceinline__ Cell* low(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){ + if( (cells[i][j].edge&LOW)!=NONE ){ + return &cells[MACRO_MAX-1][j]; + } + else{ + return &cells[i-1][j]; + } +} + +__device__ __host__ __forceinline__ Cell* lowleft(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){ + int ii,jj; + + if( (cells[i][j].edge&LOWLEFT)!=NONE ){ + jj = MACRO_MAX-1; + if(abs(j-MACRO_CART_X_ZERO)%2==0){ + ii = i-1; + if(ii<0){ + ii = MACRO_MAX-1; + } + } + else{ + ii = i; + } + } + else{ + jj = j-1; + if(abs(j-MACRO_CART_X_ZERO)%2==0){ + ii = i-1; + } + else{ + ii=i; + } + } + return &cells[ii][jj]; +} + +__device__ __host__ __forceinline__ Cell* upleft(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){ + int ii,jj; + if( (cells[i][j].edge&UPLEFT)!=NONE ){ + jj = MACRO_MAX-1; + + if(abs(j-MACRO_CART_X_ZERO)%2==0){ + ii = i; + } + else{ + ii= i+1; + if(ii==MACRO_MAX){ + ii=0; + } + } + } + else{ + jj = j-1; + if(abs(j-MACRO_CART_X_ZERO)%2==0){ + ii = i; + } + else{ + ii = i+1; + } + } + return &cells[ii][jj]; +} + +__device__ __host__ Cell* getCell(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j, enum Direction dir){ + + switch (dir){ + case UP: + return up(cells,i,j); + case UPRIGHT: + return upright(cells,i,j); + case LOWRIGHT: + return lowright(cells,i,j); + case LOW: + return low(cells,i,j); + case LOWLEFT: + return lowleft(cells,i,j); + case UPLEFT: + return upleft(cells,i,j); + default: + return NULL; + } +} + +__device__ __host__ int dirToNum(enum Direction dir){ + switch (dir){ + case UP: + return 0; + case UPRIGHT: + return 1; + case LOWRIGHT: + return 2; + case LOW: + return 3; + case LOWLEFT: + return 4; + case UPLEFT: + return 5; + default: + return -1; + } +} + + + +//Misc +__device__ __forceinline__ bool isGotFood(Food& food){ + unsigned long long int* address_as_ull = + (unsigned long long int*)(&(food.vol)); + unsigned long long int old = *address_as_ull, assumed; + + do { + assumed = old; + if(__longlong_as_double(assumed)<0.1){ + return false; + } + old = atomicCAS(address_as_ull, assumed,__double_as_longlong(-MACRO_UNIT + __longlong_as_double(assumed))); + } while (assumed != old); + return true; +} + +__device__ __forceinline__ double atomicAddDouble(double* address, double val){ + unsigned long long int* address_as_ull = + (unsigned long long int*)address; + unsigned long long int old = *address_as_ull, assumed; + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, + __double_as_longlong(val + + __longlong_as_double(assumed))); + } while (assumed != old); + return __longlong_as_double(old); +} + +__device__ __forceinline__ enum Direction genDirRand(int id){ + return static_cast(1 << (curand(&rnd_state[id])%6)); +} + +__device__ __forceinline__ double genProbRand(int id){ + return curand_uniform_double(&rnd_state[id]); +} + +__device__ __forceinline__ int genAntNumRand(int id){ + return curand(&rnd_state[id])%MACRO_NMAX; +} + +__device__ __forceinline__ double degToRad(double a) { + return a * M_PI / 180.0; +} + +__device__ __forceinline__ double dist(Cell a,Cell b){ + return sqrt( (a.cart.x - 
b.cart.x)*(a.cart.x - b.cart.x) + + (a.cart.y - b.cart.y)*(a.cart.y - b.cart.y) ); +} + +__device__ __forceinline__ double distCandP(Cell a,double x,double y){ + return sqrt( (a.cart.x - x)*(a.cart.x - x) + + (a.cart.y - y)*(a.cart.y - y) ); +} + +__device__ __forceinline__ bool isOppositeDir(enum Direction nestDir,enum Direction dir){ + //If theta = 60 deg., this is OK. + if( (dir&nestDir) !=NONE + || (left(dir)&nestDir) !=NONE + || (right(dir)&nestDir) !=NONE){ + return false; + } + else{ + return true; + } +} + +__device__ __forceinline__ bool isOppositeDir(Cell& cell, enum Direction dir){ + if ( (cell.criticalAngle & dir)==dir ){ + return true; + } + else{ + return false; + } +} + +__device__ __forceinline__ enum Direction selectNextDir(Cell& cell, enum Direction dir){ + int rightCount = 0; + int leftCount = 0; + for (enum Direction currentDir=right(dir); currentDir!=dir; currentDir=right(currentDir)){ + if( (cell.criticalAngle & currentDir)!=currentDir ){ + break; + } + rightCount++; + } + + for (enum Direction currentDir=left(dir); currentDir!=dir; currentDir=left(currentDir)){ + if( (cell.criticalAngle & currentDir)!=currentDir ){ + break; + } + leftCount++; + } + + if ( rightCount < leftCount ){ + return right(dir); + } + else if ( rightCount > leftCount ){ + return left(dir); + } + else{ + return NONE; + } +} + +__device__ __forceinline__ double hilFunc(double x,double alpha){ + return pow(alpha*x+0.05,10); +} + +__host__ void initialize(){ + getDevicePtrs(); + + //antsInit<<>>(); + cellsInit<<>>(); + + setEdges<<>>(); + setNest<<>>(); + setDistFromNest<<>>(); + + setCriticalAngle<<>>(); + setNearestDirFromNest<<>>(); + + setNestDirs<<>>(); + setFoodsDir<<>>(); +} + +__host__ void reset(double sensor,int naho,unsigned long long int step){ + cudaMemcpyToSymbol(SENSOR2,&sensor,sizeof(double),0); + cudaMemcpyToSymbol(NAHO,&naho,sizeof(int),0); + + //initialize(); + //antsInit<<>>(); + //cellsInit<<>>(); + + //setEdges<<>>(); + //setNest<<>>(); + //setDistFromNest<<>>(); + + //setNestDirs<<>>(); + //setFoodsDir<<>>(); + + srand(MACRO_RND_SEED+step); + + thrust::host_vector seeds_vec_h(MACRO_NMAX); + std::generate(seeds_vec_h.begin(), seeds_vec_h.end(), rand); + thrust::copy(seeds_vec_h.begin(), seeds_vec_h.end(), seeds_d_ptr); + randInit<<>>(); + + antsReset<<>>(); + cellsReset<<>>(); + foodsReset<<>>(); +} diff --git a/cuda_code/kernel_722.cu b/cuda_code/kernel_722.cu new file mode 100644 index 0000000000000000000000000000000000000000..3ba78326223f4f4ef1a44f3a312dfb64172b0fb6 --- /dev/null +++ b/cuda_code/kernel_722.cu @@ -0,0 +1,74 @@ +#include "mex.h" +#include "stdio.h" +#include "string.h" +#include +#include +#include +#include "instances.h" +#include "MRFEnergy.h" +#include "cuda_runtime.h" +#include "device_launch_parameters.h" +#include "gpu/mxGPUArray.h" + +#include "cuda.h" + + +void mexFunction(int nlhs,mxArray *plhs[],int nrhs,const mxArray *prhs[]){ + bool IsUseGPU; + if(nrhs==5) + IsUseGPU=bool(*((double*)mxGetPr(prhs[4]))); + else if(nrhs==4) + IsUseGPU=true; + else + mexErrMsgTxt("\nErrors in input.\n"); + MRFEnergy* mrf; + MRFEnergy::NodeId* nodes; + MRFEnergy::Options options; + TypeGeneral::REAL energy, lowerBound; + + double* EdgeTerminals=((double*)mxGetPr(prhs[0])); + int EdgeNum,LabelNum,nodeNum; + EdgeNum=mxGetN(prhs[0]); + TypeGeneral::REAL* f1=((double*)mxGetPr(prhs[1])); + LabelNum=mxGetM(prhs[1]); + nodeNum=mxGetN(prhs[1]); + TypeGeneral::REAL* f2=((double*)mxGetPr(prhs[2])); + TypeGeneral::REAL* op=((double*)mxGetPr(prhs[3])); + + int i; + double tmp=0; + 
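+    // The remainder of this gateway follows the MRFEnergy/TRW-S pattern visible
+    // below: construct the energy object, add one node per column of f1 (the
+    // unary terms), add one general edge per column of EdgeTerminals with the
+    // pairwise costs from f2, optionally let the library reorder the nodes,
+    // then run Minimize_TRW_S with the eps/iteration limits packed in op and
+    // copy the per-node labels back into the output matrix.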
for(i=0;i(TypeGeneral::GlobalSize()); + nodes=new MRFEnergy::NodeId[nodeNum]; + mexPrintf("add nodes\n"); + + for(i=0;iAddNode(TypeGeneral::LocalSize(LabelNum), TypeGeneral::NodeData(&f1[LabelNum*i])); + } + mexPrintf("add edges\n"); + for(i=0;iAddEdge(nodes[int(EdgeTerminals[i*2])],nodes[int(EdgeTerminals[i*2+1])],TypeGeneral::EdgeData(TypeGeneral::GENERAL,&f2[LabelNum*LabelNum*i])); + } + mexPrintf("set ordering\n"); + // Function below is optional - it may help if, for example, nodes are added in a random order + mrf->SetAutomaticOrdering(); + /////////////////////// TRW-S algorithm ////////////////////// + options.m_iterMax=int(op[1]); // maximum number of iterations + options.m_eps=op[0]; + mexPrintf("energy minimization\n"); + mrf->Minimize_TRW_S(IsUseGPU,options,lowerBound,energy); + mexPrintf("trws done\n"); + // read solution + double* x; + plhs[0]=mxCreateDoubleMatrix(nodeNum,1,mxREAL); + x=mxGetPr(plhs[0]); + for(i=0;iGetSolution(nodes[i]); + } + delete nodes; + delete mrf; +} diff --git a/cuda_code/kernel_872.cu b/cuda_code/kernel_872.cu new file mode 100644 index 0000000000000000000000000000000000000000..faeab6694c7d10b0d58b289f1ed5155198f4f50f --- /dev/null +++ b/cuda_code/kernel_872.cu @@ -0,0 +1,33 @@ + +#include "cuda_runtime.h" +#include "device_launch_parameters.h" + +#include + +void addArrays(int *a, int *b, int *c, int count) +{ + for (int i = 0; i < count; ++i) + c[i] = a[i] + b[i]; +} + +int main() +{ + // Constante + const int count = 5; + + // Arrays + int a[] = {1, 2, 3, 4, 5}; + int b[] = {100, 200, 300, 400, 500}; + + // Arrays para o resultado + int c[count]; + + // Somar os arrays + addArrays(a, b, c, count); + + // Imprime os itens do array c + for (int i = 0; i < count; ++i) + printf("%d ", c[i]); + + getchar(); +} diff --git a/cuda_code/kernel_97.cu b/cuda_code/kernel_97.cu new file mode 100644 index 0000000000000000000000000000000000000000..938c6d2ec05f58b52f5de581ca0397e4549a35d6 --- /dev/null +++ b/cuda_code/kernel_97.cu @@ -0,0 +1,302 @@ +#include +#include +#include +#include "kernel.hpp" +#include "Mesh.hpp" + +static std::string to_string(cudaError_t error) { + char buf[256]; + snprintf(buf, 256, "%d", error); + return buf; +} + + +class CudaError : public std::runtime_error { +public: + CudaError(std::string source, cudaError_t errorCode) : + std::runtime_error( source + ": code" + to_string(errorCode) + ": " + cudaGetErrorString(errorCode) ) { + } +}; + +#define checkCudaErrors( val ) checkError( ( val ), #val, __FILE__, __LINE__ ) +void checkError(cudaError_t result, const char* calledFunc, const char* file, int line) { + if (result) { + std::ostringstream ss; + ss << file << ": " << line << " {" << calledFunc << '}'; + + throw CudaError(ss.str(), result); + } +} + +__global__ void meshUpdateKernel(float* mesh_in, float* mesh_out, size_t pitch, unsigned size) { + const int x = blockIdx.x * blockDim.x + threadIdx.x; + const int y = blockIdx.y * blockDim.y + threadIdx.y; + + if ( x > 0 && x < size - 1 && y > 0 && y < size - 1) { + const float t_left = *getElem(mesh_in, pitch, y, x - 1); + const float t_right = *getElem(mesh_in, pitch, y, x + 1); + const float t_top = *getElem(mesh_in, pitch, y - 1, x); + const float t_bottom = *getElem(mesh_in, pitch, y + 1, x); + + const float newTemperature = (t_left + t_right + t_top + t_bottom) / 4; + + *getElem(mesh_out, pitch, y, x) = newTemperature; + } +} + + +// optimal block size is 128,1,1 +__global__ void meshUpdateKernel_opt1(float *mesh_in, float *mesh_out, size_t pitch, unsigned size) { + const int x 
= blockIdx.x * blockDim.x + threadIdx.x; + const int y = blockIdx.y * blockDim.y + threadIdx.y; + + //TODO: switch to dynamic shared memory + __shared__ float shared[3][128 + 2]; +/* + if (threadIdx.x == 0) { + if (x > 0) { + shared[1][0] = *getElem(mesh_in, pitch, y, x - 1); + } + else { + shared[1][1] = *getElem(mesh_in, pitch, y, x); + } + } + + if (threadIdx.x == blockDim.x - 1) { + if (x < size - 1) { + shared[1][blockDim.x + 1] = *getElem(mesh_in, pitch, y, x + 1); + } + else { + const auto pos = size - blockIdx.x * blockDim.x; + shared[1][pos] = *getElem(mesh_in, pitch, y, blockIdx.x * blockDim.x + pos - 1); + } + }*/ + + if (x > 0 && x < size - 1 && y > 0 && y < size - 1) { +// shared[1][threadIdx.x + 1 - 1] = *getElem(mesh_in, pitch, y, x - 1); +// shared[1][threadIdx.x + 1 + 1] = *getElem(mesh_in, pitch, y, x + 1); + shared[0][threadIdx.x + 1] = *getElem(mesh_in, pitch, y-1, x); + shared[1][threadIdx.x + 1] = *getElem(mesh_in, pitch, y, x); + shared[2][threadIdx.x + 1] = *getElem(mesh_in, pitch, y+1, x); + + __syncthreads(); + + const float t_l = shared[1][threadIdx.x + 1 - 1]; + const float t_r = shared[1][threadIdx.x + 1 + 1]; + const float t_t = shared[0][threadIdx.x + 1]; + const float t_b = shared[2][threadIdx.x + 1]; + + const float newTemperature = (t_l + t_r + t_b + t_t) / 4; + +// printf("[%d,%d]: {%f;%f;%f;%f}: %f\n", x, y, t_l, t_r, t_t, t_b, newTemperature); + + *getElem(mesh_out, pitch, y, x) = newTemperature; + } +} + + +void cuda() { + size_t pitch; + float *temperature = allocMeshLinear(pitch); + size_t d_pitch; + float *d_temperature_in, *d_temperature_out; + + try { + checkCudaErrors(cudaMallocPitch(&d_temperature_in, &d_pitch, MESH_SIZE_EXTENDED * sizeof(float), MESH_SIZE_EXTENDED)); + checkCudaErrors(cudaMallocPitch(&d_temperature_out, &d_pitch, MESH_SIZE_EXTENDED * sizeof(float), MESH_SIZE_EXTENDED)); + } + catch (CudaError& err) { + std::cout << err.what() << std::endl; + return; + } + + try { + SimpleTimer t( "CUDA implementation" ); + dim3 blockSize(BLOCK_DIM_X, BLOCK_DIM_Y); + unsigned computedGridDimX = (MESH_SIZE_EXTENDED + blockSize.x - 1) / blockSize.x; + unsigned computedGridDimY = (MESH_SIZE_EXTENDED + blockSize.y - 1) / blockSize.y; + dim3 gridSize(computedGridDimX, computedGridDimY); + + checkCudaErrors(cudaMemcpy2D(d_temperature_in, d_pitch, temperature, pitch, MESH_SIZE_EXTENDED * sizeof(float), MESH_SIZE_EXTENDED, cudaMemcpyHostToDevice)); + checkCudaErrors(cudaMemcpy2D(d_temperature_out, d_pitch, d_temperature_in, d_pitch, MESH_SIZE_EXTENDED * sizeof(float), MESH_SIZE_EXTENDED, cudaMemcpyDeviceToDevice)); + + for (int step = 0; step < STEPS; ++step) { + meshUpdateKernel << < gridSize, blockSize >> > (d_temperature_in, d_temperature_out, d_pitch, MESH_SIZE_EXTENDED); + checkCudaErrors(cudaGetLastError()); // Check for any errors launching the kernel + checkCudaErrors(cudaDeviceSynchronize());// cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. 
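+            // Ping-pong the two device buffers: the mesh written by this step
+            // becomes the input of the next Jacobi step, so no device-to-device
+            // copy is needed between iterations.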
+ std::swap(d_temperature_in, d_temperature_out); + } + + checkCudaErrors(cudaMemcpy2D(temperature, pitch, d_temperature_in, d_pitch, MESH_SIZE_EXTENDED * sizeof(float), MESH_SIZE_EXTENDED, cudaMemcpyDeviceToHost)); + } + catch (CudaError& err) { + std::cout << err.what() << std::endl; + } + + validateResults(temperature, pitch); + + delete[] temperature; + try { + checkCudaErrors(cudaFree(d_temperature_in)); + checkCudaErrors(cudaFree(d_temperature_out)); + } + catch (CudaError& err) { + std::cout << err.what() << std::endl; + } + + // cudaDeviceReset must be called before exiting in order for profiling and + // tracing tools such as Nsight and Visual Profiler to show complete traces. + checkCudaErrors(cudaDeviceReset()); +} + +// +// +// Hybrid implementation +// +// + +__global__ void meshUpdateKernel_hybrid(float* mesh_in, float* mesh_out, size_t pitch, unsigned size_x, unsigned size_y) { + const int x = blockIdx.x * blockDim.x + threadIdx.x; + const int y = blockIdx.y * blockDim.y + threadIdx.y; + + if (x > 0 && x < size_x - 1 && y > 0 && y < size_y - 1) { + const float t_left = *getElem(mesh_in, pitch, y, x - 1); + const float t_right = *getElem(mesh_in, pitch, y, x + 1); + const float t_top = *getElem(mesh_in, pitch, y - 1, x); + const float t_bottom = *getElem(mesh_in, pitch, y + 1, x); + + const float newTemperature = (t_left + t_right + t_top + t_bottom) / 4; + + *getElem(mesh_out, pitch, y, x) = newTemperature; + + // printf("[%d,%d]: {%f;%f;%f;%f}: %f\n", x, y, t_left, t_right, t_top, t_bottom, newTemperature); + } +} + + +HybridCuda::HybridCuda(size_t divisionPoint, size_t pitch, int deviceId) : +DIVISION_POINT(divisionPoint), +pitch(pitch), +deviceId(deviceId) { + part = (deviceId == 0 ? BOTTOM : TOP); + + if (part == BOTTOM) { + allocNumRows = MESH_SIZE_EXTENDED - (DIVISION_POINT - 1); + } + else { + allocNumRows = DIVISION_POINT + 1; + } + + setDevice(); + + try { + checkCudaErrors(cudaMallocPitch(&d_temperature_in, &d_pitch, MESH_SIZE_EXTENDED * sizeof(float), allocNumRows)); + checkCudaErrors(cudaMallocPitch(&d_temperature_out, &d_pitch, MESH_SIZE_EXTENDED * sizeof(float), allocNumRows)); + } + catch (CudaError& err) { + std::cout << err.what() << std::endl; + return; + } +} + +HybridCuda::~HybridCuda() { + setDevice(); + + try { + checkCudaErrors(cudaFree(d_temperature_in)); + checkCudaErrors(cudaFree(d_temperature_out)); + } + catch (CudaError& err) { + std::cout << err.what() << std::endl; + } +} + +//TODO: overlap computation and communication - launch separate streams +void HybridCuda::launchCompute(float* temperature_in) { + setDevice(); + + try { + dim3 blockSize(BLOCK_DIM_X, BLOCK_DIM_Y); + unsigned computedGridDimX = (MESH_SIZE_EXTENDED + blockSize.x - 1) / blockSize.x; + unsigned computedGridDimY = (allocNumRows + blockSize.y - 1) / blockSize.y; + dim3 gridSize(computedGridDimX, computedGridDimY); + + float* srcPtr; + if (part == BOTTOM) { + srcPtr = reinterpret_cast(reinterpret_cast(temperature_in) + (DIVISION_POINT - 1) * pitch); + } + else { + srcPtr = temperature_in; + } + + checkCudaErrors(cudaMemcpy2D(d_temperature_in, d_pitch, srcPtr, pitch, MESH_SIZE_EXTENDED * sizeof(float), 1, cudaMemcpyHostToDevice)); + + meshUpdateKernel_hybrid<<< gridSize, blockSize >>> (d_temperature_in, d_temperature_out, d_pitch, MESH_SIZE_EXTENDED, allocNumRows); + // checkCudaErrors(cudaGetLastError()); // Check for any errors launching the kernel + } + catch (CudaError& err) { + std::cout << err.what() << std::endl; + } +} + +void HybridCuda::finalizeCompute(float* 
temperature_out) { + setDevice(); + + try { + checkCudaErrors(cudaDeviceSynchronize());// cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. + + float *srcPtr, *dstPtr; + if (part == BOTTOM) { + dstPtr = reinterpret_cast(reinterpret_cast(temperature_out) + DIVISION_POINT * pitch); + srcPtr = reinterpret_cast(reinterpret_cast(d_temperature_out) + d_pitch); + } + else { + dstPtr = reinterpret_cast(reinterpret_cast(temperature_out) + (DIVISION_POINT + 1) * pitch); + srcPtr = reinterpret_cast(reinterpret_cast(d_temperature_out) + (allocNumRows - 1) * pitch); //TODO: verify + } + + + checkCudaErrors(cudaMemcpy2D(dstPtr, pitch, srcPtr, d_pitch, MESH_SIZE_EXTENDED * sizeof(float), 1, cudaMemcpyDeviceToHost)); + + std::swap(d_temperature_in, d_temperature_out); + } + catch (CudaError& err) { + std::cout << err.what() << std::endl; + } +} + +void HybridCuda::copyInitial(float* temperature_in) { + setDevice(); + + float* srcPtr; + if (part == BOTTOM) { + srcPtr = reinterpret_cast(reinterpret_cast(temperature_in) + (DIVISION_POINT - 1) * pitch); + } + else { + srcPtr = temperature_in; + } + checkCudaErrors(cudaMemcpy2D(d_temperature_in, d_pitch, srcPtr, pitch, MESH_SIZE_EXTENDED * sizeof(float), allocNumRows, cudaMemcpyHostToDevice)); + + //TODO: remove this copy - copy only last row + checkCudaErrors(cudaMemcpy2D(d_temperature_out, d_pitch, d_temperature_in, d_pitch, MESH_SIZE_EXTENDED * sizeof(float), allocNumRows, cudaMemcpyDeviceToDevice)); +} + +void HybridCuda::copyFinal(float* temperature_out) { + setDevice(); + + float *srcPtr, *dstPtr; + if (part == BOTTOM) { + dstPtr = reinterpret_cast(reinterpret_cast(temperature_out) + DIVISION_POINT * pitch); + srcPtr = reinterpret_cast(reinterpret_cast(d_temperature_in) + d_pitch); + } + else { + dstPtr = temperature_out; + srcPtr = d_temperature_in; + } + + checkCudaErrors(cudaMemcpy2D(dstPtr, pitch, srcPtr, d_pitch, MESH_SIZE_EXTENDED * sizeof(float), allocNumRows - 1, cudaMemcpyDeviceToHost)); +} + +void HybridCuda::setDevice() { + cudaSetDevice(deviceId); +} diff --git a/cuda_code/kernel_980.cu b/cuda_code/kernel_980.cu new file mode 100644 index 0000000000000000000000000000000000000000..9a2f05bfe697f72f843638b3c66d6777bed1b477 --- /dev/null +++ b/cuda_code/kernel_980.cu @@ -0,0 +1,308 @@ +#include "cuda/cudaarray.h" +#include "cuda/cudatypes.h" +#include "rendering/framebuffer.h" +#include "math/algorithms.h" +#include "math/matrix4x4.h" +#include "math/vector4.h" +#include "rendering/ray.h" +#include "rendering/randomnumbergenerator.h" + +namespace ToyPT +{ +namespace Rendering +{ +namespace Cuda +{ + +__device__ bool intersect(const Rendering::Ray &ray, CudaArray::const_pointer *data, float &t, float &u, float &v) +{ + bool returnValue; + float determinant, inverseDeterminant; + + Math::Vector4 v0o, pVector, qVector; + + // Sub + v0o = ray.origin - (*data)->v0; + + // Cross + pVector = ray.direction.crossProduct((*data)->e02); + qVector = v0o.crossProduct((*data)->e01); + + determinant = (*data)->e01.dotProduct(pVector); + inverseDeterminant = 1.0f / determinant; + + u = v0o.dotProduct(pVector) * inverseDeterminant; + v = ray.direction.dotProduct(qVector) * inverseDeterminant; + t = (*data)->e02.dotProduct(qVector) * inverseDeterminant; + (*data)++; + + // Conditions + bool c0, c1, c2, c3, c4, c5; + c0 = determinant < Math::epsilon; + c1 = u < 0.0f; + c2 = u > 1.0f; + c3 = v < 0.0f; + c4 = (u + v) > 1.0f; + c5 = t > Math::epsilon; + returnValue = (c0 | c1 | c2 | c3 | c4) & c5; + + return 
returnValue; +} + +__device__ float traceRay(const Rendering::Ray &ray, const Cuda::Types::Scene &scene, Cuda::Types::IntersectionInfo &intersection) +{ + float returnValue = 0.0f; + + const Cuda::Types::Triangle *dataPointer = scene.triangleBuffer; + const Cuda::Types::Mesh *nearestMesh = nullptr; + uint32_t nearestTriangle = 0xFFFFFFFF; + float newDistance = 0.0f; + float distance = 1E7f; +// float u = 0; +// float v = 0; + newDistance = distance; + + // Intersect triangles + for (uint32_t triangleIndex = 0; triangleIndex < scene.triangleCount; triangleIndex++) + { + float t, u, v; + bool intersected = intersect(ray, &dataPointer, t, u, v); + + if ((newDistance < distance) & bool(intersected)) + { + nearestMesh = &scene.meshBuffer[scene.triangleBuffer[triangleIndex].meshIndex]; + distance = newDistance; + nearestTriangle = triangleIndex; + } + } + + returnValue = distance; + intersection.mesh = nearestMesh; + intersection.triangleOffset = nearestTriangle; +// intersection.u = u; +// intersection.v = v; + + return returnValue; +} + +__device__ void createCoordinateSystem(const Math::Vector4 &normal, Math::Vector4 &tangentNormal, Math::Vector4 &binormal) +{ + const Math::Vector4 a = Math::Vector4{normal.z(), 0.0f, -normal.x()}; + const Math::Vector4 b = Math::Vector4{0.0f, -normal.z(), normal.y()}; + float t = fabsf(normal.x()) > fabsf(normal.y()); + + tangentNormal = Math::lerp(a, b, t).normalize(); + + binormal = normal.crossProduct(tangentNormal); +} + +__device__ Math::Vector4 createUniformHemisphere(const float r1, const float r2) +{ + float sinTheta = sqrtf(1.0f - r1 * r1); + float phi = 2.0f * float(M_PI) * r2; + float x = sinTheta * cosf(phi); + float z = sinTheta * sinf(phi); + return {x, r1, z}; +} + +__device__ Math::Vector4 randomDirection(const Math::Vector4 &normal, RandomNumberGenerator &rng, float &cosinusTheta) +{ + float ratio; + + Math::Vector4 Nt; + Math::Vector4 Nb; + + createCoordinateSystem(normal, Nt, Nb); + + // Generate hemisphere + constexpr float scalingFactor = 1.0f / float(0xFFFFFFFF); + cosinusTheta = std::pow(rng.get(scalingFactor), 0.5f); + ratio = rng.get(scalingFactor); + + Math::Vector4 sample = createUniformHemisphere(cosinusTheta, ratio); + + Math::Matrix4x4 localToWorldMatrix{ + {Nb.x(), normal.x(), Nt.x()}, + {Nb.y(), normal.y(), Nt.y()}, + {Nb.z(), normal.z(), Nt.z()} + }; + + return (localToWorldMatrix * sample).normalize(); +} + +__device__ Math::Vector4 interpolateNormal(const Math::Vector4 &intersectionPoint, const Types::Triangle *data) +{ + Math::Vector4 returnValue, p, n0, n1, n2, n01, n02, v0, v1, v2, e01, e02, v12, v0p, v1p, v2p, vab, v2ab; + + v0 = data->v0; + e01 = data->e01; + e02 = data->e02; + v1 = e01 + v0; + v2 = e02 + v0; + n0 = data->n0; + n1 = data->n1; + n2 = data->n2; + + data++; + + p = intersectionPoint; + v12 = v2 - v1; + v0p = p - v0; + v1p = p - v1; + v2p = p - v2; + + float a, denominator; + + denominator = (e01.x() * v2p.y() - v2p.x() * e01.y()) + Math::epsilon; + a = (-(v0.x() * v2p.y() - v2p.x() * v0.y() + v2p.x() * v2.y() - v2.x() * v2p.y())) / denominator; +// b = (e01.x() * v0.y() - e01.x() * v2.y() - v0.x() * e01.y() + v2.x() * e01.y()) / denominator; + + vab = v0 + a * e01; + + n01 = Math::lerp(n1, n0, a).normalize(); + v2ab = vab - v2; + + returnValue = Math::lerp(n01, n2, (v2p.magnitude() / v2ab.magnitude())).normalize(); + + return returnValue; +} + +__device__ Ray createCameraRay(const uint pixelX, const uint pixelY, const uint width, const uint height, const float fieldOfView, RandomNumberGenerator &rng) +{ + 
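+	// Pinhole model: the image plane sits at z = -width / (2 * tan(fov / 2))
+	// so the horizontal field of view spans `fieldOfView` degrees, and each
+	// sample is jittered by up to half a pixel in x and y so that repeated
+	// samples anti-alias the image.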
float fovRadians = fieldOfView / 180.0f * float(M_PI); + float zCoordinate = -(width/(2.0f * tanf(fovRadians / 2.0f))); + + float offsetX, offsetY; + constexpr float scalingFactor = 1.0f / float(0xFFFFFFFF); + offsetX = rng.get(scalingFactor) - 0.5f; + offsetY = rng.get(scalingFactor) - 0.5f; + + float x = (pixelX + offsetX + 0.5f) - (width / 2.0f); + float y = -(pixelY + offsetY + 0.5f) + (height / 2.0f); + + Math::Vector4 direction{x, y, zCoordinate}; + direction.normalize(); + + return Ray{Math::Vector4{}, direction}; +} + +__global__ void castRay(const Cuda::Types::Tile tile, RandomNumberGenerator *rngs, const uint32_t width, const uint32_t height, const float fieldOfView, + const Cuda::Types::Scene scene, const size_t maxBounces, const Math::Vector4 skyColor, Math::Vector4 *pixels) +{ + uint pixelX = threadIdx.x * blockIdx.x * blockDim.x; + uint pixelY = threadIdx.y * blockIdx.y * blockDim.y; + + // Return early if pixel is outside tile + if ((pixelX > tile.x1) | (pixelY > tile.y1)) + { + return; + } + + uint pixelIndex = pixelX + pixelY * width; + + RandomNumberGenerator &rng = rngs[pixelIndex]; + Ray ray = createCameraRay(pixelX, pixelY, width, height, fieldOfView, rng); + + Math::Vector4 returnValue = {0.0f, 0.0f, 0.0f}; + Math::Vector4 mask = {1.0f, 1.0f, 1.0f}; + + Math::Vector4 currentDirection = ray.direction; + Math::Vector4 currentOrigin = ray.origin; + + float cosinusTheta; + + for (size_t currentBounce = 0; currentBounce < maxBounces; currentBounce++) + { + Math::Vector4 intersectionPoint; + Types::IntersectionInfo objectIntersection; + Math::Vector4 normal; + float objectDistance = traceRay({currentOrigin, currentDirection}, scene, objectIntersection); + + intersectionPoint = currentOrigin + (objectDistance * currentDirection); + + if (objectIntersection.mesh != nullptr) + { + Material objectMaterial = scene.materialBuffer[objectIntersection.mesh->materialOffset]; + Math::Vector4 objectColor = objectMaterial.color; + + // Calculate normal + const Cuda::Types::Triangle *dataPointer = scene.triangleBuffer + objectIntersection.triangleOffset; + normal = interpolateNormal(intersectionPoint, dataPointer); + + // Calculate new origin and offset + currentOrigin = intersectionPoint + (Math::epsilon * normal); + + // Global illumination + Math::Vector4 newDirection, reflectedDirection, diffuseDirection; + Math::Vector4 diffuse, specular; + + diffuseDirection = randomDirection(normal, rng, cosinusTheta); + reflectedDirection = (currentDirection - 2.0f * currentDirection.dotProduct(normal) * normal).normalize(); + + newDirection = Math::lerp(diffuseDirection, reflectedDirection, objectMaterial.roughness); + + specular = Math::Vector4{1.0f, 1.0f, 1.0f} * (1.0f - objectMaterial.roughness); + diffuse = Math::Vector4{1.0f, 1.0f, 1.0f} - specular; + + currentDirection = newDirection; + + returnValue += objectMaterial.emittance * mask; + mask *= (2.0f * objectColor * diffuse + specular) * cosinusTheta; + } + else + { + returnValue += skyColor * mask; + break; + } + } + +// pixels[pixelIndex] = returnValue; + Math::Vector4 &pixel = pixels[pixelIndex]; + atomicAdd(&pixel.data()[0], returnValue.data()[0]); + atomicAdd(&pixel.data()[1], returnValue.data()[1]); + atomicAdd(&pixel.data()[2], returnValue.data()[2]); +} + +__host__ void render(FrameBuffer &frameBuffer, RandomNumberGenerator rng, + const CudaArray &triangleBuffer, + const CudaArray &meshBuffer, + const CudaArray &materialBuffer, + const uint32_t samples, + const uint32_t maxBounces, + const float fieldOfView, + const Math::Vector4 
&skyColor) +{ + const uint32_t pixelCount = frameBuffer.width() * frameBuffer.height(); + + Types::Scene scene{ + triangleBuffer.data(), + triangleBuffer.size(), + meshBuffer.data(), + meshBuffer.size(), + materialBuffer.data(), + materialBuffer.size() + }; + + dim3 gridSize(samples); + dim3 blockSize(frameBuffer.width(), frameBuffer.height()); + + CudaArray rngs(pixelCount); + + for (uint32_t i = 0; i < rngs.size(); i++) + { + rngs[i] = rng.get(); + } + + CudaArray gpuFrameBuffer(pixelCount); + + castRay<<>>(Types::Tile{0, 0, frameBuffer.width(), frameBuffer.height()}, rngs.data(), frameBuffer.width(), frameBuffer.height(), fieldOfView, scene, maxBounces, + skyColor, gpuFrameBuffer.data()); + cudaDeviceSynchronize(); + + // Copy frame buffer back + frameBuffer = FrameBuffer::fromRawData(gpuFrameBuffer.data(), frameBuffer.width(), frameBuffer.height()); +} + +} +} +} diff --git a/cuda_code/kernel_opt_intrinsics_1.cu b/cuda_code/kernel_opt_intrinsics_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..689dace09d40ceb0108c5945a83a3b3e29ff1d3a --- /dev/null +++ b/cuda_code/kernel_opt_intrinsics_1.cu @@ -0,0 +1,450 @@ +// Copyright 2019 ETH Zürich, Thomas Schöps +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
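+// Overview (inferred from the kernels below): this file accumulates per-surfel
+// Jacobians and residuals into the Gauss-Newton normal equations used to refine
+// the depth and color camera intrinsics. The depth system is split into a dense
+// global block (depth_A, depth_b1) for the five global parameters, a per-pixel
+// coupling block (depth_B), and a diagonal per-pixel block (depth_D, depth_b2)
+// for the cfactor of each sparsification cell, while the color system is a
+// small four-parameter system (color_H, color_b).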
+ +#include +#include +#include + +#include "badslam/cost_function.cuh" +#include "badslam/cuda_util.cuh" +#include "badslam/cuda_matrix.cuh" +#include "badslam/gauss_newton.cuh" +#include "badslam/surfel_projection_nvcc_only.cuh" +#include "badslam/util.cuh" +#include "badslam/util_nvcc_only.cuh" + + +namespace vis { + +constexpr int kARows = 4 + 1; + +template +__global__ void AccumulateIntrinsicsCoefficientsCUDAKernel( + SurfelProjectionParameters s, + DepthToColorPixelCorner depth_to_color, + PixelCornerProjector color_corner_projector, + PixelCenterUnprojector depth_center_unprojector, + float color_fx, float color_fy, + cudaTextureObject_t color_texture, + CUDABuffer_ observation_count, + CUDABuffer_ depth_A, + CUDABuffer_ depth_B, + CUDABuffer_ depth_D, + CUDABuffer_ depth_b1, + CUDABuffer_ depth_b2, + CUDABuffer_ color_H, + CUDABuffer_ color_b) { + constexpr int block_height = 1; + const unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; + + // Parameters: fx_inv, fy_inv, cx_inv, cy_inv, a_0, a_1 (all global), cfactor (per sparsification pixel) + float depth_jacobian[kARows + 1] = {0, 0, 0, 0, 0}; + float raw_depth_residual = 0; + + // Parameters: fx_inv, fy_inv, cx_inv, cy_inv + float descriptor_jacobian_1[4] = {0, 0, 0, 0}; + float raw_descriptor_residual_1 = 0; + float descriptor_jacobian_2[4] = {0, 0, 0, 0}; + float raw_descriptor_residual_2 = 0; + + int sparse_pixel_index = -1; + + SurfelProjectionResult6 r; + if (SurfelProjectsToAssociatedPixel(surfel_index, s, &r)) { + float nx = depth_center_unprojector.nx(r.px); + float ny = depth_center_unprojector.ny(r.py); + + if (optimize_depth_intrinsics) { + int sparse_px = r.px / s.depth_params.sparse_surfel_cell_size; + int sparse_py = r.py / s.depth_params.sparse_surfel_cell_size; + float cfactor = s.depth_params.cfactor_buffer(sparse_py, sparse_px); + + float raw_inv_depth = 1.0f / (s.depth_params.raw_to_float_depth * s.depth_buffer(r.py, r.px)); // TODO: SurfelProjectsToAssociatedPixel() also reads that value, could be gotten from there + float exp_inv_depth = expf(- s.depth_params.a * raw_inv_depth); + float corrected_inv_depth = cfactor * exp_inv_depth + raw_inv_depth; + if (fabs(corrected_inv_depth) > 1e-4f) { // NOTE: Corresponds to 1000 meters + float3 local_surfel_normal = s.frame_T_global.Rotate(r.surfel_normal); + float dot = Dot(make_float3(nx, ny, 1), local_surfel_normal); + + float depth_residual_inv_stddev = + ComputeDepthResidualInvStddevEstimate(nx, ny, r.pixel_calibrated_depth, local_surfel_normal, s.depth_params.baseline_fx); + + float jac_base = depth_residual_inv_stddev * dot * exp_inv_depth / (corrected_inv_depth * corrected_inv_depth); + + // Depth residual derivative wrt. ... + // cx_inv (Attention: notice the indexing order!) 
+ depth_jacobian[2] = depth_residual_inv_stddev * r.pixel_calibrated_depth * Dot(r.surfel_normal, make_float3(s.frame_T_global.row0.x, s.frame_T_global.row0.y, s.frame_T_global.row0.z)); + // cy_inv + depth_jacobian[3] = depth_residual_inv_stddev * r.pixel_calibrated_depth * Dot(r.surfel_normal, make_float3(s.frame_T_global.row1.x, s.frame_T_global.row1.y, s.frame_T_global.row1.z)); + // fx_inv + depth_jacobian[0] = r.px * depth_jacobian[2]; + // fy_inv + depth_jacobian[1] = r.py * depth_jacobian[3]; +// // a_0 +// depth_jacobian[4] = -cfactor * jac_base; + // a + depth_jacobian[4] = cfactor * raw_inv_depth * jac_base; + // cfactor + depth_jacobian[5] = -jac_base; + + float3 local_unproj = make_float3(r.pixel_calibrated_depth * nx, r.pixel_calibrated_depth * ny, r.pixel_calibrated_depth); + ComputeRawDepthResidual( + depth_residual_inv_stddev, r.surfel_local_position, local_surfel_normal, local_unproj, &raw_depth_residual); + + sparse_pixel_index = sparse_px + sparse_py * s.depth_params.cfactor_buffer.width(); + } + } + + if (optimize_color_intrinsics) { + float2 color_pxy; + if (TransformDepthToColorPixelCorner(r.pxy, depth_to_color, &color_pxy)) { + float2 t1_pxy, t2_pxy; + ComputeTangentProjections( + r.surfel_global_position, + r.surfel_normal, + SurfelGetRadiusSquared(s.surfels, surfel_index), + s.frame_T_global, + color_corner_projector, + &t1_pxy, + &t2_pxy); + float grad_x_1; + float grad_y_1; + float grad_x_2; + float grad_y_2; + DescriptorJacobianWrtProjectedPosition( + color_texture, color_pxy, t1_pxy, t2_pxy, &grad_x_1, &grad_y_1, &grad_x_2, &grad_y_2); + + descriptor_jacobian_1[0] = grad_x_1 * nx; + descriptor_jacobian_1[1] = grad_y_1 * ny; + descriptor_jacobian_1[2] = grad_x_1; + descriptor_jacobian_1[3] = grad_y_1; + + descriptor_jacobian_2[0] = grad_x_2 * nx; + descriptor_jacobian_2[1] = grad_y_2 * ny; + descriptor_jacobian_2[2] = grad_x_2; + descriptor_jacobian_2[3] = grad_y_2; + + float surfel_descriptor_1 = s.surfels(kSurfelDescriptor1, surfel_index); + float surfel_descriptor_2 = s.surfels(kSurfelDescriptor2, surfel_index); + ComputeRawDescriptorResidual( + color_texture, color_pxy, t1_pxy, t2_pxy, surfel_descriptor_1, surfel_descriptor_2, &raw_descriptor_residual_1, &raw_descriptor_residual_2); + } + } + } + + // TODO: Would it be faster to use a few different shared memory buffers (instead of only a single one) for the reduce operations to avoid some of the __syncthreads()? 
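// Reader's note on the accumulation that follows (a sketch of the block structure these
// buffers appear to implement, not an authoritative statement of the badslam math): each
// residual r with weight w has a Jacobian J = [J_g | J_c], where J_g holds the kARows
// derivatives w.r.t. the global intrinsics and J_c the single derivative w.r.t. the
// per-sparse-pixel cfactor. The kernel accumulates the blocks of the Gauss-Newton system
//
//   [ A   B ] [ dx_g ]     [ b1 ]      A  += w * J_g * J_g^T   (kARows x kARows, global)
//   [ B^T D ] [ dx_c ] ~ - [ b2 ]      B  += w * J_g * J_c     (kARows x 1 per sparse pixel)
//                                      D  += w * J_c * J_c     (scalar per sparse pixel)
//                                      b1 += w * r * J_g,  b2 += w * r * J_c
//
// (sign conventions aside). The per-pixel unknowns dx_c are eliminated later through the
// Schur complement (A - B D^-1 B^T) dx_g ~ -(b1 - B D^-1 b2), which is exactly the
// subtraction performed by ComputeIntrinsicsIntermediateMatricesCUDAKernel further down.
// Regarding the TODO above: CUB requires a __syncthreads() barrier whenever the same
// TempStorage is reused for another block-wide reduction, which is why the single shared
// buffer forces the extra synchronizations.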
+ typedef cub::BlockReduce BlockReduceFloat; + __shared__ typename BlockReduceFloat::TempStorage float_storage; + + if (optimize_depth_intrinsics) { + const float depth_weight = ComputeDepthResidualWeight(raw_depth_residual); + + // depth_jacobian.tranpose() * depth_jacobian (top-left part A), as well as + // depth_jacobian.transpose() * raw_depth_residual (top rows corresponding to A): + AccumulateGaussNewtonHAndB( + sparse_pixel_index >= 0, + raw_depth_residual, + depth_weight, + depth_jacobian, + depth_A, + depth_b1, + &float_storage); + + if (sparse_pixel_index >= 0) { + // depth_jacobian.tranpose() * depth_jacobian (top-right part B): + #pragma unroll + for (int i = 0; i < kARows; ++ i) { + const float depth_jacobian_sq_i = depth_weight * depth_jacobian[/*row*/ i] * depth_jacobian[/*col*/ kARows]; + atomicAdd(&depth_B(i, sparse_pixel_index), depth_jacobian_sq_i); + } + + // depth_jacobian.tranpose() * depth_jacobian (diagonal-only part D): + const float depth_jacobian_sq_i = depth_weight * depth_jacobian[/*row*/ kARows] * depth_jacobian[/*col*/ kARows]; + atomicAdd(&depth_D(0, sparse_pixel_index), depth_jacobian_sq_i); + + // depth_jacobian.transpose() * point_residual (bottom row corresponding to D): + const float b_pose_i = depth_weight * raw_depth_residual * depth_jacobian[kARows]; + atomicAdd(&depth_b2(0, sparse_pixel_index), b_pose_i); + + // observation count: + atomicAdd(&observation_count(0, sparse_pixel_index), 1); + } + } + + if (optimize_color_intrinsics) { + AccumulateGaussNewtonHAndB<4, block_width, block_height>( + raw_descriptor_residual_1 != 0, + raw_descriptor_residual_1, + ComputeDescriptorResidualWeight(raw_descriptor_residual_1), + descriptor_jacobian_1, + color_H, + color_b, + &float_storage); + AccumulateGaussNewtonHAndB<4, block_width, block_height>( + raw_descriptor_residual_2 != 0, + raw_descriptor_residual_2, + ComputeDescriptorResidualWeight(raw_descriptor_residual_2), + descriptor_jacobian_2, + color_H, + color_b, + &float_storage); + } +} + +void CallAccumulateIntrinsicsCoefficientsCUDAKernel( + cudaStream_t stream, + bool optimize_color_intrinsics, + bool optimize_depth_intrinsics, + const SurfelProjectionParameters& s, + const DepthToColorPixelCorner& depth_to_color, + const PixelCornerProjector& color_corner_projector, + const PixelCenterUnprojector& depth_center_unprojector, + float color_fx, + float color_fy, + cudaTextureObject_t color_texture, + const CUDABuffer_& observation_count, + const CUDABuffer_& depth_A, + const CUDABuffer_& depth_B, + const CUDABuffer_& depth_D, + const CUDABuffer_& depth_b1, + const CUDABuffer_& depth_b2, + const CUDABuffer_& color_H, + const CUDABuffer_& color_b) { + COMPILE_OPTION_2(optimize_color_intrinsics, optimize_depth_intrinsics, + CUDA_AUTO_TUNE_1D_TEMPLATED( + AccumulateIntrinsicsCoefficientsCUDAKernel, + 1024, + s.surfels_size, + 0, stream, + TEMPLATE_ARGUMENTS(block_width, _optimize_color_intrinsics, _optimize_depth_intrinsics), + /* kernel parameters */ + s, + depth_to_color, + color_corner_projector, + depth_center_unprojector, + color_fx, + color_fy, + color_texture, + observation_count, + depth_A, + depth_B, + depth_D, + depth_b1, + depth_b2, + color_H, + color_b)); + CUDA_CHECK(); +} + + +template +__global__ void ComputeIntrinsicsIntermediateMatricesCUDAKernel( + u32 pixel_count, + CUDABuffer_ A, + CUDABuffer_ B, + CUDABuffer_ D, + CUDABuffer_ b1, + CUDABuffer_ b2) { + unsigned int pixel_index = blockIdx.x * blockDim.x + threadIdx.x; + + // TODO: Would it be faster to use a few different shared memory 
buffers for the reduce operations to avoid some of the __syncthreads()? + typedef cub::BlockReduce BlockReduceFloat; + __shared__ typename BlockReduceFloat::TempStorage float_storage; + + u8 weight = 1; + if (pixel_index >= pixel_count) { + weight = 0; + pixel_index = pixel_count - 1; + } + + const float D_inverse = 1.0f / D(0, pixel_index); + if (!(D_inverse < 1e12f)) { + weight = 0; + D(0, pixel_index) = CUDART_NAN_F; + } + +// if (pixel_index >= 5 * 640 + 320 - 14 && pixel_index <= 5 * 640 + 320 + 14) { +// printf("px index %i D: %f, D_inverse: %f\n", pixel_index, D(0, pixel_index), D_inverse); +// } + + // D^(-1) b2 [Dx1], exclusive access by this thread + float D_inv_b2; + if (weight > 0) { + D_inv_b2 = D_inverse * b2(0, pixel_index); + // Store in D + D(0, pixel_index) = D_inv_b2; + } + + // B D^(-1) B^T [AxA], concurrent access + // TODO: load all the B(:, pixel_index) into variables for better performance? + int index = 0; + #pragma unroll + for (int row = 0; row < kARows; ++ row) { + #pragma unroll + for (int col = row; col < kARows; ++ col) { + float B_D_inv_B_i = B(row, pixel_index) * D_inverse * B(col, pixel_index); + + // Accumulate on A(0, index) (subtract from it) + __syncthreads(); // Required before re-use of shared memory. + const float block_sum = + BlockReduceFloat(float_storage).Sum(weight ? B_D_inv_B_i : 0); + if (threadIdx.x == 0) { + atomicAdd(&A(0, index), -1.f * block_sum); + ++ index; + } + } + } + + // B D^(-1) b2 [Ax1], concurrent access + #pragma unroll + for (int row = 0; row < kARows; ++ row) { + float B_D_inv_b2_i = B(row, pixel_index) * D_inv_b2; + + // Accumulate on b1(0, row) (subtract from it) + __syncthreads(); // Required before re-use of shared memory. + const float block_sum = + BlockReduceFloat(float_storage).Sum(weight ? 
B_D_inv_b2_i : 0); + if (threadIdx.x == 0) { + atomicAdd(&b1(0, row), -1.f * block_sum); + } + } + + // D^(-1) B^T [DxA], exclusive access by this thread + if (weight > 0) { + #pragma unroll + for (int row = 0; row < kARows; ++ row) { + float D_inv_B_T_i = D_inverse * B(row, pixel_index); + + // Store in B + B(row, pixel_index) = D_inv_B_T_i; + } + } +} + +void CallComputeIntrinsicsIntermediateMatricesCUDAKernel( + cudaStream_t stream, + u32 pixel_count, + const CUDABuffer_& A, + const CUDABuffer_& B, + const CUDABuffer_& D, + const CUDABuffer_& b1, + const CUDABuffer_& b2) { + CUDA_AUTO_TUNE_1D_TEMPLATED( + ComputeIntrinsicsIntermediateMatricesCUDAKernel, + 1024, + pixel_count, + 0, stream, + TEMPLATE_ARGUMENTS(block_width), + /* kernel parameters */ + pixel_count, + A, + B, + D, + b1, + b2); + CUDA_CHECK(); +} + + +template +__global__ void SolveForPixelIntrinsicsUpdateCUDAKernel( + u32 pixel_count, + CUDABuffer_ observation_count, + CUDABuffer_ B, + CUDABuffer_ D, + CUDABuffer_ x1, + CUDABuffer_ cfactor_buffer) { + unsigned int pixel_index = blockIdx.x * blockDim.x + threadIdx.x; + + __shared__ float x1_shared[kARows]; + if (threadIdx.x < kARows) { + x1_shared[threadIdx.x] = x1(0, threadIdx.x); + } + + u8 weight = 1; + if (pixel_index >= pixel_count) { + weight = 0; + pixel_index = pixel_count - 1; + } + + // x2 = (D^(-1) b2) (stored in D) - (D^(-1) B^T x1) (D^(-1) B^T stored in B) + float offset = D(0, pixel_index); + + __syncthreads(); // Make x1_shared available + + if (::isnan(offset)) { + offset = 0; + } else { + #pragma unroll + for (int row = 0; row < kARows; ++ row) { + offset -= B(row, pixel_index) * x1_shared[row]; + } + } + + int y = pixel_index / cfactor_buffer.width(); + int x = pixel_index - y * cfactor_buffer.width(); + float cfactor = 0; + if (weight > 0 /*&& observation_count(0, pixel_index) >= 10*/) { + cfactor = cfactor_buffer(y, x) - offset; + + // Reset pixels which do not have any observation anymore to avoid having + // outlier values stick around + if (observation_count(0, pixel_index) == 0) { + weight = 0; + cfactor = 0; + } + + cfactor_buffer(y, x) = cfactor; + } +} + +void CallSolveForPixelIntrinsicsUpdateCUDAKernel( + cudaStream_t stream, + u32 pixel_count, + const CUDABuffer_& observation_count, + const CUDABuffer_& B, + const CUDABuffer_& D, + const CUDABuffer_& x1, + const CUDABuffer_& cfactor_buffer) { + CUDA_AUTO_TUNE_1D_TEMPLATED( + SolveForPixelIntrinsicsUpdateCUDAKernel, + 1024, + pixel_count, + 0, stream, + TEMPLATE_ARGUMENTS(block_width), + /* kernel parameters */ + pixel_count, + observation_count, + B, + D, + x1, + cfactor_buffer); + CUDA_CHECK(); +} + +} diff --git a/cuda_code/kernel_opt_pose.cu b/cuda_code/kernel_opt_pose.cu new file mode 100644 index 0000000000000000000000000000000000000000..d1cfb8f1b277eaa2798911b4cf1bf4067f2ec4f0 --- /dev/null +++ b/cuda_code/kernel_opt_pose.cu @@ -0,0 +1,1900 @@ +// Copyright 2019 ETH Zürich, Thomas Schöps +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#include +#include +#include + +#include "badslam/cost_function.cuh" +#include "badslam/cuda_util.cuh" +#include "badslam/cuda_matrix.cuh" +#include "badslam/gauss_newton.cuh" +#include "badslam/surfel_projection.cuh" +#include "badslam/surfel_projection_nvcc_only.cuh" +#include "badslam/util.cuh" +#include "badslam/util_nvcc_only.cuh" + + + +namespace vis { +// Macro definition +#define CudaAssert( X ) if ( !(X) ) { printf( "Thread %d:%d failed assert at %s:%d! \n", blockIdx.x, threadIdx.x, __FILE__, __LINE__ ); return; } +__forceinline__ __device__ void ComputeRawDepthResidualAndJacobian( + const PixelCenterUnprojector& unprojector, + int px, + int py, + float pixel_calibrated_depth, + float depth_residual_inv_stddev, + const float3& surfel_local_position, + const float3& surfel_local_normal, + float* raw_residual, + float* jacobian) { + float3 local_unproj; + ComputeRawDepthResidual(unprojector, px, py, pixel_calibrated_depth, + depth_residual_inv_stddev, + surfel_local_position, surfel_local_normal, + &local_unproj, raw_residual); + + // Compute Jacobian of residual. 
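// Informal recap (a reading of the code, signs aside): the depth term is a point-to-plane
// residual r ~ inv_stddev * dot(n, s - u) between the surfel (position s, normal n) and the
// unprojected pixel u. For the pose parameterization global_T_frame * exp(hat(eps)) with
// eps = (translation, rotation), its derivative has the familiar form
//   dr/d(translation) ~ inv_stddev * n,    dr/d(rotation) ~ inv_stddev * (u x n).
// Dot products are invariant under a common rotation, so all vectors can be evaluated in
// the local camera frame; that is the simplification applied below, and it reproduces the
// commented-out global-frame expressions entry by entry.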
+ +// // Old version for exp(hat(T)) * global_T_frame: +// jacobian[0] = surfel_global_normal.x; +// jacobian[1] = surfel_global_normal.y; +// jacobian[2] = surfel_global_normal.z; +// jacobian[3] = surfel_global_position.y * surfel_global_normal.z - surfel_global_position.z * surfel_global_normal.y; +// jacobian[4] = -surfel_global_position.x * surfel_global_normal.z + surfel_global_position.z * surfel_global_normal.x; +// jacobian[5] = surfel_global_position.x * surfel_global_normal.y - surfel_global_position.y * surfel_global_normal.x; + + // New version for global_T_frame * exp(hat(T)): +// jacobian[0] = gtf.row0.x*surfel_global_normal.x + gtf.row1.x*surfel_global_normal.y + gtf.row2.x*surfel_global_normal.z; +// jacobian[1] = gtf.row0.y*surfel_global_normal.x + gtf.row1.y*surfel_global_normal.y + gtf.row2.y*surfel_global_normal.z; +// jacobian[2] = gtf.row0.z*surfel_global_normal.x + gtf.row1.z*surfel_global_normal.y + gtf.row2.z*surfel_global_normal.z; +// jacobian[3] = - surfel_global_normal.x*(gtf.row0.y*local_unproj.z - gtf.row0.z*local_unproj.y) +// - surfel_global_normal.y*(gtf.row1.y*local_unproj.z - gtf.row1.z*local_unproj.y) +// - surfel_global_normal.z*(gtf.row2.y*local_unproj.z - gtf.row2.z*local_unproj.y); +// jacobian[4] = surfel_global_normal.x*(gtf.row0.x*local_unproj.z - gtf.row0.z*local_unproj.x) +// + surfel_global_normal.y*(gtf.row1.x*local_unproj.z - gtf.row1.z*local_unproj.x) +// + surfel_global_normal.z*(gtf.row2.x*local_unproj.z - gtf.row2.z*local_unproj.x); +// jacobian[5] = - surfel_global_normal.x*(gtf.row0.x*local_unproj.y - gtf.row0.y*local_unproj.x) +// - surfel_global_normal.y*(gtf.row1.x*local_unproj.y - gtf.row1.y*local_unproj.x) +// - surfel_global_normal.z*(gtf.row2.x*local_unproj.y - gtf.row2.y*local_unproj.x); + + // Simplified form of the new version above by rotating all the vectors into + // the local frame (which does not change the values of the dot products), + // i.e., multiplying by frame_tr_global from the left side: + jacobian[0] = depth_residual_inv_stddev * surfel_local_normal.x; + jacobian[1] = depth_residual_inv_stddev * surfel_local_normal.y; + jacobian[2] = depth_residual_inv_stddev * surfel_local_normal.z; + jacobian[3] = depth_residual_inv_stddev * (-surfel_local_normal.y * local_unproj.z + surfel_local_normal.z * local_unproj.y); + jacobian[4] = depth_residual_inv_stddev * ( surfel_local_normal.x * local_unproj.z - surfel_local_normal.z * local_unproj.x); + jacobian[5] = depth_residual_inv_stddev * (-surfel_local_normal.x * local_unproj.y + surfel_local_normal.y * local_unproj.x); +} + +__forceinline__ __device__ void ComputeRawDescriptorResidualAndJacobian( + const PixelCenterProjector& color_center_projector, + cudaTextureObject_t color_texture, + const float2& pxy, + const float2& t1_pxy, + const float2& t2_pxy, + const float3& ls, // surfel_local_position + float surfel_descriptor_1, + float surfel_descriptor_2, + float* raw_residual_1, + float* raw_residual_2, + float* jacobian_1, + float* jacobian_2) { + ComputeRawDescriptorResidual(color_texture, pxy, t1_pxy, t2_pxy, surfel_descriptor_1, surfel_descriptor_2, raw_residual_1, raw_residual_2); + + float grad_x_fx_1; + float grad_y_fy_1; + float grad_x_fx_2; + float grad_y_fy_2; + DescriptorJacobianWrtProjectedPosition( + color_texture, pxy, t1_pxy, t2_pxy, &grad_x_fx_1, &grad_y_fy_1, &grad_x_fx_2, &grad_y_fy_2); + grad_x_fx_1 *= color_center_projector.fx; + grad_x_fx_2 *= color_center_projector.fx; + grad_y_fy_1 *= color_center_projector.fy; + grad_y_fy_2 *= 
color_center_projector.fy; + + float inv_ls_z = 1.f / ls.z; + float ls_z_sq = ls.z * ls.z; + float inv_ls_z_sq = inv_ls_z * inv_ls_z; + + jacobian_1[0] = -grad_x_fx_1 * inv_ls_z; + jacobian_1[1] = -grad_y_fy_1 * inv_ls_z; + jacobian_1[2] = (ls.x * grad_x_fx_1 + ls.y * grad_y_fy_1) * inv_ls_z_sq; + + float ls_x_y = ls.x * ls.y; + + jacobian_1[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_1 + ls_x_y * grad_x_fx_1) * inv_ls_z_sq; + jacobian_1[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_1 + ls_x_y * grad_y_fy_1) * inv_ls_z_sq; + jacobian_1[5] = -(ls.x * grad_y_fy_1 - ls.y * grad_x_fx_1) * inv_ls_z; + + jacobian_2[0] = -grad_x_fx_2 * inv_ls_z; + jacobian_2[1] = -grad_y_fy_2 * inv_ls_z; + jacobian_2[2] = (ls.x * grad_x_fx_2 + ls.y * grad_y_fy_2) * inv_ls_z_sq; + jacobian_2[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_2 + ls_x_y * grad_x_fx_2) * inv_ls_z_sq; + jacobian_2[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_2 + ls_x_y * grad_y_fy_2) * inv_ls_z_sq; + jacobian_2[5] = -(ls.x * grad_y_fy_2 - ls.y * grad_x_fx_2) * inv_ls_z; +} + +__forceinline__ __device__ void ComputeRawDescriptorFeatureJacobian( + const PixelCenterProjector& color_center_projector, + cudaTextureObject_t color_texture, + const float2& pxy, + const float2& t1_pxy, + const float2& t2_pxy, + const float3& ls, // surfel_local_position + float* jacobian_1, + float* jacobian_2, + int channel) { + CudaAssert(ls.x == ls.x); + CudaAssert(ls.y == ls.y); + CudaAssert(ls.z == ls.z); +// 11.3 jzmTODO: reuse computation. here the derivative of the projected position w.r.t. pose is the same for all the channels. +float grad_x_fx_1; +float grad_y_fy_1; +float grad_x_fx_2; +float grad_y_fy_2; +DescriptorJacobianWrtProjectedPositionOnChannels( + color_texture, pxy, t1_pxy, t2_pxy, &grad_x_fx_1, &grad_y_fy_1, &grad_x_fx_2, &grad_y_fy_2, channel); +grad_x_fx_1 *= color_center_projector.fx; +grad_x_fx_2 *= color_center_projector.fx; +grad_y_fy_1 *= color_center_projector.fy; +grad_y_fy_2 *= color_center_projector.fy; +// 11.3 jzmTODO: for debugging , delete it after debugging +// unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; + +float inv_ls_z = 1.f / ls.z; +CudaAssert(inv_ls_z == inv_ls_z); +float ls_z_sq = ls.z * ls.z; +CudaAssert(ls_z_sq == ls_z_sq); +float inv_ls_z_sq = inv_ls_z * inv_ls_z; +CudaAssert(inv_ls_z_sq == inv_ls_z_sq) + +jacobian_1[0] = -grad_x_fx_1 * inv_ls_z; +jacobian_1[1] = -grad_y_fy_1 * inv_ls_z; +jacobian_1[2] = (ls.x * grad_x_fx_1 + ls.y * grad_y_fy_1) * inv_ls_z_sq; + +float ls_x_y = ls.x * ls.y; +CudaAssert(ls_x_y == ls_x_y); +jacobian_1[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_1 + ls_x_y * grad_x_fx_1) * inv_ls_z_sq; +CudaAssert(jacobian_1[3] == jacobian_1[3]); +jacobian_1[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_1 + ls_x_y * grad_y_fy_1) * inv_ls_z_sq; +CudaAssert(jacobian_1[4] == jacobian_1[4]); +jacobian_1[5] = -(ls.x * grad_y_fy_1 - ls.y * grad_x_fx_1) * inv_ls_z; + +jacobian_2[0] = -grad_x_fx_2 * inv_ls_z; +jacobian_2[1] = -grad_y_fy_2 * inv_ls_z; +jacobian_2[2] = (ls.x * grad_x_fx_2 + ls.y * grad_y_fy_2) * inv_ls_z_sq; +jacobian_2[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_2 + ls_x_y * grad_x_fx_2) * inv_ls_z_sq; +CudaAssert(jacobian_2[3] == jacobian_2[3]); +jacobian_2[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_2 + ls_x_y * grad_y_fy_2) * inv_ls_z_sq; +CudaAssert(jacobian_2[4] == jacobian_2[4]); +jacobian_2[5] = -(ls.x * grad_y_fy_2 - ls.y * grad_x_fx_2) * inv_ls_z; +} + +__forceinline__ __device__ void TestComputeRawDescriptorFeatureJacobian( + const CUDABuffer_& feature_arr, + const 
PixelCenterProjector& color_center_projector, + const float2& pxy, + const float2& t1_pxy, + const float2& t2_pxy, + const float3& ls, // surfel_local_position + float* jacobian_all, + int channel) { + /* unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; + if (surfel_index == 0){ + printf("pose_jacobian: feat(400,2000)=%f, feat(457,2216)=%f \n",feature_arr(400,2000), feature_arr(457,2216)); + }*/ + + CudaAssert(ls.x == ls.x); + CudaAssert(ls.y == ls.y); + CudaAssert(ls.z == ls.z); +// 11.3 jzmTODO: reuse computation. here the derivative of the projected position w.r.t. pose is the same for all the channels. +float grad_x_fx_1; +float grad_y_fy_1; +float grad_x_fx_2; +float grad_y_fy_2; +TestDescriptorJacobianWrtProjectedPositionOnChannels( + feature_arr, pxy, t1_pxy, t2_pxy, &grad_x_fx_1, &grad_y_fy_1, &grad_x_fx_2, &grad_y_fy_2, channel); +grad_x_fx_1 *= color_center_projector.fx; +grad_x_fx_2 *= color_center_projector.fx; +grad_y_fy_1 *= color_center_projector.fy; +grad_y_fy_2 *= color_center_projector.fy; +// 11.3 jzmTODO: for debugging , delete it after debugging +// unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; + +float inv_ls_z = 1.f / ls.z; +CudaAssert(inv_ls_z == inv_ls_z); +float ls_z_sq = ls.z * ls.z; +CudaAssert(ls_z_sq == ls_z_sq); +float inv_ls_z_sq = inv_ls_z * inv_ls_z; +CudaAssert(inv_ls_z_sq == inv_ls_z_sq) +// 11.20 jacobian_1, depending on channel, channel is 0-based index. +*(jacobian_all+6*channel) = -grad_x_fx_1 * inv_ls_z; // jacobian_1[0] = -grad_x_fx_1 * inv_ls_z; +*(jacobian_all+6*channel+1) = -grad_y_fy_1 * inv_ls_z; // jacobian_1[1] = -grad_y_fy_1 * inv_ls_z; +*(jacobian_all+6*channel+2) = (ls.x * grad_x_fx_1 + ls.y * grad_y_fy_1) * inv_ls_z_sq; // jacobian_1[2] = (ls.x * grad_x_fx_1 + ls.y * grad_y_fy_1) * inv_ls_z_sq; + +float ls_x_y = ls.x * ls.y; +//CudaAssert(ls_x_y == ls_x_y); +*(jacobian_all+6*channel+3) = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_1 + ls_x_y * grad_x_fx_1) * inv_ls_z_sq; // jacobian_1[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_1 + ls_x_y * grad_x_fx_1) * inv_ls_z_sq; +*(jacobian_all+6*channel+4) = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_1 + ls_x_y * grad_y_fy_1) * inv_ls_z_sq; // jacobian_1[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_1 + ls_x_y * grad_y_fy_1) * inv_ls_z_sq; +*(jacobian_all+6*channel+5) = -(ls.x * grad_y_fy_1 - ls.y * grad_x_fx_1) * inv_ls_z; //jacobian_1[5] = -(ls.x * grad_y_fy_1 - ls.y * grad_x_fx_1) * inv_ls_z; + +// 11.20 jacobian_2, depending on channel, channel is 0-based index. 
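// Layout note (inferred from the indexing below and in the caller): jacobian_all packs
// 2 * kTotalChannels six-vectors. Entries [6*channel .. 6*channel+5] hold the pose
// Jacobian of the first tangent-direction residual of that channel, and entries
// [6*kTotalChannels + 6*channel .. 6*kTotalChannels + 6*channel+5] the Jacobian of the
// second one, matching raw_residual_vec[channel] and
// raw_residual_vec[channel + kTotalChannels] in TestAccumulatePoseEstimationCoeffsCUDAKernel.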
+*(jacobian_all + 6*kTotalChannels + 6*channel) = -grad_x_fx_2 * inv_ls_z; // jacobian_2[0] = -grad_x_fx_2 * inv_ls_z; +*(jacobian_all + 6*kTotalChannels + 6*channel+1) = -grad_y_fy_2 * inv_ls_z; // jacobian_2[1] = -grad_y_fy_2 * inv_ls_z; +*(jacobian_all + 6*kTotalChannels + 6*channel+2) = (ls.x * grad_x_fx_2 + ls.y * grad_y_fy_2) * inv_ls_z_sq; // jacobian_2[2] = (ls.x * grad_x_fx_2 + ls.y * grad_y_fy_2) * inv_ls_z_sq; +*(jacobian_all + 6*kTotalChannels + 6*channel+3) = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_2 + ls_x_y * grad_x_fx_2) * inv_ls_z_sq; // jacobian_2[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_2 + ls_x_y * grad_x_fx_2) * inv_ls_z_sq; +*(jacobian_all + 6*kTotalChannels + 6*channel+4) = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_2 + ls_x_y * grad_y_fy_2) * inv_ls_z_sq; // jacobian_2[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_2 + ls_x_y * grad_y_fy_2) * inv_ls_z_sq; +*(jacobian_all + 6*kTotalChannels + 6*channel+5) = -(ls.x * grad_y_fy_2 - ls.y * grad_x_fx_2) * inv_ls_z; // jacobian_2[5] = -(ls.x * grad_y_fy_2 - ls.y * grad_x_fx_2) * inv_ls_z; +} + +__forceinline__ __device__ void ComputeRawDescriptorFeatureResidualAndJacobian( + const PixelCenterProjector& color_center_projector, + cudaTextureObject_t color_texture, + const float2& pxy, + const float2& t1_pxy, + const float2& t2_pxy, + const float3& ls, // surfel_local_position + float* surfel_descriptor, + float* raw_residual_vec, + float* jacobian_1, + float* jacobian_2, + int channel) { +ComputeRawFeatureDescriptorResidual( + color_texture, // TODO: use feature_texture + pxy, + t1_pxy, + t2_pxy, + surfel_descriptor, + raw_residual_vec); + +float grad_x_fx_1; +float grad_y_fy_1; +float grad_x_fx_2; +float grad_y_fy_2; +DescriptorJacobianWrtProjectedPosition( + color_texture, pxy, t1_pxy, t2_pxy, &grad_x_fx_1, &grad_y_fy_1, &grad_x_fx_2, &grad_y_fy_2); +grad_x_fx_1 *= color_center_projector.fx; +grad_x_fx_2 *= color_center_projector.fx; +grad_y_fy_1 *= color_center_projector.fy; +grad_y_fy_2 *= color_center_projector.fy; + +float inv_ls_z = 1.f / ls.z; +float ls_z_sq = ls.z * ls.z; +float inv_ls_z_sq = inv_ls_z * inv_ls_z; + +jacobian_1[0] = -grad_x_fx_1 * inv_ls_z; +jacobian_1[1] = -grad_y_fy_1 * inv_ls_z; +jacobian_1[2] = (ls.x * grad_x_fx_1 + ls.y * grad_y_fy_1) * inv_ls_z_sq; + +float ls_x_y = ls.x * ls.y; + +jacobian_1[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_1 + ls_x_y * grad_x_fx_1) * inv_ls_z_sq; +jacobian_1[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_1 + ls_x_y * grad_y_fy_1) * inv_ls_z_sq; +jacobian_1[5] = -(ls.x * grad_y_fy_1 - ls.y * grad_x_fx_1) * inv_ls_z; + +jacobian_2[0] = -grad_x_fx_2 * inv_ls_z; +jacobian_2[1] = -grad_y_fy_2 * inv_ls_z; +jacobian_2[2] = (ls.x * grad_x_fx_2 + ls.y * grad_y_fy_2) * inv_ls_z_sq; +jacobian_2[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_2 + ls_x_y * grad_x_fx_2) * inv_ls_z_sq; +jacobian_2[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_2 + ls_x_y * grad_y_fy_2) * inv_ls_z_sq; +jacobian_2[5] = -(ls.x * grad_y_fy_2 - ls.y * grad_x_fx_2) * inv_ls_z; +} + +__forceinline__ __device__ void ComputeRawDescriptorResidualAndJacobianWithFloatTexture( + const PixelCenterProjector& color_center_projector, + cudaTextureObject_t color_texture, + const float2& pxy, + const float2& t1_pxy, + const float2& t2_pxy, + const float3& ls, // surfel_local_position + float surfel_descriptor_1, + float surfel_descriptor_2, + float* raw_residual_1, + float* raw_residual_2, + float* jacobian_1, + float* jacobian_2) { + ComputeRawDescriptorResidualWithFloatTexture(color_texture, pxy, t1_pxy, t2_pxy, surfel_descriptor_1, 
surfel_descriptor_2, raw_residual_1, raw_residual_2); + + float grad_x_fx_1; + float grad_y_fy_1; + float grad_x_fx_2; + float grad_y_fy_2; + DescriptorJacobianWrtProjectedPositionWithFloatTexture( + color_texture, pxy, t1_pxy, t2_pxy, &grad_x_fx_1, &grad_y_fy_1, &grad_x_fx_2, &grad_y_fy_2); + grad_x_fx_1 *= color_center_projector.fx; + grad_x_fx_2 *= color_center_projector.fx; + grad_y_fy_1 *= color_center_projector.fy; + grad_y_fy_2 *= color_center_projector.fy; + + float inv_ls_z = 1.f / ls.z; + float ls_z_sq = ls.z * ls.z; + float inv_ls_z_sq = inv_ls_z * inv_ls_z; + + jacobian_1[0] = -grad_x_fx_1 * inv_ls_z; + jacobian_1[1] = -grad_y_fy_1 * inv_ls_z; + jacobian_1[2] = (ls.x * grad_x_fx_1 + ls.y * grad_y_fy_1) * inv_ls_z_sq; + + float ls_x_y = ls.x * ls.y; + + jacobian_1[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_1 + ls_x_y * grad_x_fx_1) * inv_ls_z_sq; + jacobian_1[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_1 + ls_x_y * grad_y_fy_1) * inv_ls_z_sq; + jacobian_1[5] = -(ls.x * grad_y_fy_1 - ls.y * grad_x_fx_1) * inv_ls_z; + + jacobian_2[0] = -grad_x_fx_2 * inv_ls_z; + jacobian_2[1] = -grad_y_fy_2 * inv_ls_z; + jacobian_2[2] = (ls.x * grad_x_fx_2 + ls.y * grad_y_fy_2) * inv_ls_z_sq; + jacobian_2[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy_2 + ls_x_y * grad_x_fx_2) * inv_ls_z_sq; + jacobian_2[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx_2 + ls_x_y * grad_y_fy_2) * inv_ls_z_sq; + jacobian_2[5] = -(ls.x * grad_y_fy_2 - ls.y * grad_x_fx_2) * inv_ls_z; +} + +__forceinline__ __device__ void ComputeRawColorResidualAndJacobian( + const PixelCenterProjector& color_center_projector, + cudaTextureObject_t color_texture, + const float2& pxy, + const float3& ls, // surfel_local_position + float surfel_gradmag, + float* raw_residual, + float* jacobian) { + ComputeRawColorResidual(color_texture, pxy, surfel_gradmag, raw_residual); + + float grad_x_fx; + float grad_y_fy; + ColorJacobianWrtProjectedPosition( + color_texture, pxy, &grad_x_fx, &grad_y_fy); + grad_x_fx *= color_center_projector.fx; + grad_y_fy *= color_center_projector.fy; + + float inv_ls_z = 1.f / ls.z; + float ls_z_sq = ls.z * ls.z; + float inv_ls_z_sq = inv_ls_z * inv_ls_z; + + jacobian[0] = -grad_x_fx * inv_ls_z; + jacobian[1] = -grad_y_fy * inv_ls_z; + jacobian[2] = (ls.x * grad_x_fx + ls.y * grad_y_fy) * inv_ls_z_sq; + + float ls_x_y = ls.x * ls.y; + + jacobian[3] = ((ls.y * ls.y + ls_z_sq) * grad_y_fy + ls_x_y * grad_x_fx) * inv_ls_z_sq; + jacobian[4] = -((ls.x * ls.x + ls_z_sq) * grad_x_fx + ls_x_y * grad_y_fy) * inv_ls_z_sq; + jacobian[5] = -(ls.x * grad_y_fy - ls.y * grad_x_fx) * inv_ls_z; +} + +template +__forceinline__ __device__ void AccumulatePoseResidualAndCount( + bool visible, + float residual, + CUDABuffer_& residual_count_buffer, + CUDABuffer_& residual_buffer, + typename cub::BlockReduce::TempStorage* float_storage, + typename cub::BlockReduce::TempStorage* int_storage) { + typedef typename cub::BlockReduce BlockReduceFloat; + typedef typename cub::BlockReduce BlockReduceInt; + + __syncthreads(); // Required before re-use of shared memory. + int num_valid_residuals = BlockReduceInt(*int_storage).Sum(visible ? 1 : 0); + if (threadIdx.x == 0 && (block_height == 1 || threadIdx.y == 0)) { + //residual_count_buffer[blockIdx.x] = num_valid_residuals; + atomicAdd(&residual_count_buffer(0, 0), static_cast(num_valid_residuals)); + } + + __syncthreads(); // Required before re-use of shared memory. + const float residual_sum = + BlockReduceFloat(*float_storage).Sum(visible ? 
residual : 0.f); + if (threadIdx.x == 0 && (block_height == 1 || threadIdx.y == 0)) { + atomicAdd(&residual_buffer(0, 0), residual_sum); + } +} + +template +__global__ void AccumulatePoseEstimationCoeffsCUDAKernel( + SurfelProjectionParameters s, + DepthToColorPixelCorner depth_to_color, + PixelCenterProjector color_center_projector, + PixelCornerProjector color_corner_projector, + PixelCenterUnprojector depth_unprojector, + cudaTextureObject_t color_texture, + CUDABuffer_ residual_count_buffer, + CUDABuffer_ residual_buffer, + CUDABuffer_ H_buffer, + CUDABuffer_ b_buffer) { + unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; + + bool visible; + SurfelProjectionResult6 r; + if (!AnySurfelProjectsToAssociatedPixel(&surfel_index, s, &visible, &r)) { + return; + } + + float jacobian[6]; + float raw_residual; + + constexpr int block_height = 1; + typedef cub::BlockReduce BlockReduceFloat; + typedef cub::BlockReduce BlockReduceInt; + __shared__ union { + typename BlockReduceFloat::TempStorage float_storage; + typename BlockReduceInt::TempStorage int_storage; + } temp_storage; + + // TODO: Would it be faster to do the accumulation only once, while summing + // both residual types at the same time? + + // --- Depth residual --- + if (use_depth_residuals) { + float3 surfel_local_normal = s.frame_T_global.Rotate(r.surfel_normal); // TODO: Could be gotten from surfel association instead of computing it twice + + float depth_residual_inv_stddev = + ComputeDepthResidualInvStddevEstimate(depth_unprojector.nx(r.px), depth_unprojector.ny(r.py), r.pixel_calibrated_depth, surfel_local_normal, s.depth_params.baseline_fx); + + ComputeRawDepthResidualAndJacobian( + depth_unprojector, + r.px, + r.py, + r.pixel_calibrated_depth, + depth_residual_inv_stddev, + r.surfel_local_position, + surfel_local_normal, + &raw_residual, + jacobian); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual, + ComputeDepthResidualWeight(raw_residual), + jacobian, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + if (debug) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDepthResidual(raw_residual), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + } + + // --- Descriptor residual --- + if (use_descriptor_residuals) { + float raw_residual_2; + float jacobian_2[6]; + + float2 color_pxy; + if (TransformDepthToColorPixelCorner(r.pxy, depth_to_color, &color_pxy)) { + // CudaAssert(visible); + float2 t1_pxy, t2_pxy; + ComputeTangentProjections( + r.surfel_global_position, + r.surfel_normal, + SurfelGetRadiusSquared(s.surfels, surfel_index), + s.frame_T_global, + color_corner_projector, + &t1_pxy, + &t2_pxy); + ComputeRawDescriptorResidualAndJacobian( + color_center_projector, + color_texture, + color_pxy, + t1_pxy, t2_pxy, + r.surfel_local_position, + s.surfels(kSurfelDescriptor1, surfel_index), + s.surfels(kSurfelDescriptor2, surfel_index), + &raw_residual, + &raw_residual_2, + jacobian, + jacobian_2); + /*if (surfel_index == 0){ + for (int debugi = 0; debugi < 6; ++debugi){ + printf("jacobian1 = %f, jacobian2 = %f \n", jacobian[debugi], jacobian_2[debugi]); + } + + printf("residual_weight 1: %f \n",ComputeDescriptorResidualWeight(raw_residual)); + printf("residual_weight 2: %f \n",ComputeDescriptorResidualWeight(raw_residual_2)); + }*/ + } else { + visible = false; + } + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual, + 
ComputeDescriptorResidualWeight(raw_residual), + jacobian, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual_2, + ComputeDescriptorResidualWeight(raw_residual_2), + jacobian_2, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + if (debug) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDescriptorResidual(raw_residual), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + } +} + +template +__global__ void MyNewAccumulatePoseEstimationCoeffsCUDAKernel( + SurfelProjectionParameters s, + DepthToColorPixelCorner depth_to_color, + PixelCenterProjector color_center_projector, + PixelCornerProjector color_corner_projector, + PixelCenterUnprojector depth_unprojector, + cudaTextureObject_t color_texture, + CUDABuffer_ residual_count_buffer, + CUDABuffer_ residual_buffer, + CUDABuffer_ H_buffer, + CUDABuffer_ b_buffer) { + unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; + + bool visible; + SurfelProjectionResult6 r; + if (!AnySurfelProjectsToAssociatedPixel(&surfel_index, s, &visible, &r)) { + return; + } + // CudaAssert(visible); //should be true to be here? + float jacobian[6] = {0,0,0,0,0,0}; + float depth_raw_residual = 0; + float raw_residual_vec[6] = {0,0,0,0,0,0}; // It's very important to initialize !!!! + + constexpr int block_height = 1; + typedef cub::BlockReduce BlockReduceFloat; + typedef cub::BlockReduce BlockReduceInt; + __shared__ union { + typename BlockReduceFloat::TempStorage float_storage; + typename BlockReduceInt::TempStorage int_storage; + } temp_storage; + + // TODO: Would it be faster to do the accumulation only once, while summing + // both residual types at the same time? + + // --- Depth residual --- + if (use_depth_residuals) { + float3 surfel_local_normal = s.frame_T_global.Rotate(r.surfel_normal); // TODO: Could be gotten from surfel association instead of computing it twice + + float depth_residual_inv_stddev = + ComputeDepthResidualInvStddevEstimate(depth_unprojector.nx(r.px), depth_unprojector.ny(r.py), r.pixel_calibrated_depth, surfel_local_normal, s.depth_params.baseline_fx); + ComputeRawDepthResidualAndJacobian( + depth_unprojector, + r.px, + r.py, + r.pixel_calibrated_depth, + depth_residual_inv_stddev, + r.surfel_local_position, + surfel_local_normal, + &depth_raw_residual, + jacobian); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + depth_raw_residual, + ComputeDepthResidualWeight(depth_raw_residual), + jacobian, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + if (debug) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDepthResidual(depth_raw_residual), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + } + + // --- Descriptor residual --- + if (use_descriptor_residuals) { + // float raw_residual_2; + // jzmTODO 11.9: how do you arrange the jacobians if you have 128 channels of features? + float jacobian_2[6] = {0,0,0,0,0,0}; + float jacobian_3[6] = {0,0,0,0,0,0}; + float jacobian_4[6] = {0,0,0,0,0,0}; + float jacobian_5[6] = {0,0,0,0,0,0}; + float jacobian_6[6] = {0,0,0,0,0,0}; + float2 color_pxy; + float2 t1_pxy, t2_pxy; + // 10.30 If visible, compute t1_px1, t2_pxy ( <- 11.12 This statement is false! The transformdepthtocolorpixelcorner function will execute anyway whatever visible is. 
+ // I tried to save this computation by skipping doing the ComputeRawFeatureDescriptorResidual if visible = false, but I got deadlock, which might come from surfels sharing the same keyframe? ) + if (TransformDepthToColorPixelCorner(r.pxy, depth_to_color, &color_pxy)) { + // CudaAssert(visible); + ComputeTangentProjections( + r.surfel_global_position, + r.surfel_normal, + SurfelGetRadiusSquared(s.surfels, surfel_index), + s.frame_T_global, + color_corner_projector, + &t1_pxy, + &t2_pxy); + // 10.30 If visible, iterate over all the channels, accumulate H and b for each channel + // We only need to retrieve current surfel_descriptor value once + // constexpr int kSurfelDescriptorArr[6] = {6,7,8,9,10,11}; + float surfel_descriptor[kSurfelNumDescriptor]; // problematic with const float array and use for loop to initialize + #pragma unroll + for (int i = 0; i< kSurfelNumDescriptor; ++i){ + surfel_descriptor[i] = s.surfels(kSurfelFixedAttributeCount+i, surfel_index); + CudaAssert(surfel_descriptor[i] == surfel_descriptor[i]); + } + // we only need to compute the descriptor residual in vector form once. + // jzmTODO: maybe when we change the data structure from color_texture to feature_texture, we can learn from intensity implementation and + // loop over all the feature maps, for each feature map, we do exactly the same thing for intensity based approach, just to change the + // indices of H and b (in geometry optimization). For pose optimization, we just loop over all the feature maps and accumulate H and b. + ComputeRawFeatureDescriptorResidual( + color_texture, // TODO: use feature_texture + color_pxy, + t1_pxy, + t2_pxy, + surfel_descriptor, + raw_residual_vec); + // 11.3 debug weight, why jacobian is not nan but H is nan + /*if (surfel_index == 0){ + for (int debugi=0; debugi < 6; ++debugi){ + printf("residual_weight %d: %f \n", debugi,ComputeDescriptorResidualWeight(raw_residual_vec[debugi])); + } + }*/ + ComputeRawDescriptorFeatureJacobian( + color_center_projector, + color_texture, + color_pxy, + t1_pxy, t2_pxy, + r.surfel_local_position, + jacobian, + jacobian_2, + 0 /* channel*/); + ComputeRawDescriptorFeatureJacobian( + color_center_projector, + color_texture, + color_pxy, + t1_pxy, t2_pxy, + r.surfel_local_position, + jacobian_3, + jacobian_4, + 1 /* channel*/); + ComputeRawDescriptorFeatureJacobian( + color_center_projector, + color_texture, + color_pxy, + t1_pxy, t2_pxy, + r.surfel_local_position, + jacobian_5, + jacobian_6, + 2 /* channel*/); + } + else{ + visible = false; // nothing is done if not visible + } + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual_vec[0], + ComputeDescriptorResidualWeight(raw_residual_vec[0]), + jacobian, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual_vec[0 + 3], // channel_i + N is residual_2 for each channel + ComputeDescriptorResidualWeight(raw_residual_vec[0 + 3]), + jacobian_2, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual_vec[1], + ComputeDescriptorResidualWeight(raw_residual_vec[1]), + jacobian_3, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual_vec[1 + 3], // channel_i + N is residual_2 for each channel + ComputeDescriptorResidualWeight(raw_residual_vec[1 + 3]), + jacobian_4, + H_buffer, + b_buffer, + 
&temp_storage.float_storage); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual_vec[2], + ComputeDescriptorResidualWeight(raw_residual_vec[2]), + jacobian_5, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual_vec[2 + 3], // channel_i + N is residual_2 for each channel + ComputeDescriptorResidualWeight(raw_residual_vec[2 + 3]), + jacobian_6, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + + + // 10.30 Put the debug within the for loop above? + if (debug) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDescriptorResidual(raw_residual_vec[0]), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + } +} + +template +__global__ void TestAccumulatePoseEstimationCoeffsCUDAKernel( + SurfelProjectionParameters s, + DepthToColorPixelCorner depth_to_color, + PixelCenterProjector color_center_projector, + PixelCornerProjector color_corner_projector, + PixelCenterUnprojector depth_unprojector, + /*cudaTextureObject_t color_texture,*/ + CUDABuffer_ feature_arr, + CUDABuffer_ residual_count_buffer, + CUDABuffer_ residual_buffer, + CUDABuffer_ H_buffer, + CUDABuffer_ b_buffer) { + unsigned int surfel_index = blockIdx.x * blockDim.x + threadIdx.x; + /*if(surfel_index == 0){ + printf("pose: feat(400,2000)=%f, feat(457,2216)=%f \n",feature_arr(400,2000), feature_arr(457,2216)); + }*/ + bool visible; + SurfelProjectionResult6 r; + if (!AnySurfelProjectsToAssociatedPixel(&surfel_index, s, &visible, &r)) { + return; + } + // CudaAssert(visible); //should be true to be here? + float jacobian[6] = {0}; + float depth_raw_residual = 0; + float raw_residual_vec[kSurfelNumDescriptor] = {0}; // It's very important to initialize !!!! + + constexpr int block_height = 1; + typedef cub::BlockReduce BlockReduceFloat; + typedef cub::BlockReduce BlockReduceInt; + __shared__ union { + typename BlockReduceFloat::TempStorage float_storage; + typename BlockReduceInt::TempStorage int_storage; + } temp_storage; + + // TODO: Would it be faster to do the accumulation only once, while summing + // both residual types at the same time? 
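// One possible answer to the TODO above, sketched as commented-out pseudocode (an
// illustrative idea only, not part of badslam): both residual types use 6-dimensional pose
// Jacobians, so each thread could first sum its weighted contributions into thread-local
// normal-equation terms and then run a single block reduction per entry, e.g.
//
//   float local_H[21] = {0};  // upper triangle of sum_i w_i * J_i * J_i^T
//   float local_b[6]  = {0};  // sum_i w_i * r_i * J_i
//   auto accumulate = [&](bool valid, float w, float r, const float* J) {
//     if (!valid) return;
//     int idx = 0;
//     for (int row = 0; row < 6; ++row) {
//       for (int col = row; col < 6; ++col) local_H[idx++] += w * J[row] * J[col];
//       local_b[row] += w * r * J[row];
//     }
//   };
//   // ...accumulate() the depth residual and every descriptor residual, then reduce
//   // local_H and local_b across the block once and atomicAdd into H_buffer / b_buffer.
//
// Whether this wins depends on register pressure versus the cost of the extra block
// reductions, so it is left as a TODO here.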
+ + // --- Depth residual --- + if (use_depth_residuals) { + float3 surfel_local_normal = s.frame_T_global.Rotate(r.surfel_normal); // TODO: Could be gotten from surfel association instead of computing it twice + + float depth_residual_inv_stddev = + ComputeDepthResidualInvStddevEstimate(depth_unprojector.nx(r.px), depth_unprojector.ny(r.py), r.pixel_calibrated_depth, surfel_local_normal, s.depth_params.baseline_fx); + ComputeRawDepthResidualAndJacobian( + depth_unprojector, + r.px, + r.py, + r.pixel_calibrated_depth, + depth_residual_inv_stddev, + r.surfel_local_position, + surfel_local_normal, + &depth_raw_residual, + jacobian); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + depth_raw_residual, + ComputeDepthResidualWeight(depth_raw_residual), + jacobian, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + if (debug) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDepthResidual(depth_raw_residual), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + } + + // --- Descriptor residual --- + if (use_descriptor_residuals) { + // float raw_residual_2; + float jacobian_all[6*kSurfelNumDescriptor] = {0}; + float2 color_pxy; + float2 t1_pxy, t2_pxy; + // 10.30 If visible, compute t1_px1, t2_pxy ( <- 11.12 This statement is false! The transformdepthtocolorpixelcorner function will execute anyway whatever visible is. + // I tried to save this computation by skipping doing the ComputeRawFeatureDescriptorResidual if visible = false, but I got deadlock, which might come from surfels sharing the same keyframe? ) + if (TransformDepthToColorPixelCorner(r.pxy, depth_to_color, &color_pxy)) { + // CudaAssert(visible); + ComputeTangentProjections( + r.surfel_global_position, + r.surfel_normal, + SurfelGetRadiusSquared(s.surfels, surfel_index), + s.frame_T_global, + color_corner_projector, + &t1_pxy, + &t2_pxy); + // CudaAssert(t1_pxy.x > 0.5f && t1_pxy.y > 0.5f); + // CudaAssert(t2_pxy.x > 0.5f && t2_pxy.y > 0.5f); + + // 10.30 If visible, iterate over all the channels, accumulate H and b for each channel + // We only need to retrieve current surfel_descriptor value once + float surfel_descriptor[kSurfelNumDescriptor]; + #pragma unroll + for (int i = 0; i< kSurfelNumDescriptor; ++i){ + surfel_descriptor[i] = s.surfels(kSurfelFixedAttributeCount + i, surfel_index); // constexpr int kSurfelDescriptorArr[] = {6,7,8,9,10,11}; + CudaAssert(surfel_descriptor[i] == surfel_descriptor[i]); + } + // we only need to compute the descriptor residual in vector form once. + // jzmTODO: maybe when we change the data structure from color_texture to feature_texture, we can learn from intensity implementation and + // loop over all the feature maps, for each feature map, we do exactly the same thing for intensity based approach, just to change the + // indices of H and b (in geometry optimization). For pose optimization, we just loop over all the feature maps and accumulate H and b. 
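// Residual layout used below (inferred from the accumulation loop further down):
// raw_residual_vec holds 2 * kTotalChannels values; index c is the first tangent-direction
// residual of channel c and index c + kTotalChannels the second one, so
// kSurfelNumDescriptor is expected to equal 2 * kTotalChannels.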
+ TestComputeRawFeatureDescriptorResidual( + feature_arr, + color_pxy, + t1_pxy, + t2_pxy, + surfel_descriptor, + raw_residual_vec); + for (int channel = 0; channel < kTotalChannels; ++channel){ + TestComputeRawDescriptorFeatureJacobian( + feature_arr, + color_center_projector, + color_pxy, + t1_pxy, t2_pxy, + r.surfel_local_position, + jacobian_all, + channel /* channel*/); + } + } + else{ + visible = false; // nothing is done if not visible + } + for (int channel = 0; channel < kTotalChannels; ++channel){ + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual_vec[channel], + ComputeDescriptorResidualWeight(raw_residual_vec[channel]), + jacobian_all+6*channel, // pass the address of jacobian_c_1[0] + H_buffer, + b_buffer, + &temp_storage.float_storage); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_residual_vec[channel + kTotalChannels], // channel_i + N is residual_2 for each channel + ComputeDescriptorResidualWeight(raw_residual_vec[channel + kTotalChannels]), + jacobian_all + 6*kTotalChannels + 6*channel, // pass the address of jacobian_c_2[0] + H_buffer, + b_buffer, + &temp_storage.float_storage); + } + // 10.30 Put the debug within the for loop above? + if (debug) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDescriptorResidual(raw_residual_vec[0]), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + } +} +void CallAccumulatePoseEstimationCoeffsCUDAKernel( + cudaStream_t stream, + bool debug, + bool use_depth_residuals, + bool use_descriptor_residuals, + const SurfelProjectionParameters& s, + const DepthToColorPixelCorner& depth_to_color, + const PixelCenterProjector& color_center_projector, + const PixelCornerProjector& color_corner_projector, + const PixelCenterUnprojector& depth_unprojector, + /*cudaTextureObject_t color_texture,*/ + const CUDABuffer_& feature_buffer, + const CUDABuffer_& residual_count_buffer, + const CUDABuffer_& residual_buffer, + const CUDABuffer_& H_buffer, + const CUDABuffer_& b_buffer) { + COMPILE_OPTION_3(debug, use_depth_residuals, use_descriptor_residuals, + CUDA_AUTO_TUNE_1D_TEMPLATED( + TestAccumulatePoseEstimationCoeffsCUDAKernel, + 256, + s.surfels_size, + 0, stream, + TEMPLATE_ARGUMENTS(block_width, _debug, _use_depth_residuals, _use_descriptor_residuals), + /* kernel parameters */ + s, + depth_to_color, + color_center_projector, + color_corner_projector, + depth_unprojector, + /*color_texture,*/ + feature_buffer, + residual_count_buffer, + residual_buffer, + H_buffer, + b_buffer)); + CUDA_CHECK(); +} + + +template +__global__ void AccumulatePoseEstimationCoeffsFromImagesCUDAKernel_GradientXY( + PixelCornerProjector depth_projector, + PixelCenterProjector color_center_projector, + PixelCenterUnprojector depth_unprojector, + float baseline_fx, + DepthToColorPixelCorner depth_to_color, + float threshold_factor, + CUDAMatrix3x4 estimate_frame_T_surfel_frame, + CUDABuffer_ surfel_depth, + CUDABuffer_ surfel_normals, + CUDABuffer_ surfel_color, + CUDABuffer_ frame_depth, + CUDABuffer_ frame_normals, + cudaTextureObject_t frame_color, + CUDABuffer_ residual_count_buffer, + CUDABuffer_ residual_buffer, + CUDABuffer_ H_buffer, + CUDABuffer_ b_buffer, + CUDABuffer_ debug_residual_image) { + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + bool visible = false; + float depth_jacobian[6]; + float raw_depth_residual; + float descriptor_jacobian_1[6]; + 
float descriptor_jacobian_2[6]; + float raw_descriptor_residual_1; + float raw_descriptor_residual_2; + + if (x < surfel_depth.width() && y < surfel_depth.height()) { + float surfel_calibrated_depth = surfel_depth(y, x); + if (surfel_calibrated_depth > 0) { + float3 surfel_local_position; + if (estimate_frame_T_surfel_frame.MultiplyIfResultZIsPositive(depth_unprojector.UnprojectPoint(x, y, surfel_calibrated_depth), &surfel_local_position)) { + int px, py; + float2 pxy; + if (ProjectSurfelToImage( + frame_depth.width(), frame_depth.height(), + depth_projector, + surfel_local_position, + &px, &py, + &pxy)) { + float pixel_calibrated_depth = frame_depth(py, px); + if (pixel_calibrated_depth > 0) { + float3 surfel_local_normal; + if (IsAssociatedWithPixel( + surfel_local_position, + surfel_normals, + x, + y, + estimate_frame_T_surfel_frame, + frame_normals, + px, + py, + pixel_calibrated_depth, + threshold_factor * kDepthResidualDefaultTukeyParam, + baseline_fx, + depth_unprojector, + nullptr, + &surfel_local_normal)) { + visible = true; + + if (use_depth_residuals) { + float depth_residual_inv_stddev = + ComputeDepthResidualInvStddevEstimate(depth_unprojector.nx(px), depth_unprojector.ny(py), pixel_calibrated_depth, surfel_local_normal, baseline_fx); + + ComputeRawDepthResidualAndJacobian( + depth_unprojector, + px, + py, + pixel_calibrated_depth, + depth_residual_inv_stddev, + surfel_local_position, + surfel_local_normal, + &raw_depth_residual, + depth_jacobian); + } + + if (use_descriptor_residuals) { + if (x < surfel_depth.width() - 1 && // NOTE: These conditions are only necessary since we compute descriptors in the input image and always go right / down + y < surfel_depth.height() - 1) { + // TODO: De-duplicate this with the identical code below in this file + // Compute descriptor in surfel image + const float intensity = 1 / 255.f * surfel_color(y, x); + const float t1_intensity = 1 / 255.f * surfel_color(y, x + 1); + const float t2_intensity = 1 / 255.f * surfel_color(y + 1, x); + + float surfel_descriptor_1 = (180.f * (t1_intensity - intensity)); + float surfel_descriptor_2 = (180.f * (t2_intensity - intensity)); + + // Transform the two offset points to the target / estimate frame. + // In order not to require depth estimates at both offset pixels, + // we estimate their depth using the center pixel's normal. 
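// Spelled out (this only restates what the code below computes): the center pixel
// unprojects to p0 = depth * (nx(x), ny(y), 1), which together with the normal n defines
// the plane dot(n, X) = dot(n, p0) = plane_d. The neighboring pixel (x+1, y) unprojects to
// X = d * (nx(x+1), ny(y), 1) for an unknown depth d, and intersecting that ray with the
// plane gives d = plane_d / (nx(x+1) * n.x + ny(y) * n.y + n.z); the same applies to
// (x, y+1). These are the x_plus_1_depth and y_plus_1_depth expressions below.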
+ float3 surfel_normal = U16ToImageSpaceNormal(surfel_normals(y, x)); + const float plane_d = + (depth_unprojector.nx(x) * surfel_calibrated_depth) * surfel_normal.x + + (depth_unprojector.ny(y) * surfel_calibrated_depth) * surfel_normal.y + surfel_calibrated_depth * surfel_normal.z; + + float x_plus_1_depth = plane_d / (depth_unprojector.nx(x + 1) * surfel_normal.x + depth_unprojector.ny(y) * surfel_normal.y + surfel_normal.z); + float3 x_plus_1_local_position = estimate_frame_T_surfel_frame * depth_unprojector.UnprojectPoint(x + 1, y, x_plus_1_depth); + float2 pxy_t1 = depth_projector.Project(x_plus_1_local_position); + int t1_px = static_cast(pxy_t1.x); + int t1_py = static_cast(pxy_t1.y); + if (pxy_t1.x < 0 || pxy_t1.y < 0 || + // t1_px < 0 || t1_py < 0 || + t1_px >= frame_depth.width() || t1_py >= frame_depth.height()) { + visible = false; + } + + float y_plus_1_depth = plane_d / (depth_unprojector.nx(x) * surfel_normal.x + depth_unprojector.ny(y + 1) * surfel_normal.y + surfel_normal.z); + float3 y_plus_1_local_position = estimate_frame_T_surfel_frame * depth_unprojector.UnprojectPoint(x, y + 1, y_plus_1_depth); + float2 pxy_t2 = depth_projector.Project(y_plus_1_local_position); + int t2_px = static_cast(pxy_t2.x); + int t2_py = static_cast(pxy_t2.y); + if (pxy_t2.x < 0 || pxy_t2.y < 0 || + // t2_px < 0 || t2_py < 0 || + t2_px >= frame_depth.width() || t2_py >= frame_depth.height()) { + visible = false; + } + + float2 color_pxy, color_pxy_t1, color_pxy_t2; + if (visible && + x_plus_1_local_position.z > 0 && + y_plus_1_local_position.z > 0 && + TransformDepthToColorPixelCorner(pxy, depth_to_color, &color_pxy) && + TransformDepthToColorPixelCorner(pxy_t1, depth_to_color, &color_pxy_t1) && + TransformDepthToColorPixelCorner(pxy_t2, depth_to_color, &color_pxy_t2)) { + ComputeRawDescriptorResidualAndJacobianWithFloatTexture( + color_center_projector, + frame_color, + color_pxy, + color_pxy_t1, + color_pxy_t2, + surfel_local_position, + surfel_descriptor_1, + surfel_descriptor_2, + &raw_descriptor_residual_1, + &raw_descriptor_residual_2, + descriptor_jacobian_1, + descriptor_jacobian_2); + } else { + visible = false; + } + } else { + visible = false; + } + } + } + } + } + } + } + } + + // Write residual debug image? + if (debug && x < surfel_depth.width() && y < surfel_depth.height()) { + debug_residual_image(y, x) = + visible ? + ((use_depth_residuals ? ComputeWeightedDepthResidual(raw_depth_residual) : 0) + + (use_descriptor_residuals ? ComputeWeightedDescriptorResidual(raw_descriptor_residual_1) : 0)) : // NOTE: Using the 1st residual only + CUDART_NAN_F; + } + + // Early exit? 
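// Note on the pattern below: threads must not return early here even when they are not
// visible, because the cub::BlockReduce calls (and the __syncthreads() around them) further
// down require every thread of the block to participate. Instead, a shared flag records
// whether any thread in the block found a visible surfel; only if none did does the whole
// block return together, and non-visible threads otherwise contribute zeros to the
// reductions.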
+ __shared__ int have_visible; + if (threadIdx.x == 0 && threadIdx.y == 0) { + have_visible = 0; + } + __syncthreads(); + + if (visible) { + have_visible = 1; + } + __syncthreads(); + if (have_visible == 0) { + return; + } + + typedef cub::BlockReduce BlockReduceFloat; + typedef cub::BlockReduce BlockReduceInt; + __shared__ union { + typename BlockReduceFloat::TempStorage float_storage; + typename BlockReduceInt::TempStorage int_storage; + } temp_storage; + + if (use_depth_residuals) { + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_depth_residual, + ComputeDepthResidualWeight(raw_depth_residual, threshold_factor), + depth_jacobian, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + if (debug) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDepthResidual(raw_depth_residual, threshold_factor), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + } + + if (use_descriptor_residuals) { + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_descriptor_residual_1, + ComputeDescriptorResidualWeight(raw_descriptor_residual_1, threshold_factor), + descriptor_jacobian_1, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_descriptor_residual_2, + ComputeDescriptorResidualWeight(raw_descriptor_residual_2, threshold_factor), + descriptor_jacobian_2, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + if (debug) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDescriptorResidual(raw_descriptor_residual_1, threshold_factor), // NOTE: Using the 1st residual only + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + } +} + +void CallAccumulatePoseEstimationCoeffsFromImagesCUDAKernel_GradientXY( + cudaStream_t stream, + bool debug, + bool use_depth_residuals, + bool use_descriptor_residuals, + const PixelCornerProjector& depth_projector, + const PixelCenterProjector& color_center_projector, + const PixelCenterUnprojector& depth_unprojector, + float baseline_fx, + const DepthToColorPixelCorner& depth_to_color, + float threshold_factor, + const CUDAMatrix3x4& estimate_frame_T_surfel_frame, + const CUDABuffer_& surfel_depth, + const CUDABuffer_& surfel_normals, + const CUDABuffer_& surfel_color, + const CUDABuffer_& frame_depth, + const CUDABuffer_& frame_normals, + cudaTextureObject_t frame_color, + const CUDABuffer_& residual_count_buffer, + const CUDABuffer_& residual_buffer, + const CUDABuffer_& H_buffer, + const CUDABuffer_& b_buffer, + CUDABuffer_* debug_residual_image) { + COMPILE_OPTION_3(debug, use_depth_residuals, use_descriptor_residuals, + CUDA_AUTO_TUNE_2D_TEMPLATED( + AccumulatePoseEstimationCoeffsFromImagesCUDAKernel_GradientXY, + 32, 32, + surfel_depth.width(), surfel_depth.height(), + 0, stream, + TEMPLATE_ARGUMENTS(block_width, block_height, _debug, _use_depth_residuals, _use_descriptor_residuals), + /* kernel parameters */ + depth_projector, + color_center_projector, + depth_unprojector, + baseline_fx, + depth_to_color, + threshold_factor, + estimate_frame_T_surfel_frame, + surfel_depth, + surfel_normals, + surfel_color, + frame_depth, + frame_normals, + frame_color, + residual_count_buffer, + residual_buffer, + H_buffer, + b_buffer, + debug_residual_image ? 
*debug_residual_image : CUDABuffer_())); +} + + +template +__global__ void AccumulatePoseEstimationCoeffsFromImagesCUDAKernel_GradMag( + PixelCornerProjector depth_projector, + PixelCenterProjector color_center_projector, + PixelCenterUnprojector depth_unprojector, + float baseline_fx, + DepthToColorPixelCorner depth_to_color, + float threshold_factor, + CUDAMatrix3x4 estimate_frame_T_surfel_frame, + CUDABuffer_ surfel_depth, + CUDABuffer_ surfel_normals, + CUDABuffer_ surfel_color, + CUDABuffer_ frame_depth, + CUDABuffer_ frame_normals, + cudaTextureObject_t frame_color, + CUDABuffer_ residual_count_buffer, + CUDABuffer_ residual_buffer, + CUDABuffer_ H_buffer, + CUDABuffer_ b_buffer, + CUDABuffer_ debug_residual_image) { + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + bool visible = false; + float depth_jacobian[6]; + float raw_depth_residual; + float descriptor_jacobian[6]; + float raw_descriptor_residual; + + if (x < surfel_depth.width() && y < surfel_depth.height()) { + float surfel_calibrated_depth = surfel_depth(y, x); + if (surfel_calibrated_depth > 0) { + float3 surfel_local_position; + if (estimate_frame_T_surfel_frame.MultiplyIfResultZIsPositive(depth_unprojector.UnprojectPoint(x, y, surfel_calibrated_depth), &surfel_local_position)) { + int px, py; + float2 pxy; + if (ProjectSurfelToImage( + frame_depth.width(), frame_depth.height(), + depth_projector, + surfel_local_position, + &px, &py, + &pxy)) { + float pixel_calibrated_depth = frame_depth(py, px); + if (pixel_calibrated_depth > 0) { + float3 surfel_local_normal; + if (IsAssociatedWithPixel( + surfel_local_position, + surfel_normals, + x, + y, + estimate_frame_T_surfel_frame, + frame_normals, + px, + py, + pixel_calibrated_depth, + threshold_factor * kDepthResidualDefaultTukeyParam, + baseline_fx, + depth_unprojector, + nullptr, + &surfel_local_normal)) { + visible = true; + + if (use_depth_residuals) { + float depth_residual_inv_stddev = + ComputeDepthResidualInvStddevEstimate(depth_unprojector.nx(px), depth_unprojector.ny(py), pixel_calibrated_depth, surfel_local_normal, baseline_fx); + + ComputeRawDepthResidualAndJacobian( + depth_unprojector, + px, + py, + pixel_calibrated_depth, + depth_residual_inv_stddev, + surfel_local_position, + surfel_local_normal, + &raw_depth_residual, + depth_jacobian); + } + + if (use_descriptor_residuals) { + float2 color_pxy; + if (TransformDepthToColorPixelCorner(pxy, depth_to_color, &color_pxy)) { + ComputeRawColorResidualAndJacobian( + color_center_projector, + frame_color, + color_pxy, + surfel_local_position, + surfel_color(y, x), + &raw_descriptor_residual, + descriptor_jacobian); + } else { + visible = false; + } + } + } + } + } + } + } + } + + // Write residual debug image? + if (debug && x < surfel_depth.width() && y < surfel_depth.height()) { + debug_residual_image(y, x) = + visible ? + ((use_depth_residuals ? ComputeWeightedDepthResidual(raw_depth_residual) : 0) + + (use_descriptor_residuals ? ComputeWeightedDescriptorResidual(raw_descriptor_residual) : 0)) : + CUDART_NAN_F; + } + + // Early exit? 
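+  // Same block-wide early-exit as in the GradientXY variant: skip the
+  // reductions entirely if no thread of this block contributed a residual.
+  // The shared union below lets the float and int cub::BlockReduce reductions
+  // share one temp-storage allocation; the accumulation helpers are expected
+  // to synchronize between successive uses of it.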
+ __shared__ int have_visible; + if (threadIdx.x == 0 && threadIdx.y == 0) { + have_visible = 0; + } + __syncthreads(); + + if (visible) { + have_visible = 1; + } + __syncthreads(); + if (have_visible == 0) { + return; + } + + typedef cub::BlockReduce BlockReduceFloat; + typedef cub::BlockReduce BlockReduceInt; + __shared__ union { + typename BlockReduceFloat::TempStorage float_storage; + typename BlockReduceInt::TempStorage int_storage; + } temp_storage; + + if (use_depth_residuals) { + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_depth_residual, + ComputeDepthResidualWeight(raw_depth_residual, threshold_factor), + depth_jacobian, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + if (debug) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDepthResidual(raw_depth_residual, threshold_factor), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + } + + if (use_descriptor_residuals) { + AccumulateGaussNewtonHAndB<6, block_width, block_height>( + visible, + raw_descriptor_residual, + ComputeDescriptorResidualWeight(raw_descriptor_residual, threshold_factor), + descriptor_jacobian, + H_buffer, + b_buffer, + &temp_storage.float_storage); + + if (debug) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDescriptorResidual(raw_descriptor_residual, threshold_factor), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + } +} + +void CallAccumulatePoseEstimationCoeffsFromImagesCUDAKernel_GradMag( + cudaStream_t stream, + bool debug, + bool use_depth_residuals, + bool use_descriptor_residuals, + const PixelCornerProjector& depth_projector, + const PixelCenterProjector& color_center_projector, + const PixelCenterUnprojector& depth_unprojector, + float baseline_fx, + const DepthToColorPixelCorner& depth_to_color, + float threshold_factor, + const CUDAMatrix3x4& estimate_frame_T_surfel_frame, + const CUDABuffer_& surfel_depth, + const CUDABuffer_& surfel_normals, + const CUDABuffer_& surfel_color, + const CUDABuffer_& frame_depth, + const CUDABuffer_& frame_normals, + cudaTextureObject_t frame_color, + const CUDABuffer_& residual_count_buffer, + const CUDABuffer_& residual_buffer, + const CUDABuffer_& H_buffer, + const CUDABuffer_& b_buffer, + CUDABuffer_* debug_residual_image) { + COMPILE_OPTION_3(debug, use_depth_residuals, use_descriptor_residuals, + CUDA_AUTO_TUNE_2D_TEMPLATED( + AccumulatePoseEstimationCoeffsFromImagesCUDAKernel_GradMag, + 32, 32, + surfel_depth.width(), surfel_depth.height(), + 0, stream, + TEMPLATE_ARGUMENTS(block_width, block_height, _debug, _use_depth_residuals, _use_descriptor_residuals), + /* kernel parameters */ + depth_projector, + color_center_projector, + depth_unprojector, + baseline_fx, + depth_to_color, + threshold_factor, + estimate_frame_T_surfel_frame, + surfel_depth, + surfel_normals, + surfel_color, + frame_depth, + frame_normals, + frame_color, + residual_count_buffer, + residual_buffer, + H_buffer, + b_buffer, + debug_residual_image ? 
*debug_residual_image : CUDABuffer_())); +} + + +template +__global__ void ComputeCostAndResidualCountFromImagesCUDAKernel_GradientXY( + PixelCornerProjector depth_projector, + PixelCenterUnprojector depth_unprojector, + float baseline_fx, + DepthToColorPixelCorner depth_to_color, + float threshold_factor, + CUDAMatrix3x4 estimate_frame_T_surfel_frame, + CUDABuffer_ surfel_depth, + CUDABuffer_ surfel_normals, + CUDABuffer_ surfel_color, + CUDABuffer_ frame_depth, + CUDABuffer_ frame_normals, + cudaTextureObject_t frame_color, + CUDABuffer_ residual_count_buffer, + CUDABuffer_ residual_buffer) { + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + bool visible = false; + float raw_depth_residual; + float raw_descriptor_residual_1; + float raw_descriptor_residual_2; + + if (x < surfel_depth.width() && y < surfel_depth.height()) { + float surfel_calibrated_depth = surfel_depth(y, x); + if (surfel_calibrated_depth > 0) { + float3 surfel_local_position; + if (estimate_frame_T_surfel_frame.MultiplyIfResultZIsPositive(depth_unprojector.UnprojectPoint(x, y, surfel_calibrated_depth), &surfel_local_position)) { + int px, py; + float2 pxy; + if (ProjectSurfelToImage( + frame_depth.width(), frame_depth.height(), + depth_projector, + surfel_local_position, + &px, &py, + &pxy)) { + float pixel_calibrated_depth = frame_depth(py, px); + if (pixel_calibrated_depth > 0) { + float3 surfel_local_normal; + if (IsAssociatedWithPixel( + surfel_local_position, + surfel_normals, + x, + y, + estimate_frame_T_surfel_frame, + frame_normals, + px, + py, + pixel_calibrated_depth, + threshold_factor * kDepthResidualDefaultTukeyParam, + baseline_fx, + depth_unprojector, + nullptr, + &surfel_local_normal)) { + visible = true; + + if (use_depth_residuals) { + float depth_residual_inv_stddev = + ComputeDepthResidualInvStddevEstimate(depth_unprojector.nx(px), depth_unprojector.ny(py), pixel_calibrated_depth, surfel_local_normal, baseline_fx); + + float3 local_unproj; + ComputeRawDepthResidual(depth_unprojector, px, py, pixel_calibrated_depth, + depth_residual_inv_stddev, + surfel_local_position, surfel_local_normal, + &local_unproj, &raw_depth_residual); + } + + if (use_descriptor_residuals) { + if (x < surfel_depth.width() - 1 && // NOTE: These conditions are only necessary since we compute descriptors in the input image and always go right / down + y < surfel_depth.height() - 1) { + // Compute descriptor in surfel image + const float intensity = 1 / 255.f * surfel_color(y, x); + const float t1_intensity = 1 / 255.f * surfel_color(y, x + 1); + const float t2_intensity = 1 / 255.f * surfel_color(y + 1, x); + + float surfel_descriptor_1 = (180.f * (t1_intensity - intensity)); + float surfel_descriptor_2 = (180.f * (t2_intensity - intensity)); + + // Transform the two offset points to the target / estimate frame. + // In order not to require depth estimates at both offset pixels, + // we estimate their depth using the center pixel's normal. 
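+                  // The surfel normal n and the surfel position define the plane
+                  // n . p = plane_d in the surfel frame (plane_d is evaluated at the
+                  // center pixel below). A pixel (u, v) unprojected at depth d lies at
+                  // d * (nx(u), ny(v), 1), so intersecting its viewing ray with that
+                  // plane gives d = plane_d / (nx(u) * n.x + ny(v) * n.y + n.z). This
+                  // is what x_plus_1_depth and y_plus_1_depth compute for the offset
+                  // pixels (x + 1, y) and (x, y + 1).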
+ float3 surfel_normal = U16ToImageSpaceNormal(surfel_normals(y, x)); + const float plane_d = + (depth_unprojector.nx(x) * surfel_calibrated_depth) * surfel_normal.x + + (depth_unprojector.ny(y) * surfel_calibrated_depth) * surfel_normal.y + surfel_calibrated_depth * surfel_normal.z; + + float x_plus_1_depth = plane_d / (depth_unprojector.nx(x + 1) * surfel_normal.x + depth_unprojector.ny(y) * surfel_normal.y + surfel_normal.z); + float3 x_plus_1_local_position = estimate_frame_T_surfel_frame * depth_unprojector.UnprojectPoint(x + 1, y, x_plus_1_depth); + float2 pxy_t1 = depth_projector.Project(x_plus_1_local_position); + int t1_px = static_cast(pxy_t1.x); + int t1_py = static_cast(pxy_t1.y); + if (pxy_t1.x < 0 || pxy_t1.y < 0 || + // t1_px < 0 || t1_py < 0 || + t1_px >= frame_depth.width() || t1_py >= frame_depth.height()) { + visible = false; + } + + float y_plus_1_depth = plane_d / (depth_unprojector.nx(x) * surfel_normal.x + depth_unprojector.ny(y + 1) * surfel_normal.y + surfel_normal.z); + float3 y_plus_1_local_position = estimate_frame_T_surfel_frame * depth_unprojector.UnprojectPoint(x, y + 1, y_plus_1_depth); + float2 pxy_t2 = depth_projector.Project(y_plus_1_local_position); + int t2_px = static_cast(pxy_t2.x); + int t2_py = static_cast(pxy_t2.y); + if (pxy_t2.x < 0 || pxy_t2.y < 0 || + // t2_px < 0 || t2_py < 0 || + t2_px >= frame_depth.width() || t2_py >= frame_depth.height()) { + visible = false; + } + + float2 color_pxy, color_pxy_t1, color_pxy_t2; + if (visible && + x_plus_1_local_position.z > 0 && + y_plus_1_local_position.z > 0 && + TransformDepthToColorPixelCorner(pxy, depth_to_color, &color_pxy) && + TransformDepthToColorPixelCorner(pxy_t1, depth_to_color, &color_pxy_t1) && + TransformDepthToColorPixelCorner(pxy_t2, depth_to_color, &color_pxy_t2)) { + ComputeRawDescriptorResidualWithFloatTexture( + frame_color, + color_pxy, + color_pxy_t1, + color_pxy_t2, + surfel_descriptor_1, + surfel_descriptor_2, + &raw_descriptor_residual_1, + &raw_descriptor_residual_2); + } else { + visible = false; + } + } else { + visible = false; + } + } + } + } + } + } + } + } + + // Early exit? + __shared__ int have_visible; + if (threadIdx.x == 0 && threadIdx.y == 0) { + have_visible = 0; + } + __syncthreads(); + + if (visible) { + have_visible = 1; + } + __syncthreads(); + if (have_visible == 0) { + return; + } + + typedef cub::BlockReduce BlockReduceFloat; + typedef cub::BlockReduce BlockReduceInt; + __shared__ union { + typename BlockReduceFloat::TempStorage float_storage; + typename BlockReduceInt::TempStorage int_storage; + } temp_storage; + + if (use_depth_residuals) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDepthResidual(raw_depth_residual, threshold_factor), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + + if (use_descriptor_residuals) { + // TODO: It should be possible to merge these two calls and directly accumulate the sum (also use 2 for the residual count then). + // It should even be possible to merge it with the depth residual call as well in case both residual types are used. 
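+    // Sketch of the merged form suggested by the TODO above (illustration only,
+    // assuming AccumulatePoseResidualAndCount adds the passed value to
+    // residual_buffer and counts one contribution per visible thread; the count
+    // would then still have to be scaled by 2 for the two descriptor residuals):
+    //
+    //   AccumulatePoseResidualAndCount(
+    //       visible,
+    //       ComputeWeightedDescriptorResidual(raw_descriptor_residual_1, threshold_factor) +
+    //           ComputeWeightedDescriptorResidual(raw_descriptor_residual_2, threshold_factor),
+    //       residual_count_buffer,
+    //       residual_buffer,
+    //       &temp_storage.float_storage,
+    //       &temp_storage.int_storage);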
+ AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDescriptorResidual(raw_descriptor_residual_1, threshold_factor), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDescriptorResidual(raw_descriptor_residual_2, threshold_factor), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } +} + +void ComputeCostAndResidualCountFromImagesCUDAKernel_GradientXY( + cudaStream_t stream, + bool use_depth_residuals, + bool use_descriptor_residuals, + const PixelCornerProjector& depth_projector, + const PixelCenterUnprojector& depth_unprojector, + float baseline_fx, + const DepthToColorPixelCorner& depth_to_color, + float threshold_factor, + const CUDAMatrix3x4& estimate_frame_T_surfel_frame, + const CUDABuffer_& surfel_depth, + const CUDABuffer_& surfel_normals, + const CUDABuffer_& surfel_color, + const CUDABuffer_& frame_depth, + const CUDABuffer_& frame_normals, + cudaTextureObject_t frame_color, + const CUDABuffer_& residual_count_buffer, + const CUDABuffer_& residual_buffer) { + COMPILE_OPTION_2(use_depth_residuals, use_descriptor_residuals, + CUDA_AUTO_TUNE_2D_TEMPLATED( + ComputeCostAndResidualCountFromImagesCUDAKernel_GradientXY, + 32, 32, + surfel_depth.width(), surfel_depth.height(), + 0, stream, + TEMPLATE_ARGUMENTS(block_width, block_height, _use_depth_residuals, _use_descriptor_residuals), + /* kernel parameters */ + depth_projector, + depth_unprojector, + baseline_fx, + depth_to_color, + threshold_factor, + estimate_frame_T_surfel_frame, + surfel_depth, + surfel_normals, + surfel_color, + frame_depth, + frame_normals, + frame_color, + residual_count_buffer, + residual_buffer)); +} + + +template +__global__ void ComputeCostAndResidualCountFromImagesCUDAKernel_GradMag( + PixelCornerProjector depth_projector, + PixelCenterUnprojector depth_unprojector, + float baseline_fx, + DepthToColorPixelCorner depth_to_color, + float threshold_factor, + CUDAMatrix3x4 estimate_frame_T_surfel_frame, + CUDABuffer_ surfel_depth, + CUDABuffer_ surfel_normals, + CUDABuffer_ surfel_color, + CUDABuffer_ frame_depth, + CUDABuffer_ frame_normals, + cudaTextureObject_t frame_color, + CUDABuffer_ residual_count_buffer, + CUDABuffer_ residual_buffer) { + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + bool visible = false; + float raw_depth_residual; + float raw_descriptor_residual; + + if (x < surfel_depth.width() && y < surfel_depth.height()) { + float surfel_calibrated_depth = surfel_depth(y, x); + if (surfel_calibrated_depth > 0) { + float3 surfel_local_position; + if (estimate_frame_T_surfel_frame.MultiplyIfResultZIsPositive(depth_unprojector.UnprojectPoint(x, y, surfel_calibrated_depth), &surfel_local_position)) { + int px, py; + float2 pxy; + if (ProjectSurfelToImage( + frame_depth.width(), frame_depth.height(), + depth_projector, + surfel_local_position, + &px, &py, + &pxy)) { + float pixel_calibrated_depth = frame_depth(py, px); + if (pixel_calibrated_depth > 0) { + float3 surfel_local_normal; + if (IsAssociatedWithPixel( + surfel_local_position, + surfel_normals, + x, + y, + estimate_frame_T_surfel_frame, + frame_normals, + px, + py, + pixel_calibrated_depth, + threshold_factor * kDepthResidualDefaultTukeyParam, + baseline_fx, + depth_unprojector, + nullptr, + &surfel_local_normal)) { + visible = true; + + if (use_depth_residuals) { + float 
depth_residual_inv_stddev = + ComputeDepthResidualInvStddevEstimate(depth_unprojector.nx(px), depth_unprojector.ny(py), pixel_calibrated_depth, surfel_local_normal, baseline_fx); + + float3 local_unproj; + ComputeRawDepthResidual(depth_unprojector, px, py, pixel_calibrated_depth, + depth_residual_inv_stddev, + surfel_local_position, surfel_local_normal, + &local_unproj, &raw_depth_residual); + } + + if (use_descriptor_residuals) { + float2 color_pxy; + if (TransformDepthToColorPixelCorner(pxy, depth_to_color, &color_pxy)) { + ComputeRawColorResidual(frame_color, color_pxy, surfel_color(y, x), &raw_descriptor_residual); + } else { + visible = false; + } + } + } + } + } + } + } + } + + // Early exit? + __shared__ int have_visible; + if (threadIdx.x == 0 && threadIdx.y == 0) { + have_visible = 0; + } + __syncthreads(); + + if (visible) { + have_visible = 1; + } + __syncthreads(); + if (have_visible == 0) { + return; + } + + typedef cub::BlockReduce BlockReduceFloat; + typedef cub::BlockReduce BlockReduceInt; + __shared__ union { + typename BlockReduceFloat::TempStorage float_storage; + typename BlockReduceInt::TempStorage int_storage; + } temp_storage; + + if (use_depth_residuals) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDepthResidual(raw_depth_residual, threshold_factor), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } + + if (use_descriptor_residuals) { + AccumulatePoseResidualAndCount( + visible, + ComputeWeightedDescriptorResidual(raw_descriptor_residual, threshold_factor), + residual_count_buffer, + residual_buffer, + &temp_storage.float_storage, + &temp_storage.int_storage); + } +} + +void CallComputeCostAndResidualCountFromImagesCUDAKernel_GradMag( + cudaStream_t stream, + bool use_depth_residuals, + bool use_descriptor_residuals, + const PixelCornerProjector& depth_projector, + const PixelCenterUnprojector& depth_unprojector, + float baseline_fx, + const DepthToColorPixelCorner& depth_to_color, + float threshold_factor, + const CUDAMatrix3x4& estimate_frame_T_surfel_frame, + const CUDABuffer_& surfel_depth, + const CUDABuffer_& surfel_normals, + const CUDABuffer_& surfel_color, + const CUDABuffer_& frame_depth, + const CUDABuffer_& frame_normals, + cudaTextureObject_t frame_color, + const CUDABuffer_& residual_count_buffer, + const CUDABuffer_& residual_buffer) { + COMPILE_OPTION_2(use_depth_residuals, use_descriptor_residuals, + CUDA_AUTO_TUNE_2D_TEMPLATED( + ComputeCostAndResidualCountFromImagesCUDAKernel_GradMag, + 32, 32, + surfel_depth.width(), surfel_depth.height(), + 0, stream, + TEMPLATE_ARGUMENTS(block_width, block_height, _use_depth_residuals, _use_descriptor_residuals), + /* kernel parameters */ + depth_projector, + depth_unprojector, + baseline_fx, + depth_to_color, + threshold_factor, + estimate_frame_T_surfel_frame, + surfel_depth, + surfel_normals, + surfel_color, + frame_depth, + frame_normals, + frame_color, + residual_count_buffer, + residual_buffer)); +} + +} diff --git a/cuda_code/kernels_200.cu b/cuda_code/kernels_200.cu new file mode 100644 index 0000000000000000000000000000000000000000..ec5137321ad04277c53a97730ab7354290d9c3a3 --- /dev/null +++ b/cuda_code/kernels_200.cu @@ -0,0 +1,2692 @@ +/* + + + Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. 
A copy of the License is located at + + http://aws.amazon.com/apache2.0/ + + or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + */ + +#include "GpuTypes.h" +#include "NNTypes.h" +#include + +static __constant__ GpuData cData; + +__device__ inline uint64_t llitoulli(int64_t l) +{ + uint64_t u; + asm("mov.b64 %0, %1;" : "=l"(u) : "l"(l)); + return u; +} + +__device__ inline int64_t ullitolli(uint64_t u) +{ + int64_t l; + asm("mov.b64 %0, %1;" : "=l"(l) : "l"(u)); + return l; +} + +void SetKernelsGpuData() +{ + cudaError_t status; + status = cudaMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData)); + RTERROR(status, "cudaMemcpyToSymbol: SetKernelsGpuData copy to cData failed"); +} + +void GetKernelsGpuData() +{ + cudaError_t status; + status = cudaMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData)); + RTERROR(status, "cudaMemcpyToSymbol: SetKernelsGpuData copy From cData failed"); +} + + +uint32_t CalculateBlocks(uint64_t size) +{ + return (size + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; +} + +// Scales and biases a weight matrix previously generated +__global__ void +LAUNCH_BOUNDS() +kScaleAndBias_kernel(NNFloat* pData, uint64_t size, NNFloat scale, NNFloat bias) +{ + uint64_t offset = blockIdx.x * blockDim.x + threadIdx.x; + if (offset < size) + { + NNFloat value = pData[offset]; + pData[offset] = scale * value - bias; + } +} + +void kScaleAndBias(NNFloat* pData, uint64_t size, NNFloat scale, NNFloat bias) +{ + uint32_t blocks = CalculateBlocks(size); + kScaleAndBias_kernel<<>>(pData, size, scale, bias); + LAUNCHERROR("kScaleAndBias_kernel"); +} + + +// Initializes hidden or output unit with bias of single incoming unit +__global__ void +LAUNCH_BOUNDS() +kClearUnit_kernel(NNFloat* pUnit, NNFloat* pBias, uint32_t stride, uint64_t size) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + uint32_t bpos = pos % stride; + if (pos < size) + { + pUnit[pos] = pBias[bpos]; + } +} + + +void kClearUnit(NNFloat* pUnit, NNFloat* pBias, uint32_t stride, uint32_t batch) +{ + uint64_t size = (uint64_t)stride * (uint64_t)batch; + uint32_t blocks = CalculateBlocks(size); + kClearUnit_kernel<<>>(pUnit, pBias, stride, size); + LAUNCHERROR("kClearUnit_kernel"); +} + +// Initializes hidden or output unit with biases of 2 incoming units +__global__ void +LAUNCH_BOUNDS() +kClearDualSourceUnit_kernel(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, uint32_t stride, uint32_t size) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + uint32_t bpos = pos % stride; + if (pos < size) + { + pUnit[pos] = pBias1[bpos] + pBias2[bpos]; + } +} + +void kClearDualSourceUnit(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, uint32_t stride, uint32_t batch) +{ + uint64_t size = (uint64_t)stride * (uint64_t)batch; + uint32_t blocks = (size + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; + kClearDualSourceUnit_kernel<<>>(pUnit, pBias1, pBias2, stride, size); + LAUNCHERROR("kClearDualSourceUnit_kernel"); +} + + + +// Initializes hidden or output unit with biases of 3 incoming units +__global__ void +LAUNCH_BOUNDS() +kClearTripleSourceUnit_kernel(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, NNFloat* pBias3, uint32_t stride, uint32_t size) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + uint32_t bpos = pos % stride; + if (pos < size) + { + 
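+        // Each of the `batch` rows of length `stride` starts from the element-wise
+        // sum of the incoming bias vectors, which have `stride` entries each and are
+        // therefore indexed with bpos = pos % stride (the pBias3[pos] index in the
+        // line below only matches bpos for the first row and presumably should be
+        // pBias3[bpos]).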
pUnit[pos] = pBias1[bpos] + pBias2[bpos] + pBias3[pos]; + } +} + +void kClearTripleSourceUnit(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, NNFloat* pBias3, uint32_t stride, uint32_t batch) +{ + uint64_t size = (uint64_t)stride * (uint64_t)batch; + uint32_t blocks = (size + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; + kClearTripleSourceUnit_kernel<<>>(pUnit, pBias1, pBias2, pBias3, stride, size); + LAUNCHERROR("kClearTripleSource_kernel"); +} + +// Initializes hidden or output unit with biases of 4 incoming units +__global__ void +LAUNCH_BOUNDS() +kClearQuadSourceUnit_kernel(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, NNFloat* pBias3, NNFloat* pBias4, uint32_t stride, uint32_t size) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + uint32_t bpos = pos % stride; + if (pos < size) + { + pUnit[pos] = pBias1[bpos] + pBias2[bpos] + pBias3[pos] + pBias4[pos]; + } +} + +void kClearQuadSourceUnit(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, NNFloat* pBias3, NNFloat* pBias4, uint32_t stride, uint32_t batch) +{ + uint64_t size = (uint64_t)stride * (uint64_t)batch; + uint32_t blocks = (size + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; + kClearQuadSourceUnit_kernel<<>>(pUnit, pBias1, pBias2, pBias3, pBias4, stride, size); + LAUNCHERROR("kClearQuadSource_kernel"); +} +__global__ void +LAUNCH_BOUNDS() +kLoadSparseInputUnit_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) +{ + uint32_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; + if (pos < batch) + { + uint32_t pos1 = pos + position; + pos1 = cData._bShuffleIndices ? cData._pShuffleIndex[pos1] : pos1; + uint64_t start = pSparseStart[pos1] + (threadIdx.x & cData._warpMask); + uint64_t end = pSparseEnd[pos1]; + uint64_t offset = pos * stride; + while (start < end) + { + uint64_t pos2 = offset + pSparseIndex[start]; + pUnit[pos2] = 1.0f; + start += cData._warpSize; + } + } +} + +void kLoadSparseInputUnit(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex) +{ + uint32_t last = position + batch; + uint32_t count = last - position; + uint32_t blocks = (count * getGpu()._warpSize + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; + + cudaError_t status = cudaMemset(pUnit, 0, (uint64_t)batch * (uint64_t)stride * sizeof(NNFloat)); + RTERROR(status, "kLoadSparseInputUnit failed"); + kLoadSparseInputUnit_kernel<<>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex); + LAUNCHERROR("kLoadSparseInputUnit_kernel"); +} + +template +__global__ void +LAUNCH_BOUNDS() +kLoadSparseAnalogInputUnit_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) +{ + uint32_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; + if (pos < batch) + { + uint32_t pos1 = pos + position; + pos1 = cData._bShuffleIndices ? 
cData._pShuffleIndex[pos1] : pos1; + uint64_t start = pSparseStart[pos1] + (threadIdx.x & cData._warpMask); + uint64_t end = pSparseEnd[pos1]; + uint64_t offset = pos * stride; + while (start < end) + { + uint64_t pos2 = offset + pSparseIndex[start]; + T data = pSparseData[start]; + pUnit[pos2] = data; + start += cData._warpSize; + } + } +} + +template +void kLoadSparseAnalogInputUnit(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData) +{ + uint32_t last = position + batch; + uint32_t count = last - position; + uint32_t blocks = (count * getGpu()._warpSize + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; + cudaError_t status = cudaMemset(pUnit, 0, (uint64_t)batch * (uint64_t)stride * sizeof(NNFloat)); + RTERROR(status, "kLoadSparseAnalogInputUnit failed"); + kLoadSparseAnalogInputUnit_kernel<<>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData); + LAUNCHERROR("kLoadSparseAnalogInputUnit_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kLoadSparseDenoisedInputUnit_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pRandom) +{ + uint32_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; + if (pos < batch) + { + uint32_t pos1 = cData._bShuffleIndices ? cData._pShuffleIndex[pos + position] : pos + position; + uint64_t start = pSparseStart[pos1] + (threadIdx.x & cData._warpMask); + uint64_t end = pSparseEnd[pos1]; + uint64_t offset = pos * stride; + while (start < end) + { + NNFloat value = pRandom[start]; + uint64_t pos2 = offset + pSparseIndex[start]; + if (value >= cData._denoising_p) + pUnit[pos2] = cData._denoising_q; + start += cData._warpSize; + } + } +} + + +void kLoadSparseDenoisedInputUnit(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, NNFloat* pRandom) +{ + uint32_t last = position + batch; + uint32_t count = last - position; + uint32_t blocks = (count * getGpu()._warpSize + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; + + + //printf("KLSPDU %u %u %u %u %lu %lu %lu %lu %lu\n", position, batch, stride, blocks, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pRandom); + + cudaError_t status = cudaMemset(pUnit, 0, (uint64_t)batch * (uint64_t)stride * sizeof(NNFloat)); + RTERROR(status, "kLoadSparseDenoisedInputUnit failed"); + kLoadSparseDenoisedInputUnit_kernel<<>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pRandom); + LAUNCHERROR("kLoadSparseDenoisedInputUnit_kernel"); +} + +template +__global__ void +LAUNCH_BOUNDS() +kLoadSparseAnalogDenoisedInputUnit_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData, NNFloat* pRandom) +{ + uint32_t pos = (blockIdx.x * blockDim.x + threadIdx.x) / cData._warpSize; + if (pos < batch) + { + uint32_t pos1 = cData._bShuffleIndices ? 
cData._pShuffleIndex[pos + position] : pos + position; + uint64_t start = pSparseStart[pos1] + (threadIdx.x & cData._warpMask); + uint64_t end = pSparseEnd[pos1]; + uint64_t offset = pos * stride; + while (start < end) + { + NNFloat value = pRandom[start]; + uint64_t pos2 = offset + pSparseIndex[start]; + T data = pSparseData[start]; + if (value >= cData._denoising_p) + pUnit[pos2] = cData._denoising_q * data; + start += cData._warpSize; + } + } +} + +template +void kLoadSparseAnalogDenoisedInputUnit(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T *pSparseData, NNFloat* pRandom) +{ + uint32_t last = position + batch; + uint32_t count = last - position; + uint32_t blocks = (count * getGpu()._warpSize + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; + + cudaError_t status = cudaMemset(pUnit, 0, (uint64_t)batch * (uint64_t)stride * sizeof(NNFloat)); + RTERROR(status, "kLoadSparseAnalogDenoisedInputUnit failed"); + kLoadSparseAnalogDenoisedInputUnit_kernel<<>>(position, batch, stride, pUnit, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, pRandom); + LAUNCHERROR("kLoadSparseAnalogDenoisedInputUnit_kernel"); +} + +template +__global__ void +LAUNCH_BOUNDS() +kLoadInputUnit_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, T* pData) +{ + uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; + if (pos < stride) + { + uint32_t pos1 = cData._bShuffleIndices ? cData._pShuffleIndex[blockIdx.x + position] : blockIdx.x + position; + uint64_t soffset = pos1 * stride + pos; + uint64_t doffset = blockIdx.x * stride + pos; + pUnit[doffset] = pData[soffset]; + } +} + +__global__ void +kLoadNormalizedInputUnit_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, unsigned char* pData) +{ + uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; + if (pos < stride) + { + uint32_t pos1 = cData._bShuffleIndices ? cData._pShuffleIndex[blockIdx.x + position] : blockIdx.x + position; + uint64_t soffset = pos1 * stride + pos; + uint64_t doffset = blockIdx.x * stride + pos; + pUnit[doffset] = (NNFloat)pData[soffset] * (NNFloat)(1.0 / 256.0); + } +} + +__global__ void +kLoadNormalizedInputUnit_kernel(uint32_t position, uint32_t stride, NNFloat* pUnit, char* pData) +{ + uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x; + if (pos < stride) + { + uint32_t pos1 = cData._bShuffleIndices ? 
cData._pShuffleIndex[blockIdx.x + position] : blockIdx.x + position; + uint64_t soffset = pos1 * stride + pos; + uint64_t doffset = blockIdx.x * stride + pos; + pUnit[doffset] = (NNFloat)pData[soffset] * (NNFloat)(1.0 / 128.0); + } +} + +template void kLoadInputUnit(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, T* pData) +{ + dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); + kLoadInputUnit_kernel<<>>(position, stride, pUnit, pData); + LAUNCHERROR("kLoadInputUnit_kernel"); +} + +template<> void kLoadInputUnit(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, unsigned char* pData) +{ + dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); + kLoadNormalizedInputUnit_kernel<<>>(position, stride, pUnit, pData); + LAUNCHERROR("kLoadNormalizedInputUnit_kernel"); +} + +template<> void kLoadInputUnit(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, char* pData) +{ + dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); + kLoadNormalizedInputUnit_kernel<<>>(position, stride, pUnit, pData); + LAUNCHERROR("kLoadNormalizedInputUnit_kernel"); +} + +// Adds bias from single incoming unit +__global__ void +LAUNCH_BOUNDS() +kAddBias_kernel(NNFloat* pUnit, NNFloat* pBias, uint32_t stride, uint32_t size) +{ + uint32_t pos = blockIdx.x * blockDim.x + threadIdx.x; + uint32_t bpos = pos % stride; + if (pos < size) + { + pUnit[pos] += pBias[bpos]; + } +} + + +void kAddBias(NNFloat* pUnit, NNFloat* pBias, uint32_t stride, uint32_t batch) +{ + uint32_t size = stride * batch; + uint32_t blocks = CalculateBlocks(size); + kAddBias_kernel<<>>(pUnit, pBias, stride, size); + LAUNCHERROR("kAddBias_kernel"); +} + + +// Adds biases of 2 incoming units to hidden or output unit +__global__ void +LAUNCH_BOUNDS() +kAddDualBias_kernel(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, uint32_t stride, uint32_t size) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + uint32_t bpos = pos % stride; + if (pos < size) + { + pUnit[pos] += pBias1[bpos] + pBias2[bpos]; + } +} + +void kAddDualBias(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, uint32_t stride, uint32_t batch) +{ + uint64_t size = (uint64_t)stride * (uint64_t)batch; + uint32_t blocks = (size + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; + kAddDualBias_kernel<<>>(pUnit, pBias1, pBias2, stride, size); + LAUNCHERROR("kAddDualBias_kernel"); +} + +// Adds biases of 3 incoming units to hidden or output unit +__global__ void +LAUNCH_BOUNDS() +kAddTripleBias_kernel(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, NNFloat* pBias3, uint32_t stride, uint32_t size) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + uint32_t bpos = pos % stride; + if (pos < size) + { + pUnit[pos] += pBias1[bpos] + pBias2[bpos] + pBias3[pos]; + } +} + +void kAddTripleBias(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, NNFloat* pBias3, uint32_t stride, uint32_t batch) +{ + uint64_t size = (uint64_t)stride * (uint64_t)batch; + uint32_t blocks = (size + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; + kAddTripleBias_kernel<<>>(pUnit, pBias1, pBias2, pBias3, stride, size); + LAUNCHERROR("kAddTripleBias_kernel"); +} + +// Adds biases of 4 incoming units to hidden or output unit +__global__ void +LAUNCH_BOUNDS() +kAddQuadBias_kernel(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, NNFloat* pBias3, NNFloat* pBias4, uint32_t stride, uint32_t size) +{ + uint64_t pos = blockIdx.x * blockDim.x 
+ threadIdx.x; + uint32_t bpos = pos % stride; + if (pos < size) + { + pUnit[pos] += pBias1[bpos] + pBias2[bpos] + pBias3[pos] + pBias4[pos]; + } +} + +void kAddQuadBias(NNFloat* pUnit, NNFloat* pBias1, NNFloat* pBias2, NNFloat* pBias3, NNFloat* pBias4, uint32_t stride, uint32_t batch) +{ + uint64_t size = (uint64_t)stride * (uint64_t)batch; + uint32_t blocks = (size + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock; + kAddQuadBias_kernel<<>>(pUnit, pBias1, pBias2, pBias3, pBias4, stride, size); + LAUNCHERROR("kAddQuadBias_kernel"); +} + +#if (__CUDA_ARCH__ >= 500) +static const uint32_t MAXSPARSE = SM_5X_MAXSPARSE; +static const uint32_t MAXSPARSEANALOG = SM_5X_MAXSPARSEANALOG; +#else +static const uint32_t MAXSPARSE = SM_3X_MAXSPARSE; +static const uint32_t MAXSPARSEANALOG = SM_3X_MAXSPARSEANALOG; +#endif + + +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseZ_kernel(uint32_t position, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pUnit, NNFloat beta) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSE]; // Shared set of offsets to non-zero weights + + // Read sparse indices into shared memory so they're only read once + sOpos = blockDim.x; + position = cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x; + uint64_t start = pSparseStart[position]; + uint64_t end = pSparseEnd[position]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + start += threadIdx.x; + while (start < end) + { + sOffset[pos] = pSparseIndex[start] * stride; + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pUnit += blockIdx.x * stride; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < stride) + { + // Read all non-zero inputs + NNFloat unit = (beta == (NNFloat)0.0) ? (NNFloat)0.0 : pUnit[opos]; + for (uint32_t i = 0; i < inputs; i++) + { + uint32_t offset = sOffset[i]; + unit += pWeight[offset + opos]; + } + + // Write output + pUnit[opos] = unit; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + + +void kCalculateSparseZ(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pUnit, NNFloat beta) +{ + uint32_t threads = min(256, ((stride + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseZ_kernel<<>>(position, stride, pWeight, pSparseStart, pSparseEnd, pSparseIndex, pUnit, beta); + LAUNCHERROR("kCalculateSparseZ_kernel"); +} + +template +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseAnalogZ_kernel(uint32_t position, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData, NNFloat* pUnit, NNFloat beta) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSEANALOG]; // Shared set of offsets to non-zero weights +__shared__ T sValue[MAXSPARSEANALOG]; + + // Read sparse indices into shared memory so they're only read once + sOpos = blockDim.x; + position = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x; + uint64_t start = pSparseStart[position]; + uint64_t end = pSparseEnd[position]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + start += threadIdx.x; + while (start < end) + { + sOffset[pos] = pSparseIndex[start] * stride; + sValue[pos] = pSparseData[start]; + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pUnit += blockIdx.x * stride; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < stride) + { + // Read all non-zero inputs + NNFloat unit = (beta == (NNFloat)0.0) ? (NNFloat)0.0 : pUnit[opos]; + for (uint32_t i = 0; i < inputs; i++) + { + uint32_t offset = sOffset[i]; + unit += pWeight[offset + opos] * sValue[i]; + } + + // Write output + pUnit[opos] = unit; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + +template<> +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseAnalogZ_kernel(uint32_t position, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData, NNFloat* pUnit, NNFloat beta) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSEANALOG]; // Shared set of offsets to non-zero weights +__shared__ NNFloat sValue[MAXSPARSEANALOG]; + + // Read sparse indices into shared memory so they're only read once + sOpos = blockDim.x; + position = cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x; + uint64_t start = pSparseStart[position]; + uint64_t end = pSparseEnd[position]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + start += threadIdx.x; + while (start < end) + { + sOffset[pos] = pSparseIndex[start] * stride; + sValue[pos] = (NNFloat)pSparseData[start] * (NNFloat)(1.0 / 256.0); + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pUnit += blockIdx.x * stride; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < stride) + { + // Read all non-zero inputs + NNFloat unit = (beta == (NNFloat)0.0) ? (NNFloat)0.0 : pUnit[opos]; + for (uint32_t i = 0; i < inputs; i++) + { + uint32_t offset = sOffset[i]; + unit += pWeight[offset + opos] * sValue[i]; + } + + // Write output + pUnit[opos] = unit; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + +template<> +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseAnalogZ_kernel(uint32_t position, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData, NNFloat* pUnit, NNFloat beta) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSEANALOG]; // Shared set of offsets to non-zero weights +__shared__ NNFloat sValue[MAXSPARSEANALOG]; + + // Read sparse indices into shared memory so they're only read once + sOpos = blockDim.x; + position = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x; + uint64_t start = pSparseStart[position]; + uint64_t end = pSparseEnd[position]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + start += threadIdx.x; + while (start < end) + { + sOffset[pos] = pSparseIndex[start] * stride; + sValue[pos] = (NNFloat)pSparseData[start] * (NNFloat)(1.0 / 128.0); + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pUnit += blockIdx.x * stride; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < stride) + { + // Read all non-zero inputs + NNFloat unit = (beta == (NNFloat)0.0) ? (NNFloat)0.0 : pUnit[opos]; + for (uint32_t i = 0; i < inputs; i++) + { + uint32_t offset = sOffset[i]; + unit += pWeight[offset + opos] * sValue[i]; + } + + // Write output + pUnit[opos] = unit; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + +template void kCalculateSparseAnalogZ(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData, NNFloat* pUnit, NNFloat beta) +{ + uint32_t threads = min(256, ((stride + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseAnalogZ_kernel<<>>(position, stride, pWeight, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, pUnit, beta); + LAUNCHERROR("kCalculateSparseZ_kernel"); +} + +template<> void kCalculateSparseAnalogZ(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData, NNFloat* pUnit, NNFloat beta) +{ + uint32_t threads = min(256, ((stride + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseAnalogZ_kernel<<>>(position, stride, pWeight, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, pUnit, beta); + LAUNCHERROR("kCalculateSparseZ_kernel"); +} + +template<> void kCalculateSparseAnalogZ(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData, NNFloat* pUnit, NNFloat beta) +{ + uint32_t threads = min(256, ((stride + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseAnalogZ_kernel<<>>(position, stride, pWeight, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, pUnit, beta); + LAUNCHERROR("kCalculateSparseZ_kernel"); +} + +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseDenoisedZ_kernel(uint32_t position, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pRandom, NNFloat* pUnit, NNFloat beta) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSE]; // Shared set of offsets to non-zero weights + + // Read sparse indices into shared memory so they're only read once + sOpos = blockDim.x; + position = cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x; + uint64_t start = pSparseStart[position]; + uint64_t end = pSparseEnd[position]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + start += threadIdx.x; + while (start < end) + { + NNFloat value = pRandom[start]; + sOffset[pos] = (value < cData._denoising_p) ? 
cData._maxUint32_t : (int32_t)pSparseIndex[start] * stride; + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pUnit += blockIdx.x * stride; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < stride) + { + // Read all non-zero inputs + NNFloat unit = (beta == (NNFloat)0.0) ? (NNFloat)0.0 : pUnit[opos]; + for (uint32_t i = 0; i < inputs; i++) + { + int32_t offset = sOffset[i]; + if (offset != cData._maxUint32_t) + unit += pWeight[offset + opos] * cData._denoising_q; + } + + // Write output + pUnit[opos] = unit; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + +void kCalculateSparseDenoisedZ(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pRandom, NNFloat* pUnit, NNFloat beta) +{ + uint32_t threads = min(256, ((stride + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseDenoisedZ_kernel<<>>(position, stride, pWeight, pSparseStart, pSparseEnd, pSparseIndex, pRandom, pUnit, beta); + LAUNCHERROR("kCalculateSparseDenoisedZ_kernel"); +} + +template +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseAnalogDenoisedZ_kernel(uint32_t position, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData, NNFloat* pRandom, NNFloat* pUnit, NNFloat beta) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSEANALOG]; // Shared set of offsets to non-zero weights +__shared__ T sValue[MAXSPARSEANALOG]; + + // Read sparse indices into shared memory so they're only read once + sOpos = blockDim.x; + position = cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x; + uint64_t start = pSparseStart[position]; + uint64_t end = pSparseEnd[position]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + start += threadIdx.x; + while (start < end) + { + NNFloat value = pRandom[start]; + sOffset[pos] = (value < cData._denoising_p) ? cData._maxUint32_t : pSparseIndex[start] * stride; + sValue[pos] = pSparseData[start] * cData._denoising_q; + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pUnit += blockIdx.x * stride; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < stride) + { + // Read all non-zero inputs + NNFloat unit = (beta == (NNFloat)0.0) ? 
(NNFloat)0.0 : pUnit[opos]; + for (uint32_t i = 0; i < inputs; i++) + { + int32_t offset = sOffset[i]; + if (offset != cData._maxUint32_t) + unit += pWeight[offset + opos] * sValue[i]; + } + + // Write output + pUnit[opos] = unit; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + +template<> +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseAnalogDenoisedZ_kernel(uint32_t position, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData, NNFloat* pRandom, NNFloat* pUnit, NNFloat beta) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ int32_t sOffset[MAXSPARSEANALOG]; // Shared set of offsets to non-zero weights +__shared__ NNFloat sValue[MAXSPARSEANALOG]; + + // Read sparse indices into shared memory so they're only read once + sOpos = blockDim.x; + position = cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x; + uint64_t start = pSparseStart[position]; + uint64_t end = pSparseEnd[position]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + start += threadIdx.x; + while (start < end) + { + NNFloat value = pRandom[start]; + sOffset[pos] = (value < cData._denoising_p) ? cData._maxUint32_t : pSparseIndex[start] * stride; + sValue[pos] = (NNFloat)pSparseData[start] * (NNFloat)(1.0 / 256.0) * cData._denoising_q; + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pUnit += blockIdx.x * stride; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < stride) + { + // Read all non-zero inputs + NNFloat unit = (beta == (NNFloat)0.0) ? (NNFloat)0.0 : pUnit[opos]; + for (uint32_t i = 0; i < inputs; i++) + { + int32_t offset = sOffset[i]; + if (offset != cData._maxUint32_t) + unit += pWeight[offset + opos] * sValue[i]; + } + + // Write output + pUnit[opos] = unit; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + +template<> +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseAnalogDenoisedZ_kernel(uint32_t position, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData, NNFloat* pRandom, NNFloat* pUnit, NNFloat beta) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSEANALOG]; // Shared set of offsets to non-zero weights +__shared__ NNFloat sValue[MAXSPARSEANALOG]; + + // Read sparse indices into shared memory so they're only read once + sOpos = blockDim.x; + position = cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x; + uint64_t start = pSparseStart[position]; + uint64_t end = pSparseEnd[position]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + start += threadIdx.x; + while (start < end) + { + NNFloat value = pRandom[start]; + sOffset[pos] = (value < cData._denoising_p) ? 
cData._maxUint32_t : pSparseIndex[start] * stride; + sValue[pos] = (NNFloat)pSparseData[start] * (NNFloat)(1.0 / 128.0) * cData._denoising_q; + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pUnit += blockIdx.x * stride; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < stride) + { + // Read all non-zero inputs + NNFloat unit = (beta == (NNFloat)0.0) ? (NNFloat)0.0 : pUnit[opos]; + for (uint32_t i = 0; i < inputs; i++) + { + uint32_t offset = sOffset[i]; + if (offset != cData._maxUint32_t) + unit += pWeight[offset + opos] * sValue[i]; + } + + // Write output + pUnit[opos] = unit; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + +template void kCalculateSparseAnalogDenoisedZ(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData, NNFloat* pRandom, NNFloat* pUnit, NNFloat beta) +{ + uint32_t threads = min(256, ((stride + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseAnalogDenoisedZ_kernel<<>>(position, stride, pWeight, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, pRandom, pUnit, beta); + LAUNCHERROR("kCalculateSparseAnalogDenoisedZ_kernel"); +} + +template<> void kCalculateSparseAnalogDenoisedZ(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, char* pSparseData, NNFloat* pRandom, NNFloat* pUnit, NNFloat beta) +{ + uint32_t threads = min(256, ((stride + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseAnalogDenoisedZ_kernel<<>>(position, stride, pWeight, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, pRandom, pUnit, beta); + LAUNCHERROR("kCalculateSparseAnalogDenoisedZ_kernel"); +} + +template<> void kCalculateSparseAnalogDenoisedZ(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pWeight, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, unsigned char* pSparseData, NNFloat* pRandom, NNFloat* pUnit, NNFloat beta) +{ + uint32_t threads = min(256, ((stride + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseAnalogDenoisedZ_kernel<<>>(position, stride, pWeight, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, pRandom, pUnit, beta); + LAUNCHERROR("kCalculateSparseAnalogDenoisedZ_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kCalculateSparseTransposedMatrix_kernel(uint32_t position, uint32_t batch, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex) +{ + // Determine batch position + uint32_t bpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; + uint32_t tgx = threadIdx.x & cData._warpMask; + + // Add indices to sparse transposed activation matrix + if (bpos < batch) + { + position = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + bpos] : position + bpos; + uint64_t start = pSparseStart[position] + tgx; + uint64_t end = pSparseEnd[position]; + while (start < end) + { + uint32_t index = pSparseIndex[start]; + uint32_t opos = atomicAdd(&pSparseTransposedEnd[index], 1); + pSparseTransposedIndex[opos] = bpos; + start += cData._warpSize; + } + } +} + +void kCalculateSparseTransposedMatrix(uint32_t position, uint32_t batch, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex) +{ + uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); + kCalculateSparseTransposedMatrix_kernel<<>>(position, batch, pSparseStart, pSparseEnd, pSparseIndex, pSparseTransposedEnd, pSparseTransposedIndex); + LAUNCHERROR("kCalculateSparseTransposedMatrix_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kCalculateSparseTransposedDenoisedMatrix_kernel(uint32_t position, uint32_t batch, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat *pRandom, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex) +{ + // Determine batch position + uint32_t bpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; + uint32_t tgx = threadIdx.x & cData._warpMask; + + // Add indices to sparse transposed activation matrix + if (bpos < batch) + { + position = cData._bShuffleIndices ? cData._pShuffleIndex[position + bpos] : position + bpos; + uint64_t start = pSparseStart[position] + tgx; + uint64_t end = pSparseEnd[position]; + while (start < end) + { + NNFloat rnd = pRandom[start]; + uint32_t index = pSparseIndex[start]; + if (rnd >= cData._denoising_p) + { + uint32_t opos = atomicAdd(&pSparseTransposedEnd[index], 1); + pSparseTransposedIndex[opos]= bpos; + } + start += cData._warpSize; + } + } +} + +void kCalculateSparseTransposedDenoisedMatrix(uint32_t position, uint32_t batch, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, NNFloat* pRandom, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex) +{ + uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); + kCalculateSparseTransposedDenoisedMatrix_kernel<<>>(position, batch, pSparseStart, pSparseEnd, pSparseIndex, pRandom, pSparseTransposedEnd, pSparseTransposedIndex); + LAUNCHERROR("kCalculateSparseTransposedDenoisedMatrix_kernel"); +} + +template +__global__ void +LAUNCH_BOUNDS() +kCalculateSparseTransposedAnalogMatrix_kernel(uint32_t position, uint32_t batch, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, T* pSparseTransposedData) +{ + // Determine batch position + uint32_t bpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; + uint32_t tgx = threadIdx.x & cData._warpMask; + + // Add indices to sparse transposed activation matrix + if (bpos < batch) + { + position = cData._bShuffleIndices ? 
cData._pShuffleIndex[position + bpos] : position + bpos; + uint64_t start = pSparseStart[position] + tgx; + uint64_t end = pSparseEnd[position]; + while (start < end) + { + uint32_t index = pSparseIndex[start]; + T value = pSparseData[start]; + uint32_t opos = atomicAdd(&pSparseTransposedEnd[index], 1); + pSparseTransposedIndex[opos] = bpos; + pSparseTransposedData[opos] = value; + start += cData._warpSize; + } + } +} + +template +void kCalculateSparseTransposedAnalogMatrix(uint32_t position, uint32_t batch, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, T* pSparseTransposedData) +{ + uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); + kCalculateSparseTransposedAnalogMatrix_kernel<<>>(position, batch, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, pSparseTransposedEnd, pSparseTransposedIndex, pSparseTransposedData); + LAUNCHERROR("kCalculateSparseTransposedAnalogMatrix_kernel"); +} + +template +__global__ void +LAUNCH_BOUNDS() +kCalculateSparseTransposedAnalogDenoisedMatrix_kernel(uint32_t position, uint32_t batch, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData, NNFloat *pRandom, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, T* pSparseTransposedData) +{ + // Determine batch position + uint32_t bpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; + uint32_t tgx = threadIdx.x & cData._warpMask; + + // Add indices to sparse transposed activation matrix + if (bpos < batch) + { + position = cData._bShuffleIndices ? cData._pShuffleIndex[position + bpos] : position + bpos; + uint64_t start = pSparseStart[position] + tgx; + uint64_t end = pSparseEnd[position]; + while (start < end) + { + NNFloat rnd = pRandom[start]; + uint32_t index = pSparseIndex[start]; + if (rnd >= cData._denoising_p) + { + T value = pSparseData[start]; + uint32_t opos = atomicAdd(&pSparseTransposedEnd[index], 1); + pSparseTransposedIndex[opos]= bpos; + pSparseTransposedData[opos] = value; + } + start += cData._warpSize; + } + } +} + +template +void kCalculateSparseTransposedAnalogDenoisedMatrix(uint32_t position, uint32_t batch, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t* pSparseIndex, T* pSparseData, NNFloat* pRandom, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, T* pSparseTransposedData) +{ + uint32_t blocks = CalculateBlocks(batch * getGpu()._warpSize); + kCalculateSparseTransposedAnalogDenoisedMatrix_kernel<<>>(position, batch, pSparseStart, pSparseEnd, pSparseIndex, pSparseData, pRandom, pSparseTransposedEnd, pSparseTransposedIndex, pSparseTransposedData); + LAUNCHERROR("kCalculateSparseTransposedAnalogDenoisedMatrix_kernel"); +} + + +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseTransposedWeightGradient_kernel(NNFloat alpha, NNFloat beta, uint32_t n, uint32_t* pSparseTransposedStart, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, NNFloat* pDelta, NNFloat* pWeightGradient) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSE]; // Shared set of offsets to non-zero weights + + // Read transposed sparse indices into shared memory so they're only read once + sOpos = blockDim.x; + uint64_t start = pSparseTransposedStart[blockIdx.x]; + uint64_t end = pSparseTransposedEnd[blockIdx.x]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + start += threadIdx.x; + while (start < end) + { + sOffset[pos] = 
pSparseTransposedIndex[start] * n; + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + alpha *= cData._denoising_q; + pWeightGradient += blockIdx.x * n; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < n) + { + // Read all non-zero inputs, accumulate in 64-bit FP to maintain deterministic results + NNFloat oldgradient = (beta == (NNFloat)0.0) ? (NNFloat)0.0 : beta * pWeightGradient[opos]; + int64_t sum = 0; + for (uint32_t i = 0; i < inputs; i++) + { + uint32_t offset = sOffset[i]; + sum += llrintf(ERRORSCALEF * pDelta[offset + opos]); + } + + // Write output + NNFloat fsum = alpha * (NNFloat)((double)sum * ONEOVERERRORSCALE); + pWeightGradient[opos] = oldgradient + fsum; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + + +void kCalculateSparseTransposedWeightGradient(NNFloat alpha, NNFloat beta, uint32_t m, uint32_t n, uint32_t* pSparseTransposedStart, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, NNFloat* pDelta, NNFloat* pWeightGradient) +{ + uint32_t threads = min(256, ((m + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseTransposedWeightGradient_kernel<<>>(alpha, beta, n, pSparseTransposedStart, pSparseTransposedEnd, pSparseTransposedIndex, pDelta, pWeightGradient); + LAUNCHERROR("kCalculateSparseTransposedWeightGradient_kernel"); +} + +template +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseTransposedAnalogWeightGradient_kernel(NNFloat alpha, NNFloat beta, uint32_t n, uint32_t* pSparseTransposedStart, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, T* pSparseTransposedData, NNFloat* pDelta, NNFloat* pWeightGradient) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSEANALOG]; // Shared set of offsets to non-zero weights +__shared__ T sValue[MAXSPARSEANALOG]; + + // Read transposed sparse indices and data into shared memory so they're only read once + sOpos = blockDim.x; + uint64_t start = pSparseTransposedStart[blockIdx.x]; + uint64_t end = pSparseTransposedEnd[blockIdx.x]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + alpha *= cData._denoising_q; + start += threadIdx.x; + while (start < end) + { + sOffset[pos] = pSparseTransposedIndex[start] * n; + sValue[pos] = pSparseTransposedData[start]; + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pWeightGradient += blockIdx.x * n; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < n) + { + // Read all non-zero inputs, accumulate in 64-bit FP to maintain deterministic results + NNFloat oldgradient = (beta == (NNFloat)0.0) ? 
(NNFloat)0.0 : beta * pWeightGradient[opos]; + int64_t sum = 0; + for (uint32_t i = 0; i < inputs; i++) + { + uint32_t offset = sOffset[i]; + T value = sValue[i]; + sum += llrintf(ERRORSCALEF * value * pDelta[offset + opos]); + } + + // Write output + NNFloat fsum = alpha * (NNFloat)((double)sum * ONEOVERERRORSCALE); + pWeightGradient[opos] = oldgradient + fsum; + + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + +template <> +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseTransposedAnalogWeightGradient_kernel(NNFloat alpha, NNFloat beta, uint32_t n, uint32_t* pSparseTransposedStart, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, char* pSparseTransposedData, NNFloat* pDelta, NNFloat* pWeightGradient) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSEANALOG]; // Shared set of offsets to non-zero weights +__shared__ NNFloat sValue[MAXSPARSEANALOG]; + + // Read transposed sparse indices and data into shared memory so they're only read once + sOpos = blockDim.x; + uint64_t start = pSparseTransposedStart[blockIdx.x]; + uint64_t end = pSparseTransposedEnd[blockIdx.x]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + alpha *= cData._denoising_q; + start += threadIdx.x; + while (start < end) + { + sOffset[pos] = pSparseTransposedIndex[start] * n; + sValue[pos] = (NNFloat)pSparseTransposedData[start] * (NNFloat)(1.0 / 128.0); + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pWeightGradient += blockIdx.x * n; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < n) + { + // Read all non-zero inputs, accumulate in 64-bit FP to maintain deterministic results + NNFloat oldgradient = (beta == (NNFloat)0.0) ? 
(NNFloat)0.0 : beta * pWeightGradient[opos]; + int64_t sum = 0; + for (uint32_t i = 0; i < inputs; i++) + { + uint32_t offset = sOffset[i]; + NNFloat value = sValue[i]; + sum += llrintf(ERRORSCALEF * value * pDelta[offset + opos]); + } + + // Write output + NNFloat fsum = alpha * (NNFloat)((double)sum * ONEOVERERRORSCALE); + pWeightGradient[opos] = oldgradient + fsum; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + +template <> +__global__ void +LAUNCH_BOUNDS256() +kCalculateSparseTransposedAnalogWeightGradient_kernel(NNFloat alpha, NNFloat beta, uint32_t n, uint32_t* pSparseTransposedStart, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, unsigned char* pSparseTransposedData, NNFloat* pDelta, NNFloat* pWeightGradient) +{ +__shared__ uint32_t sOpos; // Shared output position +__shared__ uint32_t sOffset[MAXSPARSEANALOG]; // Shared set of offsets to non-zero weights +__shared__ NNFloat sValue[MAXSPARSEANALOG]; + + // Read transposed sparse indices and data into shared memory so they're only read once + sOpos = blockDim.x; + uint64_t start = pSparseTransposedStart[blockIdx.x]; + uint64_t end = pSparseTransposedEnd[blockIdx.x]; + uint32_t inputs = end - start; + uint32_t pos = threadIdx.x; + alpha *= cData._denoising_q; + start += threadIdx.x; + while (start < end) + { + sOffset[pos] = pSparseTransposedIndex[start] * n; + sValue[pos] = (NNFloat)pSparseTransposedData[start] * (NNFloat)(1.0 / 256.0); + pos += blockDim.x; + start += blockDim.x; + } + + __threadfence(); + __syncthreads(); + + // Cycle through all output positions + pWeightGradient += blockIdx.x * n; + uint32_t opos = threadIdx.x; + uint32_t tgx = threadIdx.x & cData._warpMask; + while (opos < n) + { + // Read all non-zero inputs, accumulate in 64-bit FP to maintain deterministic results + NNFloat oldgradient = (beta == (NNFloat)0.0) ? 
(NNFloat)0.0 : beta * pWeightGradient[opos]; + int64_t sum = 0; + for (uint32_t i = 0; i < inputs; i++) + { + uint32_t offset = sOffset[i]; + NNFloat value = sValue[i]; + sum += llrintf(ERRORSCALEF * value * pDelta[offset + opos]); + } + + // Write output + NNFloat fsum = alpha * (NNFloat)((double)sum * ONEOVERERRORSCALE); + pWeightGradient[opos] = oldgradient + fsum; + + // Advance to next set of outputs + if (tgx == 0) + { + opos = atomicAdd(&sOpos, cData._warpSize); + } + opos = __shfl(opos, 0); + opos += tgx; + } +} + +template +void kCalculateSparseTransposedAnalogWeightGradient(NNFloat alpha, NNFloat beta, uint32_t m, uint32_t n, uint32_t* pSparseTransposedStart, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, T* pSparseTransposedData, NNFloat* pDelta, NNFloat* pWeightGradient) +{ + uint32_t threads = min(256, ((m + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseTransposedAnalogWeightGradient_kernel<<>>(alpha, beta, n, pSparseTransposedStart, pSparseTransposedEnd, pSparseTransposedIndex, pSparseTransposedData, pDelta, pWeightGradient); + LAUNCHERROR("kCalculateSparseTransposedAnalogWeightGradient_kernel"); +} + +template<> +void kCalculateSparseTransposedAnalogWeightGradient(NNFloat alpha, NNFloat beta, uint32_t m, uint32_t n, uint32_t* pSparseTransposedStart, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, char* pSparseTransposedData, NNFloat* pDelta, NNFloat* pWeightGradient) +{ + uint32_t threads = min(256, ((m + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseTransposedAnalogWeightGradient_kernel<<>>(alpha, beta, n, pSparseTransposedStart, pSparseTransposedEnd, pSparseTransposedIndex, pSparseTransposedData, pDelta, pWeightGradient); + LAUNCHERROR("kCalculateSparseTransposedAnalogWeightGradient_kernel"); +} + +template<> +void kCalculateSparseTransposedAnalogWeightGradient(NNFloat alpha, NNFloat beta, uint32_t m, uint32_t n, uint32_t* pSparseTransposedStart, uint32_t* pSparseTransposedEnd, uint32_t* pSparseTransposedIndex, unsigned char* pSparseTransposedData, NNFloat* pDelta, NNFloat* pWeightGradient) +{ + uint32_t threads = min(256, ((m + getGpu()._warpSize - 1) >> getGpu()._warpBits) << getGpu()._warpBits); + kCalculateSparseTransposedAnalogWeightGradient_kernel<<>>(alpha, beta, n, pSparseTransposedStart, pSparseTransposedEnd, pSparseTransposedIndex, pSparseTransposedData, pDelta, pWeightGradient); + LAUNCHERROR("kCalculateSparseTransposedAnalogWeightGradient_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kUpdateBiases_kernel(NNFloat alpha, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBias) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < width) + { + NNFloat sum = (NNFloat)0.0; + pDelta += pos; + for (uint32_t i = 0; i < batch; i++) + { + sum += *pDelta; + pDelta += width; + } + pBias[pos] -= alpha * sum; + } +} + +void kUpdateBiases(NNFloat alpha, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBias) +{ + uint32_t blocks = CalculateBlocks(width); + kUpdateBiases_kernel<<>>(alpha, batch, width, pDelta, pBias); + LAUNCHERROR("kUpdateBiases_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kCalculateRegularizationError_kernel(NNFloat* pWeight, uint64_t size) +{ + uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; + NNFloat error = (NNFloat)0.0; + if (pos < size) + { + NNFloat w = pWeight[pos]; + error = w * w; + } + + // Reduce error across threads + error += __shfl(error, threadIdx.x ^ 1); + error += 
__shfl(error, threadIdx.x ^ 2); + error += __shfl(error, threadIdx.x ^ 4); + error += __shfl(error, threadIdx.x ^ 8); + error += __shfl(error, threadIdx.x ^ 16); + + if ((threadIdx.x & cData._warpMask) == 0) + { + atomicAdd(cData._pAccumulator, llitoulli(llrintf(ERRORSCALEF * error))); + } +} + +// Calculates raw weight decay/regularization error +NNFloat kCalculateRegularizationError(NNFloat lambda, NNFloat* pWeight, uint64_t size) +{ + uint32_t blocks = CalculateBlocks(size); + cudaMemset(getGpu()._data._pAccumulator, 0, sizeof(uint64_t)); + kCalculateRegularizationError_kernel<<>>(pWeight, size); + LAUNCHERROR("kCalculateRegularizationError_kernel"); + getGpu()._pbAccumulator->Download(); + //printf("Reg %llu %f\n", size, lambda * 0.5f * (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); + return (NNFloat)(lambda * 0.5f * (double)(getGpu()._pbAccumulator->_pSysData[0]) * ONEOVERERRORSCALE); +} + +// Instantiates allowable templated functions so we can hide the implementations here +// instead of in the header file because we're mixing CUDA and C++ and that's +// a migraine headache in the making otherwise. +void KernelsTempFunction() +{ + + kInitSort(1, NULL, NULL); + kInitSort(1, NULL, NULL); + kInitSort(1, NULL, NULL); + kInitSort(1, NULL, NULL); + kSort(1, NULL, NULL, NULL, NULL, NULL, 0); + kSort(1, NULL, NULL, NULL, NULL, NULL, 0); + kSort(1, NULL, NULL, NULL, NULL, NULL, 0); + kSort(1, NULL, NULL, NULL, NULL, NULL, 0); + + kLoadSparseAnalogDenoisedInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogDenoisedInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogDenoisedInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogDenoisedInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogDenoisedInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogDenoisedInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogDenoisedInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogDenoisedInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + + kLoadSparseAnalogInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL); + kLoadSparseAnalogInputUnit(0, 0, 0, NULL, NULL, NULL, NULL, NULL); + + kCalculateSparseAnalogZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + + kCalculateSparseAnalogDenoisedZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogDenoisedZ(0, 0, 0, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogDenoisedZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogDenoisedZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogDenoisedZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogDenoisedZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogDenoisedZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + kCalculateSparseAnalogDenoisedZ(0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, (NNFloat)0.0); + + kCalculateSparseTransposedAnalogMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + + kCalculateSparseTransposedAnalogDenoisedMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogDenoisedMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogDenoisedMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogDenoisedMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogDenoisedMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogDenoisedMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogDenoisedMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogDenoisedMatrix(0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + + kCalculateSparseTransposedAnalogWeightGradient(0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogWeightGradient(0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogWeightGradient(0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogWeightGradient(0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogWeightGradient(0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogWeightGradient(0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogWeightGradient(0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + kCalculateSparseTransposedAnalogWeightGradient(0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL); + + kLoadInputUnit(0, 0, 0, NULL, NULL); + kLoadInputUnit(0, 0, 0, NULL, NULL); + kLoadInputUnit(0, 0, 0, NULL, NULL); + kLoadInputUnit(0, 0, 0, NULL, NULL); + kLoadInputUnit(0, 0, 0, NULL, NULL); + kLoadInputUnit(0, 0, 0, NULL, NULL); + kLoadInputUnit(0, 0, 0, NULL, NULL); + kLoadInputUnit(0, 0, 0, NULL, NULL); +} + + +__global__ void +LAUNCH_BOUNDS() +kSGDUpdateWeights_kernel(NNFloat lambda, uint64_t size, NNFloat* pWeightGradient, NNFloat* pWeight) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < size) + { + NNFloat g = pWeightGradient[pos]; + 
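+ // Vanilla SGD step with L2 weight decay: w <- w + g - lambda * w. The learning
+ // rate is presumably already folded into pWeightGradient by the caller, since
+ // this kernel takes no alpha parameter.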
NNFloat w = pWeight[pos]; + pWeight[pos] = w + g - lambda * w; + } +} + +void kSGDUpdateWeights(NNFloat lambda, uint64_t size, NNFloat* pWeightGradient, NNFloat* pWeight) +{ + uint32_t blocks = CalculateBlocks(size); + kSGDUpdateWeights_kernel<<>>(lambda, size, pWeightGradient, pWeight); + LAUNCHERROR("kMomentumUpdateWeights_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kSGDUpdateBiases_kernel(NNFloat alpha, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBias) +{ + uint32_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < width) + { + NNFloat sum = 0.0f; + pDelta += pos; + + // Calculate bias gradient + for (uint32_t i = 0; i < batch; i++) + { + sum += *pDelta; + pDelta += width; + } + + // Update velocity and bias + NNFloat bias = pBias[pos]; + pBias[pos] = bias + alpha * sum; + } +} + +void kSGDUpdateBiases(NNFloat alpha, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBias) +{ + uint32_t blocks = CalculateBlocks(width); + kSGDUpdateBiases_kernel<<>>(alpha, batch, width, pDelta, pBias); + LAUNCHERROR("kSGDUpdateBiases_kernel"); +} + + +__global__ void +LAUNCH_BOUNDS() +kMomentumUpdateWeights_kernel(NNFloat lambda, NNFloat mu, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeightGradient, NNFloat* pWeight) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < size) + { + NNFloat g = pWeightGradient[pos]; + NNFloat w = pWeight[pos]; + NNFloat v = pWeightVelocity[pos]; + v = mu * v + g - lambda * w; + pWeightVelocity[pos] = v; + pWeight[pos] = w + v; + } +} + +void kMomentumUpdateWeights(NNFloat lambda, NNFloat mu, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeightGradient, NNFloat* pWeight) +{ + uint32_t blocks = CalculateBlocks(size); + kMomentumUpdateWeights_kernel<<>>(lambda, mu, size, pWeightVelocity, pWeightGradient, pWeight); + LAUNCHERROR("kMomentumUpdateWeights_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kMomentumUpdateBiases_kernel(NNFloat alpha, NNFloat mu, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBiasVelocity, NNFloat* pBias) +{ + uint32_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < width) + { + NNFloat sum = 0.0f; + pDelta += pos; + + // Calculate bias gradient + for (uint32_t i = 0; i < batch; i++) + { + sum += *pDelta; + pDelta += width; + } + + // Update velocity and bias + NNFloat v = pBiasVelocity[pos]; + v = mu * v - alpha * sum; + pBiasVelocity[pos] = v; + pBias[pos] += v; + } +} + +void kMomentumUpdateBiases(NNFloat alpha, NNFloat mu, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBiasVelocity, NNFloat* pBias) +{ + uint32_t blocks = CalculateBlocks(width); + kMomentumUpdateBiases_kernel<<>>(alpha, mu, batch, width, pDelta, pBiasVelocity, pBias); + LAUNCHERROR("kMomentumUpdateBiases_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kAdaGradUpdateWeights_kernel(NNFloat alpha, NNFloat lambda, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeightGradient, NNFloat* pWeight) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < size) + { + NNFloat g = pWeightGradient[pos]; + NNFloat w = pWeight[pos]; + NNFloat v = pWeightVelocity[pos]; + g -= lambda * w; + v += g * g; + pWeightVelocity[pos] = v; + pWeight[pos] = w + alpha * g * rsqrt(max(0.000000001f, v)); + } +} + +void kAdaGradUpdateWeights(NNFloat alpha, NNFloat lambda, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeightGradient, NNFloat* pWeight) +{ + unsigned long blocks = CalculateBlocks(size); + kAdaGradUpdateWeights_kernel<<>>(alpha, lambda, size, pWeightVelocity, pWeightGradient, 
pWeight); + LAUNCHERROR("kAdaGradUpdateWeights_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kAdaGradUpdateBiases_kernel(NNFloat alpha, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBiasVelocity, NNFloat* pBias) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < width) + { + NNFloat sum = 0.0f; + pDelta += pos; + + // Calculate bias gradient + for (uint32_t i = 0; i < batch; i++) + { + sum += *pDelta; + pDelta += width; + } + + // Update velocity and bias + NNFloat v = pBiasVelocity[pos]; + v += sum * sum; + pBiasVelocity[pos] = v; + pBias[pos] -= alpha * sum * rsqrt(max(0.000000001f, v)); + } +} + +void kAdaGradUpdateBiases(NNFloat alpha, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBiasVelocity, NNFloat* pBias) +{ + uint32_t blocks = CalculateBlocks(width); + kAdaGradUpdateBiases_kernel<<>>(alpha, batch, width, pDelta, pBiasVelocity, pBias); + LAUNCHERROR("kAdaGradUpdateBiases_kernel"); +} + 
+__global__ void +LAUNCH_BOUNDS() +kAdaDeltaUpdateWeights_kernel(NNFloat alpha, NNFloat mu, NNFloat lambda, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeightGradient, NNFloat* pWeightGradientVelocity, NNFloat* pWeight) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < size) + { + NNFloat g = pWeightGradient[pos]; + NNFloat w = pWeight[pos]; + NNFloat v = pWeightVelocity[pos]; + NNFloat vg = pWeightGradientVelocity[pos]; + g -= lambda * w; + // AdaDelta: v tracks the running mean of squared gradients, vg the running mean of squared updates + v = mu * v + ((NNFloat)1.0 - mu) * g * g; + NNFloat dw = alpha * g * sqrt(max((NNFloat)0.000000001, vg) / max((NNFloat)0.000000001, v)); + vg = mu * vg + ((NNFloat)1.0 - mu) * dw * dw; + pWeightVelocity[pos] = v; + pWeightGradientVelocity[pos] = vg; + pWeight[pos] = w + dw; + } +} + +void kAdaDeltaUpdateWeights(NNFloat alpha, NNFloat mu, NNFloat lambda, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeightGradient, NNFloat* pWeightGradientVelocity, NNFloat* pWeight) +{ + unsigned long blocks = CalculateBlocks(size); + kAdaDeltaUpdateWeights_kernel<<>>(alpha, mu, lambda, size, pWeightVelocity, pWeightGradient, pWeightGradientVelocity, pWeight); + LAUNCHERROR("kAdaDeltaUpdateWeights_kernel"); +} + 
+__global__ void +LAUNCH_BOUNDS() +kAdaDeltaUpdateBiases_kernel(NNFloat alpha, NNFloat mu, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBiasVelocity, NNFloat* pBiasGradientVelocity, NNFloat* pBias) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < width) + { + NNFloat sum = (NNFloat)0.0; + pDelta += pos; + + // Calculate bias gradient + for (uint32_t i = 0; i < batch; i++) + { + sum += *pDelta; + pDelta += width; + } + + // Update running averages and bias (AdaDelta) + NNFloat v = pBiasVelocity[pos]; + NNFloat vg = pBiasGradientVelocity[pos]; + v = mu * v + ((NNFloat)1.0 - mu) * sum * sum; + NNFloat dw = alpha * sum * sqrt(max((NNFloat)0.000000001, vg) / max((NNFloat)0.000000001, v)); + vg = mu * vg + ((NNFloat)1.0 - mu) * dw * dw; + pBiasVelocity[pos] = v; + pBiasGradientVelocity[pos] = vg; + pBias[pos] -= dw; + } +} + +void kAdaDeltaUpdateBiases(NNFloat alpha, NNFloat mu, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBiasVelocity, NNFloat* pBiasGradientVelocity, NNFloat* pBias) +{ + uint32_t blocks = CalculateBlocks(width); + kAdaDeltaUpdateBiases_kernel<<>>(alpha, mu, batch, width, pDelta, pBiasVelocity, pBiasGradientVelocity, pBias); + LAUNCHERROR("kAdaDeltaUpdateBiases_kernel"); +} + 
+__global__ void +LAUNCH_BOUNDS() +kNesterovUpdateWeights_kernel(NNFloat lambda, NNFloat mu, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeightGradient, NNFloat* pWeight) +{ + uint64_t pos 
= blockIdx.x * blockDim.x + threadIdx.x; + if (pos < size) + { + NNFloat g = pWeightGradient[pos]; + NNFloat w = pWeight[pos]; + NNFloat vOld = pWeightVelocity[pos]; + NNFloat vNew = mu * vOld + g - lambda * w; + pWeightVelocity[pos] = vNew; + w = w + vNew + mu * (vNew - vOld); + pWeight[pos] = w; + } +} + +void kNesterovUpdateWeights(NNFloat lambda, NNFloat mu, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeightGradient, NNFloat* pWeight) +{ + uint32_t blocks = CalculateBlocks(size); + kNesterovUpdateWeights_kernel<<>>(lambda, mu, size, pWeightVelocity, pWeightGradient, pWeight); + LAUNCHERROR("kNesterovUpdateWeights_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kNesterovUpdateBiases_kernel(NNFloat alpha, NNFloat mu, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBiasVelocity, NNFloat* pBias) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < width) + { + NNFloat sum = 0.0f; + pDelta += pos; + + // Calculate bias gradient + for (uint32_t i = 0; i < batch; i++) + { + sum += *pDelta; + pDelta += width; + } + + // Update velocity and bias + NNFloat vOld = pBiasVelocity[pos]; + NNFloat vNew = mu * vOld - alpha * sum; + pBiasVelocity[pos] = vNew; + pBias[pos] += vNew + mu * (vNew - vOld); + } +} + +void kNesterovUpdateBiases(NNFloat alpha, NNFloat mu, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBiasVelocity, NNFloat* pBias) +{ + uint32_t blocks = CalculateBlocks(width); + kNesterovUpdateBiases_kernel<<>>(alpha, mu, batch, width, pDelta, pBiasVelocity, pBias); + LAUNCHERROR("kNesterovUpdateBiases_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kNesterovShiftWeights_kernel(NNFloat mu, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeight) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < size) + { + NNFloat w = pWeight[pos]; + NNFloat v = pWeightVelocity[pos]; + pWeight[pos] = w + mu * v; + } +} + +void kNesterovShiftWeights(NNFloat mu, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeight) +{ + uint32_t blocks = CalculateBlocks(size); + kNesterovShiftWeights_kernel<<>>(mu, size, pWeightVelocity, pWeight); + LAUNCHERROR("kNesterovShiftWeights_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kNesterovShiftBiases_kernel(NNFloat mu, uint32_t width, NNFloat* pBiasVelocity, NNFloat* pBias) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < width) + { + NNFloat b = pBias[pos]; + NNFloat v = pBiasVelocity[pos]; + pBias[pos] = b + mu * v; + } +} + +void kNesterovShiftBiases(NNFloat mu, uint32_t width, NNFloat* pBiasVelocity, NNFloat* pBias) +{ + uint32_t blocks = CalculateBlocks(width); + kNesterovShiftBiases_kernel<<>>(mu, width, pBiasVelocity, pBias); + LAUNCHERROR("kNesterovShiftBiases_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kRMSPropUpdateWeights_kernel(NNFloat alpha, NNFloat lambda, NNFloat mu, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeightGradient, NNFloat* pWeight) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < size) + { + NNFloat g = pWeightGradient[pos]; + NNFloat w = pWeight[pos]; + NNFloat v = pWeightVelocity[pos]; + g -= lambda * w; + v = mu * v + (1.0f - mu) * g * g; + pWeightVelocity[pos] = v; + pWeight[pos] = w + alpha * g * rsqrt(max(0.000000001f, v)); + } +} + +void kRMSPropUpdateWeights(NNFloat alpha, NNFloat lambda, NNFloat mu, uint64_t size, NNFloat* pWeightVelocity, NNFloat* pWeightGradient, NNFloat* pWeight) +{ + uint32_t blocks = CalculateBlocks(size); + kRMSPropUpdateWeights_kernel<<>>(alpha, lambda, mu, size, pWeightVelocity, 
pWeightGradient, pWeight); + LAUNCHERROR("kRMSPropUpdateWeights_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kRMSPropUpdateBiases_kernel(NNFloat alpha, NNFloat mu, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBiasVelocity, NNFloat* pBias) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < width) + { + NNFloat sum = 0.0f; + pDelta += pos; + + // Calculate bias gradient + for (uint32_t i = 0; i < batch; i++) + { + sum += *pDelta; + pDelta += width; + } + + // Update velocity and bias + NNFloat v = pBiasVelocity[pos]; + v = mu * v + (1.0f - mu) * sum * sum; + pBiasVelocity[pos] = v; + pBias[pos] -= alpha * sum * rsqrt(max(0.000000001f, v)); + } +} + +void kRMSPropUpdateBiases(NNFloat alpha, NNFloat mu, uint32_t batch, uint32_t width, NNFloat* pDelta, NNFloat* pBiasVelocity, NNFloat* pBias) +{ + uint32_t blocks = CalculateBlocks(width); + kRMSPropUpdateBiases_kernel<<>>(alpha, mu, batch, width, pDelta, pBiasVelocity, pBias); + LAUNCHERROR("kRMSPropUpdateBiases_kernel"); +} + +#include "bitonic.h" +__global__ void +LAUNCH_BOUNDS() +kCalculateTopK_kernel(NNFloat* pOutputBuffer, NNFloat* pKeyBuffer, uint32_t* pValueBuffer, uint32_t batch, uint32_t width, uint32_t k) +{ +__shared__ volatile NNFloat sKey[160 * 4]; +__shared__ volatile uint32_t sValue[160 * 4]; + + + uint32_t pos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; + uint32_t tgx = threadIdx.x & cData._warpMask; + + + if (pos < batch) + { + NNFloat *pOutput = pOutputBuffer + pos * width; + uint32_t offset = threadIdx.x >> cData._warpBits; + volatile NNFloat* psKey = &sKey[160 * offset]; + volatile uint32_t* psValue = &sValue[160 * offset]; + + // Initialize values to + NNFloat k0 = -MAX_VALUE; + NNFloat k1 = -MAX_VALUE; + NNFloat k2 = -MAX_VALUE; + NNFloat k3 = -MAX_VALUE; + NNFloat k4 = -MAX_VALUE; + NNFloat k5 = -MAX_VALUE; + NNFloat k6 = -MAX_VALUE; + NNFloat k7 = -MAX_VALUE; + uint32_t v0 = 0; + uint32_t v1 = 0; + uint32_t v2 = 0; + uint32_t v3 = 0; + uint32_t v4 = 0; + uint32_t v5 = 0; + uint32_t v6 = 0; + uint32_t v7 = 0; + + // Read first 128 elements into registers + uint32_t wpos = tgx; + if (wpos < width) + { + k0 = pOutput[wpos]; + v0 = wpos; + } + wpos += cData._warpSize; + if (wpos < width) + { + k1 = pOutput[wpos]; + v1 = wpos; + } + wpos += cData._warpSize; + if (wpos < width) + { + k2 = pOutput[wpos]; + v2 = wpos; + } + wpos += cData._warpSize; + if (wpos < width) + { + k3 = pOutput[wpos]; + v3 = wpos; + } + + // Run through remainder of data + NNFloat minValue = -MAX_VALUE; + uint32_t rpos = 128; + uint32_t bufferSize = 0; + NNFloat key1, key2; + uint32_t value1, value2; + uint32_t otgx; + bool flag; + while (rpos < width) + { + // Read block of data + unsigned wpos = rpos + tgx; + NNFloat key = -MAX_VALUE; + uint32_t value = wpos; + if (wpos < width) + { + key = pOutput[wpos]; + } + + // Add values > minValue to shared memory buffer + uint32_t count = __ballot(key > minValue); + if (key > minValue) + { + uint32_t mask = 0xffffffff >> (32 - tgx); + uint32_t offset = __popc(count & mask); + offset += bufferSize; + psKey[offset] = key; + psValue[offset] = value; + } + bufferSize += __popc(count); + + // Check if buffer is full + if (bufferSize >= 128) + { + // Sort 256 elements + k4 = psKey[tgx]; + v4 = psValue[tgx]; + k5 = psKey[tgx + cData._warpSize]; + v5 = psValue[tgx + cData._warpSize]; + k6 = psKey[tgx + 2 * cData._warpSize]; + v6 = psValue[tgx + 2 * cData._warpSize]; + k7 = psKey[tgx + 3 * cData._warpSize]; + v7 = psValue[tgx + 3 * cData._warpSize]; + bool flag; + 
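+ // k0..k3/v0..v3 hold the current best 128 candidates (4 per warp thread) and
+ // k4..k7/v4..v7 were just loaded from the shared-memory buffer; BITONICSORT256_256()
+ // (from bitonic.h) presumably sorts all 256 register-resident key/value pairs so
+ // that the strongest 128 keys land back in k0..k3/v0..v3 for the next pass.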
BITONICSORT256_256(); + + // Shift members in shared memory to beginning + bufferSize -= 128; + if (tgx < bufferSize) + { + psKey[tgx] = psKey[tgx + 128]; + psValue[tgx] = psValue[tgx + 128]; + } + } + + // Advance to next block of data + rpos += cData._warpSize; + } + + // Do final sort if buffer has any remaining data + if ((bufferSize > 0) || (width < 128)) + { + // Store sentinel values in registers + k4 = -MAX_VALUE; + k5 = -MAX_VALUE; + k6 = -MAX_VALUE; + k7 = -MAX_VALUE; + v4 = 0; + v5 = 0; + v6 = 0; + v7 = 0; + + // Load last block of unsorted data into registers + if (tgx < bufferSize) + { + k4 = psKey[tgx]; + v4 = psValue[tgx]; + } + if (tgx < bufferSize - cData._warpSize) + { + k5 = psKey[tgx + cData._warpSize]; + v5 = psValue[tgx + cData._warpSize]; + } + if (tgx < bufferSize - 2 * cData._warpSize) + { + k6 = psKey[tgx + 2 * cData._warpSize]; + v6 = psValue[tgx + 2 * cData._warpSize]; + } + if (tgx < bufferSize - 3 * cData._warpSize) + { + k7 = psKey[tgx + 3 * cData._warpSize]; + v7 = psValue[tgx + 3 * cData._warpSize]; + } + + BITONICSORT256_256(); + } + + // Copy results to key and value pointers + NNFloat* pKey = pKeyBuffer + pos * k; + uint32_t* pValue = pValueBuffer + pos * k; + wpos = tgx; + if (wpos < k) + { + pKey[wpos] = k0; + pValue[wpos] = v0; + } + wpos += cData._warpSize; + if (wpos < k) + { + pKey[wpos] = k1; + pValue[wpos] = v1; + } + wpos += cData._warpSize; + if (wpos < k) + { + pKey[wpos] = k2; + pValue[wpos] = v2; + } + wpos += cData._warpSize; + if (wpos < k) + { + pKey[wpos] = k3; + pValue[wpos] = v3; + } + } +} + +void kCalculateTopK(NNFloat* pOutput, NNFloat *pKey, uint32_t* pValue, uint32_t batch, uint32_t width, uint32_t k) +{ + uint32_t blocks = (batch + 3) / 4; + kCalculateTopK_kernel<<>>(pOutput, pKey, pValue, batch, width, k); + LAUNCHERROR("kCalculateTopK_kernel"); +} + + +__global__ void +LAUNCH_BOUNDS() +kCalculateTopK_kernel(NNFloat* pOutputKey, NNFloat* pOutputValue, NNFloat* pKeyBuffer, NNFloat* pValueBuffer, uint32_t batch, uint32_t width, uint32_t k) +{ +__shared__ volatile NNFloat sKey[160 * 4]; +__shared__ volatile NNFloat sValue[160 * 4]; + + + uint32_t pos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; + uint32_t tgx = threadIdx.x & cData._warpMask; + + + if (pos < batch) + { + pOutputKey += pos * width; + pOutputValue += pos * width; + uint32_t offset = threadIdx.x >> cData._warpBits; + volatile NNFloat* psKey = &sKey[160 * offset]; + volatile NNFloat* psValue = &sValue[160 * offset]; + + // Initialize values to + NNFloat k0 = -MAX_VALUE; + NNFloat k1 = -MAX_VALUE; + NNFloat k2 = -MAX_VALUE; + NNFloat k3 = -MAX_VALUE; + NNFloat k4 = -MAX_VALUE; + NNFloat k5 = -MAX_VALUE; + NNFloat k6 = -MAX_VALUE; + NNFloat k7 = -MAX_VALUE; + NNFloat v0 = 0.0f; + NNFloat v1 = 0.0f; + NNFloat v2 = 0.0f; + NNFloat v3 = 0.0f; + NNFloat v4 = 0.0f; + NNFloat v5 = 0.0f; + NNFloat v6 = 0.0f; + NNFloat v7 = 0.0f; + + // Read first 128 elements into registers + uint32_t wpos = tgx; + if (wpos < width) + { + k0 = pOutputKey[wpos]; + v0 = pOutputValue[wpos]; + } + wpos += cData._warpSize; + if (wpos < width) + { + k1 = pOutputKey[wpos]; + v1 = pOutputValue[wpos]; + } + wpos += cData._warpSize; + if (wpos < width) + { + k2 = pOutputKey[wpos]; + v2 = pOutputValue[wpos]; + } + wpos += cData._warpSize; + if (wpos < width) + { + k3 = pOutputKey[wpos]; + v3 = pOutputValue[wpos]; + } + + // Run through remainder of data + NNFloat minValue = -MAX_VALUE; + uint32_t rpos = 128; + uint32_t bufferSize = 0; + NNFloat key1, key2; + NNFloat value1, value2; + 
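+ // key1/key2, value1/value2 and the otgx/flag declared below are scratch registers
+ // that appear to be consumed inside the BITONICSORT256_256() macro; they are not
+ // referenced directly in this kernel body.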
uint32_t otgx; + bool flag; + while (rpos < width) + { + // Read block of data + unsigned wpos = rpos + tgx; + NNFloat key = -MAX_VALUE; + NNFloat value = 0.0f; + if (wpos < width) + { + key = pOutputKey[wpos]; + value = pOutputValue[wpos]; + } + + // Add values > minValue to shared memory buffer + uint32_t count = __ballot(key > minValue); + if (key > minValue) + { + uint32_t mask = 0xffffffff >> (32 - tgx); + uint32_t offset = __popc(count & mask); + offset += bufferSize; + psKey[offset] = key; + psValue[offset] = value; + } + bufferSize += __popc(count); + + // Check if buffer is full + if (bufferSize >= 128) + { + // Sort 256 elements + k4 = psKey[tgx]; + v4 = psValue[tgx]; + k5 = psKey[tgx + cData._warpSize]; + v5 = psValue[tgx + cData._warpSize]; + k6 = psKey[tgx + 2 * cData._warpSize]; + v6 = psValue[tgx + 2 * cData._warpSize]; + k7 = psKey[tgx + 3 * cData._warpSize]; + v7 = psValue[tgx + 3 * cData._warpSize]; + bool flag; + BITONICSORT256_256(); + + // Shift members in shared memory to beginning + bufferSize -= 128; + if (tgx < bufferSize) + { + psKey[tgx] = psKey[tgx + 128]; + psValue[tgx] = psValue[tgx + 128]; + } + } + + // Advance to next block of data + rpos += cData._warpSize; + } + + // Do final sort if buffer has any remaining data + if ((bufferSize > 0) || (width < 128)) + { + // Store sentinel values in registers + k4 = -MAX_VALUE; + k5 = -MAX_VALUE; + k6 = -MAX_VALUE; + k7 = -MAX_VALUE; + v4 = 0; + v5 = 0; + v6 = 0; + v7 = 0; + + // Load last block of unsorted data into registers + if (tgx < bufferSize) + { + k4 = psKey[tgx]; + v4 = psValue[tgx]; + } + if (tgx < bufferSize - cData._warpSize) + { + k5 = psKey[tgx + cData._warpSize]; + v5 = psValue[tgx + cData._warpSize]; + } + if (tgx < bufferSize - 2 * cData._warpSize) + { + k6 = psKey[tgx + 2 * cData._warpSize]; + v6 = psValue[tgx + 2 * cData._warpSize]; + } + if (tgx < bufferSize - 3 * cData._warpSize) + { + k7 = psKey[tgx + 3 * cData._warpSize]; + v7 = psValue[tgx + 3 * cData._warpSize]; + } + + BITONICSORT256_256(); + } + + // Copy results to index and value pointers + NNFloat* pKey = pKeyBuffer + pos * k; + NNFloat* pValue = pValueBuffer + pos * k; + wpos = tgx; + if (wpos < k) + { + pKey[wpos] = k0; + pValue[wpos] = v0; + } + wpos += cData._warpSize; + if (wpos < k) + { + pKey[wpos] = k1; + pValue[wpos] = v1; + } + wpos += cData._warpSize; + if (wpos < k) + { + pKey[wpos] = k2; + pValue[wpos] = v2; + } + wpos += cData._warpSize; + if (wpos < k) + { + pKey[wpos] = k3; + pValue[wpos] = v3; + } + } +} + +void kCalculateTopK(NNFloat* pOutputKey, NNFloat* pOutputValue, NNFloat *pKey, NNFloat* pValue, uint32_t batch, uint32_t width, uint32_t k) +{ + uint32_t blocks = (batch + 3) / 4; + kCalculateTopK_kernel<<>>(pOutputKey, pOutputValue, pKey, pValue, batch, width, k); + LAUNCHERROR("kCalculateTopK_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kCalculateTopK_kernel(NNFloat* pOutputKey, uint32_t* pOutputValue, NNFloat* pKeyBuffer, uint32_t* pValueBuffer, uint32_t batch, uint32_t width, uint32_t k) +{ +__shared__ volatile NNFloat sKey[160 * 4]; +__shared__ volatile uint32_t sValue[160 * 4]; + uint32_t pos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits; + uint32_t tgx = threadIdx.x & cData._warpMask; + + if (pos < batch) + { + pOutputKey += pos * width; + pOutputValue += pos * width; + uint32_t offset = threadIdx.x >> cData._warpBits; + volatile NNFloat* psKey = &sKey[160 * offset]; + volatile uint32_t* psValue = &sValue[160 * offset]; + + // Initialize values to + NNFloat k0 = -MAX_VALUE; + NNFloat k1 = 
-MAX_VALUE; + NNFloat k2 = -MAX_VALUE; + NNFloat k3 = -MAX_VALUE; + NNFloat k4 = -MAX_VALUE; + NNFloat k5 = -MAX_VALUE; + NNFloat k6 = -MAX_VALUE; + NNFloat k7 = -MAX_VALUE; + uint32_t v0 = 0; + uint32_t v1 = 0; + uint32_t v2 = 0; + uint32_t v3 = 0; + uint32_t v4 = 0; + uint32_t v5 = 0; + uint32_t v6 = 0; + uint32_t v7 = 0; + + // Read first 128 elements into registers + uint32_t wpos = tgx; + if (wpos < width) + { + k0 = pOutputKey[wpos]; + v0 = pOutputValue[wpos]; + } + wpos += cData._warpSize; + if (wpos < width) + { + k1 = pOutputKey[wpos]; + v1 = pOutputValue[wpos]; + } + wpos += cData._warpSize; + if (wpos < width) + { + k2 = pOutputKey[wpos]; + v2 = pOutputValue[wpos]; + } + wpos += cData._warpSize; + if (wpos < width) + { + k3 = pOutputKey[wpos]; + v3 = pOutputValue[wpos]; + } + + // Run through remainder of data + NNFloat minValue = -MAX_VALUE; + uint32_t rpos = 128; + uint32_t bufferSize = 0; + NNFloat key1, key2; + uint32_t value1, value2; + uint32_t otgx; + bool flag; + while (rpos < width) + { + // Read block of data + unsigned wpos = rpos + tgx; + NNFloat key = -MAX_VALUE; + NNFloat value = 0.0f; + if (wpos < width) + { + key = pOutputKey[wpos]; + value = pOutputValue[wpos]; + } + + // Add values > minValue to shared memory buffer + uint32_t count = __ballot(key > minValue); + if (key > minValue) + { + uint32_t mask = 0xffffffff >> (32 - tgx); + uint32_t offset = __popc(count & mask); + offset += bufferSize; + psKey[offset] = key; + psValue[offset] = value; + } + bufferSize += __popc(count); + + // Check if buffer is full + if (bufferSize >= 128) + { + // Sort 256 elements + k4 = psKey[tgx]; + v4 = psValue[tgx]; + k5 = psKey[tgx + cData._warpSize]; + v5 = psValue[tgx + cData._warpSize]; + k6 = psKey[tgx + 2 * cData._warpSize]; + v6 = psValue[tgx + 2 * cData._warpSize]; + k7 = psKey[tgx + 3 * cData._warpSize]; + v7 = psValue[tgx + 3 * cData._warpSize]; + bool flag; + BITONICSORT256_256(); + + // Shift members in shared memory to beginning + bufferSize -= 128; + if (tgx < bufferSize) + { + psKey[tgx] = psKey[tgx + 128]; + psValue[tgx] = psValue[tgx + 128]; + } + } + + // Advance to next block of data + rpos += cData._warpSize; + } + + // Do final sort if buffer has any remaining data + if ((bufferSize > 0) || (width < 128)) + { + // Store sentinel values in registers + k4 = -MAX_VALUE; + k5 = -MAX_VALUE; + k6 = -MAX_VALUE; + k7 = -MAX_VALUE; + v4 = 0; + v5 = 0; + v6 = 0; + v7 = 0; + + // Load last block of unsorted data into registers + if (tgx < bufferSize) + { + k4 = psKey[tgx]; + v4 = psValue[tgx]; + } + if (tgx < bufferSize - cData._warpSize) + { + k5 = psKey[tgx + cData._warpSize]; + v5 = psValue[tgx + cData._warpSize]; + } + if (tgx < bufferSize - 2 * cData._warpSize) + { + k6 = psKey[tgx + 2 * cData._warpSize]; + v6 = psValue[tgx + 2 * cData._warpSize]; + } + if (tgx < bufferSize - 3 * cData._warpSize) + { + k7 = psKey[tgx + 3 * cData._warpSize]; + v7 = psValue[tgx + 3 * cData._warpSize]; + } + + BITONICSORT256_256(); + } + + // Copy results to index and value pointers + NNFloat* pKey = pKeyBuffer + pos * k; + uint32_t* pValue = pValueBuffer + pos * k; + wpos = tgx; + if (wpos < k) + { + pKey[wpos] = k0; + pValue[wpos] = v0; + } + wpos += cData._warpSize; + if (wpos < k) + { + pKey[wpos] = k1; + pValue[wpos] = v1; + } + wpos += cData._warpSize; + if (wpos < k) + { + pKey[wpos] = k2; + pValue[wpos] = v2; + } + wpos += cData._warpSize; + if (wpos < k) + { + pKey[wpos] = k3; + pValue[wpos] = v3; + } + } +} + + +void kCalculateTopK(NNFloat* pOutputKey, uint32_t* 
pOutputValue, NNFloat *pKey, uint32_t * pValue, uint32_t batch, uint32_t width, uint32_t k) +{ + uint32_t blocks = (batch + 3) / 4; + kCalculateTopK_kernel<<>>(pOutputKey, pOutputValue, pKey, pValue, batch, width, k); + LAUNCHERROR("kCalculateTopK_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kNormalizeWeights_kernel(NNFloat norm, uint32_t outputStride, uint32_t inputStride, NNFloat* pWeight) +{ + uint32_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; + if (pos < outputStride) + { + NNFloat r2 = 0.0f; + NNFloat* pEnd = pWeight + outputStride * inputStride; + pWeight += pos; + NNFloat* p = pWeight; + + // Calculate squared weight vector length + while (p < pEnd) + { + NNFloat x = *p; + r2 += x * x; + p += outputStride; + } + + // Normalize if necessary + if (r2 > norm * norm) + { + norm *= rsqrt(r2); + p = pWeight; + while (p < pEnd) + { + *p *= norm; + p += outputStride; + } + } + } + +} + +void kNormalizeWeights(NNFloat norm, uint32_t outputStride, uint32_t inputStride, NNFloat* pWeight) +{ + uint32_t blocks = (outputStride + 127) / 128; + kNormalizeWeights_kernel<<>>(norm, outputStride, inputStride, pWeight); + LAUNCHERROR("kNormalizeWeights_kernel"); +} + + +__global__ void +LAUNCH_BOUNDS() +kCalculateWeightMagnitudes_kernel(uint32_t outputStride, uint32_t inputStride, NNFloat* pWeight, NNFloat* pMagnitude) +{ + uint32_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; + if (pos < outputStride) + { + NNFloat r2 = 0.0f; + NNFloat* pEnd = pWeight + outputStride * inputStride; + pWeight += pos; + NNFloat* p = pWeight; + + // Calculate squared weight vector length + while (p < pEnd) + { + NNFloat x = *p; + r2 += x * x; + p += outputStride; + } + + // Output to accumulator + pMagnitude[pos] = r2; + } + +} + +void kCalculateWeightMagnitudes(uint32_t outputStride, uint32_t inputStride, NNFloat* pWeight, NNFloat* pMagnitude) +{ + uint32_t blocks = (outputStride + 127) / 128; + kCalculateWeightMagnitudes_kernel<<>>(outputStride, inputStride, pWeight, pMagnitude); + LAUNCHERROR("kCalculateWeightMagnitudes_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kNormalizeWeightMagnitudes_kernel(NNFloat norm, uint32_t outputStride, uint32_t inputStride, NNFloat* pWeight, NNFloat* pMagnitude) +{ + uint32_t pos = (blockIdx.x * blockDim.x) + threadIdx.x; + if (pos < outputStride) + { + NNFloat r2 = pMagnitude[pos]; + NNFloat* pEnd = pWeight + outputStride * inputStride; + pWeight += pos; + NNFloat* p = pWeight; + + // Normalize if necessary + if (r2 > norm * norm) + { + norm *= rsqrt(r2); + p = pWeight; + while (p < pEnd) + { + *p *= norm; + p += outputStride; + } + } + } + +} + +void kNormalizeWeightMagnitudes(NNFloat norm, uint32_t outputStride, uint32_t inputStride, NNFloat* pWeight, NNFloat* pMagnitude) +{ + uint32_t blocks = (outputStride + 127) / 128; + kNormalizeWeightMagnitudes_kernel<<>>(norm, outputStride, inputStride, pWeight, pMagnitude); + LAUNCHERROR("kNormalizeWeightMagnitudes_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kCalculateDropout_kernel(NNFloat* pUnit, NNFloat* pRandom, NNFloat p, NNFloat scale) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + NNFloat r = pRandom[pos]; + if (r < p) + pUnit[pos] = (NNFloat)0.0; + NNFloat o = pUnit[pos]; + o = (r < p) ? 
(NNFloat)0.0 : o; + pUnit[pos] = scale * o; +} + +void kCalculateDropout(NNFloat* pUnit, NNFloat* pRandom, uint32_t batch, uint32_t stride, NNFloat p) +{ + curandGenerateUniform(getGpu()._RNG, pRandom, batch * stride); + unsigned long blocks = CalculateBlocks(batch * stride); + NNFloat scale = (NNFloat)1.0 / ((NNFloat)1.0 - p); + kCalculateDropout_kernel<<>>(pUnit, pRandom, p, scale); + LAUNCHERROR("kCalculateDropout_kernel"); +} + +#include "cub/util_allocator.cuh" +#include "cub/device/device_radix_sort.cuh" +template size_t kInitSort(uint32_t items, GpuBuffer* pbKey, GpuBuffer* pbValue) +{ + uint32_t itemStride = ((items + 511) >> 9) << 9; + size_t tempBytes; + cub::DoubleBuffer d_keys(pbKey->_pDevData, pbKey->_pDevData + itemStride); + cub::DoubleBuffer d_values(pbValue->_pDevData, pbValue->_pDevData + itemStride); + cub::DeviceRadixSort::SortPairs(NULL, tempBytes, d_keys, d_values, items); + return tempBytes; +} + +template bool kSort(uint32_t items, KeyType* pKey0, KeyType* pKey1, ValueType* pValue0, ValueType* pValue1, char* pTemp, size_t tempBytes) +{ + cub::DoubleBuffer d_keys(pKey0, pKey1); + cub::DoubleBuffer d_values(pValue0, pValue1); + cub::DeviceRadixSort::SortPairs(pTemp, tempBytes, d_keys, d_values, items); + return true; +} +__global__ void +LAUNCH_BOUNDS() +kAddBuffers_kernel(NNFloat* pDst, NNFloat* pSrc, uint64_t size) +{ + uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x; + if (pos < size) + *(pDst + pos) += *(pSrc + pos); +} + +void kAddBuffers(NNFloat* pDst, NNFloat* pSrc, uint64_t size) +{ + uint32_t blocks = CalculateBlocks(size); + kAddBuffers_kernel<<>>(pDst, pSrc, size); + LAUNCHERROR("kAddBuffers_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kAddBuffers2D_kernel(NNFloat* pDst, uint32_t dpitch, NNFloat* pSrc, uint32_t spitch, uint32_t width) +{ + uint64_t yOffset = blockIdx.y * blockDim.x + threadIdx.x; + if (yOffset < width) + { + uint64_t dpos = blockIdx.x * dpitch + yOffset; + uint64_t spos = blockIdx.x * spitch + yOffset; + pDst[dpos] += pSrc[spos]; + } +} + +void kAddBuffers2D(NNFloat* pDst, uint32_t dpitch, NNFloat* pSrc, uint32_t spitch, uint32_t width, uint32_t height) +{ + dim3 grid(height, (width + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); + kAddBuffers2D_kernel<<>>(pDst, dpitch, pSrc, spitch, width); + LAUNCHERROR("kAddBuffers2D_kernel"); +} + +__global__ void +LAUNCH_BOUNDS() +kCopy2D_kernel(NNFloat* pDst, uint32_t dpitch, NNFloat* pSrc, uint32_t spitch, uint32_t width) +{ + uint64_t yOffset = blockIdx.y * blockDim.x + threadIdx.x; + if (yOffset < width) + { + uint64_t dpos = blockIdx.x * dpitch + yOffset; + uint64_t spos = blockIdx.x * spitch + yOffset; + pDst[dpos] = pSrc[spos]; + } +} + +void kCopy2D(NNFloat* pDst, uint32_t dpitch, NNFloat* pSrc, uint32_t spitch, uint32_t width, uint32_t height) +{ + dim3 grid(height, (width + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock); + kCopy2D_kernel<<>>(pDst, dpitch, pSrc, spitch, width); + LAUNCHERROR("kCopy2D_kernel"); +} + + diff --git a/cuda_code/kernels_205.cu b/cuda_code/kernels_205.cu new file mode 100644 index 0000000000000000000000000000000000000000..8a8fcf3a4421262940f0ed3cfa6710c0b63436d2 --- /dev/null +++ b/cuda_code/kernels_205.cu @@ -0,0 +1,279 @@ +/****************************************************************************** + * Copyright 2020 The Apollo Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *****************************************************************************/ + +#include "modules/perception/inference/tensorrt/plugins/kernels.h" + +namespace apollo { +namespace perception { +namespace inference { + +// Decode bbox. +// boxes dims: [num_box, 4], deltas dims: [N, num_box, C, 4], +// out_boxes dims: [N, num_box, C, 4] +// nthreads = N * num_box * C +__global__ void bbox_transform_inv_kernel( + const int nthreads, const float *boxes, const float *deltas, + const int num_box, const int num_channel, float *out_boxes) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index >= nthreads) { + return; + } + + int box_id = (index / num_channel) % num_box; + + float x_min = boxes[box_id * 4]; + float y_min = boxes[box_id * 4 + 1]; + float x_max = boxes[box_id * 4 + 2]; + float y_max = boxes[box_id * 4 + 3]; + float w = x_max - x_min + 1; + float h = y_max - y_min + 1; + float x_ctr = x_min + 0.5 * (w - 1); + float y_ctr = y_min + 0.5 * (h - 1); + + float dx = deltas[index * 4]; + float dy = deltas[index * 4 + 1]; + float dw = deltas[index * 4 + 2]; + float dh = deltas[index * 4 + 3]; + + float pred_x_ctr = dx * w + x_ctr; + float pred_y_ctr = dy * h + y_ctr; + float pred_w = std::exp(dw) * w; + float pred_h = std::exp(dh) * h; + + out_boxes[index * 4] = pred_x_ctr - 0.5 * (pred_w - 1); // pred x_min + out_boxes[index * 4 + 1] = pred_y_ctr - 0.5 * (pred_h - 1); // pred y_min + out_boxes[index * 4 + 2] = pred_x_ctr + 0.5 * (pred_w - 1); // pred x_max + out_boxes[index * 4 + 3] = pred_y_ctr + 0.5 * (pred_h - 1); // pred y_max +} + +// boxes dim: [N, num_box, 4], nthreads = N * num_box * 4 +__global__ void clip_boxes_kernel(const int nthreads, float *boxes, + const float height, const float width) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index >= nthreads) { + return; + } + + // refine x_min, x_max to be in [0, img_width) + if (index % 4 == 0 || index % 4 == 2) { + if (boxes[index] < 0) { + boxes[index] = 0; + } else if (boxes[index] > width - 1) { + boxes[index] = width - 1; + } + } else { // refine y_min, y_max to be in [0, img_height) + if (boxes[index] < 0) { + boxes[index] = 0; + } else if (boxes[index] > height - 1) { + boxes[index] = height - 1; + } + } +} + +// boxes dims: [N, num_box, num_channel, 4], +// filtered_boxes dims: [N, num_box, 4] +// scores dims: [N, num_box, num_class], filtered_scores dims: [N, num_box] +// all_probs dims: [N, num_box, num_prob], +// filtered_all_probs dims: [N, num_box, num_prob] +// filtered_count dims: [N] +__global__ void filter_boxes_kernel( + const int nthreads, const float *boxes, const float *scores, + const float *all_probs, const int num_box, const int num_channel, + const int num_class, const int num_prob, const int filter_channel, + const int filter_class, const int min_size_mode, const float min_size_h, + const float min_size_w, const float threshold_score, float *filtered_boxes, + float *filtered_scores, float *filtered_all_probs, int *filtered_count) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index >= nthreads) { + return; + } + + int batch_id = index / num_box; + 
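+  // A candidate box survives only if its filter_class score exceeds threshold_score
+  // and it passes the size test below: min_size_mode 0 requires both height and width
+  // to reach the minimums, while mode 1 rejects a box only when both dimensions are
+  // too small. Survivors are compacted via atomicAdd on filtered_count[batch_id].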
if (scores[index * num_class + filter_class] > threshold_score) { + bool keep = true; + int box_id = index * num_channel + filter_channel; + float w = boxes[box_id * 4 + 2] - boxes[box_id * 4] + 1; + float h = boxes[box_id * 4 + 3] - boxes[box_id * 4 + 1] + 1; + if (min_size_mode == 0) { + // filter boxes with minimum size of height & width + if (h < min_size_h || w < min_size_w) { + keep = false; + } + } else if (min_size_mode == 1) { + // filter boxes with minimum size of height or width + if (h < min_size_h && w < min_size_w) { + keep = false; + } + } + + if (keep) { + int counter = atomicAdd(&filtered_count[batch_id], 1); + for (int i = 0; i < 4; ++i) { + filtered_boxes[batch_id * num_box * 4 + counter * 4 + i] = + boxes[box_id * 4 + i]; + } + filtered_scores[batch_id * num_box + counter] = + scores[index * num_class + filter_class]; + if (all_probs != nullptr && filtered_all_probs != nullptr) { + for (int i = 0; i < num_prob; ++i) { + filtered_all_probs[batch_id * num_box * num_prob + + counter * num_prob + i] = + all_probs[index * num_prob + i]; + } + } + } + } +} + +// Gather boxes by indexes and keep top N boxes. +// boxes dims: [N, num_box, 4], scores dims: [N, num_box], +// all_probs dims: [N, num_box, num_prob] +// indexes dims: [N, num_box], count dims: [N] +// out_boxes dims: [N, topN, 4], out_scores dims: [N, topN] +// out_all_probs dims: [N, topN, num_prob] +// nthreads = N * max_num_box +__global__ void keep_topN_boxes_kernel( + const int nthreads, const float *boxes, const float *scores, + const float *all_probs, const int *indexes, const int *count, + const bool keep_score, const int num_box, const int num_prob, + const int topN, float *out_boxes, float *out_scores, float *out_all_probs) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index >= nthreads) { + return; + } + + int batch_id = index / topN; + int box_id = index % topN; + if (box_id < count[batch_id]) { + int in_box_id = indexes[batch_id * num_box + box_id]; + for (int i = 0; i < 4; ++i) { + out_boxes[index * 4 + i] = + boxes[batch_id * num_box * 4 + in_box_id * 4 + i]; + } + + if (keep_score) { + out_scores[index] = scores[batch_id * num_box + in_box_id]; + for (int i = 0; i < num_prob; i++) { + out_all_probs[index * num_prob + i] = + all_probs[batch_id * num_box * num_prob + in_box_id * num_prob + i]; + } + } + } +} + +__global__ void repeatedly_add_kernel(const int nthreads, const float *in_data, + float *out_data, const float *add_vec, + int add_vec_size) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index < nthreads) { + out_data[index] = in_data[index] + add_vec[index % add_vec_size]; + } +} + +__global__ void repeatedly_mul_kernel(const int nthreads, const float *in_data, + float *out_data, const float *mul_vec, + int mul_vec_size) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index < nthreads) { + out_data[index] = in_data[index] * mul_vec[index % mul_vec_size]; + } +} + +// input dims: [N, C], output dims: [N, C_sliced] +__global__ void slice2d_kernel(const int nthreads, const float *in_data, + float *out_data, const int *slice_axises, + int slice_axis_num, int input_axis_size) { + int out_index = threadIdx.x + blockIdx.x * blockDim.x; + if (out_index < nthreads) { + int id = out_index / slice_axis_num; + int slice_axis_id = out_index % slice_axis_num; + int in_index = slice_axises[slice_axis_id] + id * input_axis_size; + out_data[out_index] = in_data[in_index]; + } +} + +void bbox_transform_inv_cuda(int block_size, int thread_size, int shared_mem, + 
                             cudaStream_t stream, const int nthreads,
+                             const float *boxes, const float *deltas,
+                             const int num_box, const int num_channel,
+                             float *out_boxes) {
+  bbox_transform_inv_kernel<<<block_size, thread_size, shared_mem, stream>>>(
+      nthreads, boxes, deltas, num_box, num_channel, out_boxes);
+}
+
+void clip_boxes_cuda(int block_size, int thread_size, int shared_mem,
+                     cudaStream_t stream, const int nthreads, float *boxes,
+                     const float height, const float width) {
+  clip_boxes_kernel<<<block_size, thread_size, shared_mem, stream>>>(
+      nthreads, boxes, height, width);
+}
+
+void filter_boxes_cuda(
+    int block_size, int thread_size, int shared_mem, cudaStream_t stream,
+    const int nthreads, const float *boxes, const float *scores,
+    const float *all_probs, const int num_box, const int num_channel,
+    const int num_class, const int num_prob, const int filter_channel,
+    const int filter_class, const int min_size_mode, const float min_size_h,
+    const float min_size_w, const float threshold_score, float *filtered_boxes,
+    float *filtered_scores, float *filtered_all_probs, int *filtered_count) {
+  filter_boxes_kernel<<<block_size, thread_size, shared_mem, stream>>>(
+      nthreads, boxes, scores, all_probs, num_box, num_channel, num_class,
+      num_prob, filter_channel, filter_class, min_size_mode, min_size_h,
+      min_size_w, threshold_score, filtered_boxes, filtered_scores,
+      filtered_all_probs, filtered_count);
+}
+
+void keep_topN_boxes_cuda(int block_size, int thread_size, int shared_mem,
+                          cudaStream_t stream, const int nthreads,
+                          const float *boxes, const float *scores,
+                          const float *all_probs, const int *indexes,
+                          const int *count, const bool keep_score,
+                          const int num_box, const int num_prob, const int topN,
+                          float *out_boxes, float *out_scores,
+                          float *out_all_probs) {
+  keep_topN_boxes_kernel<<<block_size, thread_size, shared_mem, stream>>>(
+      nthreads, boxes, scores, all_probs, indexes, count, keep_score, num_box,
+      num_prob, topN, out_boxes, out_scores, out_all_probs);
+}
+
+void repeatedly_add_cuda(int block_size, int thread_size, int shared_mem,
+                         cudaStream_t stream, const int nthreads,
+                         const float *in_data, float *out_data,
+                         const float *add_vec, int add_vec_size) {
+  repeatedly_add_kernel<<<block_size, thread_size, shared_mem, stream>>>(
+      nthreads, in_data, out_data, add_vec, add_vec_size);
+}
+
+void repeatedly_mul_cuda(int block_size, int thread_size, int shared_mem,
+                         cudaStream_t stream, const int nthreads,
+                         const float *in_data, float *out_data,
+                         const float *mul_vec, int mul_vec_size) {
+  repeatedly_mul_kernel<<<block_size, thread_size, shared_mem, stream>>>(
+      nthreads, in_data, out_data, mul_vec, mul_vec_size);
+}
+
+void slice2d_cuda(int block_size, int thread_size, int shared_mem,
+                  cudaStream_t stream, const int nthreads, const float *in_data,
+                  float *out_data, const int *slice_axises, int slice_axis_num,
+                  int input_axis_size) {
+  slice2d_kernel<<<block_size, thread_size, shared_mem, stream>>>(
+      nthreads, in_data, out_data, slice_axises, slice_axis_num,
+      input_axis_size);
+}
+
+} // namespace inference
+} // namespace perception
+} // namespace apollo
\ No newline at end of file
diff --git a/cuda_code/kernels_68.cu b/cuda_code/kernels_68.cu
new file mode 100644
index 0000000000000000000000000000000000000000..f90d71d579921262c3ca3e6f6f6d785a82043ed9
--- /dev/null
+++ b/cuda_code/kernels_68.cu
@@ -0,0 +1,577 @@
+//
+//  kernels.cu
+//  Burgers3d-GPU
+//
+//  Created by Manuel Diaz on 7/26/16.
+//  Copyright © 2016 Manuel Diaz. All rights reserved.
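//
//  Overview: the device code below implements a WENO5 / WENO5-Z
//  finite-difference discretization of the 3-D Burgers equation
//  (flux derivatives along x, y and z plus a Laplacian diffusion term),
//  advanced in time with an SSP Runge-Kutta stepper (Compute_RK).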
+// + +extern "C" { +#include "Burgers.h" +} + +#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__) + +__constant__ REAL d_kx; +__constant__ REAL d_ky; +__constant__ REAL d_kz; + +/*********************************************/ +/* A method for checking error in CUDA calls */ +/*********************************************/ +inline void __checkCuda(cudaError_t error, const char *file, const int line) +{ + #if defined(DISPL) + if (error != cudaSuccess) + { + printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(cudaGetLastError())); + exit(-1); + } + #endif + return; +} + +/*****************/ +/* FLUX FUNCTION */ +/*****************/ +__device__ REAL Flux( + const REAL u){ + return 0.5*u*u; +} + +// ************************************************************************* +// Input: v(i) = [v(i-2) v(i-1) v(i) v(i+1) v(i+2) v(i+3)]; +// Output: res = df/dx; +// +// Based on: +// C.W. Shu's Lectures notes on: 'ENO and WENO schemes for Hyperbolic +// Conservation Laws' +// +// coded by Manuel Diaz, 02.10.2012, NTU Taiwan. +// ************************************************************************* +// +// Domain cells (I{i}) reference: +// +// | | u(i) | | +// | u(i-1) |___________| | +// |___________| | u(i+1) | +// | | |___________| +// ...|-----0-----|-----0-----|-----0-----|... +// | i-1 | i | i+1 | +// |- +|- +|- +| +// i-3/2 i-1/2 i+1/2 i+3/2 +// +// ENO stencils (S{r}) reference: +// +// |___________S2__________| +// | | +// |___________S1__________| | +// | | | using only f^{+} +// |___________S0__________| | | +// ..|---o---|---o---|---o---|---o---|---o---|... +// | I{i-2}| I{i-1}| I{i} | I{i+1}| I{i+2}| +// -| +// i+1/2 +// +// |___________S0__________| +// | | +// | |___________S1__________| using only f^{-} +// | | | +// | | |___________S2__________| +// ..|---o---|---o---|---o---|---o---|---o---|... 
+// | I{i-1}| I{i} | I{i+1}| I{i+2}| I{i+3}| +// |+ +// i+1/2 +// +// WENO stencil: S{i} = [ I{i-2},...,I{i+3} ] +// ************************************************************************* + +/***********************/ +/* WENO RECONSTRUCTION */ +/***********************/ +__device__ REAL WENO5reconstruction( + const REAL vmm, + const REAL vm, + const REAL v, + const REAL vp, + const REAL vpp, + const REAL umm, + const REAL um, + const REAL u, + const REAL up, + const REAL upp) +{ + REAL B0, B1, B2, a0, a1, a2, alphasum, dflux; + + // Smooth Indicators (Beta factors) + B0 = C1312*(vmm-2*vm+v )*(vmm-2*vm+v ) + C14*(vmm-4*vm+3*v)*(vmm-4*vm+3*v); + B1 = C1312*(vm -2*v +vp )*(vm -2*v +vp ) + C14*(vm-vp)*(vm-vp); + B2 = C1312*(v -2*vp+vpp)*(v -2*vp+vpp) + C14*(3*v-4*vp+vpp)*(3*v-4*vp+vpp); + + // Alpha weights + a0 = D0N/((EPS + B0)*(EPS + B0)); + a1 = D1N/((EPS + B1)*(EPS + B1)); + a2 = D2N/((EPS + B2)*(EPS + B2)); + alphasum = a0 + a1 + a2; + + // Numerical Flux at cell boundary, $v_{i+1/2}^{-}$; + dflux =(a0*(2*vmm- 7*vm + 11*v) + + a1*( -vm + 5*v + 2*vp) + + a2*( 2*v + 5*vp - vpp ))/(6*alphasum); + + // Smooth Indicators (Beta factors) + B0 = C1312*(umm-2*um+u )*(umm-2*um +u ) + C14*(umm-4*um+3*u)*(umm-4*um+3*u); + B1 = C1312*(um -2*u +up )*(um -2*u +up ) + C14*(um-up)*(um-up); + B2 = C1312*(u -2*up+upp)*(u -2*up +upp) + C14*(3*u-4*up+upp)*(3*u-4*up+upp); + + // Alpha weights + a0 = D0P/((EPS + B0)*(EPS + B0)); + a1 = D1P/((EPS + B1)*(EPS + B1)); + a2 = D2P/((EPS + B2)*(EPS + B2)); + alphasum = a0 + a1 + a2; + + // Numerical Flux at cell boundary, $v_{i+1/2}^{+}$; + dflux+=(a0*( -umm + 5*um + 2*u ) + + a1*( 2*um + 5*u - up ) + + a2*(11*u - 7*up + 2*upp))/(6*alphasum); + + // Compute the numerical flux v_{i+1/2} + return dflux; +} + +__device__ REAL WENO5Zreconstruction( + const REAL vmm, + const REAL vm, + const REAL v, + const REAL vp, + const REAL vpp, + const REAL umm, + const REAL um, + const REAL u, + const REAL up, + const REAL upp) +{ + REAL B0, B1, B2, a0, a1, a2, tau5, alphasum, dflux; + + // Smooth Indicators (Beta factors) + B0 = C1312*(vmm-2*vm+v )*(vmm-2*vm+v ) + C14*(vmm-4*vm+3*v)*(vmm-4*vm+3*v); + B1 = C1312*(vm -2*v +vp )*(vm -2*v +vp ) + C14*(vm-vp)*(vm-vp); + B2 = C1312*(v -2*vp+vpp)*(v -2*vp+vpp) + C14*(3*v-4*vp+vpp)*(3*v-4*vp+vpp); + + // Alpha weights + tau5 = fabs(B0-B2); + a0 = D0N*(1.+tau5/(B0+EPS)); + a1 = D1N*(1.+tau5/(B1+EPS)); + a2 = D2N*(1.+tau5/(B2+EPS)); + alphasum = a0 + a1 + a2; + + // Numerical Flux at cell boundary, $v_{i+1/2}^{-}$; + dflux =(a0*(2*vmm- 7*vm + 11*v) + + a1*( -vm + 5*v + 2*vp) + + a2*( 2*v + 5*vp - vpp ))/(6*alphasum); + + // Smooth Indicators (Beta factors) + B0 = C1312*(umm-2*um+u )*(umm-2*um +u ) + C14*(umm-4*um+3*u)*(umm-4*um+3*u); + B1 = C1312*(um -2*u +up )*(um -2*u +up ) + C14*(um-up)*(um-up); + B2 = C1312*(u -2*up+upp)*(u -2*up +upp) + C14*(3*u-4*up+upp)*(3*u-4*up+upp); + + // Alpha weights + tau5 = fabs(B0-B2); + a0 = D0P*(1.+tau5/(B0+EPS)); + a1 = D1P*(1.+tau5/(B1+EPS)); + a2 = D2P*(1.+tau5/(B2+EPS)); + alphasum = a0 + a1 + a2; + + // Numerical Flux at cell boundary, $v_{i+1/2}^{+}$; + dflux+=(a0*( -umm + 5*um + 2*u ) + + a1*( 2*um + 5*u - up ) + + a2*(11*u - 7*up + 2*upp))/(6*alphasum); + + // Compute the numerical flux v_{i+1/2} + return dflux; +} + +/*****************/ +/* Compute du/dx */ // <==== parallel strategy: compute serialy by rows or by columns! 
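//
// Note on the two reconstructions above: whenever B0 ~ B1 ~ B2 (smooth data)
// the normalized weights a_k/(a0+a1+a2) reduce to the ideal linear weights
// (classically d = {1/10, 6/10, 3/10}, here presumably supplied through the
// D0N/D1N/D2N and D0P/D1P/D2P macros from Burgers.h), recovering the
// 5th-order upwind-biased scheme; a substencil that crosses a discontinuity
// gets a large smoothness indicator, so its weight collapses toward zero.
// The "Z" variant uses tau5 = |B0 - B2| to retain 5th-order accuracy at
// smooth extrema. Quick check: with B0 = B1 = B2, the ratio a0 : a1 : a2
// equals d0 : d1 : d2 exactly in both variants.
//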
+/*****************/ +__global__ void Compute_dF( + const REAL * __restrict__ u, + REAL * __restrict__ Lu, + const unsigned int pitch, + const unsigned int nx, + const unsigned int ny, + const unsigned int nz, + const REAL dx) +{ + // Temporary variables + REAL fu, fu_old; + REAL f1mm, f1m, f1, f1p, f1pp; + REAL g1mm, g1m, g1, g1p, g1pp; + + // Indexes + unsigned int i, j, k, o; + + // local threads indexes + j = blockDim.x * blockIdx.x + threadIdx.x; + k = blockDim.y * blockIdx.y + threadIdx.y; + + // Compute only for internal nodes + if (j>2 && j2 && k2 && i2 && k2 && i2 && j2 && i2 && j2 && i2 && j2 && i2 && j2 && k>>(u,Lu,pitch,nx,ny,nz,dx); +} + +extern "C" void Call_Adv_y(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream, + unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL dy, REAL *u, REAL *Lu) +{ + Compute_dG<<>>(u,Lu,pitch,nx,ny,nz,dy); +} + +extern "C" void Call_Adv_z(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream, + unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL dz, REAL *u, REAL *Lu) +{ + Compute_dH<<>>(u,Lu,pitch,nx,ny,nz,dz); +} + +extern "C" void Call_Diff_(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream, + unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, REAL *u, REAL *Lu) +{ + // Compute_Laplace<<>>(u,Lu,pitch,nx,ny,nz); + Compute_Laplace_Async<<>>(u,Lu,pitch,nx,ny,nz,3,nz-2,LOOP); +} + +extern "C" void Call_sspRK(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream, + unsigned int pitch, unsigned int nx, unsigned int ny, unsigned int nz, unsigned int step, REAL dt, REAL *u, REAL *uo, REAL *Lu) +{ + Compute_RK<<>>(u,uo,Lu,step,pitch,nx,ny,nz,dt); +} diff --git a/cuda_code/kmeans_labels.cu b/cuda_code/kmeans_labels.cu new file mode 100644 index 0000000000000000000000000000000000000000..1fbc121a053c88070078390ded59066d7fe5282a --- /dev/null +++ b/cuda_code/kmeans_labels.cu @@ -0,0 +1,350 @@ +/*! + * Modifications Copyright 2017 H2O.ai, Inc. + */ +// original code from https://github.com/NVIDIA/kmeans (Apache V2.0 License) +#include "kmeans_labels.h" +#include +#include +#include +#include "kmeans_general.h" + +cudaStream_t cuda_stream[MAX_NGPUS]; + +namespace kmeans { +namespace detail { + +template +struct absolute_value { + __host__ __device__ + void operator()(T &x) const { + x = (x > 0 ? 
x : -x); + } +}; + +cublasHandle_t cublas_handle[MAX_NGPUS]; + +void labels_init() { + cublasStatus_t stat; + cudaError_t err; + int dev_num; + safe_cuda(cudaGetDevice(&dev_num)); + stat = cublasCreate(&detail::cublas_handle[dev_num]); + if (stat != CUBLAS_STATUS_SUCCESS) { + std::cout << "CUBLAS initialization failed" << std::endl; + exit(1); + } + err = safe_cuda(cudaStreamCreate(&cuda_stream[dev_num])); + if (err != cudaSuccess) { + std::cout << "Stream creation failed" << std::endl; + + } + cublasSetStream(cublas_handle[dev_num], cuda_stream[dev_num]); + mycub::cub_init(dev_num); +} + +void labels_close() { + int dev_num; + safe_cuda(cudaGetDevice(&dev_num)); + safe_cublas(cublasDestroy(cublas_handle[dev_num])); + safe_cuda(cudaStreamDestroy(cuda_stream[dev_num])); + mycub::cub_close(dev_num); +} + +void streamsync(int dev_num) { + cudaStreamSynchronize(cuda_stream[dev_num]); +} + +/** + * Matrix multiplication: alpha * A^T * B + beta * C + * Optimized for tall and skinny matrices + * + * @tparam float_t + * @param A + * @param B + * @param C + * @param alpha + * @param beta + * @param n + * @param d + * @param k + * @param max_block_rows + * @return + */ +template +__global__ void matmul(const float_t *A, const float_t *B, float_t *C, + const float_t alpha, const float_t beta, int n, int d, int k, int max_block_rows) { + extern __shared__ __align__(sizeof(float_t)) unsigned char my_smem[]; + float_t *shared = reinterpret_cast(my_smem); + + float_t *s_A = shared; + float_t *s_B = shared + max_block_rows * d; + + for (int i = threadIdx.x; i < d * k; i += blockDim.x) { + s_B[i] = B[i]; + } + + size_t block_start_row_index = blockIdx.x * max_block_rows; + size_t block_rows = max_block_rows; + + if (blockIdx.x == gridDim.x - 1 && n % max_block_rows != 0) { + block_rows = n % max_block_rows; + } + + for (size_t i = threadIdx.x; i < d * block_rows; i += blockDim.x) { + s_A[i] = alpha * A[d * block_start_row_index + i]; + } + + __syncthreads(); + + float_t elem_c = 0; + + int col_c = threadIdx.x % k; + size_t abs_row_c = block_start_row_index + threadIdx.x / k; + int row_c = threadIdx.x / k; + + // Thread/Block combination either too far for data array + // Or is calculating for index that should be calculated in a different blocks - in some edge cases + // "col_c * n + abs_row_c" can yield same result in different thread/block combinations + if (abs_row_c >= n || threadIdx.x >= block_rows * k) { + return; + } + + for (size_t i = 0; i < d; i++) { + elem_c += s_B[d * col_c + i] * s_A[d * row_c + i]; + } + + C[col_c * n + abs_row_c] = beta * C[col_c * n + abs_row_c] + elem_c; + +} + +template<> +void calculate_distances(int verbose, int q, size_t n, int d, int k, + thrust::device_vector &data, + size_t data_offset, + thrust::device_vector ¢roids, + thrust::device_vector &data_dots, + thrust::device_vector ¢roid_dots, + thrust::device_vector &pairwise_distances) { + detail::make_self_dots(k, d, centroids, centroid_dots); + detail::make_all_dots(n, k, data_offset, data_dots, centroid_dots, pairwise_distances); + + //||x-y||^2 = ||x||^2 + ||y||^2 - 2 x . 
y + //pairwise_distances has ||x||^2 + ||y||^2, so beta = 1 + //The dgemm calculates x.y for all x and y, so alpha = -2.0 + double alpha = -2.0; + double beta = 1.0; + //If the data were in standard column major order, we'd do a + //centroids * data ^ T + //But the data is in row major order, so we have to permute + //the arguments a little + int dev_num; + safe_cuda(cudaGetDevice(&dev_num)); + + bool do_cublas = true; + if (k <= 16 && d <= 64) { + const int BLOCK_SIZE_MUL = 128; + int block_rows = std::min((size_t)BLOCK_SIZE_MUL / k, n); + int grid_size = std::ceil(static_cast(n) / block_rows); + + int shared_size_B = d * k * sizeof(double); + size_t shared_size_A = block_rows * d * sizeof(double); + if(shared_size_B + shared_size_A < (1 << 15)){ + + matmul << < grid_size, BLOCK_SIZE_MUL, shared_size_B + shared_size_A >> > ( + thrust::raw_pointer_cast(data.data() + data_offset * d), + thrust::raw_pointer_cast(centroids.data()), + thrust::raw_pointer_cast(pairwise_distances.data()), + alpha, beta, n, d, k, block_rows + ); + do_cublas = false; + } + } + + if(do_cublas){ + cublasStatus_t stat = safe_cublas(cublasDgemm(detail::cublas_handle[dev_num], + CUBLAS_OP_T, CUBLAS_OP_N, + n, k, d, &alpha, + thrust::raw_pointer_cast(data.data() + data_offset * d), + d,//Has to be n or d + thrust::raw_pointer_cast(centroids.data()), + d,//Has to be k or d + &beta, + thrust::raw_pointer_cast(pairwise_distances.data()), + n)); //Has to be n or k + + if (stat != CUBLAS_STATUS_SUCCESS) { + std::cout << "Invalid Dgemm" << std::endl; + exit(1); + } + } + + thrust::for_each(pairwise_distances.begin(), + pairwise_distances.end(), + absolute_value()); // in-place transformation to ensure all distances are positive indefinite + + #if(CHECK) + gpuErrchk(cudaGetLastError()); + #endif +} + +template<> +void calculate_distances(int verbose, int q, size_t n, int d, int k, + thrust::device_vector &data, + size_t data_offset, + thrust::device_vector ¢roids, + thrust::device_vector &data_dots, + thrust::device_vector ¢roid_dots, + thrust::device_vector &pairwise_distances) { + detail::make_self_dots(k, d, centroids, centroid_dots); + detail::make_all_dots(n, k, data_offset, data_dots, centroid_dots, pairwise_distances); + + //||x-y||^2 = ||x||^2 + ||y||^2 - 2 x . 
y + //pairwise_distances has ||x||^2 + ||y||^2, so beta = 1 + //The dgemm calculates x.y for all x and y, so alpha = -2.0 + float alpha = -2.0; + float beta = 1.0; + //If the data were in standard column major order, we'd do a + //centroids * data ^ T + //But the data is in row major order, so we have to permute + //the arguments a little + int dev_num; + safe_cuda(cudaGetDevice(&dev_num)); + + if (k <= 16 && d <= 64) { + const int BLOCK_SIZE_MUL = 128; + int block_rows = std::min((size_t)BLOCK_SIZE_MUL / k, n); + int grid_size = std::ceil(static_cast(n) / block_rows); + + int shared_size_B = d * k * sizeof(float); + int shared_size_A = block_rows * d * sizeof(float); + + matmul << < grid_size, BLOCK_SIZE_MUL, shared_size_B + shared_size_A >> > ( + thrust::raw_pointer_cast(data.data() + data_offset * d), + thrust::raw_pointer_cast(centroids.data()), + thrust::raw_pointer_cast(pairwise_distances.data()), + alpha, beta, n, d, k, block_rows + ); + } else { + cublasStatus_t stat = safe_cublas(cublasSgemm(detail::cublas_handle[dev_num], + CUBLAS_OP_T, CUBLAS_OP_N, + n, k, d, &alpha, + thrust::raw_pointer_cast(data.data() + data_offset * d), + d,//Has to be n or d + thrust::raw_pointer_cast(centroids.data()), + d,//Has to be k or d + &beta, + thrust::raw_pointer_cast(pairwise_distances.data()), + n)); //Has to be n or k + + if (stat != CUBLAS_STATUS_SUCCESS) { + std::cout << "Invalid Sgemm" << std::endl; + exit(1); + } + } + + thrust::for_each(pairwise_distances.begin(), + pairwise_distances.end(), + absolute_value()); // in-place transformation to ensure all distances are positive indefinite + + #if(CHECK) + gpuErrchk(cudaGetLastError()); + #endif +} + +} +} + +namespace mycub { + +void *d_key_alt_buf[MAX_NGPUS]; +unsigned int key_alt_buf_bytes[MAX_NGPUS]; +void *d_value_alt_buf[MAX_NGPUS]; +unsigned int value_alt_buf_bytes[MAX_NGPUS]; +void *d_temp_storage[MAX_NGPUS]; +size_t temp_storage_bytes[MAX_NGPUS]; +void *d_temp_storage2[MAX_NGPUS]; +size_t temp_storage_bytes2[MAX_NGPUS]; +bool cub_initted; +void cub_init() { + // std::cout <<"CUB init" << std::endl; + for (int q = 0; q < MAX_NGPUS; q++) { + d_key_alt_buf[q] = NULL; + key_alt_buf_bytes[q] = 0; + d_value_alt_buf[q] = NULL; + value_alt_buf_bytes[q] = 0; + d_temp_storage[q] = NULL; + temp_storage_bytes[q] = 0; + d_temp_storage2[q] = NULL; + temp_storage_bytes2[q] = 0; + } + cub_initted = true; +} + +void cub_init(int dev) { + d_key_alt_buf[dev] = NULL; + key_alt_buf_bytes[dev] = 0; + d_value_alt_buf[dev] = NULL; + value_alt_buf_bytes[dev] = 0; + d_temp_storage[dev] = NULL; + temp_storage_bytes[dev] = 0; + d_temp_storage2[dev] = NULL; + temp_storage_bytes2[dev] = 0; +} + +void cub_close() { + for (int q = 0; q < MAX_NGPUS; q++) { + if (d_key_alt_buf[q]) safe_cuda(cudaFree(d_key_alt_buf[q])); + if (d_value_alt_buf[q]) safe_cuda(cudaFree(d_value_alt_buf[q])); + if (d_temp_storage[q]) safe_cuda(cudaFree(d_temp_storage[q])); + if (d_temp_storage2[q]) safe_cuda(cudaFree(d_temp_storage2[q])); + d_temp_storage[q] = NULL; + d_temp_storage2[q] = NULL; + } + cub_initted = false; +} + +void cub_close(int dev) { + if (d_key_alt_buf[dev]) safe_cuda(cudaFree(d_key_alt_buf[dev])); + if (d_value_alt_buf[dev]) safe_cuda(cudaFree(d_value_alt_buf[dev])); + if (d_temp_storage[dev]) safe_cuda(cudaFree(d_temp_storage[dev])); + if (d_temp_storage2[dev]) safe_cuda(cudaFree(d_temp_storage2[dev])); + d_temp_storage[dev] = NULL; + d_temp_storage2[dev] = NULL; +} + +void sort_by_key_int(thrust::device_vector &keys, thrust::device_vector &values) { + int dev_num; + 
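  // Keys and values are radix-sorted with CUB on this device's stream,
  // using per-device double buffers and temp storage that are (re)allocated
  // below when too small and cached until cub_close() releases them.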
safe_cuda(cudaGetDevice(&dev_num)); + cudaStream_t this_stream = cuda_stream[dev_num]; + int SIZE = keys.size(); + //int *d_key_alt_buf, *d_value_alt_buf; + if (key_alt_buf_bytes[dev_num] < sizeof(int) * SIZE) { + if (d_key_alt_buf[dev_num]) safe_cuda(cudaFree(d_key_alt_buf[dev_num])); + safe_cuda(cudaMalloc(&d_key_alt_buf[dev_num], sizeof(int) * SIZE)); + key_alt_buf_bytes[dev_num] = sizeof(int) * SIZE; + } + if (value_alt_buf_bytes[dev_num] < sizeof(int) * SIZE) { + if (d_value_alt_buf[dev_num]) safe_cuda(cudaFree(d_value_alt_buf[dev_num])); + safe_cuda(cudaMalloc(&d_value_alt_buf[dev_num], sizeof(int) * SIZE)); + value_alt_buf_bytes[dev_num] = sizeof(int) * SIZE; + } + cub::DoubleBuffer d_keys(thrust::raw_pointer_cast(keys.data()), (int *) d_key_alt_buf[dev_num]); + cub::DoubleBuffer d_values(thrust::raw_pointer_cast(values.data()), (int *) d_value_alt_buf[dev_num]); + + // Determine temporary device storage requirements for sorting operation + if (!d_temp_storage[dev_num]) { + cub::DeviceRadixSort::SortPairs(d_temp_storage[dev_num], temp_storage_bytes[dev_num], d_keys, + d_values, SIZE, 0, sizeof(int) * 8, this_stream); + // Allocate temporary storage for sorting operation + safe_cuda(cudaMalloc(&d_temp_storage[dev_num], temp_storage_bytes[dev_num])); + } + // Run sorting operation + cub::DeviceRadixSort::SortPairs(d_temp_storage[dev_num], temp_storage_bytes[dev_num], + d_keys, d_values, SIZE, 0, sizeof(int) * 8, this_stream); + // Sorted keys and values are referenced by d_keys.Current() and d_values.Current() + + keys.data() = thrust::device_pointer_cast(d_keys.Current()); + values.data() = thrust::device_pointer_cast(d_values.Current()); +} + +} \ No newline at end of file diff --git a/cuda_code/knn_classify_4.cu b/cuda_code/knn_classify_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..97e9601c53ea86a9f1a38a0b9ff8c5292f3965e9 --- /dev/null +++ b/cuda_code/knn_classify_4.cu @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2020-2022, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "knn_test_helper.cuh" +#include + +namespace ML { +namespace KNN { +namespace opg { + +template <> +void generate_partitions(float* data, + int* lbls_ptr, + size_t n_rows, + int n_cols, + int n_clusters, + int my_rank, + cudaStream_t stream) +{ + Random::make_blobs(data, + lbls_ptr, + (int)n_rows, + (int)n_cols, + n_clusters, + allocator, + stream, + true, + nullptr, + nullptr, + 1.0, + -10.0, + 10.0, + my_rank); +} + +class KNNClassifyTest : public ::testing::TestWithParam { + public: + bool runTest(const KNNParams& params) + { + KNNTestHelper knn_th; + knn_th.generate_data(params); + + std::vector n_unique; + for (int i = 0; i < params.n_outputs; i++) { + n_unique.push_back(params.n_classes); + } + + std::vector uniq_labels(params.n_outputs); + for (int i = 0; i < params.n_outputs; i++) { + int nu = n_unique[i]; + std::vector ul_h(nu); + for (int j = 0; j < nu; j++) { + ul_h[j] = j; + } + uniq_labels[i] = (int*)knn_th.allocator.get()->allocate(nu * sizeof(int), knn_th.stream); + raft::update_device(uniq_labels[i], ul_h.data(), ul_h.size(), knn_th.stream); + } + + /** + * Execute knn_classify() + */ + knn_classify(knn_th.handle, + &(knn_th.out_parts), + &(knn_th.out_i_parts), + &(knn_th.out_d_parts), + nullptr, + knn_th.index_parts, + *(knn_th.idx_desc), + knn_th.query_parts, + *(knn_th.query_desc), + knn_th.y, + uniq_labels, + n_unique, + false, + false, + false, + params.k, + params.batch_size, + true); + + knn_th.display_results(); + knn_th.release_ressources(params); + + int actual = 1; + int expected = 1; + return raft::CompareApprox(1)(actual, expected); + } +}; + +const std::vector inputs = {{5, 1, 8, 50, 3, 2, 2, 12}}; + +typedef KNNClassifyTest KNNClTest; + +TEST_P(KNNClTest, Result) { ASSERT_TRUE(runTest(GetParam())); } + +INSTANTIATE_TEST_CASE_P(KNNClassifyTest, KNNClTest, ::testing::ValuesIn(inputs)); + +} // namespace opg +} // namespace KNN +} // namespace ML diff --git a/cuda_code/knn_classify_9.cu b/cuda_code/knn_classify_9.cu new file mode 100644 index 0000000000000000000000000000000000000000..d21d57a9bd697a1bb4accbbe8a5f0144a8c50dc5 --- /dev/null +++ b/cuda_code/knn_classify_9.cu @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2019-2022, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "test_utils.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace MLCommon { +namespace Selection { + +struct KNNClassifyInputs { + int rows; + int cols; + int n_labels; + float cluster_std; + int k; +}; + +class KNNClassifyTest : public ::testing::TestWithParam { + protected: + void basicTest() + { + raft::handle_t handle; + cudaStream_t stream = handle.get_stream(); + + params = ::testing::TestWithParam::GetParam(); + + raft::allocate(train_samples, params.rows * params.cols, stream); + raft::allocate(train_labels, params.rows, stream); + + raft::allocate(pred_labels, params.rows, stream); + + raft::allocate(knn_indices, params.rows * params.k, stream); + raft::allocate(knn_dists, params.rows * params.k, stream); + + MLCommon::Random::make_blobs(train_samples, + train_labels, + params.rows, + params.cols, + params.n_labels, + stream, + true, + nullptr, + nullptr, + params.cluster_std); + + rmm::device_uvector unique_labels(0, stream); + auto n_classes = raft::label::getUniquelabels(unique_labels, train_labels, params.rows, stream); + + std::vector ptrs(1); + std::vector sizes(1); + ptrs[0] = train_samples; + sizes[0] = params.rows; + + raft::spatial::knn::brute_force_knn(handle, + ptrs, + sizes, + params.cols, + train_samples, + params.rows, + knn_indices, + knn_dists, + params.k); + + std::vector y; + y.push_back(train_labels); + + std::vector uniq_labels; + uniq_labels.push_back(unique_labels.data()); + + std::vector n_unique; + n_unique.push_back(n_classes); + + knn_classify(handle, + pred_labels, + knn_indices, + y, + params.rows, + params.rows, + params.k, + uniq_labels, + n_unique); + + CUDA_CHECK(cudaStreamSynchronize(stream)); + } + + void SetUp() override { basicTest(); } + + void TearDown() override + { + CUDA_CHECK(cudaFree(train_samples)); + CUDA_CHECK(cudaFree(train_labels)); + + CUDA_CHECK(cudaFree(pred_labels)); + + CUDA_CHECK(cudaFree(knn_indices)); + CUDA_CHECK(cudaFree(knn_dists)); + } + + protected: + KNNClassifyInputs params; + + float* train_samples; + int* train_labels; + + int* pred_labels; + + int64_t* knn_indices; + float* knn_dists; +}; + +typedef KNNClassifyTest KNNClassifyTestF; +TEST_P(KNNClassifyTestF, Fit) +{ + ASSERT_TRUE(devArrMatch(train_labels, pred_labels, params.rows, raft::Compare())); +} + +const std::vector inputsf = {{100, 10, 2, 0.01f, 2}, + {1000, 10, 5, 0.01f, 2}, + {10000, 10, 5, 0.01f, 2}, + {100, 10, 2, 0.01f, 10}, + {1000, 10, 5, 0.01f, 10}, + {10000, 10, 5, 0.01f, 10}, + {100, 10, 2, 0.01f, 50}, + {1000, 10, 5, 0.01f, 50}, + {10000, 10, 5, 0.01f, 50}}; + +INSTANTIATE_TEST_CASE_P(KNNClassifyTest, KNNClassifyTestF, ::testing::ValuesIn(inputsf)); + +}; // end namespace Selection +}; // namespace MLCommon diff --git a/cuda_code/kpca.cu b/cuda_code/kpca.cu new file mode 100644 index 0000000000000000000000000000000000000000..082e5d400abfc7de922790cfbb18e0101551a344 --- /dev/null +++ b/cuda_code/kpca.cu @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018-2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "kpca.cuh" +namespace ML { + +void kpcaFit(raft::handle_t &handle, float *input, float *alphas, + float *lambdas, const ML::paramsKPCA &prms) { + kpcaFit(handle, input, alphas, lambdas, prms, handle.get_stream()); +} + +void kpcaFit(raft::handle_t &handle, double *input, double *alphas, + double *lambdas, const ML::paramsKPCA &prms) { + kpcaFit(handle, input, alphas, lambdas, prms, handle.get_stream()); +} + +void kpcaTransform(raft::handle_t &handle, float *input, float *alphas, float *lambdas, + float *trans_input, const ML::paramsKPCA &prms) { + kpcaTransform(handle, input, alphas, lambdas, trans_input, prms, handle.get_stream()); +} + +void kpcaTransform(raft::handle_t &handle, double *input, double *alphas, double *lambdas, + double *trans_input, const ML::paramsKPCA &prms) { + kpcaTransform(handle, input, alphas, lambdas, trans_input, prms, handle.get_stream()); +} + +} \ No newline at end of file diff --git a/cuda_code/kthvalue_op_1.cu b/cuda_code/kthvalue_op_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..c6c62a763aa066ff369ade68a773a1e8ff9e1445 --- /dev/null +++ b/cuda_code/kthvalue_op_1.cu @@ -0,0 +1,279 @@ +// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/kthvalue_op.h" +#include "paddle/fluid/operators/top_k_function_cuda.h" +#include "paddle/fluid/operators/top_k_v2_op.h" +#ifdef __NVCC__ +#include "cub/cub.cuh" +#endif +#ifdef __HIPCC__ +#include +#endif + +namespace paddle { +namespace operators { + +int getBlockSize(int col) { + if (col > 512) + return 1024; + else if (col > 256 && col <= 512) + return 512; + else if (col > 128 && col <= 256) + return 256; + else if (col > 64 && col <= 128) + return 128; + else + return 64; +} + +template +bool SortKthvalue(const platform::CUDADeviceContext& ctx, + const framework::Tensor* input_tensor, const int64_t num_cols, + const int64_t num_rows, const int k, + framework::Tensor* out_tensor, + framework::Tensor* indices_tensor) { + auto cu_stream = ctx.stream(); + framework::Tensor input_indices; + const std::vector dims = {num_rows, num_cols}; + auto dim = framework::make_ddim(dims); + input_indices.Resize(dim); + input_indices.mutable_data(ctx.GetPlace()); + size_t temp_storage_bytes = -1; + int block_size = getBlockSize(num_cols); + unsigned int maxGridDimX = ctx.GetCUDAMaxGridDimSize().x; + unsigned int grid_size = num_rows < maxGridDimX + ? 
static_cast(num_rows) + : maxGridDimX; + InitIndex<<>>( + input_indices.data(), num_rows, num_cols); + cub::CountingInputIterator counting_iter(0); + cub::TransformInputIterator> + segment_offsets_t(counting_iter, SegmentOffsetIter(num_cols)); + T* sorted_values_ptr; + int64_t* sorted_indices_ptr; + framework::Tensor temp_values, temp_indices; + const T* input = input_tensor->data(); + T* values = out_tensor->data(); + int64_t* indices = indices_tensor->mutable_data(ctx.GetPlace()); + temp_values.Resize(dim); + temp_indices.Resize(dim); + sorted_values_ptr = temp_values.mutable_data(ctx.GetPlace()); + sorted_indices_ptr = temp_indices.mutable_data(ctx.GetPlace()); + auto err = cub::DeviceSegmentedRadixSort::SortPairs( + nullptr, temp_storage_bytes, input, sorted_values_ptr, + input_indices.data(), sorted_indices_ptr, num_cols * num_rows, + num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8, + cu_stream); +#ifdef __HIPCC__ + if (err != hipSuccess) { + LOG(ERROR) << "KthvalueOP failed as could not launch " + "hipcub::DeviceSegmentedRadixSort::SortPairs, status: " + << hipGetErrorString(err); + return false; + } +#else + if (err != cudaSuccess) { + LOG(ERROR) << "KthvalueOP failed as could not launch " + "cub::DeviceSegmentedRadixSort::SortPairs, status: " + << cudaGetErrorString(err); + return false; + } +#endif + framework::Tensor temp_storage; + temp_storage.mutable_data(ctx.GetPlace(), temp_storage_bytes); + + err = cub::DeviceSegmentedRadixSort::SortPairs( + temp_storage.data(), temp_storage_bytes, input, + sorted_values_ptr, input_indices.data(), sorted_indices_ptr, + num_cols * num_rows, num_rows, segment_offsets_t, segment_offsets_t + 1, + 0, sizeof(T) * 8, cu_stream); +#ifdef __HIPCC__ + if (err != hipSuccess) { + LOG(ERROR) << "KthvalueOP failed as could not launch " + "hipcub::DeviceSegmentedRadixSort::SortPairs, " + << temp_storage_bytes << ", status: " << hipGetErrorString(err); + return false; + } +#else + if (err != cudaSuccess) { + LOG(ERROR) << "KthvalueOP failed as could not launch " + "cub::DeviceSegmentedRadixSort::SortPairs, " + << temp_storage_bytes << ", status: " << cudaGetErrorString(err); + return false; + } +#endif + auto& dev = *ctx.eigen_device(); + const Eigen::DSizes slice_indices{0, k - 1}; + const Eigen::DSizes slice_sizes{num_rows, 1}; + auto e_indices = framework::EigenMatrix::From(*indices_tensor, dim); + auto e_tmp_indices = framework::EigenMatrix::From( + static_cast(temp_indices)); + std::vector odims = {static_cast(num_rows), static_cast(1)}; + dim = framework::make_ddim(odims); + auto e_values = framework::EigenMatrix::From(*out_tensor, dim); + auto e_tmp_values = framework::EigenMatrix::From( + static_cast(temp_values)); + + EigenSlice, int64_t, 2>::Eval( + dev, e_indices, e_tmp_indices, slice_indices, slice_sizes); + EigenSlice, T, 2>::Eval( + dev, e_values, e_tmp_values, slice_indices, slice_sizes); + return true; +} + +template +class KthvalueOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE_EQ( + platform::is_gpu_place(ctx.GetPlace()), true, + platform::errors::InvalidArgument( + "It must use CUDAPlace, you must check your device set.")); + auto* input = ctx.Input("X"); + auto* output = ctx.Output("Out"); + auto* indices = ctx.Output("Indices"); + int k = static_cast(ctx.Attr("k")); + int axis = static_cast(ctx.Attr("axis")); + bool keepdim = static_cast(ctx.Attr("keepdim")); + const auto& in_dims = input->dims(); + if (axis < 0) axis += 
in_dims.size(); + auto out_dims = output->dims(); + const T* input_data = input->data(); + T* output_data = output->mutable_data(ctx.GetPlace()); + int64_t* indices_data = indices->mutable_data(ctx.GetPlace()); + + if (axis == in_dims.size() - 1) { + const int64_t& input_height = framework::product( + framework::slice_ddim(in_dims, 0, in_dims.size() - 1)); + const int64_t& input_width = in_dims[in_dims.size() - 1]; + const auto& dev_ctx = ctx.cuda_device_context(); + PADDLE_ENFORCE_EQ(SortKthvalue(dev_ctx, input, input_width, + input_height, k, output, indices), + true, platform::errors::External( + "KthvalueOP: Error when use cub sorting")); + return; + } else { + std::vector trans; + for (int i = 0; i < axis; i++) { + trans.emplace_back(i); + } + trans.emplace_back(in_dims.size() - 1); + for (int i = axis + 1; i < in_dims.size() - 1; i++) { + trans.emplace_back(i); + } + trans.emplace_back(axis); + if (!keepdim) { + std::vector tmp_out_shape; + for (int i = 0; i < axis; i++) { + tmp_out_shape.emplace_back(in_dims[i]); + } + tmp_out_shape.emplace_back(1); + for (int i = axis + 1; i < in_dims.size(); i++) { + tmp_out_shape.emplace_back(in_dims[i]); + } + framework::DDim tmp_out_dims = framework::make_ddim(tmp_out_shape); + output->Resize(tmp_out_dims); + indices->Resize(tmp_out_dims); + } + framework::DDim trans_dims(in_dims); + framework::DDim trans_out_dims(in_dims); + for (int i = 0; i < trans.size(); i++) { + trans_dims[i] = in_dims[trans[i]]; + trans_out_dims[i] = in_dims[trans[i]]; + } + trans_out_dims[in_dims.size() - 1] = 1; + framework::Tensor trans_input; + trans_input.mutable_data(trans_dims, ctx.GetPlace()); + int ndims = trans.size(); + const auto& dev_ctx = ctx.cuda_device_context(); + TransCompute(ndims, dev_ctx, *input, + &trans_input, trans); + framework::Tensor trans_ind, trans_out; + trans_ind.mutable_data(trans_out_dims, ctx.GetPlace()); + trans_out.mutable_data(trans_out_dims, ctx.GetPlace()); + const int64_t input_height = framework::product( + framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); + const int64_t input_width = trans_dims[trans_dims.size() - 1]; + PADDLE_ENFORCE_EQ( + SortKthvalue(dev_ctx, &trans_input, input_width, input_height, k, + &trans_out, &trans_ind), + true, + platform::errors::External("KthvalueOP: Error when use cub sorting")); + TransCompute( + ndims, dev_ctx, trans_ind, indices, trans); + TransCompute(ndims, dev_ctx, trans_out, + output, trans); + if (!keepdim) { + output->Resize(out_dims); + indices->Resize(out_dims); + } + } + } +}; + +template +class KthvalueOpGradCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + PADDLE_ENFORCE_EQ( + platform::is_gpu_place(context.GetPlace()), true, + platform::errors::InvalidArgument( + "It must use CUDAPlace, you must check your device set.")); + auto* x = context.Input("X"); + auto* out_grad = + context.Input(framework::GradVarName("Out")); + auto* indices = context.Input("Indices"); + auto* x_grad = + context.Output(framework::GradVarName("X")); + int axis = context.Attr("axis"); + int k = static_cast(context.Attr("k")); + const auto& in_dims = x->dims(); + auto out_dims = indices->dims(); + if (axis < 0) axis += in_dims.size(); + T* x_grad_data = x_grad->mutable_data(context.GetPlace()); + const T* out_grad_data = out_grad->data(); + const int64_t* indices_data = indices->data(); + int pre, n, post; + GetDims(in_dims, axis, &pre, &n, &post); + auto& dev_ctx = context.cuda_device_context(); + int block_size = 
getBlockSize(post * k); + int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); + const int max_blocks = std::max(((max_threads - 1) / block_size + 1), 1); + int grid_size = std::min(max_blocks, pre); + AssignGradWithAxis<<>>( + out_grad_data, indices_data, x_grad_data, pre, post, n, 1); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + kthvalue, + ops::KthvalueOpCUDAKernel, + ops::KthvalueOpCUDAKernel, + ops::KthvalueOpCUDAKernel, + ops::KthvalueOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL( + kthvalue_grad, + ops::KthvalueOpGradCUDAKernel, + ops::KthvalueOpGradCUDAKernel, + ops::KthvalueOpGradCUDAKernel, + ops::KthvalueOpGradCUDAKernel); diff --git a/cuda_code/kvstore_utils_3.cu b/cuda_code/kvstore_utils_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..438fe29dac4ed483390765fd8dadede9997099bc --- /dev/null +++ b/cuda_code/kvstore_utils_3.cu @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2017 by Contributors + * \file kvstore_utils.cu + * \brief gpu implementation of util functions + */ +#if defined(_MSC_VER) && __CUDACC_VER_MAJOR__ == 8 && __CUDACC_VER_BUILD__ != 44 +// Many CUDA 8 compilers other than V8.0.44 crash on Windows +#pragma warning("Potential crash on CUDA compiler detected. Switching sorting from CUB to Thrust") +#define SORT_WITH_THRUST +#include +#include +#include +#else +#undef SORT_WITH_THRUST +#endif +#include "./kvstore_utils.h" +#include +#include +#include "../common/utils.h" + +namespace mxnet { +namespace kvstore { + +template +size_t UniqueImplGPU(const Resource& rsc, mshadow::Stream *s, + IType *dptr, const size_t size) { + // estimate unique temp space. 
The first byte is reserved to store the number + // of unique values selected + const size_t num_selected_bytes = sizeof(size_t); + size_t unique_temp_bytes = 0; + size_t *null_ptr = nullptr; + size_t *null_dptr = nullptr; + cudaStream_t stream = mshadow::Stream::GetStream(s); + cub::DeviceSelect::Unique(NULL, unique_temp_bytes, null_dptr, null_dptr, + null_ptr, size, stream); + // estimate sort temp space + const size_t sort_output_bytes = size * sizeof(IType); + size_t sort_temp_bytes = 0; +#ifndef SORT_WITH_THRUST + // The least-significant bit index (inclusive) needed for key comparison + const int begin_bit = 0; + // The most-significant bit index (exclusive) needed for key comparison + const int end_bit = sizeof(IType) * 8; + cub::DeviceRadixSort::SortKeys(NULL, sort_temp_bytes, null_dptr, null_dptr, + size, begin_bit, end_bit, stream); +#else + // sort_temp_bytes remains 0 because thrust request memory by itself +#endif + // request temp storage + const size_t total_workspace = num_selected_bytes + sort_output_bytes + + std::max(sort_temp_bytes, unique_temp_bytes); + mshadow::Tensor workspace = rsc + .get_space_typed(mshadow::Shape1(total_workspace), s); + // temp space layout: num_selected_ptr, sort_output_bytes, unique/sort_temp_storage + size_t* num_selected_ptr = reinterpret_cast(workspace.dptr_); + IType* sort_output_ptr = reinterpret_cast(workspace.dptr_ + num_selected_bytes); + void *temp_storage = static_cast(workspace.dptr_ + + num_selected_bytes + sort_output_bytes); + // execute the sort kernel +#ifndef SORT_WITH_THRUST + cub::DeviceRadixSort::SortKeys(temp_storage, sort_temp_bytes, dptr, sort_output_ptr, + size, begin_bit, end_bit, stream); +#else + thrust::sort(thrust::cuda::par.on(stream), + dptr, dptr + size, thrust::greater()); + CUDA_CALL(cudaMemcpy(sort_output_ptr, dptr, sort_output_bytes, + cudaMemcpyDeviceToDevice)); +#endif + // execute unique kernel + cub::DeviceSelect::Unique(temp_storage, unique_temp_bytes, sort_output_ptr, dptr, + num_selected_ptr, size, stream); + // retrieve num selected unique values + size_t num_selected_out = 0; + CUDA_CALL(cudaMemcpy(&num_selected_out, num_selected_ptr, num_selected_bytes, + cudaMemcpyDeviceToHost)); + return num_selected_out; +} + +template<> +void UniqueImpl(const Resource& rsc, mshadow::Stream *s, + const NDArray &out) { + const size_t num_elements = out.shape().Size(); + CHECK_EQ(out.storage_type(), kRowSparseStorage) << "row_sparse NDArray is expected"; + MSHADOW_IDX_TYPE_SWITCH(out.dtype(), IType, { + IType *dptr = out.data().dptr(); + size_t num_selected_out = UniqueImplGPU(rsc, s, dptr, num_elements); + // set the shape of data/aux_data according to the number of unique values + out.set_aux_shape(rowsparse::kIdx, mshadow::Shape1(num_selected_out)); + }); +} + +} // namespace kvstore +} // namespace mxnet diff --git a/cuda_code/l2_expanded_float_float_float_uint32_1.cu b/cuda_code/l2_expanded_float_float_float_uint32_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..3e84786db5a0345bce7bbfb7ea4ca35fc181ee2a --- /dev/null +++ b/cuda_code/l2_expanded_float_float_float_uint32_1.cu @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +namespace raft { +namespace distance { +namespace detail { +template void +distance( + const float* x, + const float* y, + float* dist, + std::uint32_t m, + std::uint32_t n, + std::uint32_t k, + void* workspace, + std::size_t worksize, + cudaStream_t stream, + bool isRowMajor, + float metric_arg); + +} // namespace detail +} // namespace distance +} // namespace raft diff --git a/cuda_code/l2_sqrt_expanded_float_float_float_uint32.cu b/cuda_code/l2_sqrt_expanded_float_float_float_uint32.cu new file mode 100644 index 0000000000000000000000000000000000000000..e85058e34f19487ee54ff8276188a317525bf7c7 --- /dev/null +++ b/cuda_code/l2_sqrt_expanded_float_float_float_uint32.cu @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2021-2022, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +namespace raft { +namespace distance { +namespace detail { + +template void +distance( + const float* x, + const float* y, + float* dist, + std::uint32_t m, + std::uint32_t n, + std::uint32_t k, + void* workspace, + std::size_t worksize, + cudaStream_t stream, + bool isRowMajor, + float metric_arg); + +} // namespace detail +} // namespace distance +} // namespace raft diff --git a/cuda_code/lab1_3_mu_shared.cu b/cuda_code/lab1_3_mu_shared.cu new file mode 100644 index 0000000000000000000000000000000000000000..f61f4fdd4828d09de3acb55227d7055fff252637 --- /dev/null +++ b/cuda_code/lab1_3_mu_shared.cu @@ -0,0 +1,141 @@ +#include +#include +#include +#include +#include +#include + + +#define BLOCKSIZE 16 + +typedef struct{ + size_t width; + size_t height; + size_t stride; + float * elements; + +}matrix_t; + +__device__ float getElement(const matrix_t * mat, int row, int col){ + return mat->elements[mat->stride * row + col]; +} + +__device__ void setElement(matrix_t * mat, int row, int col, float value){ + mat->elements[mat->stride * row + col] = value; +} + +__device__ matrix_t getSubMatrix(matrix_t mat, int row, int col){ + matrix_t matAns; + matAns.width = BLOCKSIZE; + matAns.height = BLOCKSIZE; + matAns.stride = mat.stride; + + matAns.elements = mat.elements + row * BLOCKSIZE * mat.stride + col * BLOCKSIZE; + + return matAns; +} + +__global__ void matMulKernel(matrix_t ma, matrix_t mb, matrix_t mc){ + float cValue = 0; + + int blockRow = blockIdx.y; + int blockCol = blockIdx.x; + + int row = threadIdx.y; + int col = threadIdx.x; + for (int subIdx = 0; subIdx < ma.width / BLOCKSIZE; ++subIdx){ + __shared__ float s_subMa[BLOCKSIZE][BLOCKSIZE]; + __shared__ float s_subMb[BLOCKSIZE][BLOCKSIZE]; + + matrix_t subMatA = getSubMatrix(ma, blockRow, 
subIdx); + matrix_t subMatB = getSubMatrix(mb, subIdx, blockCol); + + s_subMa[row][col] = getElement(&subMatA, row, col); + s_subMb[row][col] = getElement(&subMatB, row, col); + + __syncthreads(); + + for (int k = 0; k < BLOCKSIZE; ++k) { + cValue += (s_subMa[row][k] * s_subMb[k][col]); + } + + __syncthreads(); + } + + matrix_t subMatC = getSubMatrix(mc, blockRow, blockCol); + setElement(&subMatC, row, col, cValue); +} + +void callMatMulKernel(){ + matrix_t matA; + matA.width = 1024; + matA.height = 2048; + matA.stride = matA.width; + matA.elements = (float *)malloc(matA.width * matA.height * sizeof(float)); + + matrix_t matB; + matB.width = 1024; + matB.height = 1024; + matB.stride = matB.width; + matB.elements = (float *)malloc(matB.width * matB.height * sizeof(float)); + + matrix_t matAns; + matAns.width = matB.width; + matAns.height = matA.height; + matAns.stride = matAns.width; + matAns.elements = (float *)malloc(matAns.width * matAns.height * sizeof(float)); + memset(matAns.elements,0, matAns.width * matAns.height * sizeof(float)); + + for (int i = 0; i < matA.width * matA.height; ++i){ + matA.elements[i] = i * 0.1; + } + for (int i = 0; i < matB.width * matB.height; ++i){ + matB.elements[i] = i * 0.1; + } + + matrix_t d_matA; + d_matA.width = matA.width; + d_matA.height = matA.height; + d_matA.stride = matA.stride; + size_t size = d_matA.width * d_matA.height * sizeof(float); + cudaMalloc(&d_matA.elements, size); + cudaMemcpy(d_matA.elements, matA.elements, size, cudaMemcpyHostToDevice); + + matrix_t d_matB; + d_matB.width = matB.width; + d_matB.height = matB.height; + d_matB.stride = matB.stride; + size = d_matB.width * d_matB.height * sizeof(float); + cudaMalloc(&d_matB.elements, size); + cudaMemcpy(d_matB.elements, matB.elements, size, cudaMemcpyHostToDevice); + + matrix_t d_matC; + d_matC.width = matAns.width; + d_matC.height = matAns.height; + d_matC.stride = matAns.stride; + cudaMalloc(&d_matC.elements,d_matC.width * d_matC.height * sizeof(float)); + + dim3 threadsPerBlock(BLOCKSIZE, BLOCKSIZE); + dim3 blocksPerGrid(matB.width / threadsPerBlock.x, matA.height / threadsPerBlock.y); + matMulKernel<<>>(d_matA, d_matB, d_matC); + + cudaMemcpy(matAns.elements, d_matC.elements,d_matC.width * d_matC.height * sizeof(float),cudaMemcpyDeviceToHost); + + cudaFree(d_matA.elements); + cudaFree(d_matB.elements); + cudaFree(d_matC.elements); + + free(matA.elements); + free(matB.elements); + free(matAns.elements); + return; +} + +int main(){ + struct timeval tv1, tv0; + gettimeofday(&tv0, NULL); + callMatMulKernel(); + gettimeofday(&tv1, NULL); + printf("time: %lf\n", double(tv1.tv_usec - tv0.tv_usec)/1000000 + (double)(tv1.tv_sec - tv0.tv_sec)); + return 0; +} diff --git a/cuda_code/lal_born_coul_long.cu b/cuda_code/lal_born_coul_long.cu new file mode 100644 index 0000000000000000000000000000000000000000..71e5e0ae502e8847bc006788deaf82da9477868d --- /dev/null +++ b/cuda_code/lal_born_coul_long.cu @@ -0,0 +1,273 @@ +// ************************************************************************** +// buck_coul_long.cu +// ------------------- +// Trung Dac Nguyen (ORNL) +// +// Device code for acceleration of the buck/coul/long pair style +// +// __________________________________________________________________________ +// This file is part of the LAMMPS Accelerator Library (LAMMPS_AL) +// __________________________________________________________________________ +// +// begin : +// email : nguyentd@ornl.gov +// ***************************************************************************/ + 
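// Note: when built for the CUDA back end (NV_KERNEL defined) the per-atom
// positions and charges appear to be fetched through the pos_tex / q_tex
// texture references declared below; other back ends fall back to the plain
// x_ and q_ arrays via the #define aliases in the #else branch.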
+#ifdef NV_KERNEL + +#include "lal_aux_fun1.h" +#ifndef _DOUBLE_DOUBLE +texture pos_tex; +texture q_tex; +#else +texture pos_tex; +texture q_tex; +#endif + +#else +#define pos_tex x_ +#define q_tex q_ +#endif + +__kernel void k_born_coul_long(const __global numtyp4 *restrict x_, + const __global numtyp4 *restrict coeff1, + const __global numtyp4 *restrict coeff2, + const int lj_types, + const __global numtyp *restrict sp_lj_in, + const __global int *dev_nbor, + const __global int *dev_packed, + __global acctyp4 *restrict ans, + __global acctyp *restrict engv, + const int eflag, const int vflag, const int inum, + const int nbor_pitch, + const __global numtyp *restrict q_, + const __global numtyp4 *restrict cutsq_sigma, + const numtyp cut_coulsq, const numtyp qqrd2e, + const numtyp g_ewald, const int t_per_atom) { + int tid, ii, offset; + atom_info(t_per_atom,ii,tid,offset); + + __local numtyp sp_lj[8]; + sp_lj[0]=sp_lj_in[0]; + sp_lj[1]=sp_lj_in[1]; + sp_lj[2]=sp_lj_in[2]; + sp_lj[3]=sp_lj_in[3]; + sp_lj[4]=sp_lj_in[4]; + sp_lj[5]=sp_lj_in[5]; + sp_lj[6]=sp_lj_in[6]; + sp_lj[7]=sp_lj_in[7]; + + acctyp energy=(acctyp)0; + acctyp e_coul=(acctyp)0; + acctyp4 f; + f.x=(acctyp)0; f.y=(acctyp)0; f.z=(acctyp)0; + acctyp virial[6]; + for (int i=0; i<6; i++) + virial[i]=(acctyp)0; + + if (ii0) { + if (rsq < cut_coulsq) + e_coul += prefactor*(_erfc-factor_coul); + if (rsq < cutsq_sigma[mtype].y) { + numtyp e=coeff2[mtype].x*rexp - coeff2[mtype].y*r6inv + + coeff2[mtype].z*r2inv*r6inv; + energy+=factor_lj*(e-coeff2[mtype].w); + } + } + if (vflag>0) { + virial[0] += delx*delx*force; + virial[1] += dely*dely*force; + virial[2] += delz*delz*force; + virial[3] += delx*dely*force; + virial[4] += delx*delz*force; + virial[5] += dely*delz*force; + } + } + + } // for nbor + store_answers_q(f,energy,e_coul,virial,ii,inum,tid,t_per_atom,offset,eflag, + vflag,ans,engv); + } // if ii +} + +__kernel void k_born_coul_long_fast(const __global numtyp4 *restrict x_, + const __global numtyp4 *restrict coeff1_in, + const __global numtyp4 *restrict coeff2_in, + const __global numtyp *restrict sp_lj_in, + const __global int *dev_nbor, + const __global int *dev_packed, + __global acctyp4 *restrict ans, + __global acctyp *restrict engv, + const int eflag, const int vflag, const int inum, + const int nbor_pitch, + const __global numtyp *restrict q_, + const __global numtyp4 *restrict cutsq_sigma, + const numtyp cut_coulsq, const numtyp qqrd2e, + const numtyp g_ewald, const int t_per_atom) { + int tid, ii, offset; + atom_info(t_per_atom,ii,tid,offset); + + __local numtyp4 coeff1[MAX_SHARED_TYPES*MAX_SHARED_TYPES]; + __local numtyp4 coeff2[MAX_SHARED_TYPES*MAX_SHARED_TYPES]; + __local numtyp sp_lj[8]; + if (tid<8) + sp_lj[tid]=sp_lj_in[tid]; + if (tid0) + coeff2[tid]=coeff2_in[tid]; + } + + acctyp energy=(acctyp)0; + acctyp e_coul=(acctyp)0; + acctyp4 f; + f.x=(acctyp)0; f.y=(acctyp)0; f.z=(acctyp)0; + acctyp virial[6]; + for (int i=0; i<6; i++) + virial[i]=(acctyp)0; + + __syncthreads(); + + if (ii0) { + if (rsq < cut_coulsq) + e_coul += prefactor*(_erfc-factor_coul); + if (rsq < cutsq_sigma[mtype].y) { + numtyp e=coeff2[mtype].x*rexp - coeff2[mtype].y*r6inv + + coeff2[mtype].z*r2inv*r6inv; + energy+=factor_lj*(e-coeff2[mtype].w); + } + } + if (vflag>0) { + virial[0] += delx*delx*force; + virial[1] += dely*dely*force; + virial[2] += delz*delz*force; + virial[3] += delx*dely*force; + virial[4] += delx*delz*force; + virial[5] += dely*delz*force; + } + } + + } // for nbor + 
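    // Combine the per-thread force, energy and virial accumulators over the
    // t_per_atom threads assigned to atom ii and write them to ans / engv;
    // store_answers_q is a helper provided by lal_aux_fun1.h.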
store_answers_q(f,energy,e_coul,virial,ii,inum,tid,t_per_atom,offset,eflag, + vflag,ans,engv); + } // if ii +} + diff --git a/cuda_code/layer_norm_grad_grad_impl_2.cu b/cuda_code/layer_norm_grad_grad_impl_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..6a4f3c776bd9dc5d86ccf4227deb0c34c0148173 --- /dev/null +++ b/cuda_code/layer_norm_grad_grad_impl_2.cu @@ -0,0 +1,415 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "backend/kernel_compiler/gpu/cuda_impl/layer_norm_grad_grad_impl.cuh" +#include "backend/kernel_compiler/gpu/cuda_impl/layer_norm_impl.cuh" + +constexpr int THREAD_PER_BLOCK = 256; +constexpr int NUM_PER_THREAD_REDUCE = 4; +constexpr int WARP_SIZE = 32; +constexpr int NUM_SHARED_SUM_INPUT = 7; +constexpr int NUM_SHARED_SUM_GAMMA = 3; + +template +inline __device__ T my_pow(T a, double b) { + return pow(a, static_cast(b)); +} + + +template <> +inline __device__ half my_pow(half a, double b) { + return __float2half(pow(__half2float(a), static_cast(b))); +} + + +template +inline __device__ void GammaAndBetaThreadReduce(const int &col, const int &row_dim, const int &col_dim, + const int &mean_dim, const T &epsilon, const T *dy, const T *x, + const T *mean, const T *var, const T *grad_dx, T *part1, T *part2, + T *part3, const T *global_sum1, const T *global_sum2) { + int loop_num = (row_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE; + for (int i = threadIdx.x; i < loop_num; i += blockDim.x) { + for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) { + int row = NUM_PER_THREAD_REDUCE * i + j; + if (row >= row_dim) { + return; + } + + int pos = row * col_dim + col; + int mean_offset = pos / mean_dim; + + T v1 = x[pos] - mean[mean_offset]; + T v2 = my_pow(var[mean_offset] + epsilon, -0.5); + + part1[0] += dy[pos] * v1 * v2 * global_sum2[pos]; + part2[0] += dy[pos] * global_sum1[pos]; + part3[0] += dy[pos] * v2 * grad_dx[pos]; + } + } +} + + +template +inline __device__ void GammaAndBetaWarpReduce(T *part1, T *part2, T *part3) { + for (int delta = (WARP_SIZE >> 1); delta > 0; delta >>= 1) { + part1[0] += __shfl_down_sync(0xffffffff, part1[0], delta); + part2[0] += __shfl_down_sync(0xffffffff, part2[0], delta); + part3[0] += __shfl_down_sync(0xffffffff, part3[0], delta); + } +} + + +template +inline __device__ void GammaAndBetaBlockReduce(const int &col, const int &row_dim, T *part1, T *part2, T *part3, + T *d_gamma) { + // load data to share memory + // thread(0, 32, 64, 96, ...) 
keep the data + DynamicSharedMem share_mem; + if (threadIdx.x % WARP_SIZE == 0) { + int offset = threadIdx.x / WARP_SIZE * NUM_SHARED_SUM_GAMMA; + share_mem.addr()[offset] = part1[0]; + share_mem.addr()[offset + 1] = part2[0]; + share_mem.addr()[offset + 2] = part3[0]; + } + __syncthreads(); + + for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) { + if (threadIdx.x < stride) { + int offset = (threadIdx.x + stride) * NUM_SHARED_SUM_GAMMA; + share_mem.addr()[threadIdx.x * NUM_SHARED_SUM_GAMMA] += share_mem.addr()[offset]; + share_mem.addr()[threadIdx.x * NUM_SHARED_SUM_GAMMA + 1] += share_mem.addr()[offset + 1]; + share_mem.addr()[threadIdx.x * NUM_SHARED_SUM_GAMMA + 2] += share_mem.addr()[offset + 2]; + } + } + __syncthreads(); + + if (threadIdx.x == 0) { + d_gamma[col] = share_mem.addr()[0] + share_mem.addr()[1] + share_mem.addr()[2]; + } +} + + +template +__global__ void GammaAndBetaPropKernel(const int row_dim, const int col_dim, const int mean_dim, const T epsilon, + const T *dy, const T *x, const T *mean, const T *var, const T *grad_dx, + T *d_gamma, T *global_sum1, T *global_sum2) { + for (int col = blockIdx.x; col < col_dim; col += gridDim.x) { + T part1 = 0; + T part2 = 0; + T part3 = 0; + GammaAndBetaThreadReduce(col, row_dim, col_dim, mean_dim, epsilon, dy, x, mean, var, grad_dx, &part1, &part2, + &part3, global_sum1, global_sum2); + GammaAndBetaWarpReduce(&part1, &part2, &part3); + GammaAndBetaBlockReduce(col, row_dim, &part1, &part2, &part3, d_gamma); + } +} + + +template +inline __device__ void InputThreadReduceInnerMean(const int &row, const int &col_dim, const int ¶m_dim, + const T &epsilon, T *sum1, T *sum2, T *sum3, T *sum4, + const T *dy, const T *x, const T *mean, const T *var, + const T *gamma, const T *grad_dx) { + int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE; + for (int i = threadIdx.x; i < loop_num; i += blockDim.x) { + for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) { + int col = NUM_PER_THREAD_REDUCE * i + j; + if (col >= col_dim) { + return; + } + int pos = row * col_dim + col; + int gamma_offset = pos % param_dim; + + T v1 = x[pos] - mean[row]; + T v2 = my_pow(var[row] + epsilon, -0.5); + T v3 = v1 * v2; + T v4 = dy[pos] * gamma[gamma_offset]; + + sum1[0] -= v2 * grad_dx[pos]; + sum2[0] -= v3 * v2 * grad_dx[pos]; + sum3[0] += v4; + sum4[0] += v4 * v3; + } + } +} + + +template +inline __device__ void InputWarpReduceInnerMean(T *sum1, T *sum2, T *sum3, T *sum4) { + for (int delta = (WARP_SIZE >> 1); delta > 0; delta >>= 1) { + sum1[0] += __shfl_down_sync(0xffffffff, sum1[0], delta); + sum2[0] += __shfl_down_sync(0xffffffff, sum2[0], delta); + sum3[0] += __shfl_down_sync(0xffffffff, sum3[0], delta); + sum4[0] += __shfl_down_sync(0xffffffff, sum4[0], delta); + } +} + + +template +inline __device__ void InputBlockReduceInnerMean(const int &col_dim, T *sum1, T *sum2, T *sum3, T *sum4, T *share_mem) { + // load data to share memory + // thread(0, 32, 64, 96, ...) 
keep the data + if (threadIdx.x % WARP_SIZE == 0) { + int offset = threadIdx.x / WARP_SIZE * NUM_SHARED_SUM_INPUT; + share_mem[offset] = sum1[0]; + share_mem[offset + 1] = sum2[0]; + share_mem[offset + 2] = sum3[0]; + share_mem[offset + 3] = sum4[0]; + } + __syncthreads(); + + for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) { + if (threadIdx.x < stride) { + int offset = (threadIdx.x + stride) * NUM_SHARED_SUM_INPUT; + + share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT] += share_mem[offset]; + share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 1] += share_mem[offset + 1]; + share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 2] += share_mem[offset + 2]; + share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 3] += share_mem[offset + 3]; + } + } + __syncthreads(); +} + + +template +inline __device__ void InputThreadReduceOuterMean(const int &row, const int &col_dim, const int ¶m_dim, + const T &epsilon, T *sum5, T *sum6, T *sum7, T *share_mem, + const T *dy, const T *x, const T *mean, const T *var, const T *gamma, + const T *grad_dx, const T *grad_dg, T *d_x) { + int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE; + for (int i = threadIdx.x; i < loop_num; i += blockDim.x) { + for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) { + int col = NUM_PER_THREAD_REDUCE * i + j; + if (col >= col_dim) { + return; + } + int pos = row * col_dim + col; + int gamma_offset = pos % param_dim; + + T v1 = x[pos] - mean[row]; + T v2 = my_pow(var[row] + epsilon, -0.5); + T v3 = dy[pos] * gamma[gamma_offset]; + + T v4 = v3 - share_mem[2] * (1.0 / col_dim) - v1 * v2 * share_mem[3] * (1.0 / col_dim); + T v5 = v3 * share_mem[1] * (1.0 / col_dim); + T v6 = grad_dx[pos] * v2 * share_mem[3] * (-1.0 / col_dim); + T v7 = dy[pos] * grad_dg[gamma_offset]; + T v8 = v5 + v6 + v7; + + T part1 = v4 * grad_dx[pos]; + T part2 = v1 * v8; + T part3 = v2 * v8; + d_x[pos] = part3; + + sum5[0] += part1; + sum6[0] += part2; + sum7[0] -= part3; + } + } +} + + +template <> +inline __device__ void InputThreadReduceOuterMean(const int &row, const int &col_dim, const int ¶m_dim, + const half &epsilon, half *sum5, half *sum6, half *sum7, + half *share_mem, const half *dy, const half *x, const half *mean, + const half *var, const half *gamma, const half *grad_dx, + const half *grad_dg, half *d_x) { + int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE; + for (int i = threadIdx.x; i < loop_num; i += blockDim.x) { + for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) { + int col = NUM_PER_THREAD_REDUCE * i + j; + if (col >= col_dim) { + return; + } + int pos = row * col_dim + col; + int gamma_offset = pos % param_dim; + + half v1 = x[pos] - mean[row]; + half v2 = my_pow(var[row] + epsilon, -0.5); + half v3 = dy[pos] * gamma[gamma_offset]; + half v4 = v3 - share_mem[2] * __float2half(1.0 / col_dim) - v1 * v2 * share_mem[3] * __float2half(1.0 / col_dim); + half v5 = v3 * share_mem[1] * __float2half(1.0 / col_dim); + half v6 = grad_dx[pos] * v2 * share_mem[3] * __float2half(-1.0 / col_dim); + half v7 = dy[pos] * grad_dg[gamma_offset]; + half v8 = v5 + v6 + v7; + + half part1 = v4 * grad_dx[pos]; + half part2 = v1 * v8; + half part3 = v2 * v8; + d_x[pos] = part3; + + sum5[0] += part1; + sum6[0] += part2; + sum7[0] -= part3; + } + } +} + + +template +inline __device__ void InputWarpReduceOuterMean(T *sum5, T *sum6, T *sum7) { + for (int delta = (WARP_SIZE >> 1); delta > 0; delta >>= 1) { + sum5[0] += __shfl_down_sync(0xffffffff, sum5[0], delta); + sum6[0] += __shfl_down_sync(0xffffffff, sum6[0], 
delta); + sum7[0] += __shfl_down_sync(0xffffffff, sum7[0], delta); + } +} + +template +inline __device__ void InputBlockReduceOuterMean(const int &col_dim, T *sum5, T *sum6, T *sum7, T *share_mem) { + // load data to share memory + // thread(0, 32, 64, 96, ...) keep the data + if (threadIdx.x % WARP_SIZE == 0) { + int offset = threadIdx.x / WARP_SIZE * NUM_SHARED_SUM_INPUT; + + share_mem[offset + 4] = sum5[0]; + share_mem[offset + 5] = sum6[0]; + share_mem[offset + 6] = sum7[0]; + } + __syncthreads(); + + for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) { + if (threadIdx.x < stride) { + int offset = (threadIdx.x + stride) * NUM_SHARED_SUM_INPUT; + + share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 4] += share_mem[offset + 4]; + share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 5] += share_mem[offset + 5]; + share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 6] += share_mem[offset + 6]; + } + } + __syncthreads(); +} + + +template +inline __device__ void InputProp(const int &row, const int &col_dim, const int ¶m_dim, const T &epsilon, + const T *dy, const T *x, const T *mean, const T *var, const T *gamma, + const T *grad_dx, const T *grad_dg, const T *grad_db, T *d_dy, T *d_x, + const T *share_mem, T *global_sum1, T *global_sum2) { + for (int col = threadIdx.x; col < col_dim; col += blockDim.x) { + int pos = (row * col_dim + col); + int gamma_offset = pos % param_dim; + + T v1 = x[pos] - mean[row]; + T v2 = my_pow(var[row] + epsilon, -0.5); + T v3 = v1 * v2; + + T part1 = gamma[gamma_offset] * grad_dx[pos] * v2; + T part2 = gamma[gamma_offset] * share_mem[0] * (1.0 / col_dim); + T part3 = gamma[gamma_offset] * v3 * share_mem[1] * (1.0 / col_dim); + T part4 = v3 * grad_dg[gamma_offset]; + d_dy[pos] = part1 + part2 + part3 + part4 + grad_db[gamma_offset]; + + T part5 = v1 * (my_pow(var[row] + epsilon, -1.5) * ((share_mem[4]+ share_mem[5]) * (-1.0 / col_dim))); + d_x[pos] += part5 + share_mem[6] * (1.0 / col_dim); + + global_sum1[pos] = share_mem[0] * (1.0 / col_dim); + global_sum2[pos] = share_mem[1] * (1.0 / col_dim); + } +} + + +template <> +inline __device__ void InputProp(const int &row, const int &col_dim, const int ¶m_dim, const half &epsilon, + const half *dy, const half *x, const half *mean, const half *var, const half *gamma, + const half *grad_dx, const half *grad_dg, const half *grad_db, half *d_dy, half *d_x, + const half *share_mem, half *global_sum1, half *global_sum2) { + for (int col = threadIdx.x; col < col_dim; col += blockDim.x) { + int pos = (row * col_dim + col); + int gamma_offset = pos % param_dim; + + half v1 = x[pos] - mean[row]; + half v2 = my_pow(var[row] + epsilon, -0.5); + half v3 = v1 * v2; + + half part1 = gamma[gamma_offset] * grad_dx[pos] * v2; + half part2 = gamma[gamma_offset] * share_mem[0] * __float2half(1.0 / col_dim); + half part3 = gamma[gamma_offset] * v3 * share_mem[1] * __float2half(1.0 / col_dim); + half part4 = v3 * grad_dg[gamma_offset]; + d_dy[pos] = part1 + part2 + part3 + part4 + grad_db[gamma_offset]; + + half part5 = v1 * (my_pow(var[row] + epsilon, -1.5) * + ((share_mem[4]+ share_mem[5]) * __float2half(-1.0 / col_dim))); + d_x[pos] += part5 + share_mem[6] * __float2half(1.0 / col_dim); + + global_sum1[pos] = share_mem[0] * __float2half(1.0 / col_dim); + global_sum2[pos] = share_mem[1] * __float2half(1.0 / col_dim); + } +} + + +template +__global__ void InputPropKernel(const int row_dim, const int col_dim, const int param_dim, const T epsilon, + const T *dy, const T *x, const T *mean, const T *var, const T *gamma, + const T 
*grad_dx, const T *grad_dg, const T *grad_db, T *d_dy, T *d_x, T *global_sum1, + T *global_sum2) { + for (int row = blockIdx.x; row < row_dim; row += gridDim.x) { + T sum1 = 0; + T sum2 = 0; + T sum3 = 0; + T sum4 = 0; + T sum5 = 0; + T sum6 = 0; + T sum7 = 0; + DynamicSharedMem share_mem; + + InputThreadReduceInnerMean(row, col_dim, param_dim, epsilon, &sum1, &sum2, &sum3, &sum4, dy, x, mean, var, gamma, + grad_dx); + InputWarpReduceInnerMean(&sum1, &sum2, &sum3, &sum4); + InputBlockReduceInnerMean(col_dim, &sum1, &sum2, &sum3, &sum4, share_mem.addr()); + + InputThreadReduceOuterMean(row, col_dim, param_dim, epsilon, &sum5, &sum6, &sum7, share_mem.addr(), dy, x, mean, + var, gamma, grad_dx, grad_dg, d_x); + InputWarpReduceOuterMean(&sum5, &sum6, &sum7); + InputBlockReduceOuterMean(col_dim, &sum5, &sum6, &sum7, share_mem.addr()); + InputProp(row, col_dim, param_dim, epsilon, dy, x, mean, var, gamma, grad_dx, grad_dg, grad_db, d_dy, d_x, + share_mem.addr(), global_sum1, global_sum2); + } +} + + +template +void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int ¶m_dim, T *global_sum1, T *global_sum2, + const T &epsilon, const T *dy, const T *x, const T *mean, const T *var, const T *gamma, + const T* grad_dx, const T* grad_dg, const T* grad_db, T *d_dy, T *d_x, T *d_gamma, + cudaStream_t stream) { + int share_mem_size = THREAD_PER_BLOCK / WARP_SIZE * NUM_SHARED_SUM_INPUT * sizeof(T); + InputPropKernel<<>>(row_dim, col_dim, param_dim, epsilon, dy, x, + mean, var, gamma, grad_dx, grad_dg, grad_db, + d_dy, d_x, global_sum1, global_sum2); + share_mem_size = THREAD_PER_BLOCK / WARP_SIZE * NUM_SHARED_SUM_GAMMA * sizeof(T); + int param_reduce_dim = row_dim * col_dim / param_dim; + GammaAndBetaPropKernel<<>>(param_reduce_dim, param_dim, + col_dim, epsilon, dy, x, mean, var, + grad_dx, d_gamma, global_sum1, + global_sum2); +} + + +template void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int ¶m_dim, float *global_sum1, + float *global_sum2, const float &epsilon, const float *dy, const float *x, + const float *mean, const float *var, const float *gamma, const float *grad_dx, + const float *grad_dg, const float *grad_db, float *d_dy, float *d_x, float *d_gamma, + cudaStream_t stream); +template void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int ¶m_dim, half *global_sum1, + half *global_sum2, const half &epsilon, const half *dy, const half *x, const half *mean, + const half *var, const half *gamma, const half *grad_dx, const half *grad_dg, + const half *grad_db, half *d_dy, half *d_x, half *d_gamma, cudaStream_t stream); diff --git a/cuda_code/layer_norm_grad_impl_4.cu b/cuda_code/layer_norm_grad_impl_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..35d200b92a671c5a420475f463a709a80ee15c41 --- /dev/null +++ b/cuda_code/layer_norm_grad_impl_4.cu @@ -0,0 +1,257 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "backend/kernel_compiler/gpu/cuda_impl/layer_norm_grad_impl.cuh" +#include "backend/kernel_compiler/gpu/cuda_impl/layer_norm_impl.cuh" + +constexpr int NUM_PER_THREAD_REDUCE = 4; +constexpr int WARP_SIZE = 32; + +template +inline __device__ T my_pow(T a, double b) { + return pow(a, static_cast(b)); +} + +template <> +inline __device__ half my_pow(half a, double b) { + return __float2half(pow(__half2float(a), static_cast(b))); +} + +template +inline __device__ void GammaAndBetaThreadReduce(const int &col, const int &row_dim, const int &col_dim, + const int &mean_dim, const T &epsilon, const T *dy, const T *x, + const T *mean, const T *var, T *dg, T *db) { + int loop_num = (row_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE; + for (int i = threadIdx.x; i < loop_num; i += blockDim.x) { + for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) { + int row = NUM_PER_THREAD_REDUCE * i + j; + if (row >= row_dim) { + return; + } + + int pos = row * col_dim + col; + int mean_offset = pos / mean_dim; + dg[0] += dy[pos] * my_pow(var[mean_offset] + epsilon, -0.5) * (x[pos] - mean[mean_offset]); + db[0] += dy[pos]; + } + } +} + +template +inline __device__ void GammaAndBetaWarpReduce(T *dg, T *db) { + for (int delta = (WARP_SIZE >> 1); delta > 0; delta >>= 1) { + dg[0] += __shfl_down_sync(0xffffffff, dg[0], delta); + db[0] += __shfl_down_sync(0xffffffff, db[0], delta); + } +} + +template +inline __device__ void GammaAndBetaBlockReduce(const int &col, const int &row_dim, T *dg, T *db, T *dg_addr, + T *db_addr) { + // load data to share memory + // thread(0, 32, 64, 96, ...) keep the data + DynamicSharedMem share_mem; + if (threadIdx.x % WARP_SIZE == 0) { + int offset = threadIdx.x / WARP_SIZE * 2; + share_mem.addr()[offset] = dg[0]; + share_mem.addr()[offset + 1] = db[0]; + } + __syncthreads(); + + for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) { + if (threadIdx.x < stride) { + int offset = (threadIdx.x + stride) * 2; + share_mem.addr()[threadIdx.x * 2] += share_mem.addr()[offset]; + share_mem.addr()[threadIdx.x * 2 + 1] += share_mem.addr()[offset + 1]; + } + } + __syncthreads(); + + if (threadIdx.x == 0) { + dg_addr[col] = share_mem.addr()[0]; + db_addr[col] = share_mem.addr()[1]; + } +} + +template +__global__ void GammaAndBetaPropKernel(const int row_dim, const int col_dim, const int mean_dim, const T epsilon, + const T *dy, const T *x, const T *mean_addr, const T *var_addr, T *dg_addr, + T *db_addr) { + // row: [0:param_axis] + // col: [param_axis:] + // dg[i][j] = dy[i][j] * (var[i] + epsilon, -0.5) * (x[i][j] - mean[i]) + // dg[j] = \Sigma_{j}dg[i][j] + for (int col = blockIdx.x; col < col_dim; col += gridDim.x) { + T dg = 0; + T db = 0; + GammaAndBetaThreadReduce(col, row_dim, col_dim, mean_dim, epsilon, dy, x, mean_addr, var_addr, &dg, &db); + GammaAndBetaWarpReduce(&dg, &db); + GammaAndBetaBlockReduce(col, row_dim, &dg, &db, dg_addr, db_addr); + } +} + +template +inline __device__ void InputThreadReduce(const int &row, const int &col_dim, const int ¶m_dim, const T &epsilon, + T *sum1, T *sum2, T *sum3, const T *dy, const T *x, const T *mean, + const T *var, const T *gamma) { + int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE; + for (int i = threadIdx.x; i < loop_num; i += blockDim.x) { + for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) { + int col = NUM_PER_THREAD_REDUCE * i + j; + if (col >= col_dim) { + return; + } + + int pos = row * col_dim + col; + int gamma_offset = pos % param_dim; + T v1 
= dy[pos] * gamma[gamma_offset]; + T v2 = x[pos] - mean[row]; + + sum1[0] += -0.5 * v1 * v2 * my_pow(var[row] + epsilon, -1.5); + sum2[0] += v1; + sum3[0] += -2.0 * v2; + } + } +} + +template <> +inline __device__ void InputThreadReduce(const int &row, const int &col_dim, const int ¶m_dim, const half &epsilon, + half *sum1, half *sum2, half *sum3, const half *dy, const half *x, + const half *mean, const half *var, const half *gamma) { + int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE; + for (int i = threadIdx.x; i < loop_num; i += blockDim.x) { + for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) { + int col = NUM_PER_THREAD_REDUCE * i + j; + if (col >= col_dim) { + return; + } + + int pos = row * col_dim + col; + int gamma_offset = pos % param_dim; + half v1 = dy[pos] * gamma[gamma_offset]; + half v2 = x[pos] - mean[row]; + + sum1[0] += __float2half(-0.5) * v1 * v2 * my_pow(var[row] + epsilon, -1.5); + sum2[0] += v1; + sum3[0] += __float2half(-2.0) * v2; + } + } +} + +template +inline __device__ void InputWarpReduce(T *sum1, T *sum2, T *sum3) { + for (int delta = (WARP_SIZE >> 1); delta > 0; delta >>= 1) { + sum1[0] += __shfl_down_sync(0xffffffff, sum1[0], delta); + sum2[0] += __shfl_down_sync(0xffffffff, sum2[0], delta); + sum3[0] += __shfl_down_sync(0xffffffff, sum3[0], delta); + } +} + +template +inline __device__ void InputBlockReduce(const int &col_dim, T *sum1, T *sum2, T *sum3, T *share_mem) { + // load data to share memory + // thread(0, 32, 64, 96, ...) keep the data + if (threadIdx.x % WARP_SIZE == 0) { + int offset = threadIdx.x / WARP_SIZE * 3; + share_mem[offset] = sum1[0]; + share_mem[offset + 1] = sum2[0]; + share_mem[offset + 2] = sum3[0]; + } + __syncthreads(); + + for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) { + if (threadIdx.x < stride) { + int offset = (threadIdx.x + stride) * 3; + share_mem[threadIdx.x * 3] += share_mem[offset]; + share_mem[threadIdx.x * 3 + 1] += share_mem[offset + 1]; + share_mem[threadIdx.x * 3 + 2] += share_mem[offset + 2]; + } + } + __syncthreads(); +} + +template +inline __device__ void InputProp(const int &row, const int &col_dim, const int ¶m_dim, const T &epsilon, + const T *dy, const T *x, const T *mean, const T *var, const T *gamma, T *dx, + const T *share_mem) { + for (int col = threadIdx.x; col < col_dim; col += blockDim.x) { + int pos = (row * col_dim + col); + int gamma_offset = pos % param_dim; + T v1 = dy[pos] * gamma[gamma_offset]; + T v2 = x[pos] - mean[row]; + T v3 = my_pow(var[row] + epsilon, -0.5); + dx[pos] = v1 * v3 + share_mem[0] * (2.0 / col_dim) * v2 + + (-1.0 * v3 * share_mem[1] + (1.0 / col_dim) * share_mem[0] * share_mem[2]) * (1.0 / col_dim); + } +} + +template <> +inline __device__ void InputProp(const int &row, const int &col_dim, const int ¶m_dim, const half &epsilon, + const half *dy, const half *x, const half *mean, const half *var, const half *gamma, + half *dx, const half *share_mem) { + for (int col = threadIdx.x; col < col_dim; col += blockDim.x) { + int pos = (row * col_dim + col); + int gamma_offset = pos % param_dim; + half v1 = dy[pos] * gamma[gamma_offset]; + half v2 = x[pos] - mean[row]; + half v3 = my_pow(var[row] + epsilon, -0.5); + dx[pos] = v1 * v3 + share_mem[0] * __float2half(2.0 / col_dim) * v2 + + (__float2half(-1.0) * v3 * share_mem[1] + __float2half(1.0 / col_dim) * share_mem[0] * share_mem[2]) * + __float2half(1.0 / col_dim); + } +} + +template +__global__ void InputPropKernel(const int row_dim, const int col_dim, const int param_dim, const T 
epsilon, const T *dy, + const T *x, const T *mean, const T *var, const T *gamma, T *dx) { + for (int row = blockIdx.x; row < row_dim; row += gridDim.x) { + T sum1 = 0; + T sum2 = 0; + T sum3 = 0; + DynamicSharedMem share_mem; + InputThreadReduce(row, col_dim, param_dim, epsilon, &sum1, &sum2, &sum3, dy, x, mean, var, gamma); + InputWarpReduce(&sum1, &sum2, &sum3); + InputBlockReduce(col_dim, &sum1, &sum2, &sum3, share_mem.addr()); + InputProp(row, col_dim, param_dim, epsilon, dy, x, mean, var, gamma, dx, share_mem.addr()); + } +} + +template +void LayerNormGrad(const int &row_dim, const int &col_dim, const int ¶m_dim, const T &epsilon, const T *dy, + const T *x, const T *mean, const T *var, const T *gamma, T *dx, T *dg, T *db, cudaStream_t stream) { + const int thread_per_block = 256; + int share_mem_size = thread_per_block / WARP_SIZE * 3 * sizeof(T); + InputPropKernel<<>>(row_dim, col_dim, param_dim, epsilon, dy, x, + mean, var, gamma, dx); + + share_mem_size = thread_per_block / WARP_SIZE * 2 * sizeof(T); + // GammaAndBetaPropKernel<<>>(row_dim, col_dim, epsilon, dy, x, + // mean, + // var, dg, db); + int param_reduce_dim = row_dim * col_dim / param_dim; + GammaAndBetaPropKernel<<>>(param_reduce_dim, param_dim, col_dim, + epsilon, dy, x, mean, var, dg, db); +} + +template void LayerNormGrad(const int &row_dim, const int &col_dim, const int ¶m_dim, const float &epsilon, + const float *dy, const float *x, const float *mean, const float *var, const float *gamma, + float *dx, float *dg, float *db, cudaStream_t stream); +template void LayerNormGrad(const int &row_dim, const int &col_dim, const int ¶m_dim, const half &epsilon, + const half *dy, const half *x, const half *mean, const half *var, const half *gamma, + half *dx, half *dg, half *db, cudaStream_t stream); diff --git a/cuda_code/lcc_app.cu b/cuda_code/lcc_app.cu new file mode 100644 index 0000000000000000000000000000000000000000..f9628d1701789072fb4c7c2ee47b5946ecaaf22e --- /dev/null +++ b/cuda_code/lcc_app.cu @@ -0,0 +1,250 @@ +// ---------------------------------------------------------------------------- +// Gunrock -- Fast and Efficient GPU Graph Library +// ---------------------------------------------------------------------------- +// This source code is distributed under the terms of LICENSE.TXT +// in the root directory of this source distribution. 
+// ---------------------------------------------------------------------------- + +/** + * @file lcc_app.cu + * + * @brief Local Clustering Coeffecient (LCC) application + */ + +#include + +// Utilities and correctnelcc-checking +#include + +// Graph definations +#include +#include +#include + +// triangle counting includes +#include +#include + +namespace gunrock { +namespace app { +namespace lcc { + +cudaError_t UseParameters(util::Parameters ¶meters) { + cudaError_t retval = cudaSuccess; + GUARD_CU(UseParameters_app(parameters)); + GUARD_CU(UseParameters_problem(parameters)); + GUARD_CU(UseParameters_enactor(parameters)); + GUARD_CU(UseParameters_test(parameters)); + + GUARD_CU(parameters.Use( + "num-triangles", + util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::INTERNAL_PARAMETER, + 0, "number of output colors", __FILE__, __LINE__)); + + return retval; +} + +/** + * @brief Run LCC tests + * @tparam GraphT Type of the graph + * @tparam ValueT Type of the distances + * @param[in] parameters Excution parameters + * @param[in] graph Input graph + * @param[in] ref_distances Reference distances + * @param[in] target Whether to perform the LCC + * \return cudaError_t error melccage(s), if any + */ +template +cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph, + typename GraphT::ValueT *ref_lcc_counts, + util::Location target = util::DEVICE) { + cudaError_t retval = cudaSuccess; + typedef typename GraphT::VertexT VertexT; + typedef typename GraphT::SizeT SizeT; + typedef Problem ProblemT; + typedef Enactor EnactorT; + util::CpuTimer cpu_timer, total_timer; + cpu_timer.Start(); + total_timer.Start(); + + // parse configurations from parameters + bool quiet_mode = parameters.Get("quiet"); + int num_runs = parameters.Get("num-runs"); + std::string validation = parameters.Get("validation"); + util::Info info("LCC", parameters, graph); // initialize Info structure + + ValueT *h_lcc_counts = new ValueT[graph.nodes]; + + ProblemT problem(parameters); + EnactorT enactor; + GUARD_CU(problem.Init(graph, target)); + GUARD_CU(enactor.Init(problem, target)); + cpu_timer.Stop(); + parameters.Set("preprocess-time", cpu_timer.ElapsedMillis()); + + // perform LCC + for (int run_num = 0; run_num < num_runs; ++run_num) { + GUARD_CU(problem.Reset(target)); + GUARD_CU(enactor.Reset(graph.edges, target)); + util::PrintMsg("__________________________", !quiet_mode); + + cpu_timer.Start(); + GUARD_CU(enactor.Enact()); + cpu_timer.Stop(); + info.CollectSingleRun(cpu_timer.ElapsedMillis()); + + util::PrintMsg( + "--------------------------\nRun " + std::to_string(run_num) + + " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) + + " ms, #iterations = " + + std::to_string(enactor.enactor_slices[0].enactor_stats.iteration), + !quiet_mode); + + if (validation == "each") { + GUARD_CU(problem.Extract(h_lcc_counts)); + SizeT num_errors = app::lcc::Validate_Results( + parameters, graph, h_lcc_counts, ref_lcc_counts, false); + } + } + + cpu_timer.Start(); + // Copy out results + GUARD_CU(problem.Extract(h_lcc_counts)); + if (validation == "last") { + SizeT num_errors = app::lcc::Validate_Results(parameters, graph, h_lcc_counts, + ref_lcc_counts, false); + } + + int num_triangles = 0; + for (auto i = 0; i < graph.nodes; i++) { + num_triangles += h_lcc_counts[i]; + } + parameters.Set("num-triangles", num_triangles); + + // compute running statistics + info.ComputeTraversalStats(enactor, (VertexT *)NULL); +// Display_Memory_Usage(problem); +#ifdef ENABLE_PERFORMANCE_PROFILING + // 
Display_Performance_Profiling(&enactor); +#endif + // Clean up + GUARD_CU(enactor.Release(target)); + GUARD_CU(problem.Release(target)); + delete[] h_lcc_counts; + h_lcc_counts = NULL; + cpu_timer.Stop(); + total_timer.Stop(); + + info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis()); + return retval; +} + +} // namespace lcc +} // namespace app +} // namespace gunrock + +/* + * @brief Entry of gunrock_lcc function + * @tparam GraphT Type of the graph + * @tparam ValueT Type of the distances + * @param[in] parameters Excution parameters + * @param[in] graph Input graph + * @param[out] distances Return shortest distance to source per vertex + * @param[out] preds Return predecelccors of each vertex + * \return double Return accumulated elapsed times for all runs + */ +template +double gunrock_lcc(gunrock::util::Parameters ¶meters, GraphT &graph, + typename GraphT::VertexT *lcc_counts) { + typedef typename GraphT::VertexT VertexT; + typedef gunrock::app::lcc::Problem ProblemT; + typedef gunrock::app::lcc::Enactor EnactorT; + gunrock::util::CpuTimer cpu_timer; + gunrock::util::Location target = gunrock::util::DEVICE; + double total_time = 0; + if (parameters.UseDefault("quiet")) parameters.Set("quiet", true); + + // Allocate problem and enactor on GPU, and initialize them + ProblemT problem(parameters); + EnactorT enactor; + problem.Init(graph, target); + enactor.Init(problem, target); + + int num_runs = parameters.Get("num-runs"); + for (int run_num = 0; run_num < num_runs; ++run_num) { + problem.Reset(target); + enactor.Reset(target); + + cpu_timer.Start(); + enactor.Enact(); + cpu_timer.Stop(); + + total_time += cpu_timer.ElapsedMillis(); + // problem.Extract(node, lcc_counts, NULL, target); + problem.Extract(lcc_counts, target); + } + + enactor.Release(target); + problem.Release(target); + return total_time; +} + +/* + * @brief Simple interface take in graph as CSR format + * @param[in] num_nodes Number of verilcces in the input graph + * @param[in] num_edges Number of edges in the input graph + * @param[in] row_offsets CSR-formatted graph input row offsets + * @param[in] col_indices CSR-formatted graph input column indices + * @param[in] edge_values CSR-formatted graph input edge weights + * @param[in] num_runs Number of runs to perform LCC + * @param[in] sources Sources to begin traverse, one for each run + * @param[in] mark_preds Whether to output predecelccor info + * @param[out] distances Return shortest distance to source per vertex + * @param[out] preds Return predecelccors of each vertex + * \return double Return accumulated elapsed times for all runs + */ +template +double lcc(const SizeT num_nodes, const SizeT num_edges, + const SizeT *row_offsets, const VertexT *col_indices, + const GValueT *edge_values, const int num_runs, VertexT *lcc_counts) { + typedef typename gunrock::app::TestGraph + GraphT; + typedef typename GraphT::CsrT CsrT; + + // Setup parameters + gunrock::util::Parameters parameters("lcc"); + gunrock::graphio::UseParameters(parameters); + gunrock::app::lcc::UseParameters(parameters); + gunrock::app::UseParameters_test(parameters); + parameters.Parse_CommandLine(0, NULL); + parameters.Set("graph-type", "by-palcc"); + parameters.Set("num-runs", num_runs); + bool quiet = parameters.Get("quiet"); + GraphT graph; + // Alccign pointers into gunrock graph format + graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST); + graph.CsrT::row_offsets.SetPointer(row_offsets, num_nodes + 1, + gunrock::util::HOST); + 
graph.CsrT::column_indices.SetPointer(col_indices, num_edges, + gunrock::util::HOST); + graph.CsrT::edge_values.SetPointer(edge_values, num_edges, + gunrock::util::HOST); + graph.FromCsr(graph.csr(), true, quiet); + gunrock::graphio::LoadGraph(parameters, graph); + + // Run the LCC + double elapsed_time = gunrock_lcc(parameters, graph, lcc_counts); + // Cleanup + graph.Release(); + + return elapsed_time; +} + +// Leave this at the end of the file +// Local Variables: +// mode:c++ +// c-file-style: "NVIDIA" +// End: diff --git a/cuda_code/learrays.cu b/cuda_code/learrays.cu new file mode 100644 index 0000000000000000000000000000000000000000..18edb46e2f116c5c274a5602e516dfd1c501cc8e --- /dev/null +++ b/cuda_code/learrays.cu @@ -0,0 +1,207 @@ +/* learrays.cu + * Ernest Yeung + * ernestyalumni@gmail.com + * demonstrate arrays, but in CUDA C/C++ + * */ +#include +#include "./common/errors.h" + +#include + +__constant__ float dev_hds[3]; + +__constant__ float3 dev_cnus[4]; + + + +void set2DerivativeParameters(const float hd_i[3] ) +{ + float unscaled_cnu[4] { 2.f / 3.f , -1.f / 12.f , + 0.f , 0.f} ; + + float3 *cnus = new float3[4]; + + for (int nu = 0 ; nu < 4; ++nu ) { + cnus[nu].x = unscaled_cnu[nu]*(1.f/hd_i[0] ); + cnus[nu].y = unscaled_cnu[nu]*(1.f/hd_i[1] ); + cnus[nu].z = unscaled_cnu[nu]*(1.f/hd_i[2] ); + } + + for (int nu = 0 ; nu < 4; ++nu ) { + std::cout<< " cnus values : nu : " << nu << " : .x " << cnus[nu].x << " : .y " << + cnus[nu].y << " : .z " << cnus[nu].z << std::endl; + } + + cudaMemcpyToSymbol( dev_cnus, cnus, sizeof(float3)*4, 0, cudaMemcpyHostToDevice) ; // offset from start is 0 + + delete[] cnus; + + + +} + +__device__ float dev_dirder2(float stencil[2][2], float c_nus[4]) { + int NU {2}; + float tempvalue {0.f}; + + for (int nu = 0; nu < NU; ++nu ) { + tempvalue += c_nus[nu]*( stencil[nu][1] - stencil[nu][0] ); + } + return tempvalue; +} + +//__global__ void testdiv(float3 dfloat3* dev_divres_in) { +__global__ void testdiv(float3* dev_divres_in) { + + // sanity check + for (int nu = 0 ; nu < 4; ++nu ) { +// std::cout << " cnus values : nu : " << nu << " : .x " << dev_cnus_in[nu].x << " : .y " << +// dev_cnus_in[nu].y << " : .z " << dev_cnus_in[nu].z << std::endl; + printf( " cnus values : nu : %d : .x %f : .y %f : .z %f \n ", nu, dev_cnus[nu].x, + dev_cnus[nu].y, dev_cnus[nu].z ); + + } + + float stencilx[2][2] { { 1.2f, 1.6f }, { 12.1f, 16.2f } }; + float stencily[2][2] { { 0.32f, 0.056f }, { 32.1f, 5.12f } }; + float stencilz[2][2] { { 3.712f, 0.036f }, { 0.5f, 26.2f } }; + + float c_nusx[4] { dev_cnus[0].x, dev_cnus[1].x, dev_cnus[2].x, dev_cnus[3].x } ; + float c_nusy[4] { dev_cnus[0].y, dev_cnus[1].y, dev_cnus[2].y, dev_cnus[3].y } ; + float c_nusz[4] { dev_cnus[0].z, dev_cnus[1].z, dev_cnus[2].z, dev_cnus[3].z } ; + +/* + std::cout << " c_nusx : " << c_nusx[0] << " " << c_nusx[1] << " " << c_nusx[2] << " " << + c_nusx[3] << std::endl; + std::cout << " c_nusy : " << c_nusy[0] << " " << c_nusy[1] << " " << c_nusy[2] << " " << + c_nusy[3] << std::endl; + std::cout << " c_nusz : " << c_nusz[0] << " " << c_nusz[1] << " " << c_nusz[2] << " " << + c_nusz[3] << std::endl; +*/ + + printf( " c_nusx : %f %f %f %f \n ", c_nusx[0], c_nusx[1], c_nusx[2], c_nusx[3] ); + printf( " c_nusy : %f %f %f %f \n ", c_nusy[0], c_nusy[1], c_nusy[2], c_nusy[3] ); + printf( " c_nusz : %f %f %f %f \n ", c_nusz[0], c_nusz[1], c_nusz[2], c_nusz[3] ); + + + float divresx { dev_dirder2( stencilx, c_nusx ) } ; + float divresy { dev_dirder2( stencily, c_nusy ) } ; + float divresz { dev_dirder2( 
stencilz, c_nusz ) } ; + +// std::cout << " divresx : " << divresx << std::endl; +// std::cout << " divresy : " << divresy << std::endl; +// std::cout << " divresz : " << divresz << std::endl; + + printf( " divresx : %f \n " , divresx ) ; + printf( " divresy : %f \n " , divresy ) ; + printf( " divresz : %f \n " , divresz ) ; + + dev_divres_in->x = divresx; + dev_divres_in->y = divresy; + dev_divres_in->z = divresz; + + +} + +__global__ void sanitycheck_assign( float3 *dev_result_in ) { + dev_result_in->x = 1.f; + dev_result_in->y = 2.f; + dev_result_in->z = 3.f; + + printf( " dev_result_in->x : %f \n " , dev_result_in->x ) ; + printf( " dev_result_in->y : %f \n " , dev_result_in->y ) ; + printf( " dev_result_in->z : %f \n " , dev_result_in->z ) ; + +} + +__global__ void sanitycheck_const() { + for (int nu = 0; nu < 4 ; ++nu ) { + printf( " dev_cnus for nu : %d : .x : %f , .y : %f .z : %f \n " , nu, dev_cnus[nu].x , + dev_cnus[nu].y , dev_cnus[nu].z ); + + + } + +} + +// sanity check const2 doesn't work can't use dev_cnus as argument from host +/* +__global__ void sanitycheck_const2(float3 dev_cnus_in[4]) { + for (int nu = 0; nu < 4 ; ++nu ) { + printf( " dev_cnus for nu : %d : .x : %f , .y : %f .z : %f \n " , nu, dev_cnus_in[nu].x , + dev_cnus_in[nu].y , dev_cnus_in[nu].z ); + + + } + +} +*/ + + +int main() { + const float hds[3] { 0.1, 0.01, 0.001 }; + + std::cout << " These are values for hds : " << hds[0] << " " << hds[1] << " " << hds[2] << std::endl; + + cudaMemcpyToSymbol( dev_hds, hds, sizeof(float)*3,0,cudaMemcpyHostToDevice) ; + + set2DerivativeParameters( hds ); + + +// cf. http://stackoverflow.com/questions/24460507/cuda-invalid-argument-when-trying-to-copy-struct-to-devices-memory-cudamemcpy +// what DID NOT work: float3* divresult did not work; Segmentation Fault, memory address wasn't found + float3 divresult; + float3* dev_divresult; + + HANDLE_ERROR( + cudaMalloc( (void**)&dev_divresult , sizeof(float3) ) ); + +// sanity check + + sanitycheck_assign<<<1,1>>>(dev_divresult) ; + + sanitycheck_const<<<1,1>>>() ; + +// sanitycheck_const2 doesn't work +// sanitycheck_const2<<<1,1>>>(dev_cnus) ; + + + testdiv<<<1,1>>>(dev_divresult); + + HANDLE_ERROR( + cudaMemcpy( &divresult, dev_divresult, sizeof(float3), cudaMemcpyDeviceToHost) ); + +// what DID NOT work; cudaMemcpy( divresult, ... 
) when float3* divresult + + + std::cout << " These are values for divresult, which was cudaMemcpy'ed from dev_divresult : .x " + << divresult.x << " : .y " << divresult.y << " : .z " << divresult.z << std::endl; + +// std::cout << divresult->x << std::endl; +// std::cout << (*divresult).x << std::endl; + + /* ==================== arithmetic for thread-block sizing ==================== */ + + int m = 100; + int p = ((int) log2(m) ); + std::cout << " p : " << p << std::endl; + std::cout << " 2^p : " << pow(2,p) << std::endl; + std::cout << " 2^(p+1) : " << pow(2,p+1) << std::endl; + + m = 200; + p = ((int) log2(m) ); + std::cout << " p : " << p << std::endl; + std::cout << " 2^p : " << pow(2,p) << std::endl; + std::cout << " 2^(p+1) : " << pow(2,p+1) << std::endl; + + m =1; + p = ((int) log2(m) ); + std::cout << " p : " << p << std::endl; + std::cout << " 2^p : " << pow(2,p) << std::endl; + std::cout << " 2^(p+1) : " << pow(2,p+1) << std::endl; + + + + HANDLE_ERROR( + cudaFree( dev_divresult) ); +} diff --git a/cuda_code/libcuarma_blas2.cu b/cuda_code/libcuarma_blas2.cu new file mode 100644 index 0000000000000000000000000000000000000000..8aeb6628b060e2be9ffc470b9340e5f0d2f7a72c --- /dev/null +++ b/cuda_code/libcuarma_blas2.cu @@ -0,0 +1,187 @@ +/* ========================================================================= + Copyright (c) 2015-2017, COE of Peking University, Shaoqiang Tang. + + ----------------- + cuarma - COE of Peking University, Shaoqiang Tang. + ----------------- + + Author Email yangxianpku@pku.edu.cn + + Code Repo https://github.com/yangxianpku/cuarma + + License: MIT (X11) License +============================================================================= */ + + +#include +#include +#include "head_define.h" +#include "cuarma.hpp" +#include "cuarma/vector.hpp" + +template +ScalarType diff(ScalarType const & s1, ScalarType const & s2) +{ + if (s1 > s2 || s1 < s2) + return (s1 - s2) / std::max(std::fabs(s1), std::fabs(s2)); + return ScalarType(0); +} + +template +ScalarType diff(std::vector const & v1, cuarmaVectorType const & arma_vec) +{ + std::vector v2_cpu(arma_vec.size()); + cuarma::backend::finish(); + cuarma::copy(arma_vec, v2_cpu); + + ScalarType inf_norm = 0; + for (unsigned int i=0;i 0 ) + v2_cpu[i] = std::fabs(v2_cpu[i] - v1[i]) / std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ); + else + v2_cpu[i] = 0.0; + + if (v2_cpu[i] > inf_norm) + inf_norm = v2_cpu[i]; + } + + return inf_norm; +} + +template +void check(T const & t, U const & u, EpsilonT eps) +{ + EpsilonT rel_error = std::fabs(static_cast(diff(t,u))); + if (rel_error > eps) + { + std::cerr << "Relative error: " << rel_error << std::endl; + std::cerr << "Aborting!" 
<< std::endl; + exit(EXIT_FAILURE); + } + std::cout << "SUCCESS "; +} + +int main() +{ + std::size_t size1 = 13; // at least 7 + std::size_t size2 = 11; // at least 7 + float eps_float = 1e-5f; + double eps_double = 1e-12; + + cuarmaBackend my_backend; + cuarmaBackendCreate(&my_backend); + + std::vector ref_float_x(size1); for (std::size_t i=0; i(i); + std::vector ref_float_y(size2); for (std::size_t i=0; i(size2 - i); + std::vector ref_float_A(size1*size2); for (std::size_t i=0; i(3*i); + std::vector ref_float_B(size1*size2); for (std::size_t i=0; i(2*i); + + std::vector ref_double_x(size1, 1.0); for (std::size_t i=0; i(i); + std::vector ref_double_y(size2, 2.0); for (std::size_t i=0; i(size2 - i); + std::vector ref_double_A(size1*size2, 3.0); for (std::size_t i=0; i(3*i); + std::vector ref_double_B(size1*size2, 4.0); for (std::size_t i=0; i(2*i); + + // Host setup + cuarma::vector host_float_x = cuarma::scalar_vector(size1, 1.0f, cuarma::context(cuarma::MAIN_MEMORY)); for (std::size_t i=0; i host_float_y = cuarma::scalar_vector(size2, 2.0f, cuarma::context(cuarma::MAIN_MEMORY)); for (std::size_t i=0; i host_float_A = cuarma::scalar_vector(size1*size2, 3.0f, cuarma::context(cuarma::MAIN_MEMORY)); for (std::size_t i=0; i host_float_B = cuarma::scalar_vector(size1*size2, 4.0f, cuarma::context(cuarma::MAIN_MEMORY)); for (std::size_t i=0; i host_double_x = cuarma::scalar_vector(size1, 1.0, cuarma::context(cuarma::MAIN_MEMORY)); for (std::size_t i=0; i host_double_y = cuarma::scalar_vector(size2, 2.0, cuarma::context(cuarma::MAIN_MEMORY)); for (std::size_t i=0; i host_double_A = cuarma::scalar_vector(size1*size2, 3.0, cuarma::context(cuarma::MAIN_MEMORY)); for (std::size_t i=0; i host_double_B = cuarma::scalar_vector(size1*size2, 4.0, cuarma::context(cuarma::MAIN_MEMORY)); for (std::size_t i=0; i cuda_float_x = cuarma::scalar_vector(size1, 1.0f, cuarma::context(cuarma::CUDA_MEMORY)); for (std::size_t i=0; i cuda_float_y = cuarma::scalar_vector(size2, 2.0f, cuarma::context(cuarma::CUDA_MEMORY)); for (std::size_t i=0; i cuda_float_A = cuarma::scalar_vector(size1*size2, 3.0f, cuarma::context(cuarma::CUDA_MEMORY)); for (std::size_t i=0; i cuda_float_B = cuarma::scalar_vector(size1*size2, 4.0f, cuarma::context(cuarma::CUDA_MEMORY)); for (std::size_t i=0; i cuda_double_x = cuarma::scalar_vector(size1, 1.0, cuarma::context(cuarma::CUDA_MEMORY)); for (std::size_t i=0; i cuda_double_y = cuarma::scalar_vector(size2, 2.0, cuarma::context(cuarma::CUDA_MEMORY)); for (std::size_t i=0; i cuda_double_A = cuarma::scalar_vector(size1*size2, 3.0, cuarma::context(cuarma::CUDA_MEMORY)); for (std::size_t i=0; i cuda_double_B = cuarma::scalar_vector(size1*size2, 4.0, cuarma::context(cuarma::CUDA_MEMORY)); for (std::size_t i=0; i(host_float_A), 2, 1, 2, 3, cuarmaInt(size2), + cuarma::blas::host_based::detail::extract_raw_pointer(host_float_y), 1, 3, + 0.1234f, + cuarma::blas::host_based::detail::extract_raw_pointer(host_float_x), 1, 2); + check(ref_float_x, host_float_x, eps_float); + cuarmaHostDgemv(my_backend, + cuarmaRowMajor, cuarmaNoTrans, + cuarmaInt(size1/3), cuarmaInt(size2/4), 3.1415, cuarma::blas::host_based::detail::extract_raw_pointer(host_double_A), 2, 1, 2, 3, cuarmaInt(size2), + cuarma::blas::host_based::detail::extract_raw_pointer(host_double_y), 1, 3, + 0.1234, + cuarma::blas::host_based::detail::extract_raw_pointer(host_double_x), 1, 2); + check(ref_double_x, host_double_x, eps_double); + + +#ifdef CUARMA_WITH_CUDA + std::cout << std::endl << "CUDA: "; + cuarmaCUDASgemv(my_backend, + cuarmaRowMajor, 
cuarmaNoTrans, + cuarmaInt(size1/3), cuarmaInt(size2/4), 3.1415f, cuarma::cuda_arg(cuda_float_A), 2, 1, 2, 3, size2, + cuarma::cuda_arg(cuda_float_y), 1, 3, + 0.1234f, + cuarma::cuda_arg(cuda_float_x), 1, 2); + check(ref_float_x, cuda_float_x, eps_float); + cuarmaCUDADgemv(my_backend, + cuarmaRowMajor, cuarmaNoTrans, + cuarmaInt(size1/3), cuarmaInt(size2/4), 3.1415, cuarma::cuda_arg(cuda_double_A), 2, 1, 2, 3, size2, + cuarma::cuda_arg(cuda_double_y), 1, 3, + 0.1234, + cuarma::cuda_arg(cuda_double_x), 1, 2); + check(ref_double_x, cuda_double_x, eps_double); +#endif + + cuarmaBackendDestroy(&my_backend); + + // + // That's it. + // + std::cout << std::endl << "!!!! TEST COMPLETED SUCCESSFULLY !!!!" << std::endl; + + return EXIT_SUCCESS; +} + diff --git a/cuda_code/libcublas.cu b/cuda_code/libcublas.cu new file mode 100644 index 0000000000000000000000000000000000000000..c02a90cf77d5ffd7bc197ff5587dbc4d691f6dee --- /dev/null +++ b/cuda_code/libcublas.cu @@ -0,0 +1,86 @@ + +// ================================================================================================= +// Project: +// Exploring the performance of general matrix-multiplication on an NVIDIA Tesla K40m GPU. +// +// File information: +// Institution.... SURFsara +// Author......... Cedric Nugteren +// Changed at..... 2014-10-30 +// License........ MIT license +// Tab-size....... 4 spaces +// Line length.... 100 characters +// +// ================================================================================================= + +// Common include +#include "common.h" + +// Include CUDA and cuBLAS (API v2) +#include + +// ================================================================================================= + +// Matrix-multiplication using the cuBLAS library. This function copies the input matrices to the +// GPU, runs SGEMM, and copies the output matrix back to the CPU. 
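+//
+// Illustrative sketch (not part of the original benchmark): cuBLAS follows the column-major
+// BLAS convention, so a row-major product C = alpha*A*B + beta*C (A is MxK, B is KxN) is
+// commonly expressed as the column-major product C^T = B^T * A^T by swapping the operands.
+// The helper below is only a sketch; it assumes an already-created cublasHandle_t and device
+// buffers, and its name and parameters are placeholders rather than part of this file's
+// original interface.
+static void sgemmRowMajorSketch(cublasHandle_t handle,
+                                const float* devA, const float* devB, float* devC,
+                                int M, int N, int K, float alpha, float beta) {
+    // Column-major view of the same memory: C^T (NxM) = B^T (NxK) * A^T (KxM)
+    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
+                N, M, K,
+                &alpha,
+                devB, N,    // leading dimension of B^T
+                devA, K,    // leading dimension of A^T
+                &beta,
+                devC, N);   // leading dimension of C^T
+}
+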
+void libcublas(float* A, float* B, float* C, + int K, int M, int N, + int timerID) { + + // cuBLAS configuration + cublasStatus_t status; + cublasHandle_t handle; + status = cublasCreate(&handle); + + // Prepare CUDA memory objects + float* bufA = 0; + float* bufB = 0; + float* bufC = 0; + cudaMalloc((void**)&bufA, M*K*sizeof(*A)); + cudaMalloc((void**)&bufB, K*N*sizeof(*B)); + cudaMalloc((void**)&bufC, M*N*sizeof(*C)); + + // Copy matrices to the GPU (also C to erase the results of the previous run) + cudaMemcpy((void*)bufA, (void*)A, M*K*sizeof(*A), cudaMemcpyHostToDevice); + cudaMemcpy((void*)bufB, (void*)B, K*N*sizeof(*B), cudaMemcpyHostToDevice); + cudaMemcpy((void*)bufC, (void*)C, M*N*sizeof(*C), cudaMemcpyHostToDevice); + + // Configure SGEMM + float alpha = ALPHA; + float beta = BETA; + + // Start the timed loop + double startTime = timer(); + for (int r=0; r +#include +#include +#include +#include + +// includes, library +#include "cudpp/cudpp.h" + +// includes, project +#include "cutil_inline.h" +#include "cutil_math.h" + +//////////////////////////////////////////////////////////////////////////////// +// declaration, types + +// Boolean +typedef unsigned char Bool; +enum { + False = 0, + True = 1 +}; + +// 2D height field +struct HeightField { + int width; + float* height; +}; + +// Ray +struct Ray { + float3 origin; + float2 dir; + int length; + float oneOverLength; +}; + +//////////////////////////////////////////////////////////////////////////////// +// declaration, variables + +// Height field texture reference +texture g_HeightFieldTex; + +//////////////////////////////////////////////////////////////////////////////// +// declaration, forward +void runTest( int argc, char** argv); +__global__ void computeAngles_kernel(const Ray, float*); +__global__ void computeVisibilities_kernel(const float*, const float*, int, Bool*); +void lineOfSight_gold(const HeightField, const Ray, Bool*); +__device__ __host__ float2 getLocation(const Ray, int); +__device__ __host__ float getAngle(const Ray, float2, float); + +//////////////////////////////////////////////////////////////////////////////// +// Program main +//////////////////////////////////////////////////////////////////////////////// +int +main( int argc, char** argv) +{ + runTest( argc, argv); + cutilExit(argc, argv); +} + +//////////////////////////////////////////////////////////////////////////////// +//! 
Run a line-of-sight test for CUDA +//////////////////////////////////////////////////////////////////////////////// +void runTest(int argc, char** argv) +{ + //////////////////////////////////////////////////////////////////////////// + // Device initialization + + // use command-line specified CUDA device, otherwise use device with highest Gflops/s + if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) + cutilDeviceInit(argc, argv); + else + cudaSetDevice( cutGetMaxGflopsDeviceId() ); + + //////////////////////////////////////////////////////////////////////////// + // Timer + + // Create + uint timer; + cutilCheckError(cutCreateTimer(&timer)); + + // Number of iterations to get accurate timing +#ifdef __DEVICE_EMULATION__ + uint numIterations = 1; +#else + uint numIterations = 100; +#endif + + //////////////////////////////////////////////////////////////////////////// + // Height field + + HeightField heightField; + + // Allocate in host memory + int2 dim = make_int2(10000, 100); + heightField.width = dim.x; + int heightFieldSize = dim.x * dim.y * sizeof(float); + cutilSafeMalloc(heightField.height = (float*)malloc(heightFieldSize)); + + // Fill in with an arbitrary sine surface + for (int x = 0; x < dim.x; ++x) + for (int y = 0; y < dim.y; ++y) { + float amp = 0.1f * (x + y); + float period = 2.0f + amp; + *(heightField.height + dim.x * y + x) = + amp * (sinf(sqrtf((float)(x * x + y * y)) * 2.0f * 3.1416f / period) + 1.0f); + } + + // Allocate CUDA array in device memory + cudaChannelFormatDesc channelDesc = + cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); + cudaArray* heightFieldArray; + cutilSafeCall(cudaMallocArray(&heightFieldArray, &channelDesc, dim.x, dim.y)); + + // Initialize device memory + cutilSafeCall(cudaMemcpyToArray(heightFieldArray, 0, 0, heightField.height, + heightFieldSize, cudaMemcpyHostToDevice)); + + // Set texture parameters + g_HeightFieldTex.addressMode[0] = cudaAddressModeClamp; + g_HeightFieldTex.addressMode[1] = cudaAddressModeClamp; + g_HeightFieldTex.filterMode = cudaFilterModePoint; + g_HeightFieldTex.normalized = 0; + + // Bind CUDA array to texture reference + cutilSafeCall(cudaBindTextureToArray(g_HeightFieldTex, heightFieldArray, + channelDesc)); + + //////////////////////////////////////////////////////////////////////////// + // Ray (starts at origin and traverses the height field diagonally) + + Ray ray; + ray.origin = make_float3(0, 0, 2.0f); + int2 dir = make_int2(dim.x - 1, dim.y - 1); + ray.dir = make_float2((float)dir.x, (float)dir.y); + ray.length = max(abs(dir.x), abs(dir.y)); + ray.oneOverLength = 1.0f / ray.length; + + //////////////////////////////////////////////////////////////////////////// + // View angles + + // Allocate view angles for each point along the ray + float* d_angles; + int raySize = ray.length * sizeof(float); + cutilSafeCall(cudaMalloc((void**)&d_angles, raySize)); + + // Allocate result of max-scan operation on the array of view angles + float* d_scannedAngles; + cutilSafeCall(cudaMalloc((void**)&d_scannedAngles, raySize)); + + //////////////////////////////////////////////////////////////////////////// + // Visibility results + + // Allocate visibility results for each point along the ray + Bool* d_visibilities; + cutilSafeCall(cudaMalloc((void**)&d_visibilities, raySize)); + Bool* h_visibilities; + cutilSafeMalloc(h_visibilities = (Bool*)malloc(raySize)); + Bool* h_visibilitiesRef; + cutilSafeMalloc(h_visibilitiesRef = (Bool*)malloc(raySize)); + + 
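+    // Outline of the computation that follows (descriptive note, mirroring the code below):
+    //   1. computeAngles_kernel       - view angle from the observer to every sample on the ray
+    //   2. cudppScan with CUDPP_MAX   - inclusive running maximum of those view angles
+    //   3. computeVisibilities_kernel - sample i is visible iff its own angle is not smaller
+    //                                   than the inclusive running maximum up to and including i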
//////////////////////////////////////////////////////////////////////////// + // Reference solution + lineOfSight_gold(heightField, ray, h_visibilitiesRef); + + //////////////////////////////////////////////////////////////////////////// + // Device solution + + // Execution configuration + dim3 block(256); + dim3 grid((uint)ceil(ray.length / (double)block.x)); + + // Scan configuration + CUDPPHandle scanPlan; + + CUDPPConfiguration config; + config.algorithm = CUDPP_SCAN; + config.op = CUDPP_MAX; + config.datatype = CUDPP_FLOAT; + config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; + cudppPlan(&scanPlan, config, ray.length, 1, 0); + + // Compute device solution + printf("Line of sight\n"); + cutStartTimer(timer); + for (uint i = 0; i < numIterations; ++i) { + + // Compute view angle for each point along the ray + computeAngles_kernel<<>>(ray, d_angles); + cutilCheckMsg("Kernel execution failed"); + + // Perform a max-scan operation on the array of view angles + cudppScan(scanPlan, d_scannedAngles, d_angles, ray.length); + cutilCheckMsg("Kernel execution failed"); + + // Compute visibility results based on the array of view angles + // and its scanned version + computeVisibilities_kernel<<>>(d_angles, d_scannedAngles, + ray.length, d_visibilities); + cutilCheckMsg("Kernel execution failed"); + } + cudaThreadSynchronize(); + cutStopTimer(timer); + cudppDestroyPlan(scanPlan); + cutilCheckMsg("Kernel execution failed"); + + // Copy visibility results back to the host + cutilSafeCall(cudaMemcpy(h_visibilities, d_visibilities, raySize, + cudaMemcpyDeviceToHost)); + + // Compare device visibility results against reference results + CUTBoolean res = cutCompareub(h_visibilitiesRef, h_visibilities, ray.length); + printf("%s \n", (1 == res) ? "PASSED" : "FAILED"); + printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations); + cutResetTimer(timer); + + // Cleanup memory + free(heightField.height); + free(h_visibilities); + free(h_visibilitiesRef); + cutilSafeCall(cudaFree(d_angles)); + cutilSafeCall(cudaFree(d_scannedAngles)); + cutilSafeCall(cudaFree(d_visibilities)); + cutilSafeCall(cudaFreeArray(heightFieldArray)); + + cudaThreadExit(); +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute view angles for each point along the ray +//! @param ray ray +//! @param angles view angles +//////////////////////////////////////////////////////////////////////////////// +__global__ void computeAngles_kernel(const Ray ray, float* angles) +{ + uint i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < ray.length) { + float2 location = getLocation(ray, i + 1); + float height = tex2D(g_HeightFieldTex, location.x, location.y); + float angle = getAngle(ray, location, height); + angles[i] = angle; + } +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute visibility for each point along the ray +//! @param angles view angles +//! @param scannedAngles max-scanned view angles +//! @param numAngles number of view angles +//! @param visibilities boolean array indicating the visibility of each point +//! 
along the ray +//////////////////////////////////////////////////////////////////////////////// +__global__ void computeVisibilities_kernel(const float* angles, + const float* scannedAngles, + int numAngles, + Bool* visibilities) +{ + uint i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < numAngles) + visibilities[i] = scannedAngles[i] <= angles[i]; +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute reference data set +//! @param heightField height field +//! @param ray ray +//! @param visibilities boolean array indicating the visibility of each point +//! along the ray +//////////////////////////////////////////////////////////////////////////////// +void lineOfSight_gold(const HeightField heightField, const Ray ray, + Bool* visibilities) +{ + float angleMax = asinf(-1.0f); + for (int i = 0; i < ray.length; ++i) { + float2 location = getLocation(ray, i + 1); + float height = *(heightField.height + + heightField.width * (int)floorf(location.y) + + (int)floorf(location.x)); + float angle = getAngle(ray, location, height); + if (angle > angleMax) { + angleMax = angle; + visibilities[i] = True; + } + else + visibilities[i] = False; + } +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute the 2D coordinates of the point located at i steps from the origin +//! of the ray +//! @param ray ray +//! @param i integer offset along the ray +//////////////////////////////////////////////////////////////////////////////// +__device__ __host__ float2 getLocation(const Ray ray, int i) +{ + float step = i * ray.oneOverLength; + return make_float2(ray.origin.x, ray.origin.y) + step * ray.dir; +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute the angle of view between a 3D point and the origin of the ray +//! @param ray ray +//! @param location 2D coordinates of the input point +//! @param height height of the input point +//////////////////////////////////////////////////////////////////////////////// +__device__ __host__ float getAngle(const Ray ray, float2 location, float height) +{ + float2 dir = location - make_float2(ray.origin.x, ray.origin.y); + return atanf((height - ray.origin.z) / length(dir)); +} diff --git a/cuda_code/lineOfSight_6.cu b/cuda_code/lineOfSight_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..743be6fbb4f394df1be1c003a6c22ba279ce5d49 --- /dev/null +++ b/cuda_code/lineOfSight_6.cu @@ -0,0 +1,341 @@ +/* + * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. + * + * Please refer to the NVIDIA end user license agreement (EULA) associated + * with this source code for terms and conditions that govern your use of + * this software. Any use, reproduction, disclosure, or distribution of + * this software and related documentation outside the terms of the EULA + * is strictly prohibited. + * + */ + +// This sample is an implementation of a simple line-of-sight algorithm: +// Given a height map and a ray originating at some observation point, +// it computes all the points along the ray that are visible from the +// observation point. +// It is based on the description made in "Guy E. Blelloch. Vector models +// for data-parallel computing. 
MIT Press, 1990" and uses open source CUDA +// Thrust Library + +#ifdef _WIN32 +# define NOMINMAX +#endif + +// includes, system +#include +#include +#include +#include +#include + +// includes, project +#include +#include +#include + +// includes, library +#include +#include +#include +#include + +//////////////////////////////////////////////////////////////////////////////// +// declaration, types + +// Boolean +typedef unsigned char Bool; +enum +{ + False = 0, + True = 1 +}; + +// 2D height field +struct HeightField +{ + int width; + float *height; +}; + +// Ray +struct Ray +{ + float3 origin; + float2 dir; + int length; + float oneOverLength; +}; + +//////////////////////////////////////////////////////////////////////////////// +// declaration, variables + +// Height field texture reference +texture g_HeightFieldTex; + +//////////////////////////////////////////////////////////////////////////////// +// declaration, forward +int runTest(int argc, char **argv); +__global__ void computeAngles_kernel(const Ray, float *); +__global__ void computeVisibilities_kernel(const float *, const float *, int, Bool *); +void lineOfSight_gold(const HeightField, const Ray, Bool *); +__device__ __host__ float2 getLocation(const Ray, int); +__device__ __host__ float getAngle(const Ray, float2, float); + +//////////////////////////////////////////////////////////////////////////////// +// Program main +//////////////////////////////////////////////////////////////////////////////// +int +main(int argc, char **argv) +{ + int res = runTest(argc, argv); + + if (res != 1) + { + printf("Test failed!\n"); + exit(EXIT_FAILURE); + } + + printf("Test passed\n"); + exit(EXIT_SUCCESS); + +} + +//////////////////////////////////////////////////////////////////////////////// +//! 
Run a line-of-sight test for CUDA +//////////////////////////////////////////////////////////////////////////////// +int runTest(int argc, char **argv) +{ + //////////////////////////////////////////////////////////////////////////// + // Device initialization + + printf("[%s] - Starting...\n", argv[0]); + + // use command-line specified CUDA device, otherwise use device with highest Gflops/s + findCudaDevice(argc, (const char **)argv); + + //////////////////////////////////////////////////////////////////////////// + // Timer + + // Create + StopWatchInterface *timer; + sdkCreateTimer(&timer); + + // Number of iterations to get accurate timing + uint numIterations = 100; + + //////////////////////////////////////////////////////////////////////////// + // Height field + + HeightField heightField; + + // Allocate in host memory + int2 dim = make_int2(10000, 100); + heightField.width = dim.x; + thrust::host_vector height(dim.x * dim.y); + heightField.height = (float *)&height[0]; + + // + // Fill in with an arbitrary sine surface + for (int x = 0; x < dim.x; ++x) + for (int y = 0; y < dim.y; ++y) + { + float amp = 0.1f * (x + y); + float period = 2.0f + amp; + *(heightField.height + dim.x * y + x) = + amp * (sinf(sqrtf((float)(x * x + y * y)) * 2.0f * 3.1416f / period) + 1.0f); + } + + // Allocate CUDA array in device memory + cudaChannelFormatDesc channelDesc = + cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); + cudaArray *heightFieldArray; + checkCudaErrors(cudaMallocArray(&heightFieldArray, &channelDesc, dim.x, dim.y)); + + // Initialize device memory + checkCudaErrors(cudaMemcpyToArray(heightFieldArray, 0, 0, heightField.height, + dim.x * dim.y * sizeof(float), cudaMemcpyHostToDevice)); + + // Set texture parameters + g_HeightFieldTex.addressMode[0] = cudaAddressModeClamp; + g_HeightFieldTex.addressMode[1] = cudaAddressModeClamp; + g_HeightFieldTex.filterMode = cudaFilterModePoint; + g_HeightFieldTex.normalized = 0; + + // Bind CUDA array to texture reference + checkCudaErrors(cudaBindTextureToArray(g_HeightFieldTex, heightFieldArray, + channelDesc)); + + //////////////////////////////////////////////////////////////////////////// + // Ray (starts at origin and traverses the height field diagonally) + + Ray ray; + ray.origin = make_float3(0, 0, 2.0f); + int2 dir = make_int2(dim.x - 1, dim.y - 1); + ray.dir = make_float2((float)dir.x, (float)dir.y); + ray.length = max(abs(dir.x), abs(dir.y)); + ray.oneOverLength = 1.0f / ray.length; + + //////////////////////////////////////////////////////////////////////////// + // View angles + + // Allocate view angles for each point along the ray + thrust::device_vector d_angles(ray.length); + + // Allocate result of max-scan operation on the array of view angles + thrust::device_vector d_scannedAngles(ray.length); + + //////////////////////////////////////////////////////////////////////////// + // Visibility results + + // Allocate visibility results for each point along the ray + thrust::device_vector d_visibilities(ray.length); + thrust::host_vector h_visibilities(ray.length); + thrust::host_vector h_visibilitiesRef(ray.length); + + //////////////////////////////////////////////////////////////////////////// + // Reference solution + lineOfSight_gold(heightField, ray, (Bool *)&h_visibilitiesRef[0]); + + //////////////////////////////////////////////////////////////////////////// + // Device solution + + // Execution configuration + dim3 block(256); + dim3 grid((uint)ceil(ray.length / (double)block.x)); + + // Compute device 
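// ---------------------------------------------------------------------------
// Side note (illustrative sketch, not part of the sample above): the height
// field is bound through the legacy texture-reference API (g_HeightFieldTex +
// cudaBindTextureToArray). On toolkits where that API has been removed, an
// assumed equivalent setup with a texture object, keeping the same clamp
// addressing and point filtering, would look roughly like this:
cudaTextureObject_t makeHeightFieldTexture(cudaArray_t heightFieldArray)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = heightFieldArray;

    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode = cudaFilterModePoint;
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;

    cudaTextureObject_t texObj = 0;
    checkCudaErrors(cudaCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr));
    return texObj;  // kernels would then sample with tex2D<float>(texObj, x, y)
}
// ---------------------------------------------------------------------------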
solution + printf("Line of sight\n"); + sdkStartTimer(&timer); + + for (uint i = 0; i < numIterations; ++i) + { + + // Compute view angle for each point along the ray + computeAngles_kernel<<>>(ray, thrust::raw_pointer_cast(&d_angles[0])); + getLastCudaError("Kernel execution failed"); + + // Perform a max-scan operation on the array of view angles + thrust::inclusive_scan(d_angles.begin(), d_angles.end(), d_scannedAngles.begin(), thrust::maximum()); + getLastCudaError("Kernel execution failed"); + + // Compute visibility results based on the array of view angles + // and its scanned version + computeVisibilities_kernel<<>>(thrust::raw_pointer_cast(&d_angles[0]), + thrust::raw_pointer_cast(&d_scannedAngles[0]), + ray.length, + thrust::raw_pointer_cast(&d_visibilities[0])); + getLastCudaError("Kernel execution failed"); + } + + cudaDeviceSynchronize(); + sdkStopTimer(&timer); + getLastCudaError("Kernel execution failed"); + + // Copy visibility results back to the host + thrust::copy(d_visibilities.begin(), d_visibilities.end(), h_visibilities.begin()); + + // Compare device visibility results against reference results + bool res = compareData(thrust::raw_pointer_cast(&h_visibilitiesRef[0]), + thrust::raw_pointer_cast(&h_visibilities[0]), ray.length, 0.0f, 0.0f); + printf("Average time: %f ms\n\n", sdkGetTimerValue(&timer) / numIterations); + sdkResetTimer(&timer); + + // Cleanup memory + checkCudaErrors(cudaFreeArray(heightFieldArray)); + return res; +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute view angles for each point along the ray +//! @param ray ray +//! @param angles view angles +//////////////////////////////////////////////////////////////////////////////// +__global__ void computeAngles_kernel(const Ray ray, float *angles) +{ + uint i = blockDim.x * blockIdx.x + threadIdx.x; + + if (i < ray.length) + { + float2 location = getLocation(ray, i + 1); + float height = tex2D(g_HeightFieldTex, location.x, location.y); + float angle = getAngle(ray, location, height); + angles[i] = angle; + } +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute visibility for each point along the ray +//! @param angles view angles +//! @param scannedAngles max-scanned view angles +//! @param numAngles number of view angles +//! @param visibilities boolean array indicating the visibility of each point +//! along the ray +//////////////////////////////////////////////////////////////////////////////// +__global__ void computeVisibilities_kernel(const float *angles, + const float *scannedAngles, + int numAngles, + Bool *visibilities) +{ + uint i = blockDim.x * blockIdx.x + threadIdx.x; + + if (i < numAngles) + { + visibilities[i] = scannedAngles[i] <= angles[i]; + } +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute reference data set +//! @param heightField height field +//! @param ray ray +//! @param visibilities boolean array indicating the visibility of each point +//! 
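// ---------------------------------------------------------------------------
// Illustrative host sketch (not part of the sample) that makes the max-scan
// trick explicit: after an inclusive max-scan, scannedAngles[i] >= angles[i]
// always holds, and equality means point i raises (or at least ties) the
// running maximum of view angles, i.e. no earlier point along the ray hides it.
#include <algorithm>
#include <vector>

std::vector<unsigned char> lineOfSightHost(const std::vector<float> &angles)
{
    std::vector<unsigned char> visible(angles.size());
    float runningMax = -3.402823e38f;  // plays the role of the asinf(-1.0f) seed
    for (size_t i = 0; i < angles.size(); ++i)
    {
        runningMax = std::max(runningMax, angles[i]);    // inclusive max-scan step
        visible[i] = (runningMax <= angles[i]) ? 1 : 0;  // same test as the kernel
    }
    return visible;
}
// ---------------------------------------------------------------------------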
along the ray +//////////////////////////////////////////////////////////////////////////////// +void lineOfSight_gold(const HeightField heightField, const Ray ray, + Bool *visibilities) +{ + float angleMax = asinf(-1.0f); + + for (int i = 0; i < ray.length; ++i) + { + float2 location = getLocation(ray, i + 1); + float height = *(heightField.height + + heightField.width * (int)floorf(location.y) + + (int)floorf(location.x)); + float angle = getAngle(ray, location, height); + + if (angle > angleMax) + { + angleMax = angle; + visibilities[i] = True; + } + else + { + visibilities[i] = False; + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute the 2D coordinates of the point located at i steps from the origin +//! of the ray +//! @param ray ray +//! @param i integer offset along the ray +//////////////////////////////////////////////////////////////////////////////// +__device__ __host__ float2 getLocation(const Ray ray, int i) +{ + float step = i * ray.oneOverLength; + return make_float2(ray.origin.x, ray.origin.y) + ray.dir * step; +} + +//////////////////////////////////////////////////////////////////////////////// +//! Compute the angle of view between a 3D point and the origin of the ray +//! @param ray ray +//! @param location 2D coordinates of the input point +//! @param height height of the input point +//////////////////////////////////////////////////////////////////////////////// +__device__ __host__ float getAngle(const Ray ray, float2 location, float height) +{ + float2 dir = location - make_float2(ray.origin.x, ray.origin.y); + return atanf((height - ray.origin.z) / length(dir)); +} diff --git a/cuda_code/linear_solver_4.cu b/cuda_code/linear_solver_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..f8ba56e7e17ca132b2694020aba65c9b263647b6 --- /dev/null +++ b/cuda_code/linear_solver_4.cu @@ -0,0 +1,561 @@ +#include "split_pairwise.cuh" +#include + +#include +#include +#include + + +namespace NKernel { + + + //System size <= ROW_SIZE — number of rows for decompose, + // in pfound and pair classification we don't need last line + template + __launch_bounds__(BLOCK_SIZE) + __global__ void ExtractMatricesAndTargetsImpl(const float* linearSystem, + const int matCount, + const int rowSize, + float* matrices, + float* targets, + float* matrixDiag + ) { + const int lineSize = 32; + const int matricesPerBlock = BLOCK_SIZE / lineSize; + const int localMatrixIdx = threadIdx.x / lineSize; + + int matrixIdx = blockIdx.x * matricesPerBlock + localMatrixIdx; + + if (matrixIdx >= matCount) { + return; + } + + + linearSystem += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2 + rowSize); + matrices += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2); + targets += ((size_t)matrixIdx) * rowSize; + matrixDiag += ((size_t)matrixIdx) * rowSize; + + const int x = threadIdx.x & (lineSize - 1); + + #pragma unroll 8 + for (int i = x; i < rowSize * (rowSize + 1) / 2; i += lineSize) { + matrices[i] = linearSystem[i]; + } + + #pragma unroll 8 + for (int i = x; i < rowSize; i += lineSize) { + targets[i] = linearSystem[rowSize * (rowSize + 1) / 2 + i]; + } + + #pragma unroll 8 + for (int i = x; i < rowSize; i += lineSize) { + matrixDiag[i] = linearSystem[i * (i + 1) / 2 + i]; + } + } + + void ExtractMatricesAndTargets(const float* linearSystem, int matCount, int rowSize, float* matrices, float* targets, float* matrixDiag, TCudaStream stream) { + const int blockSize = 256; + const int numBlocks = (matCount * 32 + blockSize - 1) 
/ blockSize; + if (numBlocks > 0) { + ExtractMatricesAndTargetsImpl << < numBlocks, blockSize, 0, stream >> > (linearSystem, matCount, rowSize, matrices, targets, matrixDiag); + } + } + + + //System size <= ROW_SIZE — number of rows for decompose, + // in pfound and pair classification we don't need last line + template + __launch_bounds__(BlockSize) + __global__ void CholeskyDecompositionImpl(float* lower, int matCount) { + + const int logicalWarpSize = (RowSize < 32 ? RowSize : 32); + const int matricesPerBlock = BlockSize / logicalWarpSize; + const int localMatrixIdx = threadIdx.x / logicalWarpSize; + + const int N = RowSize / logicalWarpSize; + + int matrixIdx = blockIdx.x * matricesPerBlock + localMatrixIdx; + + if (matrixIdx >= matCount) + return; + + lower += ((size_t)matrixIdx) * (RowSize * (RowSize + 1) / 2); + + const int x = threadIdx.x & (logicalWarpSize - 1); + + float currentLine[N]; + + + __shared__ float LjjData[matricesPerBlock]; + volatile float* Ljj = &LjjData[localMatrixIdx]; + + if (x == 0) { + const float l00 = __ldg(lower); + lower[0] = sqrtf(l00); + } + __syncwarp(); + + // #pragma unroll + for (int row = 1; row < SystemSize; ++row) { + //we don't modify this value in matrix, so it's pretty safe to load it with ldg. + #pragma unroll + for (int k = 0; k < N; ++k) { + const int col = x + 32 * k; + currentLine[k] = col <= row ? LdgWithFallback(lower, row * (row + 1) / 2 + col) : 0.0f; + } + + __syncwarp(); + + int reduceSize = 1; + #pragma unroll + for (int col = 0; col < row; ++col) { + + if (col & reduceSize) { + reduceSize <<= 1; + } + + float tmp = 0.0f; + { + #pragma unroll + for (int k = 0; k < N; ++k) { + const int colIdx = x + k * 32; + if (colIdx <= col) { + const float val = lower[col * (col + 1) / 2 + colIdx]; + tmp += colIdx < col ? val * currentLine[k] : 0; + if (colIdx == col) { + Ljj[0] = val; + } + } + } + } + + float sum = ShuffleReduce(x, tmp, min(reduceSize, 32)); + sum = __shfl_sync(0xFFFFFF, sum, 0, logicalWarpSize); + + + const float ljj = Ljj[0]; + + #pragma unroll + for (int k = 0; k < N; ++k) { + const int colIdx = x + 32 * k; + if (colIdx == col) { + currentLine[k] = ljj > 0 ? (currentLine[k] - sum) / (ljj + 1e-7f) : 0.0f; + } + } + __syncwarp(); + } + + { + float tmp = 0; + #pragma unroll + for (int k = 0; k < N; ++k) { + const int col = x + 32 * k; + if (col < row) { + tmp += currentLine[k] * currentLine[k]; + } + } + + float sum = ShuffleReduce(x, tmp, min(reduceSize, 32)); + sum = __shfl_sync(0xFFFFFF, sum, 0, logicalWarpSize); + + __syncwarp(); + + #pragma unroll + for (int k = 0; k < N; ++k) { + const int rowIdx = x + 32 * k; + if (rowIdx == row) { + const float tmp2 = currentLine[k] - sum; + currentLine[k] = tmp2 > 1e-8f ? 
sqrtf(tmp2) : 1e-4f; + } + } + __syncwarp(); + } + + + #pragma unroll + for (int k = 0; k < N; ++k) { + const int colIdx = x + 32 * k; + if (colIdx <= row) { + WriteThrough(lower + row * (row + 1) / 2 + colIdx, currentLine[k]); + } + } + __syncwarp(); + } + } + + class TDirectSystem { + private: + const float* Data; + float* Target; + public: + + __device__ TDirectSystem(const float* data, float* target, int rowSize) + : Data(data) + , Target(target) + { + (void)rowSize; + } + + __forceinline__ __device__ float Get(int row, int col) const { + return LdgWithFallback(Data, row * (row + 1) / 2 + col); + } + + __forceinline__ __device__ float GetTarget(int row) const { + return LdgWithFallback(Target, row); + } + + __forceinline__ __device__ void WriteSolution(int row, float solution) const { + WriteThrough(Target + row, solution); + } + + }; + + + class TTransposedSystem { + private: + const float* Data; + float* Target; + int RowSize; + public: + + __device__ TTransposedSystem(const float* data, float* target, int rowSize) + : Data(data) + , Target(target) + , RowSize(rowSize) { + } + + __forceinline__ __device__ float Get(int row, int col) const { + row = RowSize - row - 1; + col = RowSize - col - 1; + return LdgWithFallback(Data, col * (col + 1) / 2 + row); + } + + __forceinline__ __device__ float GetTarget(int row) const { + return LdgWithFallback(Target, RowSize - row - 1); + } + + __forceinline__ __device__ void WriteSolution(int row, float solution) const { + WriteThrough(Target + RowSize - row - 1, solution); + } + }; + + + template + __global__ void SolveForwardImpl(const float* lower, int rowSize, int systemSize, int matCount, float* targets) { + const int matricesPerBlock = BlockSize / rowSize; + + int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize; + const int col = threadIdx.x & (rowSize - 1); + const int inBlockOffset = threadIdx.x / rowSize; + + __shared__ float solutionsData[BlockSize]; + __shared__ float dotProductCacheData[BlockSize]; + + if (matrixIdx >= matCount) { + return; + } + + lower += ((size_t)matrixIdx) * rowSize * (rowSize + 1) / 2; + targets += matrixIdx * rowSize; + + float* solutions = &solutionsData[inBlockOffset * rowSize]; + float* dotProductCache = &dotProductCacheData[inBlockOffset * rowSize]; + + TLowerMatrixSystem system(lower, targets, systemSize); + solutions[col] = col < systemSize ? system.GetTarget(col) : 0; + __syncthreads(); + + + int reduceSize = 1; + + #pragma unroll + for (int row = 0; row < systemSize; ++row) { + + if (row & reduceSize) { + reduceSize <<= 1; + } + + dotProductCache[col] = col <= row ? system.Get(row, col) : 0.0f; + __syncthreads(); + + float lastCoeff = 0.0f; + + if (col == 0) { + lastCoeff = dotProductCache[row]; + dotProductCache[row] = 0; + } + __syncthreads(); + + dotProductCache[col] *= solutions[col]; + __syncthreads(); + + const float sum = FastInBlockReduce(col, dotProductCache, reduceSize); + + if (col == 0) { + solutions[row] = lastCoeff > 1e-20f ? 
(solutions[row] - sum) / (lastCoeff + 1e-20f) : 0; + } + + __syncthreads(); + } + + if (col < systemSize) { + system.WriteSolution(col, solutions[col]); + } + } + + + + template + __global__ void RegularizeImpl(float* lower, int rowSize, + int matCount, float lambda0, float lambda1) { + const int matricesPerBlock = BLOCK_SIZE / rowSize; + int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize; + lower += ((size_t)matrixIdx) * rowSize * (rowSize + 1) / 2; + + const int col = threadIdx.x & (rowSize - 1); + if (matrixIdx >= matCount) { + return; + } + + const float cellPrior = 1.0f / rowSize; + + float trace = 0; + float pseudoRank = 0; + for (int row = 0; row < rowSize; ++row) { + const float val = __ldg(lower + row * (row + 1) / 2 + row); + trace += val; + pseudoRank += val > 1e-9f; + } + + __syncthreads(); + + #pragma unroll 8 + for (int row = 0; row < rowSize; ++row) { + //beta prior (uniform). Makes rank(lower) = rowSize - 1 + if (col <= row) { + float val = __ldg(lower + row * (row + 1) / 2 + col); + if (col == row && val <= 1e-7f) { + val += trace / pseudoRank + 0.1f; + } + if (col == row) { + val += 0.05f * trace / pseudoRank + 1e-20f; + } + val += col < row ? -lambda0 * cellPrior : (lambda0 * (1 - cellPrior) + lambda1); + WriteThrough(lower + row * (row + 1) / 2 + col, val); + } + } + } + + + + void Regularize(float* matrices, int rowSize, int matCount, double lambdaNonDiag, double lambdaDiag, TCudaStream stream) { + const int blockSize = 256; + const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize; + if (numBlocks > 0) { + RegularizeImpl<<>>(matrices, rowSize, matCount, lambdaNonDiag, lambdaDiag); + } + } + + + template + __global__ void ZeroMeanImpl(float* solutions, int rowSize, int matCount) { + + const int matricesPerBlock = BLOCK_SIZE / rowSize; + + const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize; + const int tid = threadIdx.x; + const int col = threadIdx.x & (rowSize - 1); + const int inBlockOffset = threadIdx.x / rowSize; + + __shared__ double beta[BLOCK_SIZE]; + __shared__ double line[BLOCK_SIZE]; + + if (matrixIdx >= matCount) { + return; + } + + solutions += matrixIdx * rowSize; + beta[tid] = col != (rowSize - 1) ? solutions[col] : 0; + line[tid] = beta[tid]; + __syncthreads(); + + for (int s = rowSize >> 1; s > 0; s >>= 1) { + if (col < s) { + line[tid] += line[tid + s]; + } + __syncthreads(); + } + + beta[tid] -= line[rowSize * inBlockOffset] / rowSize; + solutions[col] = beta[tid]; + } + + template + __global__ void CalcScoresCholeskyImpl(const float* linearSystem, + const float* solutions, + int rowSize, + int matCount, + float* scores) { + + const int matricesPerBlock = BLOCK_SIZE / rowSize; + + const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize; + const int tid = threadIdx.x; + const int col = threadIdx.x & (rowSize - 1); + const int inBlockOffset = threadIdx.x / rowSize; + + __shared__ float beta[BLOCK_SIZE]; + __shared__ float line[BLOCK_SIZE]; + + if (matrixIdx >= matCount) { + return; + } + + linearSystem += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2 + rowSize); + solutions += matrixIdx * rowSize; + scores += matrixIdx; + + beta[tid] = solutions[col]; + line[tid] = beta[tid]; + const float tidTarget = linearSystem[rowSize * (rowSize + 1) / 2 + col]; + + __syncthreads(); + //we store matrix cholesky-decomposition. 
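    // ---------------------------------------------------------------------------
    // Illustrative host-side reference (not part of the kernels above), assuming
    // the same packed row-major lower-triangular layout (element (row, col) at
    // row * (row + 1) / 2 + col) and <cmath>. It mirrors what
    // CholeskyDecompositionImpl and the two SolveForwardImpl passes compute and
    // can be used to spot-check a single small system.
    inline size_t PackedIdx(int row, int col) {
        return (size_t)row * (row + 1) / 2 + col;
    }

    inline void CholeskyDecomposeRef(float* a, int n) {
        for (int r = 0; r < n; ++r) {
            for (int c = 0; c <= r; ++c) {
                float sum = a[PackedIdx(r, c)];
                for (int k = 0; k < c; ++k) {
                    sum -= a[PackedIdx(r, k)] * a[PackedIdx(c, k)];
                }
                a[PackedIdx(r, c)] = (r == c) ? sqrtf(fmaxf(sum, 1e-8f))
                                              : sum / a[PackedIdx(c, c)];
            }
        }
    }

    //solves (L * L^T) x = b in place; b holds the solution on exit
    inline void CholeskySolveRef(const float* L, float* b, int n) {
        for (int r = 0; r < n; ++r) {          // forward substitution: L y = b
            float sum = b[r];
            for (int c = 0; c < r; ++c) {
                sum -= L[PackedIdx(r, c)] * b[c];
            }
            b[r] = sum / L[PackedIdx(r, r)];
        }
        for (int r = n - 1; r >= 0; --r) {     // backward substitution: L^T x = y
            float sum = b[r];
            for (int c = r + 1; c < n; ++c) {
                sum -= L[PackedIdx(c, r)] * b[c];
            }
            b[r] = sum / L[PackedIdx(r, r)];
        }
    }
    // ---------------------------------------------------------------------------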
For score we need to maximize ||beta^{T}L||^2 - 2 (1) + //score to minimize: (A\beta - y)^{T}W(A\beta - y) + \beta^{T} J \beta, where J — some positive-defined matrix + //we don't need square sum, so we maximize (1) + + { + float partb1 = 0; + #pragma unroll 4 + for (int row = 0; row < rowSize; ++row) { + double val = col <= row ? LdgWithFallback(linearSystem, row * (row + 1) / 2 + col) + : LdgWithFallback(linearSystem, col * (col + 1) / 2 + row); + val *= beta[rowSize * inBlockOffset + row]; + partb1 += val; + } + line[tid] = beta[tid] * (tidTarget - 0.5 * partb1); + } + __syncthreads(); + + for (int s = rowSize >> 1; s > 0; s >>= 1) { + if (col < s) { + line[tid] += line[tid + s]; + } + __syncthreads(); + } + + if (col == 0) { + scores[0] = line[tid]; + } + } + + + //Inplace solver + template + inline void RunCholeskySolver(float* matrices, float* solutions, + int rowSize, int matCount, + TCudaStream stream) { + + const int numBlocksCholesky = (matCount * min(rowSize, 32) + BLOCK_SIZE - 1) / BLOCK_SIZE; + + if (numBlocksCholesky > 0) { + #define CHOLESKY_DECOMPOSITION(ROW_SIZE) \ + const int SYSTEM_SIZE = ROW_SIZE - REMOVE_LAST; \ + CholeskyDecompositionImpl <<< numBlocksCholesky, BLOCK_SIZE, 0, stream>>> (matrices, matCount); \ + break; + + switch (rowSize) { + case 1: { + CHOLESKY_DECOMPOSITION(1); + } + case 2: { + CHOLESKY_DECOMPOSITION(2); + } + case 4: { + CHOLESKY_DECOMPOSITION(4); + } + case 8: { + CHOLESKY_DECOMPOSITION(8); + } + case 16: { + CHOLESKY_DECOMPOSITION(16); + } + case 32: { + CHOLESKY_DECOMPOSITION(32); + } + case 64: { + CHOLESKY_DECOMPOSITION(64); + } + case 128: { + CHOLESKY_DECOMPOSITION(128); + } + case 256: { + CHOLESKY_DECOMPOSITION(256); + } + } + + const int solverNumBlocks = (matCount * rowSize + SOLVER_BLOCK_SIZE - 1) / SOLVER_BLOCK_SIZE; + if (solverNumBlocks) { + SolveForwardImpl << < solverNumBlocks, SOLVER_BLOCK_SIZE, 0, stream >> > (matrices, rowSize, rowSize - REMOVE_LAST, matCount, solutions); + SolveForwardImpl << < solverNumBlocks, SOLVER_BLOCK_SIZE, 0, stream >> > (matrices, rowSize, rowSize - REMOVE_LAST, matCount, solutions); + } + } + } + + template + inline void RunCalcScores(const float* linearSystem, const float* solutions, int rowSize, float* scores, + int matCount, TCudaStream stream) { + const int numBlocks = (matCount * BLOCK_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE; + + CalcScoresCholeskyImpl << < numBlocks, BLOCK_SIZE, 0, stream >> >(linearSystem, solutions, rowSize, matCount, scores); + } + + void ZeroMean(float* solutions, int rowSize, int matCount, TCudaStream stream) { + const int blockSize = 256; + const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize; + if (numBlocks > 0) { + ZeroMeanImpl << < numBlocks, blockSize, 0, stream >> > (solutions, rowSize, matCount); + } + } + + void CalcScores(const float* linearSystem, const float* solutions, + float* scores, int rowSize, int matCount, TCudaStream stream) + { + if (rowSize == 256) { + RunCalcScores<256>(linearSystem, solutions, rowSize, scores, matCount, stream); + } else { + RunCalcScores<128>(linearSystem, solutions, rowSize, scores, matCount, stream); + } + } + + void CholeskySolver(float* matrices, float* solutions, int rowSize, int matCount, bool removeLast, TCudaStream stream) + { + + if (removeLast) { + RunCholeskySolver<128, 256, 1>(matrices, solutions, rowSize, matCount, stream); + } else { + RunCholeskySolver<128, 256, 0>(matrices, solutions, rowSize, matCount, stream); + } + } + + void SolverForward(float* matrices, float* solutions, int rowSize, int 
matCount, TCudaStream stream) { + const int blockSize = 256; + const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize; + if (numBlocks > 0) { + SolveForwardImpl<<>>(matrices, rowSize, rowSize - 1, matCount, solutions); + } + } + + + void SolverBackward(float* matrices, float* solutions, int rowSize, int matCount, TCudaStream stream) { + const int blockSize = 256; + const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize; + if (numBlocks > 0) { + SolveForwardImpl<<>>(matrices, rowSize, rowSize - 1, matCount, solutions); + } + } + + + +} diff --git a/cuda_code/linkage_11.cu b/cuda_code/linkage_11.cu new file mode 100644 index 0000000000000000000000000000000000000000..5915f33c3610e930968292ccca028f8932040202 --- /dev/null +++ b/cuda_code/linkage_11.cu @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include "benchmark.cuh" + +namespace ML { +namespace Bench { +namespace linkage { + +struct Params { + DatasetParams data; + BlobsParams blobs; +}; + +template +class Linkage : public BlobsFixture { + public: + Linkage(const std::string& name, const Params& p) + : BlobsFixture(name, p.data, p.blobs) {} + + protected: + void runBenchmark(::benchmark::State& state) override { + using MLCommon::Bench::CudaEventTimer; + if (!this->params.rowMajor) { + state.SkipWithError("Single-Linkage only supports row-major inputs"); + } + + this->loopOnState(state, [this]() { + out_arrs.labels = labels; + out_arrs.children = out_children; + + ML::single_linkage_neighbors( + *this->handle, this->data.X, this->params.nrows, this->params.ncols, + &out_arrs, raft::distance::DistanceType::L2Unexpanded, 15, 50); + }); + } + + void allocateTempBuffers(const ::benchmark::State& state) override { + this->alloc(labels, this->params.nrows); + this->alloc(out_children, (this->params.nrows - 1) * 2); + } + + void deallocateTempBuffers(const ::benchmark::State& state) override { + this->dealloc(labels, this->params.nrows); + this->dealloc(out_children, (this->params.nrows - 1) * 2); + } + + private: + int* labels; + int* out_children; + raft::hierarchy::linkage_output out_arrs; +}; + +std::vector getInputs() { + std::vector out; + Params p; + p.data.rowMajor = true; + p.blobs.cluster_std = 5.0; + p.blobs.shuffle = false; + p.blobs.center_box_min = -10.0; + p.blobs.center_box_max = 10.0; + p.blobs.seed = 12345ULL; + std::vector> rowcols = { + {35000, 128}, {16384, 128}, {12288, 128}, {8192, 128}, {4096, 128}, + }; + for (auto& rc : rowcols) { + p.data.nrows = rc.first; + p.data.ncols = rc.second; + for (auto nclass : std::vector({1})) { + p.data.nclasses = nclass; + out.push_back(p); + } + } + return out; +} + +ML_BENCH_REGISTER(Params, Linkage, "blobs", getInputs()); + +} // namespace linkage +} // end namespace Bench +} // end namespace ML diff --git a/cuda_code/linspace_kernel.cu b/cuda_code/linspace_kernel.cu new file mode 100644 index 
0000000000000000000000000000000000000000..3a6ff365c11db8fa4940cacb5fc75c5ebe50ebbb --- /dev/null +++ b/cuda_code/linspace_kernel.cu @@ -0,0 +1,97 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/linspace_kernel.h" + +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" +#include "paddle/phi/backends/gpu/gpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/copy_kernel.h" +#include "paddle/phi/kernels/funcs/data_type_transform.h" +#include "paddle/phi/kernels/funcs/math_function.h" + +namespace phi { + +template +__global__ void LinspaceKernelInner( + T start, T stop, double step, int64_t size, T* out) { + int64_t index = blockIdx.x * blockDim.x + threadIdx.x; + + for (; index < size; index += blockDim.x * gridDim.x) { + if (index < size / 2) { + out[index] = static_cast(start + step * index); + } else { + out[index] = static_cast(stop - step * (size - index - 1)); + } + } +} + +template +__global__ void LinspaceSpecialKernel(T start, T* out) { + out[0] = static_cast(start); +} + +template +void LinspaceKernel(const Context& ctx, + const DenseTensor& start, + const DenseTensor& stop, + const DenseTensor& number, + DataType dtype, + DenseTensor* out) { + auto start_t = phi::funcs::TransDataType(ctx, start, dtype); + auto stop_t = phi::funcs::TransDataType(ctx, stop, dtype); + + DenseTensor n_start; + DenseTensor n_stop; + DenseTensor n_num; + phi::Copy(ctx, start_t, phi::CPUPlace(), false, &n_start); + T start_data = n_start.data()[0]; + phi::Copy(ctx, stop_t, phi::CPUPlace(), false, &n_stop); + T stop_data = n_stop.data()[0]; + phi::Copy(ctx, number, phi::CPUPlace(), false, &n_num); + int64_t num = static_cast(n_num.data()[0]); + + PADDLE_ENFORCE_GT( + num, + 0, + phi::errors::InvalidArgument("The num of linspace op should be larger " + "than 0, but received num is %d", + num)); + + out->Resize(phi::make_ddim({num})); + T* out_data = ctx.template Alloc(out); + + double step = 0; + auto stream = ctx.stream(); + int block = 512; + int grid = (num + block - 1) / block; + if (num != 1) { + step = (static_cast(stop_data - start_data)) / (num - 1); + LinspaceKernelInner<<>>( + start_data, stop_data, step, num, out_data); + } else { + LinspaceSpecialKernel<<>>(start_data, out_data); + } +} + +} // namespace phi + +PD_REGISTER_KERNEL(linspace, + GPU, + ALL_LAYOUT, + phi::LinspaceKernel, + float, + int32_t, + int64_t, + double) {} diff --git a/cuda_code/lod_tensor_test_6.cu b/cuda_code/lod_tensor_test_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..4dd7810c1b25cbfeb7d6d79034a97db3f1d67ebb --- /dev/null +++ b/cuda_code/lod_tensor_test_6.cu @@ -0,0 +1,72 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
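// ---------------------------------------------------------------------------
// Illustrative host sketch (not part of either file): the linspace kernel above
// fills the lower half from `start` and the upper half from `stop`, which keeps
// both endpoints exact and presumably limits accumulated rounding error.
// Equivalent scalar logic:
#include <vector>

std::vector<double> LinspaceHostRef(double start, double stop, int64_t num) {
  std::vector<double> out(static_cast<size_t>(num));
  if (num == 1) {
    out[0] = start;
    return out;
  }
  const double step = (stop - start) / static_cast<double>(num - 1);
  for (int64_t i = 0; i < num; ++i) {
    out[static_cast<size_t>(i)] =
        (i < num / 2) ? start + step * static_cast<double>(i)
                      : stop - step * static_cast<double>(num - 1 - i);
  }
  return out;
}
// ---------------------------------------------------------------------------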
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "gtest/gtest.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/place.h" + +__global__ void test(size_t* a, int size) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; + i += blockDim.x * gridDim.x) { + a[i] *= 2; + } +} + +TEST(LoD, data) { + paddle::framework::InitDevices(); + + paddle::framework::LoD lod{{0, 1, 2}}; + lod.push_back({0, 2, 4, 5}); + lod.push_back(std::vector({0, 1, 6, 8, 10, 11})); + + auto& v = lod[0]; + paddle::platform::CUDAPlace gpu(0); + test<<<1, 1>>>(v.CUDAMutableData(gpu), v.size()); + cudaDeviceSynchronize(); + for (size_t i = 0; i < v.size(); ++i) { + EXPECT_EQ(v[i], i * 2); + } +} + +TEST(LoDTensor, LoDInGPU) { + paddle::framework::InitDevices(); + + paddle::framework::LoDTensor lod_tensor; + paddle::platform::CUDAPlace place(0); + + paddle::framework::LoD src_lod; + src_lod.push_back(std::vector{0, 2, 4, 6, 8, 10, 12, 14}); + + lod_tensor.Resize({14, 16}); + lod_tensor.mutable_data(place); + + lod_tensor.set_lod(src_lod); + EXPECT_EQ(lod_tensor.lod_element(0, 2).first, 4UL); + EXPECT_EQ(lod_tensor.lod_element(0, 4).first, 8UL); + + auto lod = lod_tensor.lod(); + + test<<<1, 8>>>(lod[0].CUDAMutableData(place), lod[0].size()); + cudaDeviceSynchronize(); + + for (size_t i = 0; i < src_lod[0].size(); ++i) { + EXPECT_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2); + } +} diff --git a/cuda_code/lod_tensor_test_8.cu b/cuda_code/lod_tensor_test_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..97e69cdb2e5e1e64031c899f5e04020665485ba8 --- /dev/null +++ b/cuda_code/lod_tensor_test_8.cu @@ -0,0 +1,50 @@ +/* + Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#include +#include +#include "paddle/framework/lod_tensor.h" +#include "paddle/platform/assert.h" + +#include + +__global__ void test(size_t* a, int size) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; + i += blockDim.x * gridDim.x) { + a[i] *= 2; + } +} + +TEST(LoDTensor, LoDInGPU) { + paddle::framework::LoDTensor lod_tensor; + paddle::platform::GPUPlace place(0); + + paddle::framework::LoD src_lod; + src_lod.push_back(std::vector{0, 2, 4, 6, 8, 10, 12, 14}); + + lod_tensor.Resize({14, 16}); + lod_tensor.mutable_data(place); + + lod_tensor.set_lod(src_lod); + CHECK_EQ(lod_tensor.lod_element(0, 2), 4); + CHECK_EQ(lod_tensor.lod_element(0, 4), 8); + + auto lod = lod_tensor.lod(); + + test<<<1, 8>>>(lod[0].data(), lod[0].size()); + cudaDeviceSynchronize(); + + for (size_t i = 0; i < src_lod[0].size(); ++i) { + CHECK_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2); + } +} diff --git a/cuda_code/lookup_table_2.cu b/cuda_code/lookup_table_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..1473ebf67c68cd104ffce2fc5864319f8be624fa --- /dev/null +++ b/cuda_code/lookup_table_2.cu @@ -0,0 +1,80 @@ +// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
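// ---------------------------------------------------------------------------
// Illustrative standalone sketch (not part of the LoD tensor tests above) of the
// grid-stride pattern both tests rely on: each thread advances by
// blockDim.x * gridDim.x, so any <<<grid, block>>> shape covers the whole array.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void DoubleInPlace(size_t* a, int size) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
       i += blockDim.x * gridDim.x) {
    a[i] *= 2;
  }
}

int main() {
  const int n = 8;
  size_t h[n] = {0, 2, 4, 6, 8, 10, 12, 14};
  size_t* d = nullptr;
  cudaMalloc(&d, n * sizeof(size_t));
  cudaMemcpy(d, h, n * sizeof(size_t), cudaMemcpyHostToDevice);
  DoubleInPlace<<<1, 8>>>(d, n);  // a <<<1, 4>>> launch would also cover all 8 elements
  cudaMemcpy(h, d, n * sizeof(size_t), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%zu ", h[i]);  // each input value doubled
  printf("\n");
  cudaFree(d);
  return 0;
}
// ---------------------------------------------------------------------------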
+ +#include +#include +#include "dali/core/convert.h" +#include "dali/core/span.h" +#include "dali/operators/generic/lookup_table.h" + +namespace dali { + +namespace detail { + +template +__global__ void LookupValuesImpl(const LutSampleDesc *samples, const kernels::BlockDesc<1> *blocks, + const OutputType *lookup_table, const OutputType default_value) { + const auto &block = blocks[blockIdx.x]; + const auto &sample = samples[block.sample_idx]; + + auto *output = reinterpret_cast(sample.output); + const auto *input = reinterpret_cast(sample.input); + for (int64_t x = threadIdx.x + block.start.x; x < block.end.x; x += blockDim.x) { + DoLookup(output[x], input[x], lookup_table, default_value); + } +} + +} // namespace detail + +template<> +void LookupTable::RunImpl(DeviceWorkspace &ws) { + const auto &input = ws.Input(0); + const auto &shape = input.shape(); + auto &output = ws.Output(0); + output.SetLayout(input.GetLayout()); + + const auto stream = ws.stream(); + + auto num_samples = shape.num_samples(); + samples_.resize(num_samples); + for (int sample_id = 0; sample_id < num_samples; sample_id++) { + samples_[sample_id].output = output.raw_mutable_tensor(sample_id); + samples_[sample_id].input = input.raw_tensor(sample_id); + } + samples_dev_.from_host(samples_, stream); + + auto collapsed_shape = collapse_dims<1>(shape, {std::make_pair(0, shape.sample_dim())}); + + block_setup_.SetupBlocks(collapsed_shape, true); + blocks_dev_.from_host(block_setup_.Blocks(), stream); + + TYPE_SWITCH(input.type(), dali::type2id, InputType, LUT_IN_TYPES, ( + TYPE_SWITCH(output_type_, dali::type2id, OutputType, LUT_OUT_TYPES, ( + + const OutputType *lookup_table = lut_.data(); + OutputType default_value = ConvertSat(default_value_f_); + + dim3 grid_dim = block_setup_.GridDim(); + dim3 block_dim = block_setup_.BlockDim(); + + detail::LookupValuesImpl<<>>( + samples_dev_.data(), blocks_dev_.data(), lookup_table, default_value); + + ), DALI_FAIL(make_string("Unsupported output type: ", output_type_)); ); // NOLINT + ), DALI_FAIL(make_string("Unsupported input type: ", input.type())); ); // NOLINT +} + +DALI_REGISTER_OPERATOR(LookupTable, LookupTable, GPU); + +} // namespace dali diff --git a/cuda_code/lookup_table_op_30.cu b/cuda_code/lookup_table_op_30.cu new file mode 100644 index 0000000000000000000000000000000000000000..27eee3436af8107cef2aa3577ea238be49edf1af --- /dev/null +++ b/cuda_code/lookup_table_op_30.cu @@ -0,0 +1,116 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
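// ---------------------------------------------------------------------------
// Illustrative sketch (not DALI's actual DoLookup, which is defined elsewhere in
// the library): the per-element mapping performed above is assumed to be
// "table hit if the key is in range, otherwise the default value". A standalone
// kernel with that behaviour:
template <typename Out, typename In>
__global__ void LookupValuesSketch(Out *out, const In *in, int64_t n,
                                   const Out *lut, int lut_size, Out default_value) {
  for (int64_t i = blockIdx.x * static_cast<int64_t>(blockDim.x) + threadIdx.x;
       i < n; i += static_cast<int64_t>(blockDim.x) * gridDim.x) {
    const In key = in[i];
    out[i] = (key >= 0 && static_cast<int64_t>(key) < lut_size) ? lut[key]
                                                                : default_value;
  }
}
// ---------------------------------------------------------------------------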
*/ + +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/platform/assert.h" +#include "paddle/platform/cuda_helper.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +__global__ void LookupTable(T* output, const T* table, const int32_t* ids, + const int N, const int K, const int D) { + int idx = threadIdx.x; + int idy = blockIdx.x + threadIdx.y * GridDimX; + + while (idy < K) { + int id = ids[idy]; + PADDLE_ASSERT(id >= 0); + PADDLE_ASSERT(id < N); + T* out = output + idy * D; + const T* tab = table + id * D; + for (int i = idx; i < D; i += BlockDimX) { + out[i] = tab[i]; + } + idy += BlockDimY * GridDimX; + } +} + +template +__global__ void LookupTableGrad(T* table, const T* output, const int32_t* ids, + const int N, const int K, const int D) { + int idx = threadIdx.x; + int idy = blockIdx.x + threadIdx.y * GridDimX; + + while (idy < K) { + int id = ids[idy]; + PADDLE_ASSERT(id >= 0); + PADDLE_ASSERT(id < N); + const T* out = output + idy * D; + T* tab = table + id * D; + for (int i = idx; i < D; i += BlockDimX) { + paddle::platform::CudaAtomicAdd(&tab[i], out[i]); + } + idy += BlockDimY * GridDimX; + } +} + +template +class LookupTableCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto table_t = context.Input("W"); + auto ids_t = context.Input("Ids"); + auto output_t = context.Output("Out"); + + size_t N = table_t->dims()[0]; + size_t D = table_t->dims()[1]; + size_t K = product(ids_t->dims()); + auto ids = ids_t->data(); + auto table = table_t->data(); + auto output = output_t->mutable_data(context.GetPlace()); + + dim3 threads(128, 8); + dim3 grids(8, 1); + LookupTable<<>>(output, table, ids, N, K, D); + } +}; + +template +class LookupTableGradCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto ids_t = context.Input("Ids"); + auto d_output_t = context.Input(framework::GradVarName("Out")); + auto d_table_t = context.Output(framework::GradVarName("W")); + + int N = d_table_t->dims()[0]; + int D = d_table_t->dims()[1]; + int K = product(ids_t->dims()); + const int32_t* ids = ids_t->data(); + const T* d_output = d_output_t->data(); + T* d_table = d_table_t->mutable_data(context.GetPlace()); + + auto t = framework::EigenVector::Flatten(*d_table_t); + t.device(context.GetEigenDevice()) = + t.constant(static_cast(0)); + + dim3 threads(128, 8); + dim3 grids(8, 1); + LookupTableGrad<<>>(d_table, d_output, ids, N, + K, D); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_GPU_KERNEL(lookup_table, ops::LookupTableCUDAKernel); +REGISTER_OP_GPU_KERNEL(lookup_table_grad, + ops::LookupTableGradCUDAKernel); diff --git a/cuda_code/lookup_table_op_8.cu b/cuda_code/lookup_table_op_8.cu new file mode 100644 index 0000000000000000000000000000000000000000..89c84d9e14377315659efc1f3b8a5a9d0406b336 --- /dev/null +++ b/cuda_code/lookup_table_op_8.cu @@ -0,0 +1,235 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
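// ---------------------------------------------------------------------------
// Illustrative sketch (not the operator above): LookupTableGrad accumulates with
// CudaAtomicAdd because the same row id can appear several times in `ids`; plain
// stores would race and drop contributions. A stripped-down float-only version
// of that scatter-add, one thread block per batch of rows:
__global__ void EmbeddingGradSketch(float *d_table, const float *d_output,
                                    const int64_t *ids, int K, int D) {
  for (int row = blockIdx.x; row < K; row += gridDim.x) {
    const int64_t id = ids[row];
    for (int col = threadIdx.x; col < D; col += blockDim.x) {
      atomicAdd(&d_table[id * D + col], d_output[row * D + col]);
    }
  }
}
// ---------------------------------------------------------------------------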
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/lookup_table_op.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" +#include "paddle/fluid/platform/float16.h" + +namespace paddle { +namespace operators { + +template +__global__ void LookupTable(T *output, const T *table, const int64_t *ids, + const int64_t N, const int64_t K, const int64_t D, + const int64_t padding_idx) { + int idx = threadIdx.x; + int idy = blockIdx.x + threadIdx.y * GridDimX; + + while (idy < K) { + int64_t id = ids[idy]; + PADDLE_ENFORCE( + id >= 0, + "Variable value (input) of OP(fluid.layers.embedding) " + "expected >= 0 and < %ld, but got %ld. Please check input value.", + N, id); + PADDLE_ENFORCE( + id < N, + "Variable value (input) of OP(fluid.layers.embedding) " + "expected >= 0 and < %ld, but got %ld. Please check input value.", + N, id); + T *out = output + idy * D; + const T *tab = table + id * D; + for (int i = idx; i < D; i += BlockDimX) { + if (PaddingFlag) { + if (id == padding_idx) + out[i] = static_cast(0); + else + out[i] = tab[i]; + } else { + out[i] = tab[i]; + } + } + idy += BlockDimY * GridDimX; + } +} + +template +__global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids, + const int64_t N, const int64_t K, + const int64_t D) { + int idx = threadIdx.x; + int idy = blockIdx.x + threadIdx.y * GridDimX; + + while (idy < K) { + int64_t id = ids[idy]; + PADDLE_ENFORCE( + id >= 0, + "Variable value (input) of OP(fluid.layers.embedding) " + "expected >= 0 and < %ld, but got %ld. Please check input value.", + N, id); + PADDLE_ENFORCE( + id < N, + "Variable value (input) of OP(fluid.layers.embedding) " + "expected >= 0 and < %ld, but got %ld. 
Please check input value.", + N, id); + const T *out = output + idy * D; + T *tab = table + id * D; + for (int i = idx; i < D; i += BlockDimX) { + paddle::platform::CudaAtomicAdd(&tab[i], out[i]); + } + idy += BlockDimY * GridDimX; + } +} + +template +class LookupTableCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto *table_t = context.Input("W"); + auto *ids_t = context.Input("Ids"); + auto *output_t = context.Output("Out"); + int64_t padding_idx = context.Attr("padding_idx"); + + auto id_name = context.InputNames("Ids").front(); + auto out_name = context.OutputNames("Out").front(); + + size_t N = table_t->dims()[0]; + size_t D = table_t->dims()[1]; + size_t K = ids_t->numel(); + + auto *ids = ids_t->data(); + auto *table = table_t->data(); + auto *output = output_t->mutable_data(context.GetPlace()); + +#ifdef PADDLE_WITH_HIP + dim3 threads(64, 4); +#else + dim3 threads(128, 8); +#endif // PADDLE_WITH_HIP + dim3 grids(8, 1); +#ifdef PADDLE_WITH_HIP + if (padding_idx == -1) + LookupTable< + T, 64, 4, 8, + false><<>>( + output, table, ids, N, K, D, padding_idx); + else + LookupTable< + T, 64, 4, 8, + true><<>>( + output, table, ids, N, K, D, padding_idx); +#else + if (padding_idx == -1) + LookupTable< + T, 128, 8, 8, + false><<>>( + output, table, ids, N, K, D, padding_idx); + else + LookupTable< + T, 128, 8, 8, + true><<>>( + output, table, ids, N, K, D, padding_idx); +#endif // PADDLE_WITH_HIP + } +}; + +template +class LookupTableGradCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto &dev_ctx = + context.template device_context(); + bool is_sparse = context.Attr("is_sparse"); + + // Since paddings are not trainable and fixed in forward, the gradient of + // paddings makes no sense and we don't deal with it in backward. + if (is_sparse) { + auto *ids = context.Input("Ids"); + auto *table = context.Input("W"); + auto *d_output = context.Input(framework::GradVarName("Out")); + auto *d_table = context.Output(framework::GradVarName("W")); + + auto *ids_data = ids->data(); + int64_t ids_num = ids->numel(); + + auto stream = dev_ctx.stream(); + // copy GPU memory to CPU pinned memory + framework::Vector new_rows; + new_rows.resize(ids_num); + auto gpu_place = context.GetPlace(); + + // TODO(yuyang18): Strange code here. + memory::Copy(gpu_place, new_rows.CUDAMutableData(context.GetPlace()), + gpu_place, ids_data, ids_num * sizeof(int64_t), stream); + d_table->set_rows(new_rows); + + auto *d_table_value = d_table->mutable_value(); + d_table_value->Resize({ids_num, table->dims()[1]}); + d_table_value->mutable_data(context.GetPlace()); + + auto *d_table_data = d_table_value->data(); + auto *d_output_data = d_output->data(); + auto d_output_dims = d_output->dims(); + auto d_output_dims_2d = + framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1); + PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d, + platform::errors::InvalidArgument( + "ShapeError: The shape of lookup_table@Grad and " + "output@Grad should be same. 
" + "But received lookup_table@Grad's shape = [%s], " + "output@Grad's shape = [%s].", + d_table_value->dims(), d_output_dims_2d)); + memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, + d_output->numel() * sizeof(T), stream); + + } else { + auto ids_t = context.Input("Ids"); + auto d_output_t = context.Input(framework::GradVarName("Out")); + auto d_table_t = context.Output(framework::GradVarName("W")); + + int N = d_table_t->dims()[0]; + int D = d_table_t->dims()[1]; + int K = ids_t->numel(); + const int64_t *ids = ids_t->data(); + const T *d_output = d_output_t->data(); + T *d_table = d_table_t->mutable_data(context.GetPlace()); + + auto t = framework::EigenVector::Flatten(*d_table_t); + t.device(*dev_ctx.eigen_device()) = t.constant(static_cast(0)); + +#ifdef PADDLE_WITH_HIP + dim3 threads(64, 4); +#else + dim3 threads(128, 8); +#endif // PADDLE_WITH_HIP + dim3 grids(8, 1); + +#ifdef PADDLE_WITH_HIP + LookupTableGrad<<>>( + d_table, d_output, ids, N, K, D); +#else + LookupTableGrad<<>>( + d_table, d_output, ids, N, K, D); +#endif // PADDLE_WITH_HIP + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +namespace plat = paddle::platform; +REGISTER_OP_CUDA_KERNEL(lookup_table, ops::LookupTableCUDAKernel, + ops::LookupTableCUDAKernel, + ops::LookupTableCUDAKernel, + ops::LookupTableCUDAKernel, + ops::LookupTableCUDAKernel); +REGISTER_OP_CUDA_KERNEL(lookup_table_grad, + ops::LookupTableGradCUDAKernel, + ops::LookupTableGradCUDAKernel, + ops::LookupTableGradCUDAKernel); diff --git a/cuda_code/lossFunctions.cu b/cuda_code/lossFunctions.cu new file mode 100644 index 0000000000000000000000000000000000000000..6048f2311e680fbc6ffcf6574e8149c673c467e9 --- /dev/null +++ b/cuda_code/lossFunctions.cu @@ -0,0 +1,186 @@ +// +// SkyNet Project +// Copyright (C) 2018 by Contributors +// +// This code is licensed under the MIT License. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files(the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions : +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+// +#include "../stdafx.h" +#include "snOperatorCUDA/src/Operator/lossFunction.h" + +using namespace std; +using namespace SN_Base; + + +__global__ void softMaxACrossEntropyFwd(snSize iosz, snFloat* inout){ + + size_t inStepByD = iosz.w * iosz.h, // step out by input + inStepByN = inStepByD * iosz.d; // step out by batch + + // gridDim.x - number of out layers + + inout += blockIdx.x * inStepByN; + + __shared__ int tmax; + __shared__ snFloat tsumm; + + tmax = 0; + tsumm = 0; + + __syncthreads(); + + unsigned int i = threadIdx.x; + while (i < inStepByN){ + + atomicMax(&tmax, int(inout[i] * 100.F)); // TODO redo to reduction + + i += blockDim.x; + } + + __syncthreads(); + + i = threadIdx.x; + while (i < inStepByN){ + + inout[i] = ((inout[i] - tmax / 100.F) > -20) ? exp(inout[i] - tmax / 100.F) : 0.1E-8F; + + atomicAdd(&tsumm, inout[i]); // TODO redo to reduction + + i += blockDim.x; + } + + __syncthreads(); + + i = threadIdx.x; + while (i < inStepByN){ + + inout[i] /= tsumm; + + i += blockDim.x; + } +} + +__global__ void softMaxACrossEntropyBwd(snSize iosz, snFloat* out, snFloat* targ, snFloat* grad){ + + size_t inStepByD = iosz.w * iosz.h, // step out by input + inStepByN = inStepByD * iosz.d; // step out by batch + + // gridDim.x - number of out layers + // gridDim.y - batch size + + grad += blockIdx.x * inStepByD + blockIdx.y * inStepByN; + out += blockIdx.x * inStepByD + blockIdx.y * inStepByN; + targ += blockIdx.x * inStepByD + blockIdx.y * inStepByN; + + unsigned int i = threadIdx.x; + + while (i < inStepByD){ + + grad[i] = out[i] - targ[i]; + + i += blockDim.x; + } +} + +__global__ void binaryCrossEntropyBwd(snSize iosz, snFloat* out, snFloat* targ, snFloat* grad){ + + size_t inStepByD = iosz.w * iosz.h, // step out by input + inStepByN = inStepByD * iosz.d; // step out by batch + + // gridDim.x - number of out layers + // gridDim.y - batch size + + grad += blockIdx.x * inStepByD + blockIdx.y * inStepByN; + out += blockIdx.x * inStepByD + blockIdx.y * inStepByN; + targ += blockIdx.x * inStepByD + blockIdx.y * inStepByN; + + unsigned int i = threadIdx.x; + + while (i < inStepByD){ + + grad[i] = (out[i] - targ[i]) / (out[i] * (1.F - out[i])); + + i += blockDim.x; + } +} + +__global__ void regressionMSEBwd(snSize iosz, snFloat* out, snFloat* targ, snFloat* grad){ + + size_t inStepByD = iosz.w * iosz.h, // step out by input + inStepByN = inStepByD * iosz.d; // step out by batch + + // gridDim.x - number of out layers + // gridDim.y - batch size + + grad += blockIdx.x * inStepByD + blockIdx.y * inStepByN; + out += blockIdx.x * inStepByD + blockIdx.y * inStepByN; + targ += blockIdx.x * inStepByD + blockIdx.y * inStepByN; + + unsigned int i = threadIdx.x; + + while (i < inStepByD){ + + grad[i] = 2 * (out[i] - targ[i]) / inStepByN; + + i += blockDim.x; + } +} + + +void lossForward(const snSize& sz, snFloat* inout, lossType loss){ + + dim3 dimBlock(256); + dim3 dimGrid(int(sz.n)); + + switch (loss){ + case lossType::softMaxACrossEntropy: + softMaxACrossEntropyFwd <<>>(sz, inout); + break; + + case lossType::binaryCrossEntropy: + break; + + case lossType::regressionMSE: + break; + } +} + +void lossBackward(const snSize& sz, snFloat* out, snFloat* targ, snFloat* grad, lossType loss){ + + dim3 dimBlock(128); + dim3 dimGrid(int(sz.d), int(sz.n)); + + switch (loss){ + case lossType::softMaxACrossEntropy: + + softMaxACrossEntropyBwd << > >(sz, out, targ, grad); + break; + + case lossType::binaryCrossEntropy: + + binaryCrossEntropyBwd << > >(sz, out, targ, grad); + break; + + case 
lossType::regressionMSE: // Mean Square Error + + regressionMSEBwd << > >(sz, out, targ, grad); + break; + } +} diff --git a/cuda_code/lp_norm_op_kernel_1.cu b/cuda_code/lp_norm_op_kernel_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..6862734f4f443157acebe47f1895f76253a26257 --- /dev/null +++ b/cuda_code/lp_norm_op_kernel_1.cu @@ -0,0 +1,217 @@ +#ifdef USE_CUDA + +#include "dragon/core/context_cuda.h" +#include "dragon/utils/device/common_cub.h" +#include "dragon/utils/math_functions.h" +#include "dragon/utils/op_kernels.h" + +namespace dragon { + +namespace kernels { + +namespace { + +template +__global__ void _L1Normalize( + const int NxS, + const int S, + const int C, + const AccT normalizer, + const AccT epsilon, + const T* x, + T* y) { + __shared__ AccT norm; + __shared__ typename BlockReduce::TempStorage storage; + CUDA_2D_KERNEL_LOOP1(i, NxS) { + auto offset = i / S * C * S + i % S; + AccT sum = AccT(0); + CUDA_2D_KERNEL_LOOP2(j, C) { + sum += abs(convert::To(x[offset + j * S])); + } + sum = BlockReduce(storage).Sum(sum); + if (threadIdx.x == 0) { + norm = max(sum / normalizer, epsilon); + } + __syncthreads(); + CUDA_2D_KERNEL_LOOP2(j, C) { + auto index = offset + j * S; + y[index] = convert::To(convert::To(x[index]) / norm); + } + } +} + +template +__global__ void _L2Normalize( + const int NxS, + const int S, + const int C, + const AccT normalizer, + const AccT epsilon, + const T* x, + T* y) { + __shared__ AccT norm; + __shared__ typename BlockReduce::TempStorage storage; + CUDA_2D_KERNEL_LOOP1(i, NxS) { + auto offset = i / S * C * S + i % S; + AccT sum = AccT(0); + CUDA_2D_KERNEL_LOOP2(j, C) { + sum += math::utils::Square(convert::To(x[offset + j * S])); + } + sum = BlockReduce(storage).Sum(sum); + if (threadIdx.x == 0) { + norm = max(sqrt(sum / normalizer), epsilon); + } + __syncthreads(); + CUDA_2D_KERNEL_LOOP2(j, C) { + auto index = offset + j * S; + y[index] = convert::To(convert::To(x[index]) / norm); + } + } +} + +template +__global__ void _L1NormalizeGrad( + const int NxS, + const int S, + const int C, + const AccT normalizer, + const AccT epsilon, + const T* dy, + const T* x, + T* dx) { + __shared__ AccT norm, norm2, sum; + __shared__ typename BlockReduce::TempStorage storage; + CUDA_2D_KERNEL_LOOP1(i, NxS) { + auto offset = i / S * C * S + i % S; + AccT val1 = AccT(0), val2 = AccT(0); + CUDA_2D_KERNEL_LOOP2(j, C) { + auto index = offset + j * S; + val1 += abs(convert::To(x[index])); + val2 += convert::To(dy[index]) * convert::To(x[index]); + } + val1 = BlockReduce(storage).Sum(val1); + val2 = BlockReduce(storage).Sum(val2); + if (threadIdx.x == 0) { + norm = max(val1 / normalizer, epsilon); + norm2 = pow(norm, AccT(2)); + sum = val2 / normalizer; + } + __syncthreads(); + CUDA_2D_KERNEL_LOOP2(j, C) { + auto index = offset + j * S; + dx[index] = convert::To( + (convert::To(dy[index]) / norm) - + ((math::utils::Sign(convert::To(x[index])) / norm2) * sum)); + } + } +} + +template +__global__ void _L2NormalizeGrad( + const int NxS, + const int S, + const int C, + const AccT normalizer, + const AccT epsilon, + const T* dy, + const T* x, + T* dx) { + __shared__ AccT norm, norm3, sum; + __shared__ typename BlockReduce::TempStorage storage; + CUDA_2D_KERNEL_LOOP1(i, NxS) { + auto offset = i / S * C * S + i % S; + AccT val1 = AccT(0), val2 = AccT(0); + CUDA_2D_KERNEL_LOOP2(j, C) { + auto index = offset + j * S; + val1 += math::utils::Square(convert::To(x[index])); + val2 += convert::To(dy[index]) * convert::To(x[index]); + } + val1 = 
BlockReduce(storage).Sum(val1); + val2 = BlockReduce(storage).Sum(val2); + if (threadIdx.x == 0) { + norm = max(sqrt(val1 / normalizer), epsilon); + norm3 = pow(norm, AccT(3)); + sum = val2 / normalizer; + } + __syncthreads(); + CUDA_2D_KERNEL_LOOP2(j, C) { + auto index = offset + j * S; + dx[index] = convert::To( + (convert::To(dy[index]) / norm) - + ((convert::To(x[index]) / norm3) * sum)); + } + } +} + +} // namespace + +/* ------------------- Launcher Separator ------------------- */ + +#define DEFINE_KERNEL_LAUNCHER(name, T, AccT) \ + template <> \ + void name( \ + const int N, \ + const int S, \ + const int C, \ + const float normalizer, \ + const float epsilon, \ + const T* x, \ + T* y, \ + CUDAContext* ctx) { \ + const auto NxS = N * S; \ + _##name::type, AccT> \ + <<cuda_stream()>>>( \ + NxS, \ + S, \ + C, \ + AccT(normalizer), \ + AccT(epsilon), \ + reinterpret_cast::type*>(x), \ + reinterpret_cast::type*>(y)); \ + } + +#define DEFINE_GRAD_KERNEL_LAUNCHER(name, T, AccT) \ + template <> \ + void name( \ + const int N, \ + const int S, \ + const int C, \ + const float normalizer, \ + const float epsilon, \ + const T* dy, \ + const T* x, \ + T* dx, \ + CUDAContext* ctx) { \ + const auto NxS = N * S; \ + _##name::type, AccT> \ + <<cuda_stream()>>>( \ + NxS, \ + S, \ + C, \ + AccT(normalizer), \ + AccT(epsilon), \ + reinterpret_cast::type*>(dy), \ + reinterpret_cast::type*>(x), \ + reinterpret_cast::type*>(dx)); \ + } + +DEFINE_KERNEL_LAUNCHER(L1Normalize, float16, float); +DEFINE_KERNEL_LAUNCHER(L1Normalize, float, float); +DEFINE_KERNEL_LAUNCHER(L1Normalize, double, double); +DEFINE_KERNEL_LAUNCHER(L2Normalize, float16, float); +DEFINE_KERNEL_LAUNCHER(L2Normalize, float, float); +DEFINE_KERNEL_LAUNCHER(L2Normalize, double, double); +DEFINE_GRAD_KERNEL_LAUNCHER(L1NormalizeGrad, float16, float); +DEFINE_GRAD_KERNEL_LAUNCHER(L1NormalizeGrad, float, float); +DEFINE_GRAD_KERNEL_LAUNCHER(L1NormalizeGrad, double, double); +DEFINE_GRAD_KERNEL_LAUNCHER(L2NormalizeGrad, float16, float); +DEFINE_GRAD_KERNEL_LAUNCHER(L2NormalizeGrad, float, float); +DEFINE_GRAD_KERNEL_LAUNCHER(L2NormalizeGrad, double, double); +#undef DEFINE_KERNEL_LAUNCHER +#undef DEFINE_GRAD_KERNEL_LAUNCHER + +} // namespace kernels + +} // namespace dragon + +#endif // USE_CUDA diff --git a/cuda_code/lstmLayer_1.cu b/cuda_code/lstmLayer_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..d3759ae68eab658fc788c26e52cf1a1a0af588bc --- /dev/null +++ b/cuda_code/lstmLayer_1.cu @@ -0,0 +1,620 @@ +/* ****************************************************************************** + * + * + * This program and the accompanying materials are made available under the + * terms of the Apache License, Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0. + * + * See the NOTICE file distributed with this work for additional + * information regarding copyright ownership. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
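// ---------------------------------------------------------------------------
// Illustrative host reference (not part of either file), restating what the
// _L2Normalize / _L2NormalizeGrad kernels above compute for one slice along the
// reduced axis: norm = max(sqrt(sum(x^2) / normalizer), eps), y = x / norm, and
// dx = dy / norm - (x / norm^3) * (dot(dy, x) / normalizer).
#include <cmath>
#include <vector>

inline void L2NormalizeRef(const std::vector<float>& x, float normalizer,
                           float eps, std::vector<float>& y) {
  double sq = 0.0;
  for (float v : x) sq += double(v) * v;
  const float norm = std::fmax(float(std::sqrt(sq / normalizer)), eps);
  y.resize(x.size());
  for (size_t i = 0; i < x.size(); ++i) y[i] = x[i] / norm;
}

inline void L2NormalizeGradRef(const std::vector<float>& dy,
                               const std::vector<float>& x, float normalizer,
                               float eps, std::vector<float>& dx) {
  double sq = 0.0, dot = 0.0;
  for (size_t i = 0; i < x.size(); ++i) {
    sq += double(x[i]) * x[i];
    dot += double(dy[i]) * x[i];
  }
  const float norm = std::fmax(float(std::sqrt(sq / normalizer)), eps);
  const float norm3 = norm * norm * norm;
  const float scale = float(dot / normalizer);
  dx.resize(x.size());
  for (size_t i = 0; i < x.size(); ++i)
    dx[i] = dy[i] / norm - (x[i] / norm3) * scale;
}
// ---------------------------------------------------------------------------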
+ * + * SPDX-License-Identifier: Apache-2.0 + ******************************************************************************/ + // + // @author AbdelRauf + // + + #include + #include "cudnnUtils.h" + #include + + namespace sd { + namespace ops { + namespace platforms { + +//our implementation designed for 1 physical layer +constexpr int numLayers = 1; + +//we will copy without using cudnnGetRNNLinLayerMatrixParams : 1 pseudo layer , isBidirectional : 2 pseudo layer +void copyWeights(const cudaStream_t &stream , bool isBidirectional, uint8_t *weightsSpace, size_t weightsSize, uint8_t *inputWeightsData, uint8_t *recurrentWeightsData, uint8_t *biasesData, int inputSize, int hiddenSize, int dataTypeSize){ + int pseudo_layer_count = isBidirectional ? 2 :1 ; + uint8_t * wptr = weightsSpace; + auto wEnd = wptr + weightsSize; + + //copy size for 1 full pseudo layer + //in bidirectional 1 layer consist of 2 pseduo layers + auto input_pseudo_size = 4 * inputSize * hiddenSize * dataTypeSize; + auto hidden_pseudo_size = 4 * hiddenSize * hiddenSize * dataTypeSize; + for(int i=0; i< pseudo_layer_count ; i++){ + if(wptr + input_pseudo_size + hidden_pseudo_size > wEnd) return; + //copy input weights + if(inputWeightsData){ + cudaMemcpyAsync(wptr, inputWeightsData, input_pseudo_size, cudaMemcpyDeviceToDevice, stream); + inputWeightsData += input_pseudo_size; + } + wptr += input_pseudo_size; + //copy recurrent weights + if(recurrentWeightsData){ + cudaMemcpyAsync(wptr, recurrentWeightsData, hidden_pseudo_size, cudaMemcpyDeviceToDevice, stream); + recurrentWeightsData += hidden_pseudo_size; + } + wptr += hidden_pseudo_size; + } + + //copy bias first 4 + auto bias_size = 4 * hiddenSize * dataTypeSize; + for(int i=0; i< pseudo_layer_count ; i++){ + //refill first 4 biases + if(biasesData && wptr + bias_size < wEnd){ + cudaMemcpyAsync(wptr, biasesData, bias_size, cudaMemcpyDeviceToDevice, stream); + biasesData += bias_size; + } + wptr += bias_size; + //refill next 4 with zeros + if(wptr + bias_size < wEnd){ + cudaMemsetAsync(wptr, 0, bias_size, stream); + wptr += bias_size; + } + } + //memset the rest + if( wEnd-wptr ) cudaMemsetAsync(wptr, 0 , wEnd-wptr, stream); +} + +void cudnn_rnn_old(LaunchContext *contextPtr, int dataFormat, NDArray *input, NDArray *inputWeights, NDArray *recurrentWeights, + NDArray *biases, NDArray *prevAct, NDArray *prevMemCell, NDArray *outputActivations, NDArray *finalTimeStepActivations, NDArray *finalMemCellState, + int maxSeqLength, int batchSize, int inputSize, int hiddenSize, double cellClip, bool isBidirectional){ + + nd4j_debug("cudnn rnn api %s \n", "v6"); + + bool training = false; + cudnnHandle_t handle = *(reinterpret_cast(contextPtr->getCuDnnHandle())); + + auto stream = *(contextPtr->getCudaStream()); + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream( handle, stream)); + + CudnnTensorList xDescList (maxSeqLength); + CudnnTensorList yDescList (maxSeqLength); + + auto cudnnType = cudnnDataType(input->dataType()); + auto dataTypeSize = input->sizeOfT(); + + CudnnTensor hxDesc, cxDesc, hyDesc, cyDesc; + + constexpr int rankOf = 3; + const int numDirections = isBidirectional ? 
2 : 1; + + const int dimsX[rankOf] = {batchSize, inputSize, 1}; + const int stridesX[rankOf] = {inputSize, 1, 1}; + + const int dimsY[rankOf] = {batchSize, hiddenSize * numDirections , 1}; + const int stridesY[rankOf] = {hiddenSize * numDirections, 1, 1}; + + const int dimC[rankOf] = {numLayers * numDirections, batchSize, hiddenSize}; + const int strideC[rankOf] = {batchSize * hiddenSize, hiddenSize, 1}; + + for(int i=0; i= CUDNN_CLIPPING_API_VER + if(cellClip>0 && cudnnGetVersion()>=CUDNN_CLIPPING_API_VER){ + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnRNNSetClip), cudnnRNNSetClip( handle, rnnDesc, CUDNN_RNN_CLIP_MINMAX, CUDNN_PROPAGATE_NAN, -cellClip, cellClip)); + } +#endif + //set up parameters + size_t weightsSize=0; + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetRNNParamsSize), cudnnGetRNNParamsSize( handle, rnnDesc, xDesc0, &weightsSize, cudnnType)); + + FilterDesc wDesc; + int dimW[] = {(int) weightsSize / dataTypeSize, 1, 1}; + + wDesc.set(cudnnType, CUDNN_TENSOR_NCHW, 3, dimW); + //allocation + void *weightsSpace = manager.allocateDevMem(weightsSize); + + size_t workSpaceSizeInBytes = 0 ; + size_t reserveSpaceSizeInBytes = 0; + + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetRNNWorkspaceSize), cudnnGetRNNWorkspaceSize( handle, rnnDesc, maxSeqLength, xDescList.getDescriptors(), &workSpaceSizeInBytes)); + + void *workSpace = manager.allocateDevMem( workSpaceSizeInBytes); + void *reserveSpace = nullptr; + // training + if(training) { + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetRNNTrainingReserveSize), cudnnGetRNNTrainingReserveSize( handle, rnnDesc, maxSeqLength, xDescList.getDescriptors(), &reserveSpaceSizeInBytes)); + reserveSpace = manager.allocateDevMem( reserveSpaceSizeInBytes); + } + + NDArray::prepareSpecialUse({outputActivations, finalTimeStepActivations, finalMemCellState}, {input, inputWeights, recurrentWeights, biases, prevAct, prevMemCell}); + + uint8_t *biasesData = biases ? (uint8_t*)biases->specialBuffer() : nullptr; + auto prevActData = prevAct ? prevAct->specialBuffer() : nullptr; + auto prevMemCellData = prevMemCell ? prevMemCell->specialBuffer() : nullptr; + auto finalTimeStepActivationsData = finalTimeStepActivations ? finalTimeStepActivations->specialBuffer() : nullptr; + auto finalMemCellStateData = finalMemCellState ? 
finalMemCellState->specialBuffer() : nullptr; + + // dimension 4*nOut implies order it, ft, c't, ot + // input gate, forget gate, new gate, output gate, input gate, forget gate, new gate, output gate + // Note: our weights should be transposed and duplicated with C order to match cudnn ones + + NDArray inputWeightsT, recurrentWeightsT; + uint8_t *inputWeightsData = nullptr; + uint8_t *recurrentWeightsData = nullptr; + if(inputWeights){ + inputWeightsT= inputWeights->rankOf()==3?inputWeights->permute({0, 2, 1}).dup('c'):inputWeights->transpose().dup('c'); + inputWeightsData = (uint8_t*)inputWeightsT.specialBuffer(); + } + if(recurrentWeights){ + recurrentWeightsT = recurrentWeights->rankOf()==3?recurrentWeights->permute({0, 2, 1}).dup('c'):recurrentWeights->transpose().dup('c'); + recurrentWeightsData = (uint8_t*)recurrentWeightsT.specialBuffer(); + } + + //copy without cudnnGetRNNLinLayerMatrixParams + copyWeights(stream, isBidirectional, (uint8_t*)weightsSpace, weightsSize, inputWeightsData, recurrentWeightsData, biasesData, inputSize, hiddenSize, dataTypeSize); + + //permute based on dataformat + NDArray *argX = input; + NDArray *argOutput= outputActivations; + NDArray permutedX, outputH; + + if(outputActivations!=nullptr && (dataFormat != 0 || outputActivations->ordering()!='c')){ + outputH = NDArray('c', std::vector{maxSeqLength, batchSize, (numDirections * hiddenSize)}, outputActivations->dataType(), contextPtr); + argOutput = &outputH; + } + + if(dataFormat == 1){ + permutedX = input->permute({1, 0, 2}).dup('c'); + argX = &permutedX; + } + + auto xData = argX->specialBuffer(); + auto yData = argOutput ? argOutput->specialBuffer() : nullptr; + + if (training) { + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnRNNForwardTraining), cudnnRNNForwardTraining( handle, rnnDesc, (int) maxSeqLength, xDescList.getDescriptors(), xData, + hxDesc, prevActData, cxDesc, prevMemCellData, wDesc, + weightsSpace, yDescList.getDescriptors(), yData, hyDesc, + finalTimeStepActivationsData, cyDesc, finalMemCellStateData, workSpace, + workSpaceSizeInBytes, reserveSpace, reserveSpaceSizeInBytes)); + } else { + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnRNNForwardInference), cudnnRNNForwardInference( handle, rnnDesc, (int) maxSeqLength, xDescList.getDescriptors(), xData, + hxDesc, prevActData, cxDesc, prevMemCellData, wDesc, + weightsSpace, yDescList.getDescriptors(), yData, hyDesc, + finalTimeStepActivationsData, cyDesc, finalMemCellStateData, workSpace, + workSpaceSizeInBytes)); + } + + //remap output + if(outputActivations!=nullptr && argOutput!=outputActivations){ + //refill output + if(dataFormat == 1){ + outputActivations->assign( argOutput->permute({1, 0, 2})); + } + } + NDArray::registerSpecialUse({outputActivations, finalTimeStepActivations, finalMemCellState}, {input, inputWeights, recurrentWeights, biases, prevAct, prevMemCell}); + + return; + +} + +#if CUDNN_VERSION >= CUDNN_NEW_RNN_API_VER + +void cudnn_rnn_v8(LaunchContext *contextPtr, int dataFormat, NDArray *input, NDArray *seqLengthArray, NDArray *inputWeights, NDArray *recurrentWeights, + NDArray *biases, NDArray *prevAct, NDArray *prevMemCell, NDArray *outputActivations, NDArray *finalTimeStepActivations, NDArray *finalMemCellState, + int maxSeqLength, int batchSize, int inputSize, int hiddenSize, double cellClip, bool isBidirectional){ + nd4j_debug("cudnn rnn api %s \n", "v8"); + //seqLengthArray should be int + NDArray *argSeqNdArray = nullptr; + NDArray seqArrIntData; + if(seqLengthArray){ + if(seqLengthArray->ews()==1 && 
seqLengthArray->dataType()==DataType::INT32){ + argSeqNdArray = seqLengthArray; + }else{ + if(seqLengthArray->dataType()!=DataType::INT32){ + seqArrIntData = seqLengthArray->cast(DataType::INT32); + if(seqArrIntData.ews()!=1) seqArrIntData=seqArrIntData.dup('c'); + }else{ + seqArrIntData = seqLengthArray->dup('c'); + } + argSeqNdArray = &seqArrIntData; + } + }else{ + seqArrIntData = NDArray('c', std::vector{batchSize}, DataType::INT32, contextPtr); + seqArrIntData.assign(maxSeqLength); + argSeqNdArray = &seqArrIntData; + } + PointersManager manager(contextPtr, __func__ ); + bool training = false; + cudnnHandle_t handle = *(reinterpret_cast(contextPtr->getCuDnnHandle())); + auto stream = *(contextPtr->getCudaStream()); + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnSetStream), cudnnSetStream( handle, stream)); + + auto cudnnType = cudnnDataType(input->dataType()); + auto dataTypeSize = input->sizeOfT(); + + CudnnTensor hDesc, cDesc; + + constexpr int rankOf = 3; + const int numDirections = isBidirectional ? 2 : 1; + + const int dimC[rankOf] = {numLayers * numDirections, batchSize, hiddenSize}; + const int strideC[rankOf] = {batchSize * hiddenSize, hiddenSize, 1}; + + hDesc.set(cudnnType, rankOf, dimC, strideC); + cDesc.set(cudnnType, rankOf, dimC, strideC); + + //dropout section + DropoutDesc dropoutDesc(nullptr); + //dropout + float dropout = 0; + size_t sizeInBytes=0; + void *droupoutMem = nullptr; + uint64_t seed = 1; //seed + if(dropout!=0){ + dropoutDesc.create(); + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnDropoutGetStatesSize), cudnnDropoutGetStatesSize( handle, &sizeInBytes)); + //allocate and set + droupoutMem = manager.allocateDevMem( sizeInBytes); + dropoutDesc.set(handle, dropout, droupoutMem, sizeInBytes, seed ); + } + + //RNN + RnnDesc rnnDesc; + cudnnRNNMode_t rnnCellMode = CUDNN_LSTM; + cudnnRNNAlgo_t algo = CUDNN_RNN_ALGO_STANDARD; + auto direction = isBidirectional ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL; + auto mathPrec = cudnnType; + + //Note: We will set some parameters manually. Some of them could be parameter in future + constexpr auto inputMode = CUDNN_LINEAR_INPUT; + bool use_tensor_ops = false ; // could be parameter in future +#if CUDNN_VERSION >= CUDNN_NEW_RNN_API_VER + cudnnMathType_t mathType = use_tensor_ops ? CUDNN_TENSOR_OP_MATH : CUDNN_FMA_MATH; +#else + cudnnMathType_t mathType = use_tensor_ops ? CUDNN_TENSOR_OP_MATH : CUDNN_DEFAULT_MATH; +#endif + //disable projection + int projSize = hiddenSize; + cudnnRNNBiasMode_t bias_mode = CUDNN_RNN_DOUBLE_BIAS; + uint32_t aux_flags = CUDNN_RNN_PADDED_IO_ENABLED; + + rnnDesc.set(algo, rnnCellMode, bias_mode, direction, inputMode, cudnnType, mathPrec, mathType, inputSize, hiddenSize, projSize, numLayers, dropoutDesc, aux_flags); + if(cellClip>0){ + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnRNNSetClip), cudnnRNNSetClip( handle, rnnDesc, CUDNN_RNN_CLIP_MINMAX, CUDNN_PROPAGATE_NAN, -cellClip, cellClip)); + } + //set Data desc + RnnDataDesc xDataDesc, yDataDesc; + bool time_major = false; + float padding_fill = 0.0f; + auto hostSeqArr = bufferInHost(*argSeqNdArray); + cudnnRNNDataLayout_t layout = dataFormat==0 ? 
CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED : CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED; + xDataDesc.set(cudnnType, layout, maxSeqLength, batchSize, inputSize, hostSeqArr, (void*)&padding_fill); + yDataDesc.set(cudnnType, layout, maxSeqLength, batchSize, hiddenSize * numDirections, hostSeqArr, (void*)&padding_fill); + //set up parameters + size_t weightsSize=0; + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetRNNWeightSpaceSize), cudnnGetRNNWeightSpaceSize( handle, rnnDesc, &weightsSize)); + + //allocation + void *weightsSpace = manager.allocateDevMem(weightsSize); + + // Set up work space and reserved memory + void *workSpace = nullptr; + void *reserveSpace = nullptr; + + size_t workSpaceSizeInBytes = 0 ; + size_t reserveSpaceSizeInBytes = 0; + + cudnnForwardMode_t fwdMode = training ? CUDNN_FWD_MODE_TRAINING : CUDNN_FWD_MODE_INFERENCE; + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnGetRNNTempSpaceSizes), cudnnGetRNNTempSpaceSizes( handle, rnnDesc, fwdMode, xDataDesc, &workSpaceSizeInBytes, &reserveSpaceSizeInBytes)); + workSpace = manager.allocateDevMem( workSpaceSizeInBytes); + // training + if(training) { + reserveSpace = manager.allocateDevMem( reserveSpaceSizeInBytes); + } + + NDArray::prepareSpecialUse({outputActivations, finalTimeStepActivations, finalMemCellState}, {input, inputWeights, recurrentWeights, biases, prevAct, prevMemCell, argSeqNdArray}); + + auto xData = input->specialBuffer(); + uint8_t *biasesData = biases ? (uint8_t*)biases->specialBuffer() : nullptr; + auto prevActData = prevAct ? prevAct->specialBuffer() : nullptr; + auto prevMemCellData = prevMemCell ? prevMemCell->specialBuffer() : nullptr; + auto yData = outputActivations ? outputActivations->specialBuffer() : nullptr; + auto finalTimeStepActivationsData = finalTimeStepActivations ? finalTimeStepActivations->specialBuffer() : nullptr; + auto finalMemCellStateData = finalMemCellState ? 
finalMemCellState->specialBuffer() : nullptr; + + // dimension 4*nOut implies order it, ft, c't, ot + // input gate, forget gate, new gate, output gate, input gate, forget gate, new gate, output gate + // Note: our weights should be transposed and duplicated with C order to match cudnn ones + + NDArray inputWeightsT, recurrentWeightsT; + uint8_t *inputWeightsData = nullptr; + uint8_t *recurrentWeightsData = nullptr; + if(inputWeights){ + inputWeightsT= inputWeights->rankOf()==3?inputWeights->permute({0, 2, 1}).dup('c'):inputWeights->transpose().dup('c'); + inputWeightsData = (uint8_t*)inputWeightsT.specialBuffer(); + } + if(recurrentWeights){ + recurrentWeightsT = recurrentWeights->rankOf()==3?recurrentWeights->permute({0, 2, 1}).dup('c'):recurrentWeights->transpose().dup('c'); + recurrentWeightsData = (uint8_t*)recurrentWeightsT.specialBuffer(); + } + + //copy without cudnnGetRNNLinLayerMatrixParams + copyWeights(stream, isBidirectional, (uint8_t*)weightsSpace, weightsSize, inputWeightsData, recurrentWeightsData, biasesData, inputSize, hiddenSize, dataTypeSize); + + CHECK_CUDNN_FAILURE_MSG(STRINGIZE(cudnnRNNForward), cudnnRNNForward( handle, rnnDesc, fwdMode, (const int32_t *)argSeqNdArray->specialBuffer(), xDataDesc, xData, + yDataDesc, yData, hDesc, prevActData,finalTimeStepActivationsData, cDesc, prevMemCellData, finalMemCellStateData, + weightsSize, weightsSpace, workSpaceSizeInBytes, workSpace, reserveSpaceSizeInBytes, reserveSpace)); + + NDArray::registerSpecialUse({outputActivations, finalTimeStepActivations, finalMemCellState}, {input, inputWeights, recurrentWeights, biases, prevAct, prevMemCell}); + + return; + +} + +#endif + + ////////////////////////////////////////////////////////////////////////// + PLATFORM_IMPL(lstmLayer, ENGINE_CUDA) { + + const auto dataFormat = INT_ARG(0); // for unidirectional: 0 = [sL, bS, nIn], 1 = [bS, sL ,nIn], 2 = [bS, nIn, sL], for bidirectional: 3 = [sL, 2, bS, nOut] (for ONNX) + const auto directionMode = INT_ARG(1); // direction: 0 = fwd, 1 = bwd, 2 = bidirectional sum, 3 = bidirectional concat, 4 = bidirectional extra output dim (in conjunction with format dataFormat = 3) + + const auto hasBiases = B_ARG(0); // indicates whether biases array is provided + const auto hasSeqLenArray = B_ARG(1); // indicates whether seqLen array is provided + const auto hasInitH = B_ARG(2); // indicates whether initial output is provided + const auto hasInitC = B_ARG(3); // indicates whether initial cell state is provided + const auto hasPH = B_ARG(4); // indicates whether peephole connections are present + const auto retFullSeq = B_ARG(5); // indicates whether to return whole time sequence h {h_0, h_1, ... , h_sL-1} + const auto retLastH = B_ARG(6); // indicates whether to return output at last time step only, in this case shape would be [bS, nOut] (exact shape depends on dataFormat argument) + const auto retLastC = B_ARG(7); // indicates whether to return cells state at last time step only, in this case shape would be [bS, nOut] (exact shape depends on dataFormat argument) + + const auto cellClip = T_ARG(0); // cell clipping value, if it = 0 then do not apply clipping + + const auto x = INPUT_VARIABLE(0); // input + const auto Wx = INPUT_VARIABLE(1); // input weights + const auto Wr = INPUT_VARIABLE(2); // recurrent weights + + int count = 3; + const auto b = hasBiases ? INPUT_VARIABLE(count++) : nullptr; // biases + const auto seqLengthArray = hasSeqLenArray ? INPUT_VARIABLE(count++) : nullptr; // seqLen vector + const auto hI = hasInitH ? 
INPUT_VARIABLE(count++) : nullptr; // initial output + const auto cI = hasInitC ? INPUT_VARIABLE(count++) : nullptr; // initial cell state + const auto Wp = hasPH ? INPUT_VARIABLE(count++) : nullptr; // peephole weights + + count = 0; + auto h = retFullSeq ? OUTPUT_VARIABLE(count++) : nullptr; // output + auto hL = retLastH ? OUTPUT_VARIABLE(count++) : nullptr; // output at last step + auto cL = retLastC ? OUTPUT_VARIABLE(count++) : nullptr; // cell state at last step + + REQUIRE_TRUE(cellClip >= 0 , 0, "LSTM_LAYER operation: cell clipping value should be nonnegative (>=0) !"); + REQUIRE_TRUE(retFullSeq || retLastH || retLastC, 0, "LSTM_LAYER operation: please specify what output arrays to produce !"); + // evaluate dimensions + const Nd4jLong seqLength = dataFormat == 3 ? x->sizeAt(0) : x->sizeAt(dataFormat); + const Nd4jLong bS = dataFormat == 1 || dataFormat == 2 ? x->sizeAt(0) : x->sizeAt(1); + const Nd4jLong nIn = dataFormat == 2 ? x->sizeAt(1) : x->sizeAt(2); + const Nd4jLong nOut = Wx->sizeAt(-1) / 4; + const Nd4jLong hiddenSize = nOut; + + auto contextPtr = block.launchContext(); + bool isBidirectional = directionMode >= 2; + + if(!isBidirectional) { // no bidirectional + // Wx validation + if(Wx->rankOf() != 2 || Wx->sizeAt(0) != nIn) + REQUIRE_TRUE(false, 0, "LSTM_LAYER operation: wrong shape of input weights, expected is %s, but got %s instead !", ShapeUtils::shapeAsString({nIn, 4*nOut}).c_str(), ShapeUtils::shapeAsString(Wx).c_str()); + // Wr validation + if(Wr->rankOf() != 2 || Wr->sizeAt(0) != nOut || Wr->sizeAt(1) != 4*nOut) + REQUIRE_TRUE(false, 0, "LSTM_LAYER operation: wrong shape of recurrent weights, expected is %s, but got %s instead !", ShapeUtils::shapeAsString({nOut, 4*nOut}).c_str(), ShapeUtils::shapeAsString(Wr).c_str()); + // biases validation + if(b != nullptr && (b->rankOf() != 1 || b->sizeAt(0) != 4*nOut)) + REQUIRE_TRUE(false, 0, "LSTM_LAYER operation: wrong shape of biases, expected is %s, but got %s instead !", ShapeUtils::shapeAsString({4*nOut}).c_str(), ShapeUtils::shapeAsString(b).c_str()); + // initial output validation + if(hI != nullptr && (hI->rankOf() != 2 || hI->sizeAt(0) != bS || hI->sizeAt(1) != nOut)) + REQUIRE_TRUE(false, 0, "LSTM_LAYER operation: wrong shape of initial output, expected is %s, but got %s instead !", ShapeUtils::shapeAsString({bS, nOut}).c_str(), ShapeUtils::shapeAsString(hI).c_str()); + // initial cell validation + if(cI != nullptr && (cI->rankOf() != 2 || cI->sizeAt(0) != bS || cI->sizeAt(1) != nOut)) + REQUIRE_TRUE(false, 0, "LSTM_LAYER operation: wrong shape of initial cell state, expected is %s, but got %s instead !", ShapeUtils::shapeAsString({bS, nOut}).c_str(), ShapeUtils::shapeAsString(cI).c_str()); + } + else { // bidirectional + // Wx validation + if(Wx->rankOf() != 3 || Wx->sizeAt(0) != 2 || Wx->sizeAt(1) != nIn) + REQUIRE_TRUE(false, 0, "LSTM_LAYER operation: wrong shape of input weights, expected is %s, but got %s instead !", ShapeUtils::shapeAsString({2, nIn, 4*nOut}).c_str(), ShapeUtils::shapeAsString(Wx).c_str()); + // Wr validation + if(Wr->rankOf() != 3 || Wr->sizeAt(0) != 2 || Wr->sizeAt(1) != nOut || Wr->sizeAt(2) != 4*nOut) + REQUIRE_TRUE(false, 0, "LSTM_LAYER operation: wrong shape of recurrent weights, expected is %s, but got %s instead !", ShapeUtils::shapeAsString({2, nOut, 4*nOut}).c_str(), ShapeUtils::shapeAsString(Wr).c_str()); + // biases validation + if(b != nullptr && (b->rankOf() != 2 || b->sizeAt(0) != 2 || b->sizeAt(1) != 4*nOut)) + REQUIRE_TRUE(false, 0, "LSTM_LAYER operation: wrong shape of 
biases, expected is %s, but got %s instead !", ShapeUtils::shapeAsString({2, 4*nOut}).c_str(), ShapeUtils::shapeAsString(b).c_str()); + // initial output validation + if(hI != nullptr && (hI->rankOf() != 3 || hI->sizeAt(0) != 2 || hI->sizeAt(1) != bS || hI->sizeAt(2) != nOut)) + REQUIRE_TRUE(false, 0, "LSTM_LAYER operation: wrong shape of initial output, expected is %s, but got %s instead !", ShapeUtils::shapeAsString({2, bS, nOut}).c_str(), ShapeUtils::shapeAsString(hI).c_str()); + // initial cell validation + if(cI != nullptr && (cI->rankOf() != 3 || cI->sizeAt(0) != 2 || cI->sizeAt(1) != bS || cI->sizeAt(2) != nOut)) + REQUIRE_TRUE(false, 0, "LSTM_LAYER operation: wrong shape of initial cell state, expected is %s, but got %s instead !", ShapeUtils::shapeAsString({2, bS, nOut}).c_str(), ShapeUtils::shapeAsString(cI).c_str()); + } + +#if CUDNN_VERSION < CUDNN_NEW_RNN_API_VER + cudnn_rnn_old( contextPtr, dataFormat, x, Wx, Wr, b, hI, cI, h, hL, cL, seqLength, bS, nIn, hiddenSize, (double)cellClip, isBidirectional); +#else + if(cudnnGetVersion() >= CUDNN_NEW_RNN_API_VER){ + cudnn_rnn_v8( contextPtr, dataFormat, x, seqLengthArray, Wx, Wr, b, hI, cI, h, hL, cL, seqLength, bS, nIn, hiddenSize, (double)cellClip, isBidirectional); + }else{ + cudnn_rnn_old( contextPtr, dataFormat, x, Wx, Wr, b, hI, cI, h, hL, cL, seqLength, bS, nIn, hiddenSize, (double)cellClip, isBidirectional); + } +#endif + + return Status::OK(); + } + +// Cudnn Lstm: +// Forward inference implemented using v6, and v8 (when version > 8.0.1) api calls. +// As our Cuda Lstm implementation has 1 layer. Cudnn implementation was implemented for 1 physical layer +// Cudnn helper restrictions: +// - all NDArrays should be the same type +// - dataFormat should be 0 or 1 +// - only unidirectional (directionMode == 0) and bidirectional concat (directionMode == 3) +// - no peephole connection +// - Clipping is allowed for cudnn version >= 7.2.1 +// - SeqLen array is allowed for cudnn version >= 8.0.1 +// - gateActivation: sigmoid, cellActivation and outputActivation: tanh +// - NDArrays (excluding the weight arrays, as we have to transpose or permute it) should follow 'c' order and ews()==1 + PLATFORM_CHECK(lstmLayer, ENGINE_CUDA) { + + const auto dataFormat = INT_ARG(0); // for unidirectional: 0 = [sL, bS, nIn], 1 = [bS, sL ,nIn], 2 = [bS, nIn, sL], for bidirectional: 3 = [sL, 2, bS, nOut] (for ONNX) + const auto directionMode = INT_ARG(1); // direction: 0 = fwd, 1 = bwd, 2 = bidirectional sum, 3 = bidirectional concat, 4 = bidirectional extra output dim (in conjunction with format dataFormat = 3) + // integer numbers corresponding to activations: 0=tanh, 1=relu, 2=sigmoid, 3=affine, 4=leaky relu, 5= thresholded relu, 6=scaled tanh, 7=hard sigmoid, 8=ELU, 9=softsign, 10=softplus + const auto gateAct = INT_ARG(2); // activation for input (i), forget (f) and output (o) gates + const auto cellAct = INT_ARG(3); // activation for cell state (c) + const auto outAct = INT_ARG(4); // activation for output (h) + + const auto hasBiases = B_ARG(0); // indicates whether biases array is provided + const auto hasSeqLenArray = B_ARG(1); // indicates whether seqLen array is provided + const auto hasInitH = B_ARG(2); // indicates whether initial output is provided + const auto hasInitC = B_ARG(3); // indicates whether initial cell state is provided + const auto hasPH = B_ARG(4); // indicates whether peephole connections are present + const auto retFullSeq = B_ARG(5); // indicates whether to return whole time sequence h {h_0, h_1, ... 
, h_sL-1} + const auto retLastH = B_ARG(6); // indicates whether to return output at last time step only, in this case shape would be [bS, nOut] (exact shape depends on dataFormat argument) + const auto retLastC = B_ARG(7); // indicates whether to return cells state at last time step only, in this case shape would be [bS, nOut] (exact shape depends on dataFormat argument) + + const auto cellClip = T_ARG(0); // cell clipping value, if it = 0 then do not apply clipping + + const auto x = INPUT_VARIABLE(0); // input + const auto Wx = INPUT_VARIABLE(1); // input weights + const auto Wr = INPUT_VARIABLE(2); // recurrent weights + + int count = 3; + const auto b = hasBiases ? INPUT_VARIABLE(count++) : nullptr; // biases + const auto hI = hasInitH ? INPUT_VARIABLE(count++) : nullptr; // initial output + const auto cI = hasInitC ? INPUT_VARIABLE(count++) : nullptr; // initial cell state + + count = 0; + auto h = retFullSeq ? OUTPUT_VARIABLE(count++) : nullptr; // output + auto hL = retLastH ? OUTPUT_VARIABLE(count++) : nullptr; // output at last step + auto cL = retLastC ? OUTPUT_VARIABLE(count++) : nullptr; // cell state at last step + + DataType xType = x->dataType(); + DataType WxType = Wx->dataType(); + DataType WrType = Wr->dataType(); + + Requirements req("CUDNN LSTMLAYER OP"); + //cudnn related restrictions //gateAct: sigmoid, cellAct: tanh adn et cetera + // integer numbers corresponding to activations: 0=tanh, 1=relu, 2=sigmoid, 3=affine, + // 4=leaky relu, 5= thresholded relu, 6=scaled tanh, 7=hard sigmoid, 8=ELU, 9=softsign, 10=softplus + req.expectEq(makeInfoVariable(gateAct, "gate Activation"),makeInfoVariable(2, "sigmoid")) && + req.expectEq(makeInfoVariable(cellAct, "cell Activation"),makeInfoVariable(2, "tanh")) && + req.expectEq(makeInfoVariable(outAct, "out Activation"),makeInfoVariable(2, "tanh")) && + req.expectFalse(makeInfoVariable(hasPH, HAVE_PEEPHOLE), EXPECTED_NOT_SUPPORTED) && + req.expectIn(makeInfoVariable(directionMode,"directionMode"), {0, 3}) && + req.expectIn(makeInfoVariable(dataFormat, "data Format"), {0, 1}); + + if(req){ + //cudnn api version related restrictions in our helpers + size_t cudnn_version = cudnnGetVersion(); + //though seqlengthArray was added in earlier versions we do not handle it below 8.0.0.1 + #if CUDNN_VERSION < CUDNN_NEW_RNN_API_VER + //implRestrictions = implRestrictions && !hasSeqLenArray; + req.expectFalse(makeInfoVariable(hasSeqLenArray, HAVE_SEQLENARR), EXPECTED_NOT_SUPPORTED); + #else + //implRestrictions = implRestrictions && (cudnn_version >= CUDNN_NEW_RNN_API_VER || !hasSeqLenArray); + if(cudnn_version= CUDNN_CLIPPING_API_VER || cellClip==0); + if(cudnn_version < CUDNN_CLIPPING_API_VER){ + req.expectEq(makeInfoVariable(cellClip, MSG_CELL_CLIPPING), 0) ; + } + } + //restriction that comes either from not setting Descriptor or not handling manipulation: + //restrict0: the same types + req.expectEq(makeInfoVariable(x->ordering(), ORDERING_MSG_INPUT0), 'c') && + req.expectEq(makeInfoVariable(x->ews(), EWS_MSG_INPUT0), 1) && + req.expectEq(makeInfoVariable(WxType, TYPE_MSG_INPUT1), makeInfoVariable(xType, TYPE_MSG_INPUT0) ) && + req.expectEq(makeInfoVariable(WrType, TYPE_MSG_INPUT2), makeInfoVariable(xType, TYPE_MSG_INPUT0) ); + if(b) req.expectEq(makeInfoVariable(b->dataType(), TYPE_MSG_INPUT_ "#bias"), makeInfoVariable(xType, TYPE_MSG_INPUT0) ); + if(hI){ + req.expectEq(makeInfoVariable(hI->dataType(), TYPE_MSG_INPUT_ "#hI"), makeInfoVariable(xType, TYPE_MSG_INPUT0)) && + req.expectEq(makeInfoVariable(hI->ordering(), ORDERING_MSG_INPUT_ 
"#hI"), 'c') && + req.expectEq(makeInfoVariable(hI->ews(), EWS_MSG_INPUT_ "#hI"), 1); + } + if(cI){ + req.expectEq(makeInfoVariable(cI->dataType(), TYPE_MSG_INPUT_ "#cI"), makeInfoVariable(xType, TYPE_MSG_INPUT0)) && + req.expectEq(makeInfoVariable(cI->ordering(), ORDERING_MSG_INPUT_ "#cI"), 'c') && + req.expectEq(makeInfoVariable(cI->ews(), EWS_MSG_INPUT_ "#cI"), 1); + } + if(h){ + req.expectEq(makeInfoVariable(h->dataType(), TYPE_MSG_OUTPUT_ "#h"), makeInfoVariable(xType, TYPE_MSG_INPUT0)) && + req.expectEq(makeInfoVariable(h->ordering(), ORDERING_MSG_OUTPUT_ "#h"), 'c') && + req.expectEq(makeInfoVariable(h->ews(), EWS_MSG_OUTPUT_ "#h"), 1); + } + if(hL){ + req.expectEq(makeInfoVariable(hL->dataType(), TYPE_MSG_OUTPUT_ "#hL"), makeInfoVariable(xType, TYPE_MSG_INPUT0)) && + req.expectEq(makeInfoVariable(hL->ordering(), ORDERING_MSG_OUTPUT_ "#hL"), 'c') && + req.expectEq(makeInfoVariable(hL->ews(), EWS_MSG_OUTPUT_ "#hL"), 1); + } + if(cL){ + req.expectEq(makeInfoVariable(cL->dataType(), TYPE_MSG_OUTPUT_ "#cL"), makeInfoVariable(xType, TYPE_MSG_INPUT0)) && + req.expectEq(makeInfoVariable(cL->ordering(), ORDERING_MSG_OUTPUT_ "#cL"), 'c') && + req.expectEq(makeInfoVariable(cL->ews(), EWS_MSG_OUTPUT_ "#cL"), 1); + } + req.logTheSuccess(); + return req; + } + + + + } + } + } diff --git a/cuda_code/lstm_builtin_math.cu b/cuda_code/lstm_builtin_math.cu new file mode 100644 index 0000000000000000000000000000000000000000..3cd3b4cd78f21bb607adb39f501de3b295c3c735 --- /dev/null +++ b/cuda_code/lstm_builtin_math.cu @@ -0,0 +1,216 @@ + +#include + +#include "lstm_builtin_math_cuda.h" + +__global__ void run_tfunc(float* out, float in, int tFuncIndex) +{ + lstm_transfer_list_cu[tFuncIndex](out, in); +} + +__global__ void run_tfunc_de(float* out, float in, int tFuncIndex) +{ + lstm_transfer_derivative_list_cu[tFuncIndex](out, in); +} + +__device__ void (*lstm_transfer_list_cu[])(float*, float) = { + lstm_sigmoid_cu, + lstm_modified_sigmoid_cu, + lstm_tanh_cu, + lstm_gaussian_cu, + lstm_bent_identity_cu, + lstm_softplus_cu, + lstm_softsign_cu, + lstm_sinc_cu, + lstm_sinusoid_cu, + lstm_identity_cu, + lstm_relu_cu +}; + +__device__ void (*lstm_transfer_derivative_list_cu[])(float*, float) = { + lstm_sigmoid_derivative_cu, + lstm_modified_sigmoid_derivative_cu, + lstm_tanh_derivative_cu, + lstm_gaussian_derivative_cu, + lstm_bent_identity_derivative_cu, + lstm_softplus_derivative_cu, + lstm_softsign_derivative_cu, + lstm_sinc_derivative_cu, + lstm_sinusoid_derivative_cu, + lstm_identity_derivative_cu, + lstm_relu_derivative_cu +}; + +__device__ void lstm_sigmoid_cu(float* dstPtr, float x) +{ + *dstPtr = 1.0 / (1.0 + exp(-x)); +} + +__device__ void lstm_sigmoid_derivative_cu(float* dstPtr, float x) +{ + float tmp; + + lstm_sigmoid_cu(&tmp, x); + *dstPtr = tmp * (1.0 - tmp); +} + +__device__ void lstm_modified_sigmoid_cu(float* dstPtr, float x) +{ + float tmp; + + lstm_sigmoid_cu(&tmp, x); + *dstPtr = 2.0 * tmp - 1.0; +} + +__device__ void lstm_modified_sigmoid_derivative_cu(float* dstPtr, float x) +{ + float tmp; + + lstm_sigmoid_derivative_cu(&tmp, x); + *dstPtr = 2.0 * tmp; +} + +__device__ void lstm_tanh_cu(float* dstPtr, float x) +{ + *dstPtr = 2.0 / (1.0 + exp(-2.0 * x)) - 1.0; +} + +__device__ void lstm_tanh_derivative_cu(float* dstPtr, float x) +{ + float tmp; + + lstm_tanh_cu(&tmp, x); + *dstPtr = 1.0 - tmp * tmp; +} + +__device__ void lstm_gaussian_cu(float* dstPtr, float x) +{ + *dstPtr = exp(-pow(x, 2) * 0.5); +} + +__device__ void lstm_gaussian_derivative_cu(float* dstPtr, float x) +{ + *dstPtr = 
-x * exp(-pow(x, 2) * 0.5); +} + +__device__ void lstm_modified_gaussian_cu(float* dstPtr, float x) +{ + if(x == 0) + { + *dstPtr = 1; + } + else + { + *dstPtr = sin(x) / x; + } +} + +__device__ void lstm_modified_gaussian_derivative_cu(float* dstPtr, float x) +{ + if(x == 0) + { + *dstPtr = 0; + } + else + { + *dstPtr = (cos(x) / x) - (sin(x) / pow(x, 2)); + } +} + +__device__ void lstm_bent_identity_cu(float* dstPtr, float x) +{ + *dstPtr = (sqrt(pow(x, 2) + 1.0) - 1) / 2.0 + x; +} + +__device__ void lstm_bent_identity_derivative_cu(float* dstPtr, float x) +{ + *dstPtr = x / (2.0 * sqrt(pow(x, 2) + 1)) + 1; +} + +__device__ void lstm_softplus_cu(float* dstPtr, float x) +{ + *dstPtr = log(1.0 + exp(x)); +} + +__device__ void lstm_softplus_derivative_cu(float* dstPtr, float x) +{ + *dstPtr = 1.0 / (1.0 + exp(-x)); +} + +__device__ void lstm_softsign_cu(float* dstPtr, float x) +{ + *dstPtr = x / (1 + fabs(x)); +} + +__device__ void lstm_softsign_derivative_cu(float* dstPtr, float x) +{ + *dstPtr = 1.0 / pow(1.0 + fabs(x), 2); +} + +__device__ void lstm_sinc_cu(float* dstPtr, float x) +{ + if(x == 0.0) + { + *dstPtr = 1.0; + } + else + { + *dstPtr = sin(x) / x; + } +} + +__device__ void lstm_sinc_derivative_cu(float* dstPtr, float x) +{ + if(x == 0.0) + { + *dstPtr = 0.0; + } + else + { + *dstPtr = (cos(x) / x) - (sin(x) / pow(x, 2)); + } +} + +__device__ void lstm_sinusoid_cu(float* dstPtr, float x) +{ + *dstPtr = sin(x); +} + +__device__ void lstm_sinusoid_derivative_cu(float* dstPtr, float x) +{ + *dstPtr = cos(x); +} + +__device__ void lstm_identity_cu(float* dstPtr, float x) +{ + *dstPtr = x; +} + +__device__ void lstm_identity_derivative_cu(float* dstPtr, float x) +{ + *dstPtr = 1; +} + +__device__ void lstm_relu_cu(float* dstPtr, float x) +{ + if(x < 0.0) + { + *dstPtr = 0; + } + else + { + *dstPtr = x; + } +} + +__device__ void lstm_relu_derivative_cu(float* dstPtr, float x) +{ + if(x < 0.0) + { + *dstPtr = 0; + } + else + { + *dstPtr = 1; + } +} diff --git a/cuda_code/lstm_unit_op_1.cu b/cuda_code/lstm_unit_op_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..acf094238fff92711edf00b4180266138362add1 --- /dev/null +++ b/cuda_code/lstm_unit_op_1.cu @@ -0,0 +1,180 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +/* Acknowledgement: the following code is strongly inspired by +https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op_gpu.cu +*/ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/cross_entropy_op.h" +#include "paddle/fluid/operators/lstm_unit_op.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/hostdevice.h" + +namespace paddle { +namespace operators { + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +template +__device__ Dtype cuda_sigmoid(const Dtype x) { + return Dtype(1) / (Dtype(1) + exp(-x)); +} + +template +__device__ Dtype cuda_tanh(const Dtype x) { + return Dtype(1 - exp(-2. * x)) / (Dtype(1) + exp(-2. * x)); +} + +template +__global__ void LSTMUnitKernel(const int nthreads, const int dim, + const T* C_prev, const T* X, T* C, T* H, + const T forget_bias) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + const int n = index / dim; + const int d = index % dim; + + const T* X_offset = X + 4 * dim * n; + const T i = cuda_sigmoid(X_offset[d]); + const T f = cuda_sigmoid(X_offset[1 * dim + d] + forget_bias); + const T o = cuda_sigmoid(X_offset[2 * dim + d]); + const T g = cuda_tanh(X_offset[3 * dim + d]); + const T c_prev = C_prev[index]; + const T c = f * c_prev + i * g; + C[index] = c; + const T tanh_c = cuda_tanh(c); + H[index] = o * tanh_c; + } +} + +template +__global__ void LSTMUnitGradientKernel(const int nthreads, const int dim, + const T* C_prev, const T* X, const T* C, + const T* H, const T* C_diff, + const T* H_diff, T* C_prev_diff, + T* X_diff, const T forget_bias) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + const int n = index / dim; + const int d = index % dim; + const T* X_offset = X + 4 * dim * n; + T* c_prev_diff = C_prev_diff + index; + T* X_diff_offset = X_diff + 4 * dim * n; + T* i_diff = X_diff_offset + d; + T* f_diff = X_diff_offset + 1 * dim + d; + T* o_diff = X_diff_offset + 2 * dim + d; + T* g_diff = X_diff_offset + 3 * dim + d; + + const T i = cuda_sigmoid(X_offset[d]); + const T f = cuda_sigmoid(X_offset[1 * dim + d] + forget_bias); + const T o = cuda_sigmoid(X_offset[2 * dim + d]); + const T g = cuda_tanh(X_offset[3 * dim + d]); + const T c_prev = C_prev[index]; + const T c = C[index]; + const T tanh_c = cuda_tanh(c); + const T c_term_diff = + C_diff[index] + H_diff[index] * o * (1 - tanh_c * tanh_c); + *c_prev_diff = c_term_diff * f; + *i_diff = c_term_diff * g * i * (1 - i); + *f_diff = c_term_diff * c_prev * f * (1 - f); + *o_diff = H_diff[index] * tanh_c * o * (1 - o); + *g_diff = c_term_diff * i * (1 - g * g); + } +} + +template +class LstmUnitOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace."); + + auto* x_tensor = ctx.Input("X"); + auto* c_prev_tensor = ctx.Input("C_prev"); + auto* c_tensor = ctx.Output("C"); + auto* h_tensor = ctx.Output("H"); + + auto forget_bias = static_cast(ctx.Attr("forget_bias")); + + int b_size = c_tensor->dims()[0]; + int D = c_tensor->dims()[1]; + + const T* X = x_tensor->data(); + const T* C_prev = c_prev_tensor->data(); + + T* C = c_tensor->mutable_data(ctx.GetPlace()); + T* H = h_tensor->mutable_data(ctx.GetPlace()); + + int block = 512; + int n = b_size * D; + int grid = (n + block - 1) / block; + + LSTMUnitKernel<<>>(n, D, C_prev, X, C, H, forget_bias); + } +}; + +template +class 
LstmUnitGradOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace."); + + auto x_tensor = ctx.Input("X"); + auto c_prev_tensor = ctx.Input("C_prev"); + auto c_tensor = ctx.Input("C"); + auto h_tensor = ctx.Input("H"); + + auto hdiff_tensor = ctx.Input(framework::GradVarName("H")); + auto cdiff_tensor = ctx.Input(framework::GradVarName("C")); + + auto xdiff_tensor = ctx.Output(framework::GradVarName("X")); + auto c_prev_diff_tensor = + ctx.Output(framework::GradVarName("C_prev")); + + auto* X = x_tensor->data(); + auto* C_prev = c_prev_tensor->data(); + auto* C = c_tensor->data(); + auto* H = h_tensor->data(); + + auto* H_diff = hdiff_tensor->data(); + auto* C_diff = cdiff_tensor->data(); + + auto* C_prev_diff = c_prev_diff_tensor->mutable_data(ctx.GetPlace()); + auto* X_diff = xdiff_tensor->mutable_data(ctx.GetPlace()); + + int N = c_tensor->dims()[0]; + int D = c_tensor->dims()[1]; + + auto forget_bias = static_cast(ctx.Attr("forget_bias")); + + int block = 512; + int n = N * D; + int grid = (n + block - 1) / block; + + LSTMUnitGradientKernel<<>>(n, D, C_prev, X, C, H, C_diff, + H_diff, C_prev_diff, X_diff, + forget_bias); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL(lstm_unit, ops::LstmUnitOpCUDAKernel, + ops::LstmUnitOpCUDAKernel); +REGISTER_OP_CUDA_KERNEL(lstm_unit_grad, ops::LstmUnitGradOpCUDAKernel, + ops::LstmUnitGradOpCUDAKernel); diff --git a/cuda_code/magma_zdiagcheck_3.cu b/cuda_code/magma_zdiagcheck_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..91d0d8e0effe94324d7cdfa20e7a0a0d63902f0e --- /dev/null +++ b/cuda_code/magma_zdiagcheck_3.cu @@ -0,0 +1,97 @@ +/* + -- MAGMA (version 2.0) -- + Univ. of Tennessee, Knoxville + Univ. of California, Berkeley + Univ. 
of Colorado, Denver + @date + + @precisions normal z -> c d s + +*/ +#include "magmasparse_internal.h" + +#define BLOCK_SIZE 256 + + +// kernel +__global__ void +zdiagcheck_kernel( + int num_rows, + int num_cols, + magmaDoubleComplex_ptr dval, + magmaIndex_ptr drowptr, + magmaIndex_ptr dcolind, + magma_int_t * dinfo ) +{ + int row = blockIdx.x*blockDim.x+threadIdx.x; + int j; + + if(rowcuda_stream() >>> + ( dA.num_rows, dA.num_cols, dA.dval, dA.drow, dA.dcol, dinfo ); + info = hinfo[0]; + magma_igetvector( 1, dinfo, 1, hinfo, 1, queue ); + info = hinfo[0]; + +cleanup: + magma_free( dinfo ); + magma_free_cpu( hinfo ); + + return info; +} diff --git a/cuda_code/mainSpanish.cu b/cuda_code/mainSpanish.cu new file mode 100644 index 0000000000000000000000000000000000000000..8b48b22c858231bbd6e83442a4f6d4e3e79fa076 --- /dev/null +++ b/cuda_code/mainSpanish.cu @@ -0,0 +1,2243 @@ +/* Filename: main.cu **************************************************************************** / + * + * INPUT: + * -Particulas.in: + * cantParticles + * type x y z Vx Vy Vz q ; where + * dt ; (x,y,z) = posición respecto de algún (0,0,0) + * temp0 ; (Vx,Vy,Vz) = Velocidades iniciales + * tautp ; dt = delta_tiempo + * tempi ; q = carga + * ; temp0 = temperatura target + * ; tempi = temperatura inicial (No se usa aún) + * ; tautp = factor de corrección de velocidades + * + * + * + * -TablaCoeficientesLennard + * type sigma epsilon mass min max ; donde min y max indican de qué valor + * ; a qué valor hay que densificar las muestras + * ; (NO ESTA IMPLEMENTADO AUN) + * + * ALGORITMO: + * 1-Levantar Coeficientes + * 2-Armar matriz de lennard para cant_samples_r muestras + * Para cada tipo de partícula: + * Calcular en funcion de los coeficientes el potencial para cant_samples_r valores r + * 3-Levantar partículas + * Ordenar y armar índices + * Para cada iteración de MD: + * 4-Calcular distancias: + * Cada partícula contra todas las otras + * Armar matriz de distancias + * 5-Calcular las derivadas respecto de r para cada par de partículas + * 6-Calcular fuerza para cada particula: + * Cada partícula contra todas las otras: matriz 3D + * Obtener fuerza resultante para cada partícula: vector 3D + * 7-Calcular nuevas posiciones: vector 3D + * + ***************************************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +/** **************************************************************** **/ +/** ************* DEFAULT GLOBAL VARIABLES VALUES ****************** **/ +#define BLOCK_SIZE_X 512 +#define BLOCK_SIZE_Y 1 +#define BLOCK_SIZE (BLOCK_SIZE_X*BLOCK_SIZE_Y) + +#define TEXTURE_MEM_SIZE 50000 +#define DIF_FINITAS_DELTA 4 + +/** Variables físicas **/ +#define CANT_TYPES 70 +#define MAx 90 +#define MIn 0.01 +#define DIST (MAx - MIn) + +#define DELTA_TIEMPO 0.0001 +#define TEMP 100 +#define TAO 0.1 + +#define BOX_MAX 999 // distancia máxima del 0 para cada coordenada + // Determinamos un cubo de volumen = (2*BOX_MAX) ^3 + +/** Filenames **/ +char* lennardTableFileName = "Input_Mache/TablaCoeficientesLennard"; +char* particlesFileName = "Input_Mache/particles.in"; +char* debugOutputFilename = "Output_Mache/debug.out"; +char* outputFilename = "Output_Mache/results.out"; +char* crdFilename = "Output_Mache/mdcrd"; +char* timeFilename = "Output_Mache/times.out"; + +using namespace std; +// streamsize ss = cout.precision(); + + + +//cudaSetDevice(1); +/** 
**************************************************************** **/ +/** ******************** GLOBAL VARIABLES ************************** **/ +texture texRef; +double delta_tiempo = DELTA_TIEMPO; +double temp0 = TEMP; +double tempi; +double tautp = TAO; + +double Boltzmann_cte = 0.0019872041; +double box_max_x = BOX_MAX; +double box_max_y = BOX_MAX; +double box_max_z = BOX_MAX; +bool box = true; +double cut = 12; + +int cant_steps = 1; +int cant_types = CANT_TYPES; + +bool CPU=false; +bool derivative = false; +bool analytic = false; +bool results = false; +bool amberResults = false; +bool coordinates = false; +bool periodicity = false; +bool text=false; + + + + + +/** **************************************************************** **/ +/** ************************* DEVICE ******************************* **/ + + + +/** + * RECIBE UN VALOR DE EPSILON Y SIGMA (e,s) Y EL ARREGLO CON TODOS LOS DEMAS VALORES (* EPS,* SIG) + * GUARDA EL POTENCIAL(EN LJ_POT) DE e,s VS TODOS LOS VALORES DE EPS Y SIG + */ + +__global__ +void lennard_Kernel(float* LJ_POT, double* EPS, double* SIG, + double e, double s, double var, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + if(x >= width || y >= height) {return;} + + /* Variables */ + double sig12 = (double) (s + SIG[y])/2; + double eps12 = (double) sqrt(e * EPS[y]); + double r = (double) MIn+x*var; + + /* Resultado */ + LJ_POT[y*width +x] = (float) 4.0*eps12*( pow((sig12/r),12) - pow((sig12/r),6)); +} + + + +/** **************************************************************** **/ + +/** + * RECIBE UN VALOR DE EPSILON Y SIGMA (e,s) Y EL ARREGLO CON TODOS LOS DEMAS VALORES (* EPS,* SIG) + * GUARDA LA DERIVADA DEL POTENCIAL(EN dLJ_POT) DE e,s VS TODOS LOS VALORES DE EPS Y SIG + */ + +__global__ +void derivatives_lennard_Kernel(float* dLJ_POT, double* EPS, double* SIG, + double e, double s, double var, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + if(x >= width || y >= height) {return;} + + /* Variables */ + double sig12 = (double) (s + SIG[y])/2; + double eps12 = (double) sqrt(e * EPS[y]); + double r = (double) MIn+x*var; + + /* Resultado */ + dLJ_POT[y*width +x] = (float) 24.0*eps12*( pow(sig12,6)/ pow(r,7) - 2 * pow(sig12,12)/ pow(r,13)); +} + +/** **************************************************************** **/ +__global__ +void close_distances_kernel(double* X, double* Y, double* Z, double* R, + double* position_x, double* position_y, double* position_z, + double box_x, double box_y, double box_z, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; + + if(i >= width || j >= height) {return;} + unsigned int pos = j*width+i; + + double _X = position_x[i] - position_x[j]; + double _Y = position_y[i] - position_y[j]; + double _Z = position_z[i] - position_z[j]; + + _X = _X - box_x * round((double) _X/box_x); + _Y = _Y - box_y * round((double) _Y/box_y); + _Z = _Z - box_z * round((double) _Z/box_z); + X[pos] = _X; + Y[pos] = _Y; + Z[pos] = _Z; + R[pos] = (double) sqrt( _X*_X + _Y*_Y + _Z*_Z ); +} + + +/** **************************************************************** **/ + +__global__ +void distances_kernel(double* R, double* X, double* Y, double* Z, + 
double* x1, double* y1, double* z1, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + if(x >= width || y >= height) {return;} + + double x_ = x1[x] - x1[y]; + double y_ = y1[x] - y1[y]; + double z_ = z1[x] - z1[y]; + X[y*width+x] = x_; + Y[y*width+x] = y_; + Z[y*width+x] = z_; + R[y*width+x] = (double) sqrt( x_*x_ + y_*y_ + z_*z_ ); +} +/***************************************************************************/ + + +__global__ void derivative_E_r_memory(float* LJPot,double* dEr, double* r, double cut, int* item_to_type, + int cant_samples_r, int cant_types, int width, int height ) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/ + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/ + double erre=r[y*width+x]; + double result; + /* Dentro del bloque correspondiente */ + if(x >= width || y >= height) {return;} + if(x == y || erre >= cut) { + //dEr[y*width+x] = 0; return; + result=0; + } + else{ + + /** type of particles **/ + int t_o_p_1 = item_to_type[y] * cant_types; //this one decides which subMatrix to use + int t_o_p_2 = item_to_type[x] + t_o_p_1; //this one decides which row on these + int posInicial = t_o_p_2 * cant_samples_r; + /** Convierto r a subíndice de matriz de lennard-jones **/ + // float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x + + // int index=0; + double superior=erre + (DIF_FINITAS_DELTA*DIST/cant_samples_r); + double inferior=erre - (DIF_FINITAS_DELTA*DIST/cant_samples_r); + int indexsup=posInicial + ((superior-MIn)*(cant_samples_r/DIST)); + int indexinf=posInicial + ((inferior-MIn)*(cant_samples_r/DIST)); + + if(superior > MAx) + indexsup=posInicial + cant_samples_r - 1; + if(superiorMAx) + indexinf=posInicial + cant_samples_r - 1; + + double E_r_up = (double) LJPot[indexsup]; + double E_r_dwn = (double) LJPot[indexinf]; + + double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / cant_samples_r; + result = (E_r_up - E_r_dwn) / (r_dif); + } + + dEr[y*width+x]=result; +} + + + + + +/** **************************************************************** **/ + +void derivative_E_r_cpu(float* LJPot,double* dEr, double* r, double cut, int* item_to_type, + int cant_samples_r, int cant_types, int width, int height, int x, int y) +{ + /* Elemento de la matriz a calcular */ + //unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/ + //unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/ + + /* Dentro del bloque correspondiente */ + //if(x >= width || y >= height) {return;} + if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;} + + + /** type of particles **/ + int t_o_p_1 = item_to_type[y] * cant_types; //this one decides which subMatrix to use + int t_o_p_2 = item_to_type[x] + t_o_p_1; //this one decides which row on these + int posInicial = t_o_p_2 * cant_samples_r; + /** Convierto r a subíndice de matriz de lennard-jones **/ + // float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x + + // int index=0; + double erre=r[y*width+x]; + double superior=erre + (DIF_FINITAS_DELTA*DIST/cant_samples_r); + double inferior=erre - (DIF_FINITAS_DELTA*DIST/cant_samples_r); + int indexsup=posInicial + ((superior-MIn)*(cant_samples_r/DIST)); + int indexinf=posInicial + 
((inferior-MIn)*(cant_samples_r/DIST)); + + if(superior > MAx) + indexsup=posInicial + cant_samples_r - 1; + if(superiorMAx) + indexinf=posInicial + cant_samples_r - 1; + + double E_r_up = (double) LJPot[indexsup]; + double E_r_dwn = (double) LJPot[indexinf]; + + double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / cant_samples_r; + + dEr[y*width+x] = (E_r_up - E_r_dwn) / (r_dif); +} + + + + +/*************************************************************************/ +__global__ +void derivative_E_r(double* dEr, double* r, double cut, int* item_to_type, + int cant_samples_r, int cant_types, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/ + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/ + double erre= r[y*width+x]; + double result; + /* Dentro del bloque correspondiente */ + if(x >= width || y >= height) {return;} + if(x == y || erre >= cut) { + //dEr[y*width+x] = 0; return; + result=0; + } + + else{ + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particles **/ + float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use + float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these + + /** Convierto r a subíndice de matriz de lennard-jones **/ + /** r = (MAX-MIN) * X / N + MIN **/ + /** x = (r-MIN) * N / (MAX-MIN) **/ + float index_x = (float)((double) (erre - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x + /* + double rposta=r[y*width+x]; + if(rposta> MAx) + rposta=MAx; + else + if(rposta= width || y >= height) {return;} + if(x == y || erre >= cut) { + //dEr[y*width+x] = 0; return; + result=0; + } + else{ + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particles **/ + //float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use + //float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these + + + int t_o_p_1 = item_to_type[y] * cant_types; //this one decides which subMatrix to use + int t_o_p_2 = item_to_type[x] + t_o_p_1; //this one decides which row on these + int posInicial=t_o_p_2 * cant_samples_r; //comienzo de la fila?? 
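+        /* Table layout: dLJPot stores one row of cant_samples_r pre-sampled dE/dr values for
+         * every ordered (type_i, type_j) pair, i.e. cant_types*cant_types rows laid out
+         * contiguously; posInicial is the offset of the row for this pair of types. A distance
+         * r is mapped to a column with idx = (r - MIn) * cant_samples_r / DIST (the formulas
+         * quoted just below) and clamped to the valid sample range before the lookup. */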
+ /** Convierto r a subíndice de matriz de lennard-jones **/ + /** r = (MAX-MIN) * X / N + MIN **/ + /** x = (r-MIN) * N / (MAX-MIN) **/ + //float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x + //int index=0; + int posMax=cant_samples_r -2; + + float sesgo=(erre-MIn) *(cant_samples_r/DIST); + if(sesgo>posMax) + result = dLJPot[posInicial+ posMax]; + else + if(sesgo<0) + result = dLJPot[posInicial]; + else + result = dLJPot[posInicial+(int)ceil(sesgo)]; + } + dEr[y*width+x]=result; + +/* + if(erre > MAx) + dEr[y*width+x] = dLJPot[posInicial + cant_samples_r - 1]; + else + if(erre= width || y >= height) {return;} + if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;} + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particles **/ + //float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use + //float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these + + + int t_o_p_1 = item_to_type[y] * cant_types; //this one decides which subMatrix to use + int t_o_p_2 = item_to_type[x] + t_o_p_1; //this one decides which row on these + int posInicial=t_o_p_2 * cant_samples_r; //comienzo de la fila?? + /** Convierto r a subíndice de matriz de lennard-jones **/ + /** r = (MAX-MIN) * X / N + MIN **/ + /** x = (r-MIN) * N / (MAX-MIN) **/ + //float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x + //int index=0; + double erre=r[y*width+x]; + if(erre > MAx) + dEr[y*width+x] = dLJPot[posInicial + cant_samples_r - 1]; + else + if(erre= width || y >= height) {return;} + if(x == y || erre >= cut) { + result=0; + //dEr[y*width+x] = 0; + //return; + } + else{ + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particles **/ + float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use + float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these + + /** Convierto r a subíndice de matriz de lennard-jones **/ + /** r = (MAX-MIN) * X / N + MIN **/ + /** x = (r-MIN) * N / (MAX-MIN) **/ + float index_x = (float)((double) (erre - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x + /* double rposta=r[y*width+x]; + if(rposta> MAx) + rposta=MAx; + else + if(rposta= width || y >= height) {return;} + if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;} + + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particles **/ + float t_o_p_1 = (float) item_to_type[y]; //this one decides which subMatrix to use + float t_o_p_2 = (float) item_to_type[x]; //this one decides which row on these + float row = t_o_p_2 + 0.5 + (t_o_p_1* cant_types); + /** Convierto r a subíndice de matriz de lennard-jones **/ + /** r = (MAX-MIN) * X / N + MIN **/ + /** x = (r-MIN) * N / (MAX-MIN) **/ + float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x + +/* + double rposta=r[y*width+x]; + if(rposta> MAx) + rposta=MAx; + else + if(rposta= width || y >= height) {return;} + if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;} + + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particle 2 **/ + int type_i = item_to_type[x]; + int type_j = item_to_type[y]; + + 
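+    /* Pair parameters follow the Lorentz-Berthelot combining rules used throughout this file:
+     * sig12 = (sig_i + sig_j)/2 (arithmetic mean) and eps12 = sqrt(eps_i * eps_j) (geometric
+     * mean). The value written below is the exact derivative of the 12-6 Lennard-Jones
+     * potential E(r) = 4*eps12*((sig12/r)^12 - (sig12/r)^6), namely
+     * dE/dr = 24*eps12*(sig12^6/r^7 - 2*sig12^12/r^13), presumably the path taken by the
+     * analytic (-a) mode instead of the sampled-table lookups above. */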
double sig12 = (double) (SIG[type_i] + SIG[type_j])/2; + double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]); + + dEr[y*width+x] = (double) 24.0*eps12*( pow(sig12,6)/ pow(r[y*width+x],7) - 2 * pow(sig12,12)/ pow(r[y*width+x],13)); +} + + + +__global__ +void derivative_E_r_analytic(double* dEr, double* r, double cut, int* item_to_type, int cant_samples_r, + double* EPS, double* SIG, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/ + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/ + + /* Dentro del bloque correspondiente */ + double erre=r[y*width+x]; + double result; + if(x >= width || y >= height) {return;} + + if(x == y || erre >= cut) { + //dEr[y*width+x] = 0; return; + result=0; + } + else{ + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particle 2 **/ + int type_i = item_to_type[x]; + int type_j = item_to_type[y]; + + double sig12 = (double) (SIG[type_i] + SIG[type_j])/2; + double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]); + + result = (double) 24.0*eps12*( pow(sig12,6)/ pow(erre,7) - 2 * pow(sig12,12)/ pow(erre,13)); + } + dEr[y*width+x]=result; + +} +__global__ +void E_r_analytic(double* Er, double* r, double cut, int* item_to_type, int cant_samples_r, + double* EPS, double* SIG, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/ + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/ + + /* Dentro del bloque correspondiente */ + if(x >= width || y >= height) {return;} + if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;} + + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particle 2 **/ + int type_i = item_to_type[x]; + int type_j = item_to_type[y]; + + double sig12 = (double) (SIG[type_i] + SIG[type_j])/2; + double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]); + + Er[y*width+x] = (double) 4.0*eps12*( pow((sig12/r[y*width+x]),12) - pow((sig12/r[y*width+x]),6)); +} + + + +/** **************************************************************** **/ +/** -ANALYTIC */ +/* ***************************************************************** **/ + + +/** **************************************************************** **/ + + + /* Fx = dE(r) / dr * (x1-x2) / r */ +__global__ +void Parcial_Forces_Kernel(double* force, double* dEr, double* dif, double* r, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + if(x >= width || y >= height) {return;} + if(x == y) {force[y*width+x] = 0; return;} + + //force[y*width+x] = dEr[y*width+x] * dif[y*width+x] ; + + force[y*width+x] = dEr[y*width+x] * dif[y*width+x] / r[y*width+x]; +} + +/** **************************************************************** **/ + + +__global__ +void Resultant_Forces_Kernel(double* result, double* forces, int cant) +{ + /* Elemento del vector a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + if(x >= cant) {return;} + + int i = 0; + double tmp = 0; + int row = x*cant; + for(; i < cant; i++){ + tmp += forces[row + i]; + } + result[x] = tmp; +} + +/** **************************************************************** **/ + + +/* V(t + Dt/2) = V(t - Dt/2) + [ F(t) * Dt ] / m */ +__global__ +void 
Resultant_Velocities_Kernel(double* velocity, double* old_velocity, double* force, double* m, + int* item_to_type, double delta_tiempo, int cant_particles) +{ + /* Elemento de la matriz a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i >= cant_particles) {return;} + + double Vt = old_velocity[i]; + int type = item_to_type[i]; + double dtx = delta_tiempo*20.454999999999; + //double dtx=delta_tiempo; + + /* Result */ + velocity[i] = Vt + ( (force[i]*dtx) / m[type] ); +} + +/** **************************************************************** **/ + + +/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */ +__global__ +void Resultant_Positions_Kernel(double* positions, double* velocity, double delta_tiempo, int cant) +{ + /* Elemento del vector a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i >= cant) {return;} + double dtx = delta_tiempo*20.454999999999; + //double dtx=delta_tiempo; + positions[i] = positions[i] + (velocity[i] * dtx); +} + +/** **************************************************************** **/ + + + +/* -BOX_MAX 0 BOX_MAX */ +/* |-----------------|-----------------| */ + +__global__ +void Adjustin_Positions_Kernel(double* position, double box_max, int cant) +{ + /* Elemento del vector a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i >= cant) {return;} + double pos = position[i] - box_max; + if(pos > 0){ + position[i] = -box_max + fmod(pos, (double) (2*box_max)); + } + if(pos < -2*box_max){ + position[i] = box_max + fmod(pos, (double) (2*box_max)); + } + +} + + +/** **************************************************************** **/ + + +/* Ek = |v|^2 * m / 2 */ +/* Ek_x = (v_x)^2 * m / 2 */ +__global__ +void Kinetic_Energy_Kernel(double* kE, double* vold, double* v, double* m, int* item_to_type, int cant) +{ + /* Elemento del vector a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i>= cant) {return;} + + double vi = vold[i] + v[i]; + // double vi=v[i]; + int type = item_to_type[i]; + + // kE[i] = vi * vi * m[type] / 2; + + kE[i] = vi * vi * m[type] / 8; +} + +/** **************************************************************** **/ + + +__global__ +void Total_Kinetic_Energy_Kernel(double* kE, double* Ke_x, double* Ke_y, double* Ke_z, int cant) +{ + /* Elemento del vector a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i>= cant) {return;} + + kE[i] = Ke_x[i] + Ke_y[i] + Ke_z[i]; +} + +/** **************************************************************** **/ + + +__global__ +void Corrected_Velocities_Kernel(double* vold, double* v, double lambda, int cant){ + /* Elemento del vector a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i>= cant) {return;} + vold[i] = v[i]; + //vold[i] = v[i] * lambda; +} + + + + + + + + + + + +/** **************************************************************** **/ +/** *************************** HOST ******************************* **/ + +int main( int argc, char* argv[] ) +{ + + cudaSetDevice(1); + + //PROCESO LOS PARAMETROS DE ENTRADA + for(uint i = 0; i < argc; i++){ + if(strcmp(argv[i], "-t") == 0){ + /* outputTimeFilename */ + timeFilename = argv[i+1]; + } + if(strcmp(argv[i], "-a") == 0){ + /* ANALYTIC mode */ + analytic = true; + } + if(strcmp(argv[i], "-d") == 0){ + /* DERIVATIVE mode */ + derivative = true; + } + if(strcmp(argv[i], "-r") == 0){ + /* RESULTS or TIMER mode */ + results = true; + amberResults = true; + } + if(strcmp(argv[i], "-ar") == 0){ + /* RESULTS */ + 
amberResults = true; + } + + if(strcmp(argv[i], "-c") == 0){ + /* PRINT mdcrd file */ + coordinates = true; + } + + if(strcmp(argv[i], "-p") == 0){ + /* Periodicity */ + periodicity = true; + } + if(strcmp(argv[i], "-cpu") == 0){ + CPU = true; + } + if(strcmp(argv[i], "-tex") == 0){ + text = true; + } + // if(strcmp(argv[i], "-cut") == 0){ + // cut = atoi(argv[i+1]); + // } + + + + } + + + + //IMPRIMO QUE CARAJO ESTOY EJECUTANDO + if (derivative) + cout << "Derivative" << endl; + if (analytic) + cout << "Analytic" << endl; + if(results){ + cout << "DEBUG mode ON" << endl; + } + if(amberResults){ + cout << "AMBER results ON" << endl; + } + + + + + //CONFIGURAR OUTPUT + + fstream out; + fstream crd; + //if(results or amberResults){ + /* Output file */ + out.open(outputFilename,fstream::out); + streamsize ss = out.precision(); + out << setprecision(20); + //} + if(coordinates){ + /* CRD output file */ + crd.open(crdFilename,fstream::out); + crd << setprecision(3); + crd.setf( std::ios::fixed, std:: ios::floatfield ); + crd << " POS(x) POS(y) POS(z)" << endl; + } + + struct timeval tv1, tv2; + fstream taim; + if(!results){ //timer mode ON + /* Time output file */ + taim.open(timeFilename, fstream::app | fstream::out); + taim << setprecision(20); + } + + + + + + + + + /* Levantamos Coeficientes de Lennard */ + ifstream table (lennardTableFileName); + table >> cant_types; + /**Variables y memoria*/ + size_t cant_types_size = cant_types * sizeof(double); + + vector h_type; + h_type.resize(cant_types); + double* h_sigma = (double*) ( malloc(cant_types_size)); + double* h_epsilon = (double*) ( malloc(cant_types_size)); + double* h_mass = (double*) ( malloc(cant_types_size)); + + + /**Levantamos datos*/ + for(int j = 0; j> h_type[j]; + table >> h_sigma[j]; + table >> h_epsilon[j]; + table >> h_mass[j]; + } + table.close(); + + + +// ***************** +//VARIABLES PARA GUARDAR ENERGIA TOTAL + +double diferencia, etotalX , etotinicial; + +//****************************** + + + + + + + /*******************************/ + /*Armamos matrices de lennard */ + /******************************/ + /**Variables y memoria**/ + int cant_samples_r = TEXTURE_MEM_SIZE/(sizeof(float)); // cant of original sample values (máximo permitido por mem de textura) + double var = DIST / ((double) cant_samples_r); // variation of r + size_t cant_samples_r_size = cant_samples_r * sizeof(float); + + float* h_dLJPot; + float* h_LJPot; + + if(derivative) + h_dLJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float) + else + h_LJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float) + + int width = cant_samples_r; + int height = cant_types; + dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y); + dim3 dimGrid( (int) ceil((double)width / (double)dimBlock.x), (int) ceil((double)height / (double)dimBlock.y) ); + + + double* d_EPS; //ARRAY PARA TODOS LOS VALORES DE EPSILON + double* d_SIG; //ARRAY PARA TODOS LOS VALORES DE SIGMA + float* d_LJPot; + float* d_dLJPot; + cudaMalloc(&d_EPS, cant_types_size); + cudaMalloc(&d_SIG, cant_types_size); + cudaMemcpy(d_EPS, h_epsilon, cant_types_size, cudaMemcpyHostToDevice); + cudaMemcpy(d_SIG, h_sigma, cant_types_size, cudaMemcpyHostToDevice); + //if(derivative) + cudaMalloc(&d_dLJPot, cant_samples_r_size * cant_types); + //else + cudaMalloc(&d_LJPot, cant_samples_r_size * cant_types); + + + + /** Rellenamos datos con CUDA **/ + //CANTIDAD TOTAL DE THREADS: EN X=cant_samples_r EN Y=cant_types 
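+ /* For reference, a minimal sketch of how the per-type tables are filled by the
+  * lennard_Kernel / derivatives_lennard_Kernel launches that follow: one launch per type
+  * index 'a' writes a (cant_types x cant_samples_r) sub-matrix, which is then copied back
+  * into the big host table at offset a*cant_samples_r*cant_types. The <<<dimGrid, dimBlock>>>
+  * configuration and the loop bound over cant_types are assumptions inferred from the
+  * declarations above, not a verbatim copy of this code:
+  *
+  *   if(derivative) {
+  *       for(int a = 0; a < cant_types; a++) {
+  *           derivatives_lennard_Kernel<<<dimGrid, dimBlock>>>(d_dLJPot, d_EPS, d_SIG,
+  *                                                             h_epsilon[a], h_sigma[a], var, width, height);
+  *           cudaMemcpy(&h_dLJPot[a*cant_samples_r*cant_types], d_dLJPot,
+  *                      cant_types*cant_samples_r_size, cudaMemcpyDeviceToHost);
+  *       }
+  *   } else { // same loop with lennard_Kernel writing d_LJPot / h_LJPot
+  *   }
+  */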
+ if(derivative) { //LLENO LA TEXTURA CON LAS DERIVADAS PARA CADA PAR DE TIPOS + for(int a = 0; a>>(d_dLJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height); + cudaMemcpy( (float*) &(h_dLJPot[(a*cant_samples_r*cant_types)]), d_dLJPot, cant_types * cant_samples_r_size, cudaMemcpyDeviceToHost); + } + } else { + //LLENO LA TEXTURA CON LAS DERIVADAS DEL POTENCIAL PARA CADA PAR DE TIPOS + for(int a = 0; a>>(d_LJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height); + cudaMemcpy( (float*) &(h_LJPot[(a*cant_samples_r*cant_types)]), d_LJPot, cant_types * cant_samples_r_size, cudaMemcpyDeviceToHost); + } + } + + /** Liberamos memoria de CUDA **/ + cudaFree(&d_EPS); + cudaFree(&d_SIG); + cudaFree(&d_LJPot); + cudaFree(&d_dLJPot); + +//out<<"paso "<< endl; + +cudaError err; +if(!CPU){ + if(!text){ + if(derivative){ + err= cudaMalloc(&d_dLJPot, cant_samples_r_size * cant_types*cant_types); + if( err != cudaSuccess) + { + printf("CUDA error paso1: %s\n", cudaGetErrorString(err)); + } + err=cudaMemcpy( d_dLJPot, h_dLJPot, cant_types * cant_types * cant_samples_r_size, cudaMemcpyHostToDevice); + if( err != cudaSuccess) + { + printf("CUDA error paso2: %s\n", cudaGetErrorString(err)); + } + + } + else{ + err= cudaMalloc(&d_LJPot, cant_samples_r_size * cant_types*cant_types); + if( err != cudaSuccess) + { + printf("CUDA error paso3: %s\n", cudaGetErrorString(err)); + } + err= cudaMemcpy( d_LJPot, h_LJPot, cant_types * cant_types * cant_samples_r_size, cudaMemcpyHostToDevice); + if( err != cudaSuccess) + { + printf("CUDA error paso4: %s\n", cudaGetErrorString(err)); + } + + } + } +} + + /**_ DEBUG **/ + if(results){ + if(derivative) + out << " derivative LENNARD " << endl; + else + out << " LENNARD " << endl; + for(int a = 0; a h_particle_type; + particles >> cant_particles; //PRIMER LINEA DE particles.in ES EL NUMERO DE PARTICULAS QUE HAY + size_t cant_particles_size = cant_particles * sizeof(double); + h_position_x = (double*)malloc(cant_particles_size); + h_position_y = (double*)malloc(cant_particles_size); + h_position_z = (double*)malloc(cant_particles_size); + h_velocity_x = (double*)malloc(cant_particles_size); + h_velocity_y = (double*)malloc(cant_particles_size); + h_velocity_z = (double*)malloc(cant_particles_size); + h_velocity_old_x = (double*)malloc(cant_particles_size); + h_velocity_old_y = (double*)malloc(cant_particles_size); + h_velocity_old_z = (double*)malloc(cant_particles_size); + h_chargue = (double*)malloc(cant_particles_size); + h_particle_type.resize(cant_particles); + + + /** Guardamos datos en memoria : coordenadas, velocidades, tipos, cargas **/ + for(uint i = 0; i < cant_particles ; i++) { + particles >> h_particle_type[i]; + + particles >> h_position_x[i]; + particles >> h_position_y[i]; + particles >> h_position_z[i]; + + particles >> h_velocity_old_x[i]; + particles >> h_velocity_old_y[i]; + particles >> h_velocity_old_z[i]; + + particles >> h_chargue[i]; + } + + + + + + + /** Perioricidad **/ + //TODO: por ahora usamos cubo, + //situamos el cero en el centro del mismo + //Recibimos en orden x, y, z + particles >> box; + if(box){ + cout << " Levantamos caja" << endl; + particles >> h_box_x; + particles >> h_box_y; + particles >> h_box_z; + particles >> h_box_alpha; + particles >> h_box_beta; + particles >> h_box_gamma; + if( h_box_alpha != 90 or h_box_beta != 90 or h_box_gamma != 90){ + cout << " Se forzaron los angulos para que sea un CUBO: " << endl; + } + box_max_x = h_box_x/2; + box_max_y = h_box_y/2; + box_max_z = h_box_z/2; + } + /** Parametros 
**/ + particles >> cant_steps; + particles >> delta_tiempo; + particles >> temp0; + particles >> tempi; + particles >> tautp; + particles >> cut; + particles.close(); + + + + + +// if(results){ +// /** DEBUG **/ +// out << " INITIAL VALUES" << endl; +// for(int i = 0; i>>(d_distance_r, d_distance_x, d_distance_y, d_distance_z, + d_position_x, d_position_y, d_position_z, width, height); + + } else { + /**Rellenamos datos**/ + close_distances_kernel<<>>(d_distance_x, d_distance_y, d_distance_z, d_distance_r, + d_position_x, d_position_y, d_position_z, + h_box_x, h_box_y, h_box_z, width, height); + + } + + + + //TRAIGO AL HOST LAS DISTANCIAS PORQUE LAS VOY A NECESITAR PARA HACER EL CALCULO DE dEr EN CPU + if (CPU) + cudaMemcpy(h_distance_r, d_distance_r, s_size, cudaMemcpyDeviceToHost); + + + + + //if(results){ + /** DEBUG **/ + /*cudaMemcpy(h_distance_r, d_distance_r, s_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_distance_x, d_distance_x, s_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_distance_y, d_distance_y, s_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_distance_z, d_distance_z, s_size, cudaMemcpyDeviceToHost); + + if (step %10000 == 0){ + + out << " DISTANCES - R" << endl << " "; + for(int i = 0; i>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height); + //if(step %100 ==0) + E_r_analytic<<>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height); + + } else { + if(derivative){ + if (CPU){ //TABLA DE DERIVADAS SOBRE CPU + double *h_dEr=(double *) malloc(s_size*cant_particles); + int x,y; + for (x=0;x>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height); + else{ + direct_derivative_E_r_memory<<>>(d_dLJPot, d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height); + //out<<"Salio"<>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height); + + } else { //USO TABLAS DE POTENCIALES Y DIFERENCIAS FINITAS + if(CPU){ //TABLAS HACIENDO EL CALCULO SOBRE CPU + // out<< "ENTRO A CPU por tablas"<>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height); + //out<<"salio text tabla"<>>(d_LJPot, d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height); + } + //if(step %100 == 0) + E_r_analytic<<>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height); + + //E_r<<>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height); + } + } + + // if(amberResults){ + //if(!derivative){ + /** DEBUG **/ + //out << " Lennard-Jones" << endl << " "; + double vdwaals = 0; + double (*h_Er)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size)); + cudaError errato=cudaMemcpy(h_Er, d_Er, s_size, cudaMemcpyDeviceToHost); + if( errato != cudaSuccess) + { + printf("CUDA error paso1: %s\n", cudaGetErrorString(err)); + } + + for(int i = 0; i>>(d_Force_x, d_dEr, d_distance_x, d_distance_r, width, height); + Parcial_Forces_Kernel<<>>(d_Force_y, d_dEr, d_distance_y, d_distance_r, width, height); + Parcial_Forces_Kernel<<>>(d_Force_z, d_dEr, d_distance_z, d_distance_r, width, height); + + //if(results){ + /** DEBUG **/ + /*double fuerzaTot=0; + cudaMemcpy(h_Force_x, d_Force_x, s_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_Force_y, d_Force_y, s_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_Force_z, d_Force_z, s_size, cudaMemcpyDeviceToHost); + out << " FORCES" << endl << " "; + for(int i = 0; i>>(d_Force_x_resultant, d_Force_x, 
cant_particles); + Resultant_Forces_Kernel<<>>(d_Force_y_resultant, d_Force_y, cant_particles); + Resultant_Forces_Kernel<<>>(d_Force_z_resultant, d_Force_z, cant_particles); + +// if(results){ + /** DEBUG **/ +/* + cudaMemcpy(h_Force_x_resultant, d_Force_x_resultant, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_Force_y_resultant, d_Force_y_resultant, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_Force_z_resultant, d_Force_z_resultant, cant_particles_size, cudaMemcpyDeviceToHost); + //out << " RESULTANT FORCES" << endl; + for(int i = 0; i>>(d_position_x, d_velocity_x, delta_tiempo, cant_particles); + Resultant_Positions_Kernel<<>>(d_position_y, d_velocity_y, delta_tiempo, cant_particles); + Resultant_Positions_Kernel<<>>(d_position_z, d_velocity_z, delta_tiempo, cant_particles); + + if(results){ + /** DEBUG **/ + cudaMemcpy(h_position_x, d_position_x, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_position_y, d_position_y, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_position_z, d_position_z, cant_particles_size, cudaMemcpyDeviceToHost); + out << " RESULTANT POSITIONS" << endl; + for(int i = 0; i>>(d_position_x, box_max_x, cant_particles); + Adjustin_Positions_Kernel<<>>(d_position_y, box_max_y, cant_particles); + Adjustin_Positions_Kernel<<>>(d_position_z, box_max_z, cant_particles); + } + if(coordinates){ + /** DEBUG **/ + cudaMemcpy(h_position_x, d_position_x, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_position_y, d_position_y, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_position_z, d_position_z, cant_particles_size, cudaMemcpyDeviceToHost); + if(results){ + out << " RESULTANT POSITIONS in the CUBE" << endl; + for(int i = 0; i>>(d_kinetic_energy_x, d_velocity_old_x, d_velocity_x, d_mass, d_item_particle, cant_particles); + Kinetic_Energy_Kernel<<>>(d_kinetic_energy_y, d_velocity_old_y, d_velocity_y, d_mass, d_item_particle, cant_particles); + Kinetic_Energy_Kernel<<>>(d_kinetic_energy_z, d_velocity_old_z, d_velocity_z, d_mass, d_item_particle, cant_particles); + + if(results){ + /** DEBUG **/ + cudaMemcpy(h_kinetic_energy_x, d_kinetic_energy_x, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_kinetic_energy_y, d_kinetic_energy_y, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_kinetic_energy_z, d_kinetic_energy_z, cant_particles_size, cudaMemcpyDeviceToHost); + out << " KINETIC ENERGY" << endl; + for(int i = 0; i>>(d_kinetic_energy, d_kinetic_energy_x, d_kinetic_energy_y, d_kinetic_energy_z, cant_particles); + + + /* */ + /** Calculamos la Energía cinética total del sistema **/ + cudaMemcpy(h_kinetic_energy, d_kinetic_energy, cant_particles_size, cudaMemcpyDeviceToHost); + double Ek_TOT = 0; + for(int i = 0; i +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +/** **************************************************************** **/ +/** ************* DEFAULT GLOBAL VARIABLES VALUES ****************** **/ +#define BLOCK_SIZE_X 32 +#define BLOCK_SIZE_Y 16 +#define BLOCK_SIZE (BLOCK_SIZE_X*BLOCK_SIZE_Y) + +#define TEXTURE_MEM_SIZE 65000 +#define DIF_FINITAS_DELTA 4 + +/** Variables físicas **/ +#define CANT_TYPES 37 +#define MAx 15 +#define MIn 0.3 +#define DIST (MAx - MIn) + +#define DELTA_TIEMPO 0.001 +#define TEMP 100 +#define TAO 0.1 + +#define BOX_MAX 999 // distancia máxima del 0 para cada coordenada + // Determinamos un cubo de volumen = (2*BOX_MAX) ^3 + +/** Filenames **/ +char* lennardTableFileName = 
"Input_Mache/TablaCoeficientesLennard"; +char* particlesFileName = "Input_Mache/particles.in"; +char* debugOutputFilename = "Output_Mache/debug.out"; +char* outputFilename = "Output_Mache/results.out"; +char* crdFilename = "Output_Mache/mdcrd"; +char* timeFilename = "Output_Mache/times.out"; + +using namespace std; +// streamsize ss = cout.precision(); + +/** **************************************************************** **/ +/** ******************** GLOBAL VARIABLES ************************** **/ +texture texRef; +double delta_tiempo = DELTA_TIEMPO; +double temp0 = TEMP; +double tempi; +double tautp = TAO; + +double Boltzmann_cte = 0.0019872041; +double box_max_x = BOX_MAX; +double box_max_y = BOX_MAX; +double box_max_z = BOX_MAX; +bool box = true; +double cut = 12; + +int cant_steps = 1; +int cant_types = CANT_TYPES; + + +bool derivative = false; +bool analytic = false; +bool results = false; +bool amberResults = false; +bool coordenates = false; + + +/** **************************************************************** **/ +/** ************************* DEVICE ******************************* **/ + +__global__ +void lennard_Kernel(float* LJ_POT, double* EPS, double* SIG, + double e, double s, double var, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + if(x >= width || y >= height) {return;} + + /* Variables */ + double sig12 = (double) (s + SIG[y])/2; + double eps12 = (double) sqrt(e * EPS[y]); + double r = (double) MIn+x*var; + + /* Resultado */ + LJ_POT[y*width +x] = (float) 4.0*eps12*( pow((sig12/r),12) - pow((sig12/r),6)); +} + +/** **************************************************************** **/ + +__global__ +void derivatives_lennard_Kernel(float* dLJ_POT, double* EPS, double* SIG, + double e, double s, double var, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + if(x >= width || y >= height) {return;} + + /* Variables */ + double sig12 = (double) (s + SIG[y])/2; + double eps12 = (double) sqrt(e * EPS[y]); + double r = (double) MIn+x*var; + + /* Resultado */ + dLJ_POT[y*width +x] = (float) 24.0*eps12*( pow(sig12,6)/ pow(r,7) - 2 * pow(sig12,12)/ pow(r,13)); +} + +/** **************************************************************** **/ +__global__ +void close_distances_kernel(double* X, double* Y, double* Z, double* R, + double* position_x, double* position_y, double* position_z, + double box_x, double box_y, double box_z, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; + + if(i >= width || j >= height) {return;} + unsigned int pos = j*width+i; + + double _X = position_x[i] - position_x[j]; + double _Y = position_y[i] - position_y[j]; + double _Z = position_z[i] - position_z[j]; + + _X = _X - box_x * round((double) _X/box_x); + _Y = _Y - box_y * round((double) _Y/box_y); + _Z = _Z - box_z * round((double) _Z/box_z); + X[pos] = _X; + Y[pos] = _Y; + Z[pos] = _Z; + R[pos] = (double) sqrt( _X*_X + _Y*_Y + _Z*_Z ); +} + + +/** **************************************************************** **/ + +__global__ +void distances_kernel(double* R, double* X, double* Y, double* Z, + double* x1, double* y1, double* z1, int width, int height) +{ + /* Elemento de la matriz a calcular */ + 
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + if(x >= width || y >= height) {return;} + + double x_ = x1[x] - x1[y]; + double y_ = y1[x] - y1[y]; + double z_ = z1[x] - z1[y]; + X[y*width+x] = x_; + Y[y*width+x] = y_; + Z[y*width+x] = z_; + R[y*width+x] = (double) sqrt( x_*x_ + y_*y_ + z_*z_ ); +} + +/** **************************************************************** **/ + +__global__ +void derivative_E_r(double* dEr, double* r, double cut, int* item_to_type, + int cant_samples_r, int cant_types, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/ + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/ + + /* Dentro del bloque correspondiente */ + if(x >= width || y >= height) {return;} + if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;} + + + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particles **/ + float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use + float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these + + /** Convierto r a subíndice de matriz de lennard-jones **/ + /** r = (MAX-MIN) * X / N + MIN **/ + /** x = (r-MIN) * N / (MAX-MIN) **/ + float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x + + + double E_r_up = (double) tex2D( texRef, index_x + DIF_FINITAS_DELTA, t_o_p_2 ); + double E_r_dwn = (double) tex2D( texRef, index_x - DIF_FINITAS_DELTA, t_o_p_2 ); + + + double r_dif = DIST * 2 * (DIF_FINITAS_DELTA) / cant_samples_r; + + + dEr[y*width+x] = (E_r_up - E_r_dwn) / (r_dif); +} + +/** **************************************************************** **/ + +__global__ +void direct_derivative_E_r(double* dEr, double* r, double cut, int* item_to_type, + int cant_samples_r, int cant_types, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/ + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/ + + /* Dentro del bloque correspondiente */ + if(x >= width || y >= height) {return;} + if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;} + + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particles **/ + float t_o_p_1 = (float) item_to_type[y] * cant_types; //this one decides which subMatrix to use + float t_o_p_2 = (float) item_to_type[x] + 0.5 + t_o_p_1; //this one decides which row on these + + /** Convierto r a subíndice de matriz de lennard-jones **/ + /** r = (MAX-MIN) * X / N + MIN **/ + /** x = (r-MIN) * N / (MAX-MIN) **/ + float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x + + + dEr[y*width+x] = (double) tex2D( texRef, index_x, t_o_p_2 ); +} + +/** **************************************************************** **/ + + +__global__ +void E_r(double* Er, double* r, double cut, int* item_to_type, + int cant_samples_r, int cant_types, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/ + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/ + + /* Dentro del bloque correspondiente */ + if(x >= width || y >= height) 
{return;} + if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;} + + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particles **/ + float t_o_p_1 = (float) item_to_type[y]; //this one decides which subMatrix to use + float t_o_p_2 = (float) item_to_type[x]; //this one decides which row on these + float row = t_o_p_2 + 0.5 + (t_o_p_1* cant_types); + /** Convierto r a subíndice de matriz de lennard-jones **/ + /** r = (MAX-MIN) * X / N + MIN **/ + /** x = (r-MIN) * N / (MAX-MIN) **/ + float index_x = (float)((double) (r[y*width+x] - MIn) * (double) cant_samples_r / DIST + 0.5); // convert r to x + + Er[y*width+x] = (double) tex2D( texRef, index_x, row ); +} + +/* ***************************************************************** **/ +/** +ANALYTIC */ +/** **************************************************************** **/ + +__global__ +void derivative_E_r_analytic(double* dEr, double* r, double cut, int* item_to_type, int cant_samples_r, + double* EPS, double* SIG, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/ + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/ + + /* Dentro del bloque correspondiente */ + if(x >= width || y >= height) {return;} + if(x == y || r[y*width+x] >= cut) {dEr[y*width+x] = 0; return;} + + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particle 2 **/ + int type_i = item_to_type[x]; + int type_j = item_to_type[y]; + + double sig12 = (double) (SIG[type_i] + SIG[type_j])/2; + double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]); + + dEr[y*width+x] = (double) 24.0*eps12*( pow(sig12,6)/ pow(r[y*width+x],7) - 2 * pow(sig12,12)/ pow(r[y*width+x],13)); +} + +__global__ +void E_r_analytic(double* Er, double* r, double cut, int* item_to_type, int cant_samples_r, + double* EPS, double* SIG, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; /** particula 2 **/ + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; /** particula 1 **/ + + /* Dentro del bloque correspondiente */ + if(x >= width || y >= height) {return;} + if(x == y || r[y*width+x] >= cut) {Er[y*width+x] = 0; return;} + + /* valor del Potencial para la distancia r, + * para el tipo de partícula correspondiente */ + + /** type of particle 2 **/ + int type_i = item_to_type[x]; + int type_j = item_to_type[y]; + //float carga=1; + //float constante=1; + //double coulomb=(constante*carga)/r[y*width+x]; + double sig12 = (double) (SIG[type_i] + SIG[type_j])/2; + double eps12 = (double) sqrt(EPS[type_i] * EPS[type_j]); + + Er[y*width+x] = (double) 4.0*eps12*( pow((sig12/r[y*width+x]),12) - pow((sig12/r[y*width+x]),6)); +} + + + +/** **************************************************************** **/ +/** -ANALYTIC */ +/* ***************************************************************** **/ + + +/** **************************************************************** **/ + + + /* Fx = dE(r) / dr * (x1-x2) / r */ +__global__ +void Parcial_Forces_Kernel(double* force, double* dEr, double* dif, double* r, int width, int height) +{ + /* Elemento de la matriz a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; + + if(x >= width || y >= height) {return;} + if(x == y) {force[y*width+x] = 0; return;} + + force[y*width+x] = 
dEr[y*width+x] * dif[y*width+x] / r[y*width+x]; +} + +/** **************************************************************** **/ + + +__global__ +void Resultant_Forces_Kernel(double* result, double* forces, int cant) +{ + /* Elemento del vector a calcular */ + unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; + + if(x >= cant) {return;} + + int i = 0; + double tmp = 0; + int row = x*cant; + for(; i < cant; i++){ + tmp += forces[row + i]; + } + result[x] = tmp; +} + +/** **************************************************************** **/ + + +/* V(t + Dt/2) = V(t - Dt/2) + [ F(t) * Dt ] / m */ +__global__ +void Resultant_Velocities_Kernel(double* velocity, double* old_velocity, double* force, double* m, + int* item_to_type, double delta_tiempo, int cant_particles) +{ + /* Elemento de la matriz a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i >= cant_particles) {return;} + + double Vt = old_velocity[i]; + int type = item_to_type[i]; + double dtx = delta_tiempo*20.455; + /* Result */ + velocity[i] = Vt + ( (force[i]*dtx) / m[type] ); +} + +/** **************************************************************** **/ + + +/* P(t + Dt) = P(t) + V(t + Dt/2) * Dt */ +__global__ +void Resultant_Positions_Kernel(double* positions, double* velocity, double delta_tiempo, int cant) +{ + /* Elemento del vector a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i >= cant) {return;} + double dtx = delta_tiempo*20.455; + positions[i] = positions[i] + (velocity[i] * dtx); +} + +/** **************************************************************** **/ + + + +/* -BOX_MAX 0 BOX_MAX */ +/* |-----------------|-----------------| */ + +__global__ +void Adjustin_Positions_Kernel(double* position, double box_max, int cant) +{ + /* Elemento del vector a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i >= cant) {return;} + double pos = position[i] - box_max; + if(pos > 0){ + position[i] = -box_max + fmod(pos, (double) (2*box_max)); + } + if(pos < -2*box_max){ + position[i] = box_max + fmod(pos, (double) (2*box_max)); + } + +} + + +/** **************************************************************** **/ + + +/* Ek = |v|^2 * m / 2 */ +/* Ek_x = (v_x)^2 * m / 2 */ +__global__ +void Kinetic_Energy_Kernel(double* kE, double* vold, double* v, double* m, int* item_to_type, int cant) +{ + /* Elemento del vector a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i>= cant) {return;} + + double vi = vold[i] + v[i]; + int type = item_to_type[i]; + + + kE[i] = vi * vi * m[type] / 8; +} + +/** **************************************************************** **/ + + +__global__ +void Total_Kinetic_Energy_Kernel(double* kE, double* Ke_x, double* Ke_y, double* Ke_z, int cant) +{ + /* Elemento del vector a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i>= cant) {return;} + + kE[i] = Ke_x[i] + Ke_y[i] + Ke_z[i]; +} + + +__global__ void dummy_kernel(){} + +/** **************************************************************** **/ + + +__global__ +void Corrected_Velocities_Kernel(double* vold, double* v, double lambda, int cant){ + /* Elemento del vector a calcular */ + unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; + + if(i>= cant) {return;} + + vold[i] = v[i] * lambda; +} + + + + + + + + + + + +/** **************************************************************** **/ +/** *************************** HOST ******************************* **/ + +int main( int argc, char* argv[] ) +{ + 
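+    /* Command-line flags parsed below:
+     *   -t <file>  write timing output to <file>
+     *   -a         ANALYTIC mode (closed-form E(r) and dE/dr)
+     *   -d         DERIVATIVE mode (tabulated dE/dr instead of finite differences of E(r))
+     *   -r         DEBUG results (also enables the AMBER-style output)
+     *   -ar        AMBER-style results only
+     *   -c         write the mdcrd coordinate file
+     */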
for(uint i = 0; i < argc; i++){ + if(strcmp(argv[i], "-t") == 0){ + /* outputTimeFilename */ + timeFilename = argv[i+1]; + } + if(strcmp(argv[i], "-a") == 0){ + /* ANALYTIC mode */ + analytic = true; + } + if(strcmp(argv[i], "-d") == 0){ + /* DERIVATIVE mode */ + derivative = true; + } + if(strcmp(argv[i], "-r") == 0){ + /* RESULTS or TIMER mode */ + results = true; + amberResults = true; + } + if(strcmp(argv[i], "-ar") == 0){ + /* RESULTS or TIMER mode */ + amberResults = true; + } + + if(strcmp(argv[i], "-c") == 0){ + /* PRINT mdcrd file */ + coordenates = true; + } + } + if (derivative) + cout << "Derivative" << endl; + if (analytic) + cout << "Analytic" << endl; + if(results){ + cout << "DEBUG mode ON" << endl; + } + if(amberResults){ + cout << "AMBER results ON" << endl; + } + + + fstream out; + fstream crd; + if(results or amberResults){ + /* Output file */ + out.open(outputFilename,fstream::out); + streamsize ss = out.precision(); + out << setprecision(20); + } + if(coordenates){ + /* CRD output file */ + crd.open(crdFilename,fstream::out); + crd << setprecision(3); + crd.setf( std::ios::fixed, std:: ios::floatfield ); + crd << " POS(x) POS(y) POS(z)" << endl; + } + + struct timeval tv1, tv2; + fstream taim; + if(!results){ //timer mode ON + /* Time output file */ + taim.open(timeFilename, fstream::app | fstream::out); + taim << setprecision(20); + } + + /* Levantamos Coeficientes de Lennard */ + ifstream table (lennardTableFileName); + table >> cant_types; + /**Variables y memoria*/ + size_t cant_types_size = cant_types * sizeof(double); + + vector h_type; + h_type.resize(cant_types); + double* h_sigma = (double*) ( malloc(cant_types_size)); + double* h_epsilon = (double*) ( malloc(cant_types_size)); + double* h_mass = (double*) ( malloc(cant_types_size)); + + /**Levantamos datos*/ + for(int j = 0; j> h_type[j]; + table >> h_sigma[j]; + table >> h_epsilon[j]; + table >> h_mass[j]; + } + table.close(); + + + + /* Armamos matrices de lennard */ + /**Variables y memoria**/ + int cant_samples_r = TEXTURE_MEM_SIZE/(sizeof(float)); // cant of original sample values (máximo permitido por mem de textura) + double var = DIST / ((double) cant_samples_r); // variation of r + size_t cant_samples_r_size = cant_samples_r * sizeof(float); + + float* h_dLJPot; + float* h_LJPot; + + if(derivative) + h_dLJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float) + else + h_LJPot = (float*) malloc(cant_samples_r_size*cant_types*cant_types); // #samples * #particles * #particles (*float) + + int width = cant_samples_r; + int height = cant_types; + dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y); + dim3 dimGrid( (int) ceil((double)width / (double)dimBlock.x), (int) ceil((double)height / (double)dimBlock.y) ); + double* d_EPS; + double* d_SIG; + float* d_LJPot; + float* d_dLJPot; + cudaMalloc(&d_EPS, cant_types_size); + cudaMalloc(&d_SIG, cant_types_size); + cudaMemcpy(d_EPS, h_epsilon, cant_types_size, cudaMemcpyHostToDevice); + cudaMemcpy(d_SIG, h_sigma, cant_types_size, cudaMemcpyHostToDevice); + if(derivative) + cudaMalloc(&d_dLJPot, cant_samples_r_size * cant_types); + else + cudaMalloc(&d_LJPot, cant_samples_r_size * cant_types); + + /** Rellenamos datos con CUDA **/ + if(derivative) { + for(int a = 0; a>>(d_dLJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height); + cudaMemcpy( (float*) &(h_dLJPot[(a*cant_samples_r*cant_types)]), d_dLJPot, cant_types * cant_samples_r_size, cudaMemcpyDeviceToHost); + } + } else { + for(int a = 0; 
a>>(d_LJPot, d_EPS, d_SIG, h_epsilon[a], h_sigma[a], var, width, height); + cudaMemcpy( (float*) &(h_LJPot[(a*cant_samples_r*cant_types)]), d_LJPot, cant_types * cant_samples_r_size, cudaMemcpyDeviceToHost); + } + } + + /** Liberamos memoria de CUDA **/ + cudaFree(&d_EPS); + cudaFree(&d_SIG); + cudaFree(&d_LJPot); + + if(results){ + /** DEBUG **/ + if(derivative) + out << " derivative LENNARD " << endl; + else + out << " LENNARD " << endl; + for(int a = 0; a h_particle_type; + particles >> cant_particles; + size_t cant_particles_size = cant_particles * sizeof(double); + h_position_x = (double*)malloc(cant_particles_size); + h_position_y = (double*)malloc(cant_particles_size); + h_position_z = (double*)malloc(cant_particles_size); + h_velocity_x = (double*)malloc(cant_particles_size); + h_velocity_y = (double*)malloc(cant_particles_size); + h_velocity_z = (double*)malloc(cant_particles_size); + h_velocity_old_x = (double*)malloc(cant_particles_size); + h_velocity_old_y = (double*)malloc(cant_particles_size); + h_velocity_old_z = (double*)malloc(cant_particles_size); + h_chargue = (double*)malloc(cant_particles_size); + h_particle_type.resize(cant_particles); + + /** Guardamos datos **/ + for(uint i = 0; i < cant_particles ; i++) { + particles >> h_particle_type[i]; + + particles >> h_position_x[i]; + particles >> h_position_y[i]; + particles >> h_position_z[i]; + + particles >> h_velocity_old_x[i]; + particles >> h_velocity_old_y[i]; + particles >> h_velocity_old_z[i]; + + particles >> h_chargue[i]; + } + /** Perioricidad **/ + //TODO: por ahora usamos cubo, + //situamos el cero en el centro del mismo + //Recibimos en orden x, y, z + particles >> box; + if(box){ + cout << " Levantamos caja" << endl; + particles >> h_box_x; + particles >> h_box_y; + particles >> h_box_z; + particles >> h_box_alpha; + particles >> h_box_beta; + particles >> h_box_gamma; + if( h_box_alpha != 90 or h_box_beta != 90 or h_box_gamma != 90){ + cout << " Se forzaron los angulos para que sea un CUBO: " << endl; + } + box_max_x = h_box_x/2; + box_max_y = h_box_y/2; + box_max_z = h_box_z/2; + } + /** Parametros **/ + particles >> cant_steps; + particles >> delta_tiempo; + particles >> temp0; + particles >> tempi; + particles >> tautp; + particles >> cut; + particles.close(); + + +// if(results){ +// /** DEBUG **/ +// out << " INITIAL VALUES" << endl; +// for(int i = 0; i>>(d_distance_x, d_distance_y, d_distance_z, d_distance_r, + d_position_x, d_position_y, d_position_z, + h_box_x, h_box_y, h_box_z, width, height); + + distances_kernel<<>>(d_distance_r, d_distance_x, d_distance_y, d_distance_z, + d_position_x, d_position_y, d_position_z, width, height); + + + if(results){ + /** DEBUG **/ + cudaMemcpy(h_distance_r, d_distance_r, s_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_distance_x, d_distance_x, s_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_distance_y, d_distance_y, s_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_distance_z, d_distance_z, s_size, cudaMemcpyDeviceToHost); + + out << " DISTANCES" << endl << " "; + double (*matriz)[cant_particles] = (double (*)[cant_particles]) h_distance_r; + for(int i = 0; i>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height); + //if(amberResults){ + /** Calculo la energia E(r) para debug **/ + // E_r_analytic<<>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height); + //} + } else { + /** Calculo de la derivada dE(r)/dr usando diferencias finitas **/ + if(derivative){ +// derivative_E_r_analytic<<>>(d_dEr, 
d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height); + direct_derivative_E_r<<>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height); + } else { + // derivative_E_r_analytic<<>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height); + derivative_E_r<<>>(d_dEr, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height); + //if(amberResults){ + } /** Calculo la energia E(r) para debug **/ + // E_r<<>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, cant_types, width, height); + //E_r_analytic<<>>(d_Er, d_distance_r, cut, d_item_particle, cant_samples_r, d_EPS, d_SIG, width, height); + //} + //} + + } + + if(amberResults){ + if(!derivative){ + /** DEBUG **/ + out << " Lennard-Jones" << endl << " "; + double vdwaals = 0; + double (*h_Er)[cant_particles] = (double (*)[cant_particles]) ( malloc(s_size)); + cudaMemcpy(h_Er, d_Er, s_size, cudaMemcpyDeviceToHost); + for(int i = 0; i>>(d_Force_x, d_dEr, d_distance_x, d_distance_r, width, height); + Parcial_Forces_Kernel<<>>(d_Force_y, d_dEr, d_distance_y, d_distance_r, width, height); + Parcial_Forces_Kernel<<>>(d_Force_z, d_dEr, d_distance_z, d_distance_r, width, height); + + if(results){ + /** DEBUG **/ + cudaMemcpy(h_Force_x, d_Force_x, s_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_Force_y, d_Force_y, s_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_Force_z, d_Force_z, s_size, cudaMemcpyDeviceToHost); + out << " FORCES" << endl << " "; + for(int i = 0; i>>(d_Force_x_resultant, d_Force_x, cant_particles); + Resultant_Forces_Kernel<<>>(d_Force_y_resultant, d_Force_y, cant_particles); + Resultant_Forces_Kernel<<>>(d_Force_z_resultant, d_Force_z, cant_particles); + + if(results){ + /** DEBUG **/ + cudaMemcpy(h_Force_x_resultant, d_Force_x_resultant, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_Force_y_resultant, d_Force_y_resultant, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_Force_z_resultant, d_Force_z_resultant, cant_particles_size, cudaMemcpyDeviceToHost); + out << " RESULTANT FORCES" << endl; + for(int i = 0; i>>(d_velocity_x, d_velocity_old_x, d_Force_x_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles); + Resultant_Velocities_Kernel<<>>(d_velocity_y, d_velocity_old_y, d_Force_y_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles); + Resultant_Velocities_Kernel<<>>(d_velocity_z, d_velocity_old_z, d_Force_z_resultant, d_mass, d_item_particle, delta_tiempo, cant_particles); + + if(results){ + /** DEBUG **/ + cudaMemcpy(h_velocity_x, d_velocity_x, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_velocity_y, d_velocity_y, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_velocity_z, d_velocity_z, cant_particles_size, cudaMemcpyDeviceToHost); + out << " RESULTANT VELOCITIES" << endl; + for(int i = 0; i>>(d_position_x, d_velocity_x, delta_tiempo, cant_particles); + Resultant_Positions_Kernel<<>>(d_position_y, d_velocity_y, delta_tiempo, cant_particles); + Resultant_Positions_Kernel<<>>(d_position_z, d_velocity_z, delta_tiempo, cant_particles); + + if(results){ + /** DEBUG **/ + cudaMemcpy(h_position_x, d_position_x, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_position_y, d_position_y, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_position_z, d_position_z, cant_particles_size, cudaMemcpyDeviceToHost); + out << " RESULTANT POSITIONS" << endl; + for(int i = 0; i>>(d_position_x, box_max_x, cant_particles); + 
Adjustin_Positions_Kernel<<>>(d_position_y, box_max_y, cant_particles); + Adjustin_Positions_Kernel<<>>(d_position_z, box_max_z, cant_particles); + + if(coordenates){ + /** DEBUG **/ + cudaMemcpy(h_position_x, d_position_x, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_position_y, d_position_y, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_position_z, d_position_z, cant_particles_size, cudaMemcpyDeviceToHost); + if(results){ + out << " RESULTANT POSITIONS in the CUBE" << endl; + for(int i = 0; i>>(d_kinetic_energy_x, d_velocity_old_x, d_velocity_x, d_mass, d_item_particle, cant_particles); + Kinetic_Energy_Kernel<<>>(d_kinetic_energy_y, d_velocity_old_y, d_velocity_y, d_mass, d_item_particle, cant_particles); + Kinetic_Energy_Kernel<<>>(d_kinetic_energy_z, d_velocity_old_z, d_velocity_z, d_mass, d_item_particle, cant_particles); + + if(results){ + /** DEBUG **/ + cudaMemcpy(h_kinetic_energy_x, d_kinetic_energy_x, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_kinetic_energy_y, d_kinetic_energy_y, cant_particles_size, cudaMemcpyDeviceToHost); + cudaMemcpy(h_kinetic_energy_z, d_kinetic_energy_z, cant_particles_size, cudaMemcpyDeviceToHost); + out << " KINETIC ENERGY" << endl; + for(int i = 0; i>>(d_kinetic_energy, d_kinetic_energy_x, d_kinetic_energy_y, d_kinetic_energy_z, cant_particles); + + + /* */ + /** Calculamos la Energía cinética total del sistema **/ + cudaMemcpy(h_kinetic_energy, d_kinetic_energy, cant_particles_size, cudaMemcpyDeviceToHost); + double Ek_TOT = 0; + for(int i = 0; i +#include +#include + +#include +#include +#include + + +#ifdef GEM5_FUSION +#include +extern "C" { +void m5_work_begin(uint64_t workid, uint64_t threadid); +void m5_work_end(uint64_t workid, uint64_t threadid); +} +#endif + +//====================================================================================================================================================== +// STRUCTURES, GLOBAL STRUCTURE VARIABLES +//====================================================================================================================================================== + +#include "define.c" + +params_common_change common_change; +__constant__ params_common_change d_common_change; + +params_common common; +__constant__ params_common d_common; + +params_unique unique[ALL_POINTS]; // cannot determine size dynamically so choose more than usually needed +__constant__ params_unique d_unique[ALL_POINTS]; + +//====================================================================================================================================================== +// KERNEL CODE +//====================================================================================================================================================== + +#include "kernel.cu" + +//=============================================================================================================================================================================================================== +//=============================================================================================================================================================================================================== +// MAIN FUNCTION +//=============================================================================================================================================================================================================== 
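+// The kernel below is launched as kernel<<<blocks, threads>>>() with no arguments, so all of
+// its inputs travel through the __constant__ copies declared above. A minimal sketch of how
+// the host mirrors are pushed to the device (an illustration of the usual cudaMemcpyToSymbol
+// pattern, not the exact call sites in this file):
+//
+//   cudaMemcpyToSymbol(d_common, &common, sizeof(params_common));
+//   cudaMemcpyToSymbol(d_unique, &unique, sizeof(params_unique) * ALL_POINTS);
+//   // d_common_change carries the current frame pointer/number, so it would be
+//   // refreshed once per processed frame:
+//   cudaMemcpyToSymbol(d_common_change, &common_change, sizeof(params_common_change));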
+//=============================================================================================================================================================================================================== + +int main(int argc, char *argv []){ + + //====================================================================================================================================================== + // VARIABLES + //====================================================================================================================================================== + + // CUDA kernel execution parameters + dim3 threads; + dim3 blocks; + + // counter + int i; + int frames_processed; + + // frames + char* video_file_name; + avi_t* frames; + fp* frame; + + //====================================================================================================================================================== + // FRAME + //====================================================================================================================================================== + + if(argc!=3){ + printf("ERROR: usage: heartwall \n"); + exit(1); + } + + // open movie file + video_file_name = argv[1]; + frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting + if (frames == NULL) { + AVI_print_error((char *) "Error with AVI_open_input_file"); + return -1; + } + + // common + common.no_frames = AVI_video_frames(frames); + common.frame_rows = AVI_video_height(frames); + common.frame_cols = AVI_video_width(frames); + common.frame_elem = common.frame_rows * common.frame_cols; + common.frame_mem = sizeof(fp) * common.frame_elem; + + // pointers + cudaMalloc((void **)&common_change.d_frame, common.frame_mem); + + //====================================================================================================================================================== + // CHECK INPUT ARGUMENTS + //====================================================================================================================================================== + + frames_processed = atoi(argv[2]); + if(frames_processed<0 || frames_processed>common.no_frames){ + printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", frames_processed, common.no_frames); + return 0; + } + + + //====================================================================================================================================================== + // HARDCODED INPUTS FROM MATLAB + //====================================================================================================================================================== + + //==================================================================================================== + // CONSTANTS + //==================================================================================================== + + common.sSize = 40; + common.tSize = 25; + common.maxMove = 10; + common.alpha = 0.87; + + //==================================================================================================== + // ENDO POINTS + //==================================================================================================== + + common.endoPoints = ENDO_POINTS; + common.endo_mem = sizeof(int) * common.endoPoints; + + common.endoRow = (int *)malloc(common.endo_mem); + common.endoRow[ 0] = 369; + common.endoRow[ 1] = 400; + common.endoRow[ 2] = 429; + common.endoRow[ 3] = 452; + common.endoRow[ 4] = 476; + common.endoRow[ 5] = 486; + common.endoRow[ 
6] = 479; + common.endoRow[ 7] = 458; + common.endoRow[ 8] = 433; + common.endoRow[ 9] = 404; + common.endoRow[10] = 374; + common.endoRow[11] = 346; + common.endoRow[12] = 318; + common.endoRow[13] = 294; + common.endoRow[14] = 277; + common.endoRow[15] = 269; + common.endoRow[16] = 275; + common.endoRow[17] = 287; + common.endoRow[18] = 311; + common.endoRow[19] = 339; + cudaMalloc((void **)&common.d_endoRow, common.endo_mem); + + common.endoCol = (int *)malloc(common.endo_mem); + common.endoCol[ 0] = 408; + common.endoCol[ 1] = 406; + common.endoCol[ 2] = 397; + common.endoCol[ 3] = 383; + common.endoCol[ 4] = 354; + common.endoCol[ 5] = 322; + common.endoCol[ 6] = 294; + common.endoCol[ 7] = 270; + common.endoCol[ 8] = 250; + common.endoCol[ 9] = 237; + common.endoCol[10] = 235; + common.endoCol[11] = 241; + common.endoCol[12] = 254; + common.endoCol[13] = 273; + common.endoCol[14] = 300; + common.endoCol[15] = 328; + common.endoCol[16] = 356; + common.endoCol[17] = 383; + common.endoCol[18] = 401; + common.endoCol[19] = 411; + cudaMalloc((void **)&common.d_endoCol, common.endo_mem); + + common.tEndoRowLoc = (int *)malloc(common.endo_mem * common.no_frames); + cudaMalloc((void **)&common.d_tEndoRowLoc, common.endo_mem * common.no_frames); + + common.tEndoColLoc = (int *)malloc(common.endo_mem * common.no_frames); + cudaMalloc((void **)&common.d_tEndoColLoc, common.endo_mem * common.no_frames); + + //==================================================================================================== + // EPI POINTS + //==================================================================================================== + + common.epiPoints = EPI_POINTS; + common.epi_mem = sizeof(int) * common.epiPoints; + + common.epiRow = (int *)malloc(common.epi_mem); + common.epiRow[ 0] = 390; + common.epiRow[ 1] = 419; + common.epiRow[ 2] = 448; + common.epiRow[ 3] = 474; + common.epiRow[ 4] = 501; + common.epiRow[ 5] = 519; + common.epiRow[ 6] = 535; + common.epiRow[ 7] = 542; + common.epiRow[ 8] = 543; + common.epiRow[ 9] = 538; + common.epiRow[10] = 528; + common.epiRow[11] = 511; + common.epiRow[12] = 491; + common.epiRow[13] = 466; + common.epiRow[14] = 438; + common.epiRow[15] = 406; + common.epiRow[16] = 376; + common.epiRow[17] = 347; + common.epiRow[18] = 318; + common.epiRow[19] = 291; + common.epiRow[20] = 275; + common.epiRow[21] = 259; + common.epiRow[22] = 256; + common.epiRow[23] = 252; + common.epiRow[24] = 252; + common.epiRow[25] = 257; + common.epiRow[26] = 266; + common.epiRow[27] = 283; + common.epiRow[28] = 305; + common.epiRow[29] = 331; + common.epiRow[30] = 360; + cudaMalloc((void **)&common.d_epiRow, common.epi_mem); + + common.epiCol = (int *)malloc(common.epi_mem); + common.epiCol[ 0] = 457; + common.epiCol[ 1] = 454; + common.epiCol[ 2] = 446; + common.epiCol[ 3] = 431; + common.epiCol[ 4] = 411; + common.epiCol[ 5] = 388; + common.epiCol[ 6] = 361; + common.epiCol[ 7] = 331; + common.epiCol[ 8] = 301; + common.epiCol[ 9] = 273; + common.epiCol[10] = 243; + common.epiCol[11] = 218; + common.epiCol[12] = 196; + common.epiCol[13] = 178; + common.epiCol[14] = 166; + common.epiCol[15] = 157; + common.epiCol[16] = 155; + common.epiCol[17] = 165; + common.epiCol[18] = 177; + common.epiCol[19] = 197; + common.epiCol[20] = 218; + common.epiCol[21] = 248; + common.epiCol[22] = 276; + common.epiCol[23] = 304; + common.epiCol[24] = 333; + common.epiCol[25] = 361; + common.epiCol[26] = 391; + common.epiCol[27] = 415; + common.epiCol[28] = 434; + common.epiCol[29] = 448; + 
common.epiCol[30] = 455; + cudaMalloc((void **)&common.d_epiCol, common.epi_mem); + + common.tEpiRowLoc = (int *)malloc(common.epi_mem * common.no_frames); + cudaMalloc((void **)&common.d_tEpiRowLoc, common.epi_mem * common.no_frames); + + common.tEpiColLoc = (int *)malloc(common.epi_mem * common.no_frames); + cudaMalloc((void **)&common.d_tEpiColLoc, common.epi_mem * common.no_frames); + + //==================================================================================================== + // ALL POINTS + //==================================================================================================== + + common.allPoints = ALL_POINTS; + + //====================================================================================================================================================== + // TEMPLATE SIZES + //====================================================================================================================================================== + + // common + common.in_rows = common.tSize + 1 + common.tSize; + common.in_cols = common.in_rows; + common.in_elem = common.in_rows * common.in_cols; + common.in_mem = sizeof(fp) * common.in_elem; + + //====================================================================================================================================================== + // CREATE ARRAY OF TEMPLATES FOR ALL POINTS + //====================================================================================================================================================== + + // common + cudaMalloc((void **)&common.d_endoT, common.in_mem * common.endoPoints); + cudaMalloc((void **)&common.d_epiT, common.in_mem * common.epiPoints); + + //====================================================================================================================================================== + // SPECIFIC TO ENDO OR EPI TO BE SET HERE + //====================================================================================================================================================== + + for(i=0; i 0.5){ + common.mask_conv_ioffset = common.mask_conv_ioffset + 1; + } + common.mask_conv_joffset = (common.mask_cols-1)/2; + if((common.mask_cols-1) % 2 > 0.5){ + common.mask_conv_joffset = common.mask_conv_joffset + 1; + } + + // pointers + for(i=0; i>>(); + + // free frame after each loop iteration, since AVI library allocates memory for every frame fetched + free(frame); + + // print frame progress + printf("%d ", common_change.frame_no); + fflush(NULL); + + } + + //==================================================================================================== + // PRINT FRAME PROGRESS END + //==================================================================================================== + + printf("\n"); + fflush(NULL); + + //==================================================================================================== + // OUTPUT + //==================================================================================================== + + cudaMemcpy(common.tEndoRowLoc, common.d_tEndoRowLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost); + cudaMemcpy(common.tEndoColLoc, common.d_tEndoColLoc, common.endo_mem * common.no_frames, cudaMemcpyDeviceToHost); + + cudaMemcpy(common.tEpiRowLoc, common.d_tEpiRowLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost); + cudaMemcpy(common.tEpiColLoc, common.d_tEpiColLoc, common.epi_mem * common.no_frames, cudaMemcpyDeviceToHost); + +#ifdef GEM5_FUSION + m5_work_end(0, 0); 
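+    // End of the measured region of interest for gem5 runs; m5_work_begin/m5_work_end
+    // bracket the measured section and are only compiled in when GEM5_FUSION is defined.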
+#endif + +#ifdef BENCH_PRINT + for(int f=0; f duration = end_cpu - start_cpu; + + printf("Omp Cpu Test: image %s %f ms \n",argv[1],duration.count()); + + //printArray(cpuHistogram,256); + + int * gpuHistogram = gpuHistogramFromImage(input); + + // Compare histograms + printf("Histograms are identical %d\n", + compareArrays(cpuHistogram,gpuHistogram,256)); + + // sum all up to now cumulative distribution step + int total = reduceSum(gpuHistogram,256); + + // map to closest value of eq frequency + int * finalHistogram = histogram_equalize(gpuHistogram,total,256); + // printArray(finalHistogram,256); + + // apply to image + start_cpu = chrono::high_resolution_clock::now(); + + cpuMapImageWithHistogram(input,output,finalHistogram); + + end_cpu = chrono::high_resolution_clock::now(); + duration = end_cpu - start_cpu; + + printf("map GPU Test: image %s %f ms \n",argv[1],duration.count()); + + gpuMapImageWithHistogram(input,output2,finalHistogram); + + cv::namedWindow("Input",CV_WINDOW_NORMAL); + cv::namedWindow("OutputCPU",CV_WINDOW_NORMAL); + cv::namedWindow("OutputGPU",CV_WINDOW_NORMAL); + + cv::resizeWindow("Input", 800, 600); + cv::resizeWindow("OutputCPU", 800, 600); + cv::resizeWindow("OutputGPU", 800, 600); + + cv::imshow("Input",input); + cv::imshow("OutputCPU",output); + cv::imshow("OutputGPU",output2); + + if(!argv[2]) + cv::waitKey(); + + return 0; +} +int * cpuHistogramFromImage(cv::Mat input){ + // allocate histogram + int * histogram = (int * )calloc(256,sizeof(int)); + // for each pixel + uchar * data = input.ptr(0); + #pragma omp parallel for + for(int i = 0; i>>(d_input,d_histogram,input.cols,input.rows,input.step); + + auto end_cpu = chrono::high_resolution_clock::now(); + chrono::duration duration = end_cpu - start_cpu; + + printf("Gpu Test: %f ms \n",duration.count()); + + cudaMemcpy(histogram,d_histogram,histoSize,cudaMemcpyDeviceToHost); + + //printArray(histogram,256); + + cudaDeviceSynchronize(); + cudaFree(d_input); + cudaFree(d_histogram); + + return histogram; +} +__global__ void gpuHistogramFromImageKernel(unsigned char* input, int * histogram, int width, int height, int step){ + const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; + const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; + if ((xIndex < width) && (yIndex < height)){ + const int tid = yIndex * step + xIndex; + atomicAdd(&histogram[input[tid]], 1); + } +} +void printArray(int * array,int length){ + for(int i=0;i(0); + uchar * data2 = output.ptr(0); + for(int i=0;i>>( + d_input,d_output,d_histogram,input.cols,input.rows,input.step); + + auto end_cpu = chrono::high_resolution_clock::now(); + chrono::duration duration = end_cpu - start_cpu; + + printf("Gpu Test: %f ms \n",duration.count()); + + cudaMemcpy(output.ptr(),d_output,bytes,cudaMemcpyDeviceToHost); + + //printArray(histogram,256); + + cudaDeviceSynchronize(); + + cudaFree(d_input); + cudaFree(d_output); + cudaFree(d_histogram); +} +__global__ void gpuMapImageWithHistogramKernel(unsigned char* input,unsigned char* output, int * histogram, int width, int height, int step){ + __shared__ int * shHistogram; + for(int i = 0;i<256;i++){ + shHistogram[i] = histogram[i]; + } + __syncthreads(); + + const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; + const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; + + if ((xIndex < width) && (yIndex < height)){ + const int tid = yIndex * step + xIndex; + output[tid] =static_cast(shHistogram[input[tid]]); + } +} diff --git a/cuda_code/main_1256.cu b/cuda_code/main_1256.cu new file mode 100644 index 
0000000000000000000000000000000000000000..a2dfe0e67ee6661574cad6e55f792b126f77676a --- /dev/null +++ b/cuda_code/main_1256.cu @@ -0,0 +1,175 @@ +#include // Include so GLM picks up the compiler version +#define GLM_FORCE_CUDA + +#include "camera.h" +#include "hittable.h" +#include "template_scenes.h" +#include "bvh.h" +#include "loadOBJ.h" +#include "render.cu" + +#include +#include +#include + +#include + +#define STB_IMAGE_IMPLEMENTATION +#include "libs/stb/stb_image.h" +#define STB_IMAGE_WRITE_IMPLEMENTATION +#include "libs/stb/stb_image_write.h" + +// Since memory for hittables must already be allocated when creating +// them on the GPU, I currently store a static number of how many hittables +// are manually created - num_manually_defined_hittables. +// It is far less than ideal, and a potential workaround would be to instead +// of creating them directly on the GPU, I create a bunch of sphereData and +// triangleData structs, similar to the ones I create for .obj files, which +// are stored on the CPU, so we can use their count to allocate the correct +// amount of memory for hittables, before sending them off the the GPU to be created. +// +// The reason I am not fully keen on that, is that we'll have an extra step +// and also a copy for each sphere and triangle on the CPU, which seems wasteful +// +// Additionally, most high profile renderers, have their own file formats ( +// Arnold .ass, Renderman RIB, etc.) that describe a scene, which contain +// the number of objects to render, so in those cases, the number of hittables +// is always known, so there is no need for neither the above mentioned proceedure +// nor the following manually maintained static value. +__global__ +void manually_populate_scene(Hittable* hittables, int start_id, curandState* rand_state) +{ +#define num_manually_defined_hittables 3 + hittables[start_id+0] = Hittable::sphere(vec3(0,-1000,0), 1000, + Material::lambertian(vec3(0.2, 0.2, 0.35))); + hittables[start_id+1] = Hittable::sphere(vec3(0,.5,0), .5, + Material::metal(vec3(.5, .5, .5), .0)); + hittables[start_id+2] = Hittable::sphere(vec3(.7,.25,0), .25, + Material::dielectric(1.5)); +} + +void createScene(Scene& scene, curandState* rand_state) { + objData obj = load_obj("/content/death-star/models/bunny.obj"); + objData obj2 = load_obj("/content/death-star/models/monkey.obj"); + // scene.num_hittables = obj.num_triangles + num_manually_defined_hittables; + scene.num_hittables = obj.num_triangles + obj2.num_triangles + num_manually_defined_hittables; + + cudaMalloc(&(scene.hittables), scene.num_hittables * sizeof(Hittable)); + + Material* material; + cudaMalloc(&(material), sizeof(Material)); + //create_metal<<<1, 1>>>(material, vec3(.1, .3, .5), .5); + //create_metal<<<1, 1>>>(material, rand_state); + //create_lambertian<<<1, 1>>>(material, vec3(.5, .1, .45)); + create_dielectric<<<1, 1>>>(material, 1.5f); + + Material* material2; + cudaMalloc(&(material2), sizeof(Material)); + create_metal<<<1, 1>>>(material2, vec3(.1, .3, .5), .5); + + int obj_threads = 512; + int obj_dims = (obj.num_triangles + obj_threads - 1) / obj_threads; + create_obj_hittables<<>>(scene.hittables, material, obj, 0, 0.8f); + + obj_dims = (obj2.num_triangles + obj_threads - 1) / obj_threads; + create_obj_hittables<<>>(scene.hittables, material2, obj2, obj.num_triangles, 0.5f); + + // manually_populate_scene<<<1, 1>>>(scene.hittables, obj.num_triangles, rand_state); + manually_populate_scene<<<1, 1>>>(scene.hittables, obj.num_triangles + obj2.num_triangles, rand_state); +} + +void 
save_to_ppm(float *fb, int nx, int ny) { + std::ofstream ofs; + ofs.open("./image.ppm", std::ios::out | std::ios::binary); + ofs << "P3\n" << nx << " " << ny << "\n255\n"; + for (int j = ny-1; j >= 0; j--) { + for (int i = 0; i < nx; i++) { + size_t pixel_index = j*nx + i; + int ir = int(255.99*fb[pixel_index * 3 + 0]); + int ig = int(255.99*fb[pixel_index * 3 + 1]); + int ib = int(255.99*fb[pixel_index * 3 + 2]); + ofs << ir << " " << ig << " " << ib << "\n"; + } + } + ofs.close(); +} + +void save_to_jpg(float *fb, int nx, int ny) { + uint8_t* imgBuff = (uint8_t*)std::malloc(nx * ny * 3 * sizeof(uint8_t)); + for (int j = ny - 1; j >= 0; --j) { + for (int i = 0; i < nx; ++i) { + size_t index = j * nx + i; + // -- stbi generates a Y flipped image + size_t rev_index = (ny - j - 1) * nx + i; + float r = fb[index * 3 + 0]; + float g = fb[index * 3 + 1]; + float b = fb[index * 3 + 2]; + imgBuff[rev_index * 3 + 0] = int(255.999f * r) & 255; + imgBuff[rev_index * 3 + 1] = int(255.999f * g) & 255; + imgBuff[rev_index * 3 + 2] = int(255.999f * b) & 255; + } + } + //stbi_write_png("out.png", nx, ny, 3, imgBuff, nx * 3); + stbi_write_jpg("image.jpg", nx, ny, 3, imgBuff, 100); + std::free(imgBuff); +} + +int main(int argc, char** argv) { + int width = 1920; + int height = 1080; + int num_samples = 100; + int max_bounces = 8; + + printf("Initializing death-star for %ix%i pixels, %i samples and %i max bounces\n", + width, height, num_samples, max_bounces); + + // Calculate blocks and threads + int tx = 8, ty = 8; // bucket size + + clock_t start, stop; + start = clock(); + + dim3 blocks(width/tx + 1, height/ty + 1); + dim3 threads(tx, ty); + + // CUDA random number generator + curandState *rand_state; + cudaMalloc((void**)&rand_state, (width * height) * sizeof(curandState)); + + // Camera + Camera* camera; + cudaMalloc(&camera, 1 * sizeof(Camera)); + + initialize_renderer<<>>(width, height, rand_state); + initialize_camera<<<1, 1>>>(camera, vec3(-.253,1.731,7.573), vec3(-.253,1.119,.281), + vec3(0,1,0), 20, float(width)/float(height), 0.1, 7.317); + + // Create scene + Scene scene; + createScene(scene, rand_state); + + // Create BVH + BVHNode* bvh_root = create_BVH(scene.hittables, scene.num_hittables); + + // Allocate memory for pixels + float *pixel_buffer, *d_pixel_buffer; + pixel_buffer = (float*)malloc(width * height * 3 * sizeof(float)); + cudaMalloc(&d_pixel_buffer, width * height * 3 * sizeof(float)); + + // Render into buffer + render<<>>(width, height, num_samples, max_bounces, d_pixel_buffer, + bvh_root, rand_state, camera); + + // Copy pixel data from device to cpu + cudaMemcpy(pixel_buffer, d_pixel_buffer, + width * height * 3 * sizeof(float), cudaMemcpyDeviceToHost); + + stop = clock(); + double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC; + std::cout << "took " << timer_seconds << " seconds.\n"; + + //save_to_ppm(pixel_buffer, width, height); + save_to_jpg(pixel_buffer, width, height); + + return 0; +} diff --git a/cuda_code/main_1305.cu b/cuda_code/main_1305.cu new file mode 100644 index 0000000000000000000000000000000000000000..f15cf8959b5526fb4b5a729e2328e5e2865ce861 --- /dev/null +++ b/cuda_code/main_1305.cu @@ -0,0 +1,109 @@ + +#include +#include +#include +#include + +using namespace std; + +#define BLOCK_SIZE 16 + +/** + * Each element of the product matrix c[i][j] is computed from a unique row and + * column of the factor matrices, a[i][k] and b[k][j] + */ + +// Matrix size constants. +constexpr int m_size = 768 * 8; // Must be a multiple of 8. 
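// The byte conversion in save_to_jpg above (main_1256.cu) uses
// "int(255.999f * v) & 255", which wraps a channel that falls outside [0,1]
// instead of clamping it. If out-of-range values are possible, a clamped
// conversion is the safer form; a minimal sketch (to_byte is an illustrative
// name, not part of the original file):
static inline unsigned char to_byte(float v) {
    if (v < 0.0f) v = 0.0f;               // clamp below
    if (v > 1.0f) v = 1.0f;               // clamp above
    return (unsigned char)(255.999f * v); // maps [0,1] onto 0..255
}
// usage sketch: imgBuff[rev_index * 3 + 0] = to_byte(r);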
+constexpr int M = m_size / 8; +constexpr int N = m_size / 4; +constexpr int P = m_size / 2; + +#include "verify.cpp" + +__global__ +void hellinger( + const float *__restrict a, + const float *__restrict b, + float *__restrict c, + const int m, const int n, const int k) +{ + int col = blockIdx.x * blockDim.x + threadIdx.x; + int row = blockIdx.y * blockDim.y + threadIdx.y; + if( col < k && row < m) + { + float sum = 0; + for(int i = 0; i < n; i++) + { + sum += sqrtf(a[row * n + i] * b[i * k + col]); + } + const float value = 1.f - sum; + const float gate = (!signbit(value)); + c[row * k + col] = sqrtf(gate * value); + } +} + +int main() { + int i, j; + + // 2D arrays on host side. + float(*a_host)[N] = new float[M][N]; + float(*b_host)[P] = new float[N][P]; + // host-side cpu result + float(*c_host)[P] = new float[M][P]; + // host-side gpu result + float(*c_back)[P] = new float[M][P]; + + for (i = 0; i < M; i++) + for (j = 0; j < N; j++) + a_host[i][j] = 1.f / N; + + srand(123); + for (i = 0; i < N; i++) + for (j = 0; j < P; j++) + b_host[i][j] = rand() % 256; + + for (j = 0; j < P; j++) { + float sum = 0; + for (i = 0; i < N; i++) + sum += b_host[i][j]; + for (i = 0; i < N; i++) + b_host[i][j] /= sum; + } + + float *a_device, *b_device, *c_device; + + cudaMalloc((void **) &a_device, sizeof(float)*M*N); + cudaMalloc((void **) &b_device, sizeof(float)*N*P); + cudaMalloc((void **) &c_device, sizeof(float)*M*P); + + cudaMemcpy(a_device, a_host, sizeof(float)*M*N, cudaMemcpyHostToDevice); + cudaMemcpy(b_device, b_host, sizeof(float)*N*P, cudaMemcpyHostToDevice); + + unsigned int grid_cols = (P + BLOCK_SIZE - 1) / BLOCK_SIZE; + unsigned int grid_rows = (M + BLOCK_SIZE - 1) / BLOCK_SIZE; + dim3 dimGrid(grid_cols, grid_rows); + dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); + + for (int i = 0; i < 100; i++) + hellinger<<>>(a_device, b_device, c_device, M, N, P); + + cudaMemcpy(c_back, c_device, sizeof(int)*M*P, cudaMemcpyDeviceToHost); + + cout << "Problem size: c(" << M << "," << P << ") = a(" << M << "," << N + << ") * b(" << N << "," << P << ")\n"; + +#ifdef VERIFY + VerifyResult(a_host, b_host, c_host, c_back); +#endif + + delete[] a_host; + delete[] b_host; + delete[] c_host; + delete[] c_back; + cudaFree(a_device); + cudaFree(b_device); + cudaFree(c_device); + return 0; +} + diff --git a/cuda_code/main_1334.cu b/cuda_code/main_1334.cu new file mode 100644 index 0000000000000000000000000000000000000000..39e62ac0d7a3e947e25788a418b5752c9a86318b --- /dev/null +++ b/cuda_code/main_1334.cu @@ -0,0 +1,233 @@ +#ifdef _WIN32 +# define WINDOWS_LEAN_AND_MEAN +# define NOMINMAX +# include +#endif +#include +#include +#include +#include +#include +#include "helper_cuda.h" +#include "helper_timer.h" +#include +#include "mergesort.cuh" +#include "bucketsort.cu" +#include "bucketsort_kernel.cu" +#include "mergesort.cu" +#include "mergesort_kernel.cu" + +using namespace std; + +//////////////////////////////////////////////////////////////////////////////// +// Size of the testset (Bitwise shift of 1 over 22 places) +//////////////////////////////////////////////////////////////////////////////// +#define SIZE (1 << 22) +//////////////////////////////////////////////////////////////////////////////// +// Number of tests to average over +//////////////////////////////////////////////////////////////////////////////// +#define TEST 4 +//////////////////////////////////////////////////////////////////////////////// +// The timers for the different parts of the algo 
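// Host-side restatement of what the hellinger kernel in main_1305.cu above
// computes: for each (row, col), sqrt(max(0, 1 - sum_i sqrt(a[row][i] * b[i][col]))),
// i.e. the Hellinger distance via the Bhattacharyya coefficient, with the
// signbit gate guarding tiny negative values from rounding. This is only an
// illustrative reference (verify.cpp is not shown here and this is not claimed
// to be its contents); assumes <cmath>.
static void hellinger_reference(const float *a, const float *b, float *c,
                                int m, int n, int k) {
  for (int row = 0; row < m; row++) {
    for (int col = 0; col < k; col++) {
      float bc = 0.f;                                  // Bhattacharyya coefficient
      for (int i = 0; i < n; i++)
        bc += sqrtf(a[row * n + i] * b[i * k + col]);
      float v = 1.f - bc;
      c[row * k + col] = sqrtf(v > 0.f ? v : 0.f);     // clamp rounding error at 0
    }
  }
}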
+//////////////////////////////////////////////////////////////////////////////// +StopWatchInterface *uploadTimer, *downloadTimer, *bucketTimer, + *mergeTimer, *totalTimer, *cpuTimer; +//////////////////////////////////////////////////////////////////////////////// +// Compare method for CPU sort +//////////////////////////////////////////////////////////////////////////////// +inline int compare(const void *a, const void *b) { + if(*((float *)a) < *((float *)b)) return -1; + else if(*((float *)a) > *((float *)b)) return 1; + else return 0; +} +//////////////////////////////////////////////////////////////////////////////// +// Forward declaration +//////////////////////////////////////////////////////////////////////////////// +void cudaSort(float *origList, float minimum, float maximum, + float *resultList, int numElements); + +//////////////////////////////////////////////////////////////////////////////// +// Program main +//////////////////////////////////////////////////////////////////////////////// +int +main( int argc, char** argv) +{ + + // Create timers for each sort + sdkCreateTimer(&uploadTimer); + sdkCreateTimer(&downloadTimer); + sdkCreateTimer(&bucketTimer); + sdkCreateTimer(&mergeTimer); + sdkCreateTimer(&totalTimer); + sdkCreateTimer(&cpuTimer); + int numElements = 0; + // Number of elements in the test bed + if(strcmp(argv[1],"r") ==0) { + numElements = SIZE; + } + else { + FILE *fp; + fp = fopen(argv[1],"r"); + if(fp == NULL) { + cout << "Error reading file" << endl; + exit(EXIT_FAILURE); + } + int count = 0; + float c; + + while(fscanf(fp,"%f",&c) != EOF) { + count++; +} + fclose(fp); + + numElements = count; +} + cout << "Sorting list of " << numElements << " floats\n"; + // Generate random data + // Memory space the list of random floats will take up + int mem_size = numElements * sizeof(float); + // Allocate enough for the input list + float *cpu_idata = (float *)malloc(mem_size); + // Allocate enough for the output list on the cpu side + float *cpu_odata = (float *)malloc(mem_size); + // Allocate enough memory for the output list on the gpu side + float *gpu_odata = (float *)malloc(mem_size); + + float datamin = FLT_MAX; + float datamax = -FLT_MAX; + if(strcmp(argv[1],"r")==0) { + for (int i = 0; i < numElements; i++) { + // Generate random floats between 0 and 1 for the input data + cpu_idata[i] = ((float) rand() / RAND_MAX); + //Compare data at index to data minimum, if less than current minimum, set that element as new minimum + datamin = min(cpu_idata[i], datamin); + //Same as above but for maximum + datamax = max(cpu_idata[i], datamax); + } + +} else { + FILE *fp; + fp = fopen(argv[1],"r"); + for(int i = 0; i < numElements; i++) { + fscanf(fp,"%f",&cpu_idata[i]); + datamin = min(cpu_idata[i], datamin); + datamax = max(cpu_idata[i],datamax); + } + } + + cout << "Sorting on GPU..." << flush; + // GPU Sort + for (int i = 0; i < TEST; i++) + cudaSort(cpu_idata, datamin, datamax, gpu_odata, numElements); + cout << "done.\n"; +#ifdef VERIFY + cout << "Sorting on CPU..." << flush; + // CPU Sort + memcpy(cpu_odata, cpu_idata, mem_size); + sdkStartTimer(&cpuTimer); + qsort(cpu_odata, numElements, sizeof(float), compare); + sdkStopTimer(&cpuTimer); + cout << "done.\n"; + cout << "Checking result..." 
<< flush; + // Result checking + int count = 0; + for(int i = 0; i < numElements; i++) + if(cpu_odata[i] != gpu_odata[i]) + { + printf("Sort missmatch on element %d: \n", i); + printf("CPU = %f : GPU = %f\n", cpu_odata[i], gpu_odata[i]); + count++; + break; + } + if(count == 0) cout << "PASSED.\n"; + else cout << "FAILED.\n"; +#endif + // Timer report + printf("GPU iterations: %d\n", TEST); +#ifdef TIMER +#ifdef VERIFY + printf("Average CPU execution time: %f ms\n", sdkGetTimerValue(&cpuTimer)); +#endif + printf("Average GPU execution time: %f ms\n", sdkGetTimerValue(&totalTimer) / TEST); + printf(" - Upload : %f ms\n", sdkGetTimerValue(&uploadTimer) / TEST); + printf(" - Download : %f ms\n", sdkGetTimerValue(&downloadTimer) / TEST); + printf(" - Bucket sort : %f ms\n", sdkGetTimerValue(&bucketTimer) / TEST); + printf(" - Merge sort : %f ms\n", sdkGetTimerValue(&mergeTimer) / TEST); +#endif + +#ifdef OUTPUT + FILE *tp; + const char filename2[]="./hybridoutput.txt"; + tp = fopen(filename2,"w"); + for(int i = 0; i < numElements; i++) { + fprintf(tp,"%f ",cpu_idata[i]); + } + + fclose(tp); +#endif + + // Release memory + sdkDeleteTimer(&uploadTimer); + sdkDeleteTimer(&downloadTimer); + sdkDeleteTimer(&bucketTimer); + sdkDeleteTimer(&mergeTimer); + sdkDeleteTimer(&totalTimer); + sdkDeleteTimer(&cpuTimer); + free(cpu_idata); free(cpu_odata); free(gpu_odata); +} + + +void cudaSort(float *origList, float minimum, float maximum, + float *resultList, int numElements) +{ + // Initialization and upload data + float *d_input = NULL; + float *d_output = NULL; + int mem_size = (numElements + DIVISIONS * 4) * sizeof(float); + sdkStartTimer(&uploadTimer); + { + cudaMalloc((void**) &d_input, mem_size); + cudaMalloc((void**) &d_output, mem_size); + cudaMemcpy((void *) d_input, (void *)origList, numElements * sizeof(float), + cudaMemcpyHostToDevice); + init_bucketsort(numElements); + } + sdkStopTimer(&uploadTimer); + + sdkStartTimer(&totalTimer); + + // Bucketsort the list + sdkStartTimer(&bucketTimer); + int *sizes = (int*) malloc(DIVISIONS * sizeof(int)); + int *nullElements = (int*) malloc(DIVISIONS * sizeof(int)); + unsigned int *origOffsets = (unsigned int *) malloc((DIVISIONS + 1) * sizeof(int)); + bucketSort(d_input, d_output, numElements, sizes, nullElements, + minimum, maximum, origOffsets); + sdkStopTimer(&bucketTimer); + + // Mergesort the result + sdkStartTimer(&mergeTimer); + float4 *d_origList = (float4*) d_output, + *d_resultList = (float4*) d_input; + int newlistsize = 0; + + for(int i = 0; i < DIVISIONS; i++) + newlistsize += sizes[i] * 4; + + float4 *mergeresult = runMergeSort( newlistsize, DIVISIONS, d_origList, d_resultList, + sizes, nullElements, origOffsets); //d_origList; + cudaThreadSynchronize(); + sdkStopTimer(&mergeTimer); + sdkStopTimer(&totalTimer); + + // Download result + sdkStartTimer(&downloadTimer); + checkCudaErrors( cudaMemcpy((void *) resultList, + (void *)mergeresult, numElements * sizeof(float), cudaMemcpyDeviceToHost) ); + sdkStopTimer(&downloadTimer); + + // Clean up + finish_bucketsort(); + cudaFree(d_input); cudaFree(d_output); + free(nullElements); free(sizes); +} diff --git a/cuda_code/main_1361.cu b/cuda_code/main_1361.cu new file mode 100644 index 0000000000000000000000000000000000000000..701ed864ae253ba02855925352a079ced90e1a70 --- /dev/null +++ b/cuda_code/main_1361.cu @@ -0,0 +1,220 @@ +// This file is part of ComputeStuff copyright (C) 2017 Christopher Dyken. +// Released under the MIT license, please see LICENSE file for details. 
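// A note on the result check in main_1334.cu above: exact float comparison is
// acceptable there because the CPU and GPU paths only reorder the same input
// values rather than recomputing them. A cheap extra sanity check is that the
// GPU output is itself non-decreasing; a minimal sketch (checkSorted is an
// illustrative name, not part of the original files):
static bool checkSorted(const float *v, int n) {
  for (int i = 1; i < n; i++)
    if (v[i - 1] > v[i]) return false;
  return true;
}
// usage sketch: if (!checkSorted(gpu_odata, numElements)) printf("not sorted\n");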
+ +#include +#include +#include +#include +#include + +#include +#include + +namespace { + + void logFailure(cudaError_t error, const char *file, int line) + { + std::cerr << file << '@' << line << ": CUDA error: " << cudaGetErrorName(error) << std::endl; + abort(); + } +} +#define assertSuccess(a) do { cudaError_t rv = (a); if(rv != cudaSuccess) logFailure(rv, __FILE__, __LINE__); } while(0) + +void assertMatching(const uint32_t* result, const uint32_t* gold, uint32_t N) +{ + for (size_t i = 0; i < N; i++) { + auto a = result[i]; + auto b = gold[i]; + if (a != b) { + std::cerr << "a=" << a << " != b=" << b << std::endl; + abort(); + } + } +} + +void assertMatching(volatile uint32_t* a, uint32_t b) +{ + if (*a != b) { + std::cerr << "a=" << *a << ", b=" << b << std::endl; + abort(); + } +} + +void buildCompactProblemWorstCase(std::vector& out, uint32_t& sum, std::vector& in, uint32_t N, uint32_t m) +{ + sum = 0; + in.resize(N); + out.resize(N); + for (uint32_t i = 0; i < N; i++) { + in[i] = m == 1 ? 1 : ((i % m) == 0); + if (in[i]) { + out[sum++] = i; + } + } +} + +void buildCompactProblemBestCase(std::vector& out, uint32_t& sum, std::vector& in, uint32_t N, uint32_t m) +{ + sum = 0; + in.resize(N); + out.resize(N); + + auto s = (N + m - 1) / m; + for (uint32_t i = 0; i < s; i++) { + in[i] = 1; + out[sum++] = i; + } + for (uint32_t i = s; i < N; i++) { + in[i] = 0; + } +} + +void runCompactTest(uint32_t N, uint32_t m) +{ + cudaStream_t stream; + assertSuccess(cudaStreamCreate(&stream)); + + cudaEvent_t startA, stopA, startB, stopB, startC, stopC, startD, stopD; + assertSuccess(cudaEventCreate(&startA)); + assertSuccess(cudaEventCreate(&startB)); + assertSuccess(cudaEventCreate(&startC)); + assertSuccess(cudaEventCreate(&startD)); + assertSuccess(cudaEventCreate(&stopA)); + assertSuccess(cudaEventCreate(&stopB)); + assertSuccess(cudaEventCreate(&stopC)); + assertSuccess(cudaEventCreate(&stopD)); + + uint32_t* sum_h, *sum_d; + assertSuccess(cudaHostAlloc(&sum_h, sizeof(uint32_t), cudaHostAllocMapped)); + assertSuccess(cudaHostGetDevicePointer(&sum_d, sum_h, 0)); + + uint32_t *out_d, *in_d, *hp5_scratch_d, *scan_scratch_d; + assertSuccess(cudaMalloc(&out_d, sizeof(uint32_t)*N)); + assertSuccess(cudaMalloc(&in_d, sizeof(uint32_t)*N)); + assertSuccess(cudaMalloc(&hp5_scratch_d, ComputeStuff::HP5::scratchByteSize(N))); + assertSuccess(cudaMalloc(&scan_scratch_d, ComputeStuff::Scan::scratchByteSize(N))); + + std::vector out_h(N); + + uint32_t sum; + std::vector out, in; + + // Best case + buildCompactProblemBestCase(out, sum, in, N, m); + assertSuccess(cudaMemcpy(in_d, in.data(), sizeof(uint32_t)*N, cudaMemcpyHostToDevice)); + *sum_h = ~0u; + for (uint32_t i = 0; i < 10; i++) { + ComputeStuff::Scan::compact(out_d, sum_d, scan_scratch_d, in_d, N, stream); + } + cudaEventRecord(startA, stream); + for (uint32_t i = 0; i < 50; i++) { + ComputeStuff::Scan::compact(out_d, sum_d, scan_scratch_d, in_d, N, stream); + } + cudaEventRecord(stopA, stream); + cudaEventSynchronize(stopA); + cudaMemcpy(out_h.data(), out_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToHost); + + assertMatching(sum_h, sum); + assertMatching(out_h.data(), out.data(), sum); + + *sum_h = ~0u; + cudaMemset(out_d, ~0, sizeof(uint32_t)*N); + for (uint32_t i = 0; i < 10; i++) { + ComputeStuff::HP5::compact(out_d, sum_d, hp5_scratch_d, in_d, N, stream); + } + cudaEventRecord(startB, stream); + for (uint32_t i = 0; i < 50; i++) { + ComputeStuff::HP5::compact(out_d, sum_d, hp5_scratch_d, in_d, N, stream); + } + cudaEventRecord(stopB, stream); + 
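// Minimal sketch of the mapped-memory pattern used above for sum_h / sum_d:
// one page-locked host word is aliased by a device pointer, so a kernel can
// write the compaction count and the host can read it back without an explicit
// cudaMemcpy. Illustrative only (readbackMappedCounterExample is not part of
// the original file); assumes mapped-memory support, which current devices
// provide, and omits error checks for brevity.
static uint32_t readbackMappedCounterExample()
{
  uint32_t *h = nullptr, *d = nullptr;
  cudaHostAlloc((void**)&h, sizeof(uint32_t), cudaHostAllocMapped);
  cudaHostGetDevicePointer((void**)&d, h, 0);
  *h = 0u;
  // ... launch a kernel that writes its result through d ...
  cudaDeviceSynchronize();   // make the device write visible to the host
  uint32_t value = *h;
  cudaFreeHost(h);
  return value;
}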
cudaEventSynchronize(stopB); + cudaMemcpy(out_h.data(), out_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToHost); + + assertMatching(sum_h, sum); + assertMatching(out_h.data(), out.data(), sum); + + // Worst case + buildCompactProblemWorstCase(out, sum, in, N, m); + assertSuccess(cudaMemcpy(in_d, in.data(), sizeof(uint32_t)*N, cudaMemcpyHostToDevice)); + *sum_h = ~0u; + for (uint32_t i = 0; i < 10; i++) { // Warm-up + ComputeStuff::Scan::compact(out_d, sum_d, scan_scratch_d, in_d, N, stream); + } + cudaEventRecord(startC, stream); + for (uint32_t i = 0; i < 50; i++) { // Perf run + ComputeStuff::Scan::compact(out_d, sum_d, scan_scratch_d, in_d, N, stream); + } + cudaEventRecord(stopC, stream); + cudaEventSynchronize(stopC); + cudaMemcpy(out_h.data(), out_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToHost); + + assertMatching(sum_h, sum); + assertMatching(out_h.data(), out.data(), sum); + + *sum_h = ~0u; + for (uint32_t i = 0; i < 10; i++) { // Warm-up + ComputeStuff::HP5::compact(out_d, sum_d, hp5_scratch_d, in_d, N, stream); + } + cudaEventRecord(startD, stream); + for (uint32_t i = 0; i < 50; i++) { // Perf run + ComputeStuff::HP5::compact(out_d, sum_d, hp5_scratch_d, in_d, N, stream); + } + cudaEventRecord(stopD, stream); + cudaEventSynchronize(stopD); + cudaMemcpy(out_h.data(), out_d, sizeof(uint32_t)*N, cudaMemcpyDeviceToHost); + + assertMatching(sum_h, sum); + assertMatching(out_h.data(), out.data(), sum); + + + float elapsedA, elapsedB, elapsedC, elapsedD; + assertSuccess(cudaEventElapsedTime(&elapsedA, startA, stopA)); + assertSuccess(cudaEventElapsedTime(&elapsedB, startB, stopB)); + assertSuccess(cudaEventElapsedTime(&elapsedC, startC, stopC)); + assertSuccess(cudaEventElapsedTime(&elapsedD, startD, stopD)); + + std::cerr << std::setprecision(3) + << "| " << N << " | " + << (100.0 / m) << "% | " + << sum << " | " + << (elapsedA / 50.0) << "ms | " + << (elapsedB / 50.0) << "ms | " + << (elapsedB / elapsedA) << " | " + << (elapsedC / 50.0) << "ms | " + << (elapsedD / 50.0) << "ms | " + << (elapsedD / elapsedC) << " | " << std::endl; + + assertSuccess(cudaEventDestroy(startA)); + assertSuccess(cudaEventDestroy(startB)); + assertSuccess(cudaEventDestroy(startC)); + assertSuccess(cudaEventDestroy(startD)); + assertSuccess(cudaEventDestroy(stopA)); + assertSuccess(cudaEventDestroy(stopB)); + assertSuccess(cudaEventDestroy(stopC)); + assertSuccess(cudaEventDestroy(stopD)); + assertSuccess(cudaFreeHost(sum_h)); + assertSuccess(cudaFree(out_d)); + assertSuccess(cudaFree(in_d)); + assertSuccess(cudaFree(scan_scratch_d)); + assertSuccess(cudaStreamDestroy(stream)); +} + +int main() +{ + assertSuccess(cudaSetDevice(0)); + + cudaDeviceProp props; + assertSuccess(cudaGetDeviceProperties(&props, 0)); + if (props.major < 3) { + std::cerr << "Compute capability 3.0 is minimum, device " << props.name << " has compute capability " << props.major << "." << props.minor << std::endl; + return -1; + } + + + for (uint64_t N = 1; N < (uint64_t)(props.totalGlobalMem / (sizeof(uint32_t) * 4)); N = 3 * N + N / 3) { + for (uint32_t m = 32; m < 512; m *= 2) { + runCompactTest(static_cast(N), m); + } + } + +} \ No newline at end of file diff --git a/cuda_code/main_1455.cu b/cuda_code/main_1455.cu new file mode 100644 index 0000000000000000000000000000000000000000..4435c127345e06f5361216a536c71bc8547f921f --- /dev/null +++ b/cuda_code/main_1455.cu @@ -0,0 +1,55 @@ +/* + * + * hello world ... from a GPU! 
+ * + */ + +#include +#include + +#include + +#include +#include + +__global__ void hello() { + printf("thread %5i from block %5i says, \"hello, world!\"\n",threadIdx.x,blockIdx.x); +} + +// main! +int main (int argc, char* argv[]) { + + if ( argc != 3 ) { + printf("\n"); + printf(" hello.x -- hello from a gpu!\n"); + printf("\n"); + printf(" usage: ./hello.x n nblocks nthreads_per_block\n"); + printf("\n"); + printf(" nblocks: number of blocks\n"); + printf(" nthreads_per_block: number of threads per block\n"); + printf("\n"); + exit(EXIT_FAILURE); + } + printf("\n"); + + std::stringstream ss; ss << argv[1] << " " << argv[2]; + int nblocks; ss >> nblocks; + int nthreads_per_block; ss >> nthreads_per_block; + + hello<<>>(); + + // check for errors + cudaError_t error = cudaGetLastError(); + if (error!=cudaSuccess) { + printf("\n"); + printf(" error: %s\n\n", cudaGetErrorString(error) ); + printf("\n"); + exit(EXIT_FAILURE); + } + + cudaDeviceReset(); + + printf("\n"); + + return 0; +} diff --git a/cuda_code/main_1465.cu b/cuda_code/main_1465.cu new file mode 100644 index 0000000000000000000000000000000000000000..5c5997e08565c3d161436a59871c9c5bfb9b414c --- /dev/null +++ b/cuda_code/main_1465.cu @@ -0,0 +1,391 @@ +#include +#include "cuda_runtime.h" +#include "device_launch_parameters.h" +#include "definitions.cuh" +#include +#include "stdio.h" + +//Number of elements on which to perform CFD +unsigned int Ni = 512; // Y elements +unsigned int Nj = 512; // X elements +unsigned int nIterations = 10000; // No Of Iterations +unsigned int kernelVersion =2; // Decides which GPU kernel version to call (Set it to 1 or 2) + +int main(int argc, char** argv) +{ + + //Variables for Timing + float cpuTime, gpuTime; + + // CPU and GPU Pointers ( d_XX : refers to pointer pointing to GPU memory. This is just a convention) + float *t = NULL, *t_prev = NULL; + float *d_t = NULL,*d_t_prev= NULL; + + parseCommandLineArguments(argc, (char **)argv); + printf("\n Ni= %d, Nj=%d nIteration=%d",Ni,Nj,nIterations); + + unsigned int size = Ni * Nj * sizeof(float); + + if(!initializeCPU(&t, &t_prev) ) + { + printf("\n Error in allocating memory on CPU!!!"); + unInitializeCPU(&t, &t_prev); + getchar(); + return 0; + } + + if (!initializeGPU(&d_t, &d_t_prev)) + { + printf("\n Error in allocating memory on GPU!!!"); + unInitializeCPU(&t, &t_prev); + unInitializeGPU(&d_t, &d_t_prev); + return 0; + } + + //Perform CFD on CPU + performCPUCFD(t,t_prev, &cpuTime); + + // To temporarily store CPU data. 
This is just for comparing with GPU output + float *tempBuffer = (float*) calloc(Ni*Nj, sizeof(float)); + memcpy(tempBuffer, t_prev, size); + + //Perform CFD on GPU + if(!performGPUCFD(d_t,d_t_prev, t, t_prev, &gpuTime)) + { + printf("\n GPU Kernel failed !!!"); + unInitializeCPU(&t, &t_prev); + unInitializeGPU(&d_t, &d_t_prev); + if(tempBuffer !=NULL) + free(tempBuffer); + return 0; + } + + printf("\n Is host equal to device = %d", checkHostEqualsDevice(tempBuffer,t)); + printf("\n Speedup = %fx", (float)(cpuTime/gpuTime)); + + unInitializeCPU(&t, &t_prev); + unInitializeGPU(&d_t, &d_t_prev); + + if(tempBuffer !=NULL) + free(tempBuffer); + + printf("\n Finished Processing!!!"); + getchar(); + +} + + +void parseCommandLineArguments(int argc, char**argv) +{ + if (argc >= 1) + { + for (int i=1; i < argc; i++) + { + int bFirstArgIsParam = false; + int string_start = 0; + while (argv[i][string_start] == '-') + string_start++; + char *string_argv = &argv[i][string_start]; + + if (!STRNCASECMP(string_argv, "Ni=", 3)) + { + bFirstArgIsParam = true; + Ni = atoi(&string_argv[3]); + continue; + } + if (!STRNCASECMP(string_argv, "Nj=", 3)) + { + bFirstArgIsParam = true; + Nj = atoi(&string_argv[3]); + continue; + } + if (!STRNCASECMP(string_argv, "iterations=", 11)) + { + bFirstArgIsParam = true; + nIterations = atoi(&string_argv[11]); + continue; + } + if (!STRNCASECMP(string_argv, "kernel=", 7)) + { + bFirstArgIsParam = true; + kernelVersion = atoi(&string_argv[7]); + continue; + } + + if (!bFirstArgIsParam) + { + printf("Invalid arguments\n"); + for (int n=0; n < argc; n++) + { + printf("argv[%d] = %s\n", n, argv[n]); + } + printf("\n"); + exit(0); + } + } + } + + if(( Ni % THREADS_PER_BLOCK_Y != 0) || (Nj % THREADS_PER_BLOCK_X != 0)) + { + fprintf(stderr, "Please specify Ni & Nj as multiple of 16 !!!!"); + getchar(); + exit(0); + } +} + +int initializeCPU(float **t, float **t_prev) +{ + *t = (float*) calloc(Ni*Nj, sizeof(float)); + *t_prev = (float*) calloc(Ni*Nj, sizeof(float)); + + if((*t)==NULL || (*t_prev) == NULL) + return 0; + else + return 1; +} + +void unInitializeCPU(float **t, float **t_prev) +{ + if((*t) !=NULL) + free(*t); + if((*t_prev) != NULL) + free(*t_prev); +} + +int initializeGPU(float **d_t, float **d_t_prev) +{ + + unsigned int size = Ni * Nj * sizeof(float); + + + + // Choose which GPU to run on, change this on a multi-GPU system. + cudaError_t cudaStatus = cudaSetDevice(0); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); + getchar(); + return 0; + } + // Allocate GPU buffers. + cudaStatus = cudaMalloc((void**)&(*d_t), size); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMalloc failed!"); + getchar(); + return 0; + } + + // Allocate GPU buffers . 
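// The allocate / check / memset sequence in initializeGPU repeats for every
// buffer; a small helper keeps the call sites shorter. This is a sketch only;
// allocZeroedDeviceBuffer is an illustrative name, not part of the original
// file.
static int allocZeroedDeviceBuffer(float **d_ptr, size_t bytes)
{
  cudaError_t status = cudaMalloc((void**)d_ptr, bytes);
  if (status != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(status));
    return 0;
  }
  status = cudaMemset(*d_ptr, 0, bytes);
  if (status != cudaSuccess) {
    fprintf(stderr, "cudaMemset failed: %s\n", cudaGetErrorString(status));
    cudaFree(*d_ptr);
    return 0;
  }
  return 1;
}
// usage sketch: if (!allocZeroedDeviceBuffer(d_t, size)) return 0;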
+ cudaStatus = cudaMalloc((void**)&(*d_t_prev), size); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMalloc failed!"); + getchar(); + return 0; + } + + // Memset GPU buffers + cudaStatus = cudaMemset((*d_t),0, size); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMemset failed!"); + getchar(); + return 0; + } + + // Memset GPU buffers + cudaStatus = cudaMemset((*d_t_prev),0, size); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMemset failed!"); + getchar(); + return 0; + } + + return 1; +} + + +void unInitializeGPU(float **d_t, float **d_t_prev) +{ + cudaError_t cudaStatus; + + if((*d_t)!=NULL) + cudaStatus = cudaFree((*d_t)); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaFree failed!"); + return; + } + + if((*d_t_prev)!=NULL) + cudaStatus = cudaFree((*d_t_prev)); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaFree failed!"); + return; + } + + cudaStatus = cudaDeviceReset(); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaDeviceReset failed!"); + getchar(); + return; + } +} + +void performCPUCFD(float *t, float *t_prev, float *cpuTime) +{ + + float h,x,y; + + h = 1.0f/(Ni-1); + + for(unsigned int i=0;i>>(d_t_prev,d_t, Ni, Nj, h); + + float* pingPong = d_t_prev; + d_t_prev = d_t; + d_t = pingPong; + } + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + } + //This calls Version 2 of kernel which uses optimization by copying data to shared memory + else if(kernelVersion ==2) + { + cudaEventRecord(start, 0); + + for(unsigned int k=0;k>>(d_t_prev,d_t, Ni, Nj, h); + + float* pingPong = d_t_prev; + d_t_prev = d_t; + d_t = pingPong; + } + cudaEventRecord(stop, 0); + cudaEventSynchronize(stop); + + } + + float elapsedTime; + cudaEventElapsedTime(&elapsedTime, start, stop); + printf("\n GPU Time:: %f ms", elapsedTime); + + *gpuTime = elapsedTime; + + cudaError_t cudaStatus = cudaMemcpy(t, d_t_prev, sizeof(float) * Ni * Nj , cudaMemcpyDeviceToHost); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMemcpy failed!"); + str = cudaGetErrorString(cudaStatus); + fprintf(stderr, "CUDA Error!:: %s\n", str); + getchar(); + return 0; + } + + return 1; +} + +int checkHostEqualsDevice(float* o_host, float* o_device) +{ + int flag =1; + + float tolerance = 0.0001f; + //Compare the results + for(unsigned int j=0;j= tolerance || (o_host[i*Nj+j] - o_device[i*Nj+j]) <= -tolerance) + { + printf("\n D=[%f]!=H=[%f] since Diff > tol %f for [%d][%d]",o_device[i*Nj+j], o_host[i*Nj+j],tolerance, i, j); + flag =0; + //getchar(); + } + } + } + + return flag; +} \ No newline at end of file diff --git a/cuda_code/main_1505.cu b/cuda_code/main_1505.cu new file mode 100644 index 0000000000000000000000000000000000000000..8f5103ae80bbf224ad6b19e3aa0ed9dd958b1066 --- /dev/null +++ b/cuda_code/main_1505.cu @@ -0,0 +1,169 @@ +/** + * + * A program to test a Serriform Neural Network + * Author: Brandon Trabucco + * Date: 2016/07/27 + * + */ + +#include "SerriformNetwork.cuh" +#include "DatasetAdapter.h" +#include "OutputTarget.h" +#include +#include +#include +#include +#include +#include +#include +#include +using namespace std; + +long long getMSec() { + struct timeval tp; + gettimeofday(&tp, NULL); + return tp.tv_sec * 1000 + tp.tv_usec / 1000; +} + +struct tm *getDate() { + time_t t = time(NULL); + struct tm *timeObject = localtime(&t); + return timeObject; +} + +int main(int argc, char *argv[]) { + cout << "Program initializing" << endl; + if (argc < 3) { + cout << argv[0] << " " << endl; + return -1; + } + + int updatePoints = 10; + int 
savePoints = 10; + int maxEpoch = 10; + int trainingSize = 500; + int overlap = atof(argv[3]); + int sumNeurons = 0; + double errorBound = 0.01; + double mse = 0; + double learningRate = atof(argv[1]), decayRate = atof(argv[2]); + long long networkStart, networkEnd, sumTime = 0; + + const int _day = getDate()->tm_mday; + + + /** + * + * Open file streams to save data samples from Neural Network + * This data can be plotted via GNUPlot + * + */ + ostringstream errorDataFileName; + errorDataFileName << "/u/trabucco/Desktop/Sequential_Convergence_Data_Files/" << + (getDate()->tm_year + 1900) << "-" << (getDate()->tm_mon + 1) << "-" << _day << + "_GPU-SNN-Error_" << learningRate << + "-learning_" << decayRate << "-decay.csv"; + ofstream errorData(errorDataFileName.str(), ios::app); + if (!errorData.is_open()) return -1; + + ostringstream timingDataFileName; + timingDataFileName << "/u/trabucco/Desktop/Sequential_Convergence_Data_Files/" << + (getDate()->tm_year + 1900) << "-" << (getDate()->tm_mon + 1) << "-" << _day << + "_GPU-SNN-Timing_" << learningRate << + "-learning_" << decayRate << "-decay.csv"; + ofstream timingData(timingDataFileName.str(), ios::app); + if (!timingData.is_open()) return -1; + + ostringstream accuracyDataFileName; + accuracyDataFileName << "/u/trabucco/Desktop/Sequential_Convergence_Data_Files/" << + (getDate()->tm_year + 1900) << "-" << (getDate()->tm_mon + 1) << "-" << _day << + "_GPU-SNN-Accuracy_" << learningRate << + "-learning_" << decayRate << "-decay.csv"; + ofstream accuracyData(accuracyDataFileName.str(), ios::app); + if (!accuracyData.is_open()) return -1; + + ostringstream outputDataFileName; + outputDataFileName << "/u/trabucco/Desktop/Sequential_Convergence_Data_Files/" << + (getDate()->tm_year + 1900) << "-" << (getDate()->tm_mon + 1) << "-" << _day << + "_GPU-SNN-Output_" << learningRate << + "-learning_" << decayRate << "-decay.csv"; + ofstream outputData(outputDataFileName.str(), ios::app); + if (!outputData.is_open()) return -1; + outputData << endl << endl; + + + networkStart = getMSec(); + DatasetAdapter dataset = DatasetAdapter(); + networkEnd = getMSec(); + cout << "Language Dataset loaded in " << (networkEnd - networkStart) << "msecs" << endl; + + + SerriformNetwork network = SerriformNetwork(dataset.getCharSize(), overlap, learningRate, decayRate); + OutputTarget target = OutputTarget(dataset.getCharSize(), dataset.getCharSize()); + cout << "Network initialized" << endl; + + + for (int i = 0; i < (argc - 4); i++) { + network.addLayer(atoi(argv[4 + i])); + sumNeurons += atoi(argv[4 + i]); + } network.addLayer(dataset.getCharSize()); + + + int totalIterations = 0; + bool converged = false; + for (int e = 0; (e < maxEpoch)/* && (!e || (((mse1 + mse2)/2) > errorBound))*/; e++) { + int c = 0, n = 0; + vector error, output; + + networkStart = getMSec(); + for (int i = 0; i < trainingSize && dataset.nextChar(); i++) { + DatasetExample data = dataset.getChar(); + error = network.train(target.getOutputFromTarget(data.current), + target.getOutputFromTarget(data.next)); + } + + dataset.reset(); + + for (int i = 0; i < trainingSize && dataset.nextChar(); i++) { + DatasetExample data = dataset.getChar(); + output = network.classify(target.getOutputFromTarget(data.current)); + + n++; + if (target.getTargetFromOutput(output) == (int)data.next) c++; + } networkEnd = getMSec(); + + sumTime += (networkEnd - networkStart); + totalIterations += 1; + + mse = 0; + for (int i = 0; i < error.size(); i++) + mse += error[i] * error[i]; + mse /= error.size() * 2; + + if 
(((e + 1) % (maxEpoch / updatePoints)) == 0) { + cout << "Epoch " << e << " completed in " << (networkEnd - networkStart) << "msecs" << endl; + cout << "Error[" << e << "] = " << mse << endl; + cout << "Accuracy[" << e << "] = " << (100.0 * (float)c / (float)n) << endl; + } errorData << e << ", " << mse << endl; + accuracyData << e << ", " << (100.0 * (float)c / (float)n) << endl; + + dataset.reset(); + } + + vector > seed; + seed.push_back(target.getOutputFromTarget((int)'I')); + for (int i = 0; i < 500; i++) { + vector output = network.classify(seed[i]); + seed.push_back(output); + char text = (char)target.getTargetFromOutput(output); + outputData << text; + } + + timingData << sumNeurons << ", " << sumTime << ", " << totalIterations << endl; + timingData.close(); + accuracyData.close(); + errorData.close(); + outputData.close(); + + return 0; +} diff --git a/cuda_code/main_1856.cu b/cuda_code/main_1856.cu new file mode 100644 index 0000000000000000000000000000000000000000..4cde9fbc6b69361a96a843a780693837e305677b --- /dev/null +++ b/cuda_code/main_1856.cu @@ -0,0 +1,127 @@ +#include +#include +#include + +// setting GPU device +const int device0 = 0; +const int device1 = 1; +#define BLOCK_SIZE 16 + +void __global__ matvec(double *y, double *A, double *x, int M, int N) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + int j = blockIdx.y * blockDim.y + threadIdx.y; + + if (i < M && j < N) atomicAdd(&y[i], A[i * N + j] * x[j]); +} + +int main(int argc, char *argv[]) { + + // warm up: + double *dummy_d; + cudaSetDevice(device0); + cudaMalloc((void**)&dummy_d, 0); + cudaSetDevice(device1); + cudaMalloc((void**)&dummy_d, 0); + + // command line argument sets the dimensions of the image + int M,N; + if ( argc == 3 ) { + M = atoi(argv[1]); + N = atoi(argv[2]); + } else { + // default + N = 2048; + M = 2048; + } + + double *d0_A, *d0_b, *d0_c, *d1_A, *d1_b, *d1_c; + double *h_A, *h_b, *h_c; + int size_A = sizeof(double)*N*M, size_b = sizeof(double)*N, size_c = sizeof(double)*M; + + // GPU MULTI + // Allocate memory on host and device + cudaSetDevice(device0); + cudaMalloc((void**)&d0_A, size_A/2); + cudaMalloc((void**)&d0_b, size_b); + cudaMalloc((void**)&d0_c, size_c/2); + cudaSetDevice(device1); + cudaMalloc((void**)&d1_A, size_A/2); + cudaMalloc((void**)&d1_b, size_b); + cudaMalloc((void**)&d1_c, size_c/2); + + cudaMallocHost((void**)&h_A, size_A); + cudaMallocHost((void**)&h_b, size_b); + cudaMallocHost((void**)&h_c, size_c); + + + + + // initialize d_A and d_b + double init_A = 2.0, init_b = 2.0; + // double check_ele = (double)M*(init_A * (double)N + init_b * (double)N); + for (int i = 0; i < M*N; i++) h_A[i] = init_A; + for (int i = 0; i < N; i++) h_b[i] = init_b; + + + // copy data + double time,time_end,time_IO_1,time_IO_2,time_compute,time_compute_end,tot_time_compute; + time = omp_get_wtime(); + cudaSetDevice(device0); + cudaMemcpy(d0_A, h_A, size_A/2, cudaMemcpyHostToDevice); + cudaMemcpy(d0_b, h_b, size_b, cudaMemcpyHostToDevice); + + cudaSetDevice(device1); + cudaMemcpy(d1_A, h_A + size_A/2, size_A/2, cudaMemcpyHostToDevice); + cudaMemcpy(d1_b, h_b, size_b, cudaMemcpyHostToDevice); + time_IO_1 = omp_get_wtime()- time; + + // define grid and threads/block + dim3 dim_grid((((M/2)+BLOCK_SIZE-1) / BLOCK_SIZE), (((N/2)+BLOCK_SIZE-1) / BLOCK_SIZE)); + dim3 dim_block(BLOCK_SIZE,BLOCK_SIZE); + time_compute = omp_get_wtime(); + cudaSetDevice(device0); + matvec<<>>(d0_c, d0_A, d0_b, M/2, N); + cudaSetDevice(device1); + matvec<<>>(d1_c, d1_A, d1_b, M/2, N); + cudaDeviceSynchronize(); + 
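// Sketch of the second-half host/device copies in main_1856.cu with offsets
// written in elements: h_A and h_c are double*, so pointer arithmetic is
// already scaled by sizeof(double), and adding byte counts such as size_A/2
// or size_c/2 advances sizeof(double) times further than the intended halfway
// point. Illustrative only.
size_t halfA = (size_t)(M / 2) * N;   // doubles in one half of A
size_t halfC = (size_t)(M / 2);       // doubles in one half of c
// upload the second half of A to device1:
//   cudaMemcpy(d1_A, h_A + halfA, halfA * sizeof(double), cudaMemcpyHostToDevice);
// download device1's half of the result:
//   cudaMemcpy(h_c + halfC, d1_c, halfC * sizeof(double), cudaMemcpyDeviceToHost);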
cudaSetDevice(device0); + cudaDeviceSynchronize(); + time_compute_end = omp_get_wtime(); + + // Copy result back to host + cudaSetDevice(device0); + cudaMemcpy(h_c, d0_c, size_c/2, cudaMemcpyDeviceToHost); + cudaSetDevice(device1); + cudaMemcpy(h_c + size_c/2, d1_c, size_c/2, cudaMemcpyDeviceToHost); + + time_end = omp_get_wtime(); + time_IO_2 = time_end - time_compute_end; + tot_time_compute = time_compute_end - time_compute; + + // stats + double GB = 1.0e-09; + double gflops = (N * M * 2 / tot_time_compute) * GB; + double memory = size_A + size_b + size_c; + double memoryGBs = memory * GB * (1 / tot_time_compute); + + printf("%g\t", memory); // footprint + printf("%g\t", gflops); // Gflops + printf("%g\t", gflops / 141.30); // pct. Gflops + + printf("%lg\t", memoryGBs); // bandwidth GB/s + printf("%lg\t", memoryGBs / 17.96); // pct. bandwidth GB/s + + printf("%g\t", time_end - time); // total time + printf("%g\t", time_IO_1 + time_IO_2); // I/O time + printf("%g\n", tot_time_compute); // compute time + + + // Cleanup + cudaFreeHost(h_A), cudaFreeHost(h_b), cudaFreeHost(h_c); + cudaFree(d0_A), cudaFree(d0_b), cudaFree(d0_c); + cudaFree(d1_A), cudaFree(d1_b), cudaFree(d1_c); + + return(0); +} + + diff --git a/cuda_code/main_1943.cu b/cuda_code/main_1943.cu new file mode 100644 index 0000000000000000000000000000000000000000..0c809a78af309d728ba3f2c0625eb7e265ecfa5e --- /dev/null +++ b/cuda_code/main_1943.cu @@ -0,0 +1,121 @@ + +#include "cuda_runtime.h" +#include "device_launch_parameters.h" + +#include + +cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); + +__global__ void addKernel(int *c, const int *a, const int *b) +{ + int i = threadIdx.x; + c[i] = a[i] + b[i]; +} + +int main() +{ + const int arraySize = 5; + const int a[arraySize] = { 1, 2, 3, 4, 5 }; + const int b[arraySize] = { 10, 20, 30, 40, 50 }; + int c[arraySize] = { 0 }; + + // Add vectors in parallel. + cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "addWithCuda failed!"); + return 1; + } + + printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", + c[0], c[1], c[2], c[3], c[4]); + + // cudaDeviceReset must be called before exiting in order for profiling and + // tracing tools such as Nsight and Visual Profiler to show complete traces. + cudaStatus = cudaDeviceReset(); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaDeviceReset failed!"); + return 1; + } + + return 0; +} + +// Helper function for using CUDA to add vectors in parallel. +cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) +{ + int *dev_a = 0; + int *dev_b = 0; + int *dev_c = 0; + cudaError_t cudaStatus; + + // Choose which GPU to run on, change this on a multi-GPU system. + cudaStatus = cudaSetDevice(0); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); + goto Error; + } + + // Allocate GPU buffers for three vectors (two input, one output). 
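// The allocate / copy / launch sequence below repeats the same
// "if (cudaStatus != cudaSuccess) { ...; goto Error; }" block after every
// call; a small macro can state that once. CHECK_GOTO is an illustrative
// name, not part of the original file; it relies on the enclosing function's
// cudaStatus variable and Error label, as addWithCuda defines them.
#define CHECK_GOTO(call)                                           \
    do {                                                           \
        cudaStatus = (call);                                       \
        if (cudaStatus != cudaSuccess) {                           \
            fprintf(stderr, "%s failed: %s\n", #call,              \
                    cudaGetErrorString(cudaStatus));               \
            goto Error;                                            \
        }                                                          \
    } while (0)
// usage sketch:
//   CHECK_GOTO(cudaMalloc((void**)&dev_c, size * sizeof(int)));
//   CHECK_GOTO(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice));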
+ cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMalloc failed!"); + goto Error; + } + + cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMalloc failed!"); + goto Error; + } + + cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMalloc failed!"); + goto Error; + } + + // Copy input vectors from host memory to GPU buffers. + cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMemcpy failed!"); + goto Error; + } + + cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMemcpy failed!"); + goto Error; + } + + // Launch a kernel on the GPU with one thread for each element. + addKernel<<<1, size>>>(dev_c, dev_a, dev_b); + + // Check for any errors launching the kernel + cudaStatus = cudaGetLastError(); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); + goto Error; + } + + // cudaDeviceSynchronize waits for the kernel to finish, and returns + // any errors encountered during the launch. + cudaStatus = cudaDeviceSynchronize(); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); + goto Error; + } + + // Copy output vector from GPU buffer to host memory. + cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); + if (cudaStatus != cudaSuccess) { + fprintf(stderr, "cudaMemcpy failed!"); + goto Error; + } + +Error: + cudaFree(dev_c); + cudaFree(dev_a); + cudaFree(dev_b); + + return cudaStatus; +} diff --git a/cuda_code/main_202.cu b/cuda_code/main_202.cu new file mode 100644 index 0000000000000000000000000000000000000000..21ae6429557ec422a9b7157af910c1712cd2788d --- /dev/null +++ b/cuda_code/main_202.cu @@ -0,0 +1,485 @@ +/** Modifed version of knn-CUDA from https://github.com/vincentfpgarcia/kNN-CUDA + * The modifications are + * removed texture memory usage + * removed split query KNN computation + * added feature extraction with bilinear interpolation + * + * Last modified by Christopher B. Choy 12/23/2016 + */ + +// Includes +#include +#include +#include +#include +#include + +// Constants used by the program +#define BLOCK_DIM 16 + + +/** + * Computes the distance between two matrix A (reference points) and + * B (query points) containing respectively wA and wB points. 
+ * + * @param A pointer on the matrix A + * @param wA width of the matrix A = number of points in A + * @param B pointer on the matrix B + * @param wB width of the matrix B = number of points in B + * @param dim dimension of points = height of matrices A and B + * @param AB pointer on the matrix containing the wA*wB distances computed + */ +__global__ void cuComputeDistanceGlobal(float *A, int wA, float *B, int wB, + int dim, float *AB) { + + // Declaration of the shared memory arrays As and Bs used to store the + // sub-matrix of A and B + __shared__ float shared_A[BLOCK_DIM][BLOCK_DIM]; + __shared__ float shared_B[BLOCK_DIM][BLOCK_DIM]; + + // Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step) + __shared__ int begin_A; + __shared__ int begin_B; + __shared__ int step_A; + __shared__ int step_B; + __shared__ int end_A; + + // Thread index + int tx = threadIdx.x; + int ty = threadIdx.y; + + // Other variables + float tmp; + float ssd = 0; + + // Loop parameters + begin_A = BLOCK_DIM * blockIdx.y; + begin_B = BLOCK_DIM * blockIdx.x; + step_A = BLOCK_DIM * wA; + step_B = BLOCK_DIM * wB; + end_A = begin_A + (dim - 1) * wA; + + // Conditions + int cond0 = (begin_A + tx < wA); // used to write in shared memory + int cond1 = (begin_B + tx < wB); // used to write in shared memory & to + // computations and to write in output matrix + int cond2 = + (begin_A + ty < wA); // used to computations and to write in output matrix + + // Loop over all the sub-matrices of A and B required to compute the block + // sub-matrix + for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) { + // Load the matrices from device memory to shared memory; each thread loads + // one element of each matrix + if (a / wA + ty < dim) { + shared_A[ty][tx] = (cond0) ? A[a + wA * ty + tx] : 0; + shared_B[ty][tx] = (cond1) ? B[b + wB * ty + tx] : 0; + } else { + shared_A[ty][tx] = 0; + shared_B[ty][tx] = 0; + } + + // Synchronize to make sure the matrices are loaded + __syncthreads(); + + // Compute the difference between the two matrixes; each thread computes one + // element of the block sub-matrix + if (cond2 && cond1) { + for (int k = 0; k < BLOCK_DIM; ++k) { + tmp = shared_A[k][ty] - shared_B[k][tx]; + ssd += tmp * tmp; + } + } + + // Synchronize to make sure that the preceding computation is done before + // loading two new sub-matrices of A and B in the next iteration + __syncthreads(); + } + + // Write the block sub-matrix to device memory; each thread writes one element + if (cond2 && cond1) + AB[(begin_A + ty) * wB + begin_B + tx] = ssd; +} + +/** + * Gathers k-th smallest distances for each column of the distance matrix in + * the top. 
+ * + * @param dist distance matrix + * @param ind index matrix + * @param width width of the distance matrix and of the index matrix + * @param height height of the distance matrix and of the index matrix + * @param k number of neighbors to consider + */ +__global__ void cuInsertionSort(float *dist, int *ind, int width, int height, + int k) { + + // Variables + int l, i, j; + float *p_dist; + int *p_ind; + float curr_dist, max_dist; + int curr_row, max_row; + unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; + + if (xIndex < width) { + // Pointer shift, initialization, and max value + p_dist = dist + xIndex; + p_ind = ind + xIndex; + max_dist = p_dist[0]; + p_ind[0] = 0; + + // Part 1 : sort kth firt elementZ + for (l = 1; l < k; l++) { + curr_row = l * width; + curr_dist = p_dist[curr_row]; + if (curr_dist < max_dist) { + i = l - 1; + for (int a = 0; a < l - 1; a++) { + if (p_dist[a * width] > curr_dist) { + i = a; + break; + } + } + for (j = l; j > i; j--) { + p_dist[j * width] = p_dist[(j - 1) * width]; + p_ind[j * width] = p_ind[(j - 1) * width]; + } + p_dist[i * width] = curr_dist; + p_ind[i * width] = l; + } else { + p_ind[l * width] = l; + } + max_dist = p_dist[curr_row]; + } + + // Part 2 : insert element in the k-th first lines + max_row = (k - 1) * width; + for (l = k; l < height; l++) { + curr_dist = p_dist[l * width]; + if (curr_dist < max_dist) { + i = k - 1; + for (int a = 0; a < k - 1; a++) { + if (p_dist[a * width] > curr_dist) { + i = a; + break; + } + } + for (j = k - 1; j > i; j--) { + p_dist[j * width] = p_dist[(j - 1) * width]; + p_ind[j * width] = p_ind[(j - 1) * width]; + } + p_dist[i * width] = curr_dist; + p_ind[i * width] = l; + max_dist = p_dist[max_row]; + } + } + } +} + +/** + * Computes the square root of the first line (width-th first element) + * of the distance matrix. 
+ * + * @param dist distance matrix + * @param width width of the distance matrix + * @param k number of neighbors to consider + */ +__global__ void cuParallelSqrt(float *dist, int width, int k) { + unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; + unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y; + if (xIndex < width && yIndex < k) + dist[yIndex * width + xIndex] = sqrt(dist[yIndex * width + xIndex]); +} + +//-----------------------------------------------------------------------------------------------// +// K-th NEAREST NEIGHBORS // +//-----------------------------------------------------------------------------------------------// +/** + * K nearest neighbor algorithm + * - Initialize CUDA + * - Allocate device memory + * - Copy point sets (reference and query points) from host to device memory + * - Compute the distances + indexes to the k nearest neighbors for each query + * point + * - Copy distances from device to host memory + * + * @param ref_host reference points ; pointer to linear matrix + * @param ref_width number of reference points ; width of the matrix + * @param query_host query points ; pointer to linear matrix + * @param query_width number of query points ; width of the matrix + * @param height dimension of points ; height of the matrices + * @param k number of neighbor to consider + * @param dist_host distances to k nearest neighbors ; pointer to linear + * matrix + * @param dist_host indexes of the k nearest neighbors ; pointer to linear + * matrix + * + */ +void knn_parallel(float *ref_host, int ref_width, float *query_host, + int query_width, int height, int k, float *dist_host, + int *ind_host) { + + unsigned int size_of_float = sizeof(float); + unsigned int size_of_int = sizeof(int); + + // Variables + float *query_dev; + float *ref_dev; + float *dist_dev; + int *ind_dev; + + + // Allocation of global memory for query points and for distances, CUDA_CHECK + cudaMalloc((void **)&query_dev, query_width * height * size_of_float); + cudaMalloc((void **)&dist_dev, query_width * ref_width * size_of_float); + + // Allocation of global memory for indexes CUDA_CHECK + cudaMalloc((void **)&ind_dev, query_width * k * size_of_int); + + // Allocation of global memory CUDA_CHECK + cudaMalloc((void **)&ref_dev, ref_width * height * size_of_float); + + cudaMemcpy(ref_dev, ref_host, ref_width * height * size_of_float, + cudaMemcpyHostToDevice); + + // Copy of part of query actually being treated + cudaMemcpy(query_dev, query_host, query_width * height * size_of_float, + cudaMemcpyHostToDevice); + + // Grids ans threads + dim3 g_16x16((query_width + 15) / 16, (ref_width + 15) / 16, 1); + dim3 t_16x16(16, 16, 1); + // + dim3 g_256x1((query_width + 255) / 256, 1, 1); + dim3 t_256x1(256, 1, 1); + + dim3 g_k_16x16((query_width + 15) / 16, (k + 15) / 16, 1); + dim3 t_k_16x16(16, 16, 1); + + // Kernel 1: Compute all the distances + cuComputeDistanceGlobal<<>>(ref_dev, ref_width, query_dev, + query_width, height, dist_dev); + +#ifdef DEBUG + cudaMemcpy(dist_host, dist_dev, query_width * ref_width * size_of_float, + cudaMemcpyDeviceToHost); + + for (int i = 0; i < query_width * ref_width; i++) + printf("k1 dist: %d %f\n", i, dist_host[i]); +#endif + + // Kernel 2: Sort each column + cuInsertionSort<<>>(dist_dev, ind_dev, query_width, + ref_width, k); + +#ifdef DEBUG + cudaMemcpy(dist_host, dist_dev, query_width * ref_width * size_of_float, + cudaMemcpyDeviceToHost); + + for (int i = 0; i < query_width * ref_width; i++) + printf("k2 dist: %d %f\n", i, dist_host[i]); + 
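// Two notes on the launch configuration and kernel 3 below: distances are kept
// squared through the sort because sqrtf is monotonic, so only the k rows that
// are actually returned need the square root; and the "(x + 15) / 16"-style
// grid sizing above is the usual ceiling division. A tiny helper makes that
// intent explicit (divUp is an illustrative name, not part of the original
// file):
static inline int divUp(int a, int b) { return (a + b - 1) / b; }
// usage sketch: dim3 g_16x16(divUp(query_width, 16), divUp(ref_width, 16), 1);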
+ cudaMemcpy(ind_host, ind_dev, query_width * k * size_of_int, + cudaMemcpyDeviceToHost); + for (int i = 0; i < query_width * k; i++) + printf("k2 index: %d %d\n", i, ind_host[i]); +#endif + + // Kernel 3: Compute square root of k first elements + cuParallelSqrt<<>>(dist_dev, query_width, k); + cudaDeviceSynchronize(); + // Memory copy of output from device to host + cudaMemcpy(dist_host, dist_dev, query_width * k * size_of_float, + cudaMemcpyDeviceToHost); + + cudaMemcpy(ind_host, ind_dev, query_width * k * size_of_int, + cudaMemcpyDeviceToHost); + + // Free memory + cudaFree(ref_dev); + cudaFree(ind_dev); + cudaFree(query_dev); + cudaFree(dist_dev); +} + +float compute_distance(const float *ref, int ref_nb, const float *query, + int query_nb, int dim, int ref_index, int query_index) { + float sum = 0.f; + for (int d = 0; d < dim; ++d) { + const float diff = + ref[d * ref_nb + ref_index] - query[d * query_nb + query_index]; + sum += diff * diff; + } + return sqrtf(sum); +} + +void modified_insertion_sort(float *dist, int *index, int length, int k) { + + // Initialise the first index + index[0] = 0; + + // Go through all points + for (int i = 1; i < length; ++i) { + + // Store current distance and associated index + float curr_dist = dist[i]; + int curr_index = i; + + // Skip the current value if its index is >= k and if it's higher the k-th + // slready sorted mallest value + if (i >= k && curr_dist >= dist[k - 1]) { + continue; + } + + // Shift values (and indexes) higher that the current distance to the right + int j = std::min(i, k - 1); + while (j > 0 && dist[j - 1] > curr_dist) { + dist[j] = dist[j - 1]; + index[j] = index[j - 1]; + --j; + } + + // Write the current distance and index at their position + dist[j] = curr_dist; + index[j] = curr_index; + } +} + +bool knn_c(const float *ref, int ref_nb, const float *query, int query_nb, + int dim, int k, float *knn_dist, int *knn_index) { + // Allocate local array to store all the distances / indexes for a given query + // point + float *dist = (float *)malloc(ref_nb * sizeof(float)); + int *index = (int *)malloc(ref_nb * sizeof(int)); + + // Allocation checks + if (!dist || !index) { + printf("Memory allocation error\n"); + free(dist); + free(index); + return false; + } + + // Process one query point at the time + for (int i = 0; i < query_nb; ++i) { + + // Compute all distances / indexes + for (int j = 0; j < ref_nb; ++j) { + dist[j] = compute_distance(ref, ref_nb, query, query_nb, dim, j, i); + index[j] = j; + } + + // Sort distances / indexes + modified_insertion_sort(dist, index, ref_nb, k); + + // Copy k smallest distances and their associated index + for (int j = 0; j < k; ++j) { + knn_dist[j * query_nb + i] = dist[j]; + knn_index[j * query_nb + i] = index[j]; + } + } + + // Memory clean-up + free(dist); + free(index); + return true; +} + +/** + * Example of use of kNN search CUDA. 
+ */ +int main(void) { + // Variables and parameters + float *ref; // Pointer to reference point array + float *query; // Pointer to query point array + float *dist; // Pointer to distance array + int *ind; // Pointer to index array + int ref_nb = 4096; // Reference point number, max=65535 + int query_nb = 4096; // Query point number, max=65535 + int dim = 68; // Dimension of points + int k = 20; // Nearest neighbors to consider + int iterations = 100; + int c_iterations = 1; + int i; + const float precision = 0.001f; // distance error max + int nb_correct_precisions = 0; + int nb_correct_indexes = 0; + // Memory allocation + ref = (float *)malloc(ref_nb * dim * sizeof(float)); + query = (float *)malloc(query_nb * dim * sizeof(float)); + dist = (float *)malloc(query_nb * k * sizeof(float)); + ind = (int *)malloc(query_nb * k * sizeof(float)); + + // Init + srand(2); + for (i = 0; i < ref_nb * dim; i++) + ref[i] = (float)rand() / (float)RAND_MAX; + for (i = 0; i < query_nb * dim; i++) + query[i] = (float)rand() / (float)RAND_MAX; + + + // Display informations + printf("Number of reference points : %6d\n", ref_nb); + printf("Number of query points : %6d\n", query_nb); + printf("Dimension of points : %4d\n", dim); + printf("Number of neighbors to consider : %4d\n", k); + printf("Processing kNN search :\n"); + + float *knn_dist = (float *)malloc(query_nb * k * sizeof(float)); + int *knn_index = (int *)malloc(query_nb * k * sizeof(int)); + printf("Ground truth computation in progress...\n\n"); + if (!knn_c(ref, ref_nb, query, query_nb, dim, k, knn_dist, knn_index)) { + free(ref); + free(query); + free(knn_dist); + free(knn_index); + return EXIT_FAILURE; + } + + struct timeval tic; + struct timeval toc; + float elapsed_time; + + printf("On CPU: \n"); + gettimeofday(&tic, NULL); + for (i = 0; i < c_iterations; i++) { + knn_c(ref, ref_nb, query, query_nb, dim, k, dist, ind); + } + gettimeofday(&toc, NULL); + elapsed_time = toc.tv_sec - tic.tv_sec; + elapsed_time += (toc.tv_usec - tic.tv_usec) / 1000000.; + printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time, + c_iterations, elapsed_time / (c_iterations)); + + printf("on GPU: \n"); + gettimeofday(&tic, NULL); + for (i = 0; i < iterations; i++) { + knn_parallel(ref, ref_nb, query, query_nb, dim, k, dist, ind); + } + gettimeofday(&toc, NULL); + elapsed_time = toc.tv_sec - tic.tv_sec; + elapsed_time += (toc.tv_usec - tic.tv_usec) / 1000000.; + printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time, + iterations, elapsed_time / (iterations)); + + for (int i = 0; i < query_nb * k; ++i) { + if (fabs(dist[i] - knn_dist[i]) <= precision) { + nb_correct_precisions++; + } + if (ind[i] == knn_index[i]) { + nb_correct_indexes++; + } else { + printf("Mismatch @%d: %d %d\n", i, ind[i], knn_index[i]); + } + } + + float precision_accuracy = nb_correct_precisions / ((float)query_nb * k); + float index_accuracy = nb_correct_indexes / ((float)query_nb * k); + printf("Precision accuracy %f\nIndex accuracy %f\n", precision_accuracy, index_accuracy); + + free(ind); + free(dist); + free(query); + free(ref); +} diff --git a/cuda_code/main_377.cu b/cuda_code/main_377.cu new file mode 100644 index 0000000000000000000000000000000000000000..8c0f0f1f85644e399ad2053c9d76650487bb4824 --- /dev/null +++ b/cuda_code/main_377.cu @@ -0,0 +1,507 @@ +//====================================================================================================100 +// UPDATE 
+//====================================================================================================100 + +// 2006.03 Rob Janiczek +// --creation of prototype version +// 2006.03 Drew Gilliam +// --rewriting of prototype version into current version +// --got rid of multiple function calls, all code in a +// single function (for speed) +// --code cleanup & commenting +// --code optimization efforts +// 2006.04 Drew Gilliam +// --added diffusion coefficent saturation on [0,1] +// 2009.12 Lukasz G. Szafaryn +// -- reading from image, command line inputs +// 2010.01 Lukasz G. Szafaryn +// --comments + +//====================================================================================================100 +// DEFINE / INCLUDE +//====================================================================================================100 + +#include +#include +#include +#include + +#include "main.h" +#include "extract_kernel.cu" +#include "prepare_kernel.cu" +#include "reduce_kernel.cu" +#include "srad_kernel.cu" +#include "srad2_kernel.cu" +#include "compress_kernel.cu" +#include "graphics.c" +#include "resize.c" +#include "timer.c" + + +//====================================================================================================100 +// MAIN FUNCTION +//====================================================================================================100 + +int main(int argc, char *argv []){ + + //================================================================================80 + // VARIABLES + //================================================================================80 + + // time + long long time0; + long long time1; + long long time2; + long long time3; + long long time4; + long long time5; + long long time6; + long long time7; + long long time8; + long long time9; + long long time10; + long long time11; + long long time12; + + time0 = get_time(); + + // inputs image, input paramenters + fp* image_ori; // originalinput image + int image_ori_rows; + int image_ori_cols; + long image_ori_elem; + + // inputs image, input paramenters + fp* image; // input image + int Nr,Nc; // IMAGE nbr of rows/cols/elements + long Ne; + + // algorithm parameters + int niter; // nbr of iterations + fp lambda; // update step size + + // size of IMAGE + int r1,r2,c1,c2; // row/col coordinates of uniform ROI + long NeROI; // ROI nbr of elements + + // surrounding pixel indicies + int *iN,*iS,*jE,*jW; + + // counters + int iter; // primary loop + long i,j; // image row/col + + // memory sizes + int mem_size_i; + int mem_size_j; + int mem_size_single; + + //================================================================================80 + // GPU VARIABLES + //================================================================================80 + + // CUDA kernel execution parameters + dim3 threads; + int blocks_x; + dim3 blocks; + dim3 blocks2; + + // memory sizes + int mem_size; // matrix memory size + + // HOST + int no; + int mul; + fp total; + fp total2; + fp meanROI; + fp meanROI2; + fp varROI; + fp q0sqr; + + // DEVICE + fp* d_sums; // partial sum + fp* d_sums2; + int* d_iN; + int* d_iS; + int* d_jE; + int* d_jW; + fp* d_dN; + fp* d_dS; + fp* d_dW; + fp* d_dE; + fp* d_I; // input IMAGE on DEVICE + fp* d_c; + + time1 = get_time(); + + //================================================================================80 + // GET INPUT PARAMETERS + //================================================================================80 + + if(argc != 5){ + printf("ERROR: wrong number of arguments\n"); 
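+		// Illustrative addition (not in the original source): make the error actionable
+		// by printing the expected usage; the argument meanings follow from the
+		// assignments in the else branch below.
+		printf("USAGE: %s <niter> <lambda> <number of rows> <number of columns>\n", argv[0]);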
+ return 0; + } + else{ + niter = atoi(argv[1]); + lambda = atof(argv[2]); + Nr = atoi(argv[3]); // it is 502 in the original image + Nc = atoi(argv[4]); // it is 458 in the original image + } + + time2 = get_time(); + + //================================================================================80 + // READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN) + //================================================================================80 + + // read image + image_ori_rows = 502; + image_ori_cols = 458; + image_ori_elem = image_ori_rows * image_ori_cols; + image_ori = (fp*)malloc(sizeof(fp) * image_ori_elem); + + const char* input_image_path = "../data/srad/image.pgm"; + if ( !read_graphics(input_image_path, image_ori, image_ori_rows, image_ori_cols, 1) ) { + printf("ERROR: failed to read input image at %s\n", input_image_path); + if (image_ori != NULL) free(image_ori); + return -1; + } + + time3 = get_time(); + + //================================================================================80 + // RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig) + //================================================================================80 + + Ne = Nr*Nc; + + image = (fp*)malloc(sizeof(fp) * Ne); + + resize( image_ori, + image_ori_rows, + image_ori_cols, + image, + Nr, + Nc, + 1); + + time4 = get_time(); + + //================================================================================80 + // SETUP + //================================================================================80 + + r1 = 0; // top row index of ROI + r2 = Nr - 1; // bottom row index of ROI + c1 = 0; // left column index of ROI + c2 = Nc - 1; // right column index of ROI + + // ROI image size + NeROI = (r2-r1+1)*(c2-c1+1); // number of elements in ROI, ROI size + + // allocate variables for surrounding pixels + mem_size_i = sizeof(int) * Nr; // + iN = (int *)malloc(mem_size_i) ; // north surrounding element + iS = (int *)malloc(mem_size_i) ; // south surrounding element + mem_size_j = sizeof(int) * Nc; // + jW = (int *)malloc(mem_size_j) ; // west surrounding element + jE = (int *)malloc(mem_size_j) ; // east surrounding element + + // N/S/W/E indices of surrounding pixels (every element of IMAGE) + for (i=0; i>>(Ne, d_I); + + //checkCUDAError("extract"); + + time7 = get_time(); + + //================================================================================80 + // COMPUTATION + //================================================================================80 + + // printf("iterations: "); + + // execute main loop + for (iter=0; iter>>( Ne, + d_I, + d_sums, + d_sums2); + + //checkCUDAError("prepare"); + + // performs subsequent reductions of sums + blocks2.x = blocks.x; // original number of blocks + blocks2.y = blocks.y; + no = Ne; // original number of sum elements + mul = 1; // original multiplier + + while(blocks2.x != 0){ + + //checkCUDAError("before reduce"); + + // run kernel + reduce<<>>( Ne, + no, + mul, + d_sums, + d_sums2); + + //checkCUDAError("reduce"); + + // update execution parameters + no = blocks2.x; // get current number of elements + if(blocks2.x == 1){ + blocks2.x = 0; + } + else{ + mul = mul * NUMBER_THREADS; // update the increment + blocks_x = blocks2.x/threads.x; // number of blocks + if (blocks2.x % threads.x != 0){ // compensate for division remainder above by adding one grid + blocks_x = blocks_x + 1; + } + blocks2.x = blocks_x; + blocks2.y = 1; + } + + //checkCUDAError("after reduce"); + + } + + //checkCUDAError("before copy sum"); + + // copy total sums to device + 
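+
+		// Worked example of the reduction cascade above (illustrative numbers only,
+		// assuming NUMBER_THREADS == 512, an image resized to 512x512 so Ne == 262144,
+		// and the usual behaviour of the reduce kernel of writing each block's partial
+		// sum back into d_sums/d_sums2 at a stride of mul*NUMBER_THREADS): the first
+		// pass runs 512 blocks and leaves 512 partial sums (mul == 1); the second pass
+		// runs with no == 512, mul == 512 and a single block, collapsing them into
+		// d_sums[0]/d_sums2[0]; blocks2.x then equals 1, so it is set to 0 and the loop
+		// exits. The two copies below read those single totals back, and the statistics
+		// that follow compute meanROI = total/NeROI, varROI = total2/NeROI - meanROI^2
+		// and q0sqr = varROI/meanROI^2, the squared coefficient of variation of the ROI
+		// that SRAD uses as its speckle-scale estimate.
+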
mem_size_single = sizeof(fp) * 1; + cudaMemcpy(&total, d_sums, mem_size_single, cudaMemcpyDeviceToHost); + cudaMemcpy(&total2, d_sums2, mem_size_single, cudaMemcpyDeviceToHost); + + //checkCUDAError("copy sum"); + + // calculate statistics + meanROI = total / fp(NeROI); // gets mean (average) value of element in ROI + meanROI2 = meanROI * meanROI; // + varROI = (total2 / fp(NeROI)) - meanROI2; // gets variance of ROI + q0sqr = varROI / meanROI2; // gets standard deviation of ROI + + // execute srad kernel + srad<<>>( lambda, // SRAD coefficient + Nr, // # of rows in input image + Nc, // # of columns in input image + Ne, // # of elements in input image + d_iN, // indices of North surrounding pixels + d_iS, // indices of South surrounding pixels + d_jE, // indices of East surrounding pixels + d_jW, // indices of West surrounding pixels + d_dN, // North derivative + d_dS, // South derivative + d_dW, // West derivative + d_dE, // East derivative + q0sqr, // standard deviation of ROI + d_c, // diffusion coefficient + d_I); // output image + + //checkCUDAError("srad"); + + // execute srad2 kernel + srad2<<>>( lambda, // SRAD coefficient + Nr, // # of rows in input image + Nc, // # of columns in input image + Ne, // # of elements in input image + d_iN, // indices of North surrounding pixels + d_iS, // indices of South surrounding pixels + d_jE, // indices of East surrounding pixels + d_jW, // indices of West surrounding pixels + d_dN, // North derivative + d_dS, // South derivative + d_dW, // West derivative + d_dE, // East derivative + d_c, // diffusion coefficient + d_I); // output image + + //checkCUDAError("srad2"); + + } + + // printf("\n"); + + time8 = get_time(); + + //================================================================================80 + // SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS + //================================================================================80 + + compress<<>>(Ne, d_I); + + //checkCUDAError("compress"); + + time9 = get_time(); + + //================================================================================80 + // COPY RESULTS BACK TO CPU + //================================================================================80 + + cudaMemcpy(image, d_I, mem_size, cudaMemcpyDeviceToHost); + + //checkCUDAError("copy back"); + + time10 = get_time(); + + //================================================================================80 + // WRITE IMAGE AFTER PROCESSING + //================================================================================80 + + write_graphics( + "image_out.pgm", + image, + Nr, + Nc, + 1, + 255); + + time11 = get_time(); + + //================================================================================80 + // DEALLOCATE + //================================================================================80 + + free(image_ori); + free(image); + free(iN); + free(iS); + free(jW); + free(jE); + + cudaFree(d_I); + cudaFree(d_c); + cudaFree(d_iN); + cudaFree(d_iS); + cudaFree(d_jE); + cudaFree(d_jW); + cudaFree(d_dN); + cudaFree(d_dS); + cudaFree(d_dE); + cudaFree(d_dW); + cudaFree(d_sums); + cudaFree(d_sums2); + + time12 = get_time(); + + //================================================================================80 + // DISPLAY TIMING + //================================================================================80 + + printf("Time spent in different stages of the application:\n"); + printf("%15.12f s, %15.12f %% : SETUP VARIABLES\n", + (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) 
(time12-time0) * 100); + printf("%15.12f s, %15.12f %% : READ COMMAND LINE PARAMETERS\n", + (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time12-time0) * 100); + printf("%15.12f s, %15.12f %% : READ IMAGE FROM FILE\n", + (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time12-time0) * 100); + printf("%15.12f s, %15.12f %% : RESIZE IMAGE\n", + (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time12-time0) * 100); + printf("%15.12f s, %15.12f %% : GPU DRIVER INIT, CPU/GPU SETUP, MEMORY ALLOCATION\n", + (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time12-time0) * 100); + printf("%15.12f s, %15.12f %% : COPY DATA TO CPU->GPU\n", + (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time12-time0) * 100); + printf("%15.12f s, %15.12f %% : EXTRACT IMAGE\n", + (float) (time7-time6) / 1000000, (float) (time7-time6) / (float) (time12-time0) * 100); + printf("%15.12f s, %15.12f %% : COMPUTE\n", + (float) (time8-time7) / 1000000, (float) (time8-time7) / (float) (time12-time0) * 100); + printf("%15.12f s, %15.12f %% : COMPRESS IMAGE\n", + (float) (time9-time8) / 1000000, (float) (time9-time8) / (float) (time12-time0) * 100); + printf("%15.12f s, %15.12f %% : COPY DATA TO GPU->CPU\n", + (float) (time10-time9) / 1000000, (float) (time10-time9) / (float) (time12-time0) * 100); + printf("%15.12f s, %15.12f %% : SAVE IMAGE INTO FILE\n", + (float) (time11-time10) / 1000000, (float) (time11-time10) / (float) (time12-time0) * 100); + printf("%15.12f s, %15.12f %% : FREE MEMORY\n", + (float) (time12-time11) / 1000000, (float) (time12-time11) / (float) (time12-time0) * 100); + printf("Total time:\n"); + printf("%.12f s\n", (float) (time12-time0) / 1000000); + +} + +//====================================================================================================100 +// END OF FILE +//====================================================================================================100 diff --git a/cuda_code/main_416.cu b/cuda_code/main_416.cu new file mode 100644 index 0000000000000000000000000000000000000000..4fbe3eb24573cd97e54ad7b3930833735327e428 --- /dev/null +++ b/cuda_code/main_416.cu @@ -0,0 +1,177 @@ +/*************************************************************************** + *cr + *cr (C) Copyright 2010 The Board of Trustees of the + *cr University of Illinois + *cr All Rights Reserved + *cr + ***************************************************************************/ + +/*############################################################################*/ + +#include "main.h" +#include "lbm.h" +#include +#include + +#include +#include "lbm.cu" + +/*############################################################################*/ +static LBM_Grid CUDA_srcGrid, CUDA_dstGrid; + + +/*############################################################################*/ + +struct pb_TimerSet timers; +int main( int nArgs, char* arg[] ) { + MAIN_Param param; + int t; + + pb_InitializeTimerSet(&timers); + struct pb_Parameters* params; + params = pb_ReadParameters(&nArgs, arg); + + + static LBM_GridPtr TEMP_srcGrid; + //Setup TEMP datastructures + LBM_allocateGrid( (float**) &TEMP_srcGrid ); + MAIN_parseCommandLine( nArgs, arg, ¶m, params ); + MAIN_printInfo( ¶m ); + + MAIN_initialize( ¶m ); + + for( t = 1; t <= param.nTimeSteps; t++ ) { + pb_SwitchToTimer(&timers, pb_TimerID_GPU); + CUDA_LBM_performStreamCollide( CUDA_srcGrid, CUDA_dstGrid ); + pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); + LBM_swapGrids( &CUDA_srcGrid, 
&CUDA_dstGrid ); + + if( (t & 63) == 0 ) { + printf( "timestep: %i\n", t ); +#if 0 + CUDA_LBM_getDeviceGrid((float**)&CUDA_srcGrid, (float**)&TEMP_srcGrid); + LBM_showGridStatistics( *TEMP_srcGrid ); +#endif + } + } + + MAIN_finalize( ¶m ); + + LBM_freeGrid( (float**) &TEMP_srcGrid ); + + pb_SwitchToTimer(&timers, pb_TimerID_NONE); + pb_PrintTimerSet(&timers); + pb_FreeParameters(params); + return 0; +} + +/*############################################################################*/ + +void MAIN_parseCommandLine( int nArgs, char* arg[], MAIN_Param* param, struct pb_Parameters * params ) { + struct stat fileStat; + + if( nArgs < 2 ) { + printf( "syntax: lbm
+#include
+#include "bitmask/legacy/bit_mask.cuh" +#include "string/nvcategory_util.hpp" +#include "rmm/thrust_rmm_allocator.h" +#include "utilities/cuda_utils.hpp" + +namespace { + +/** + * @brief Source table identifier to copy data from. + */ +enum class side : bool { LEFT, RIGHT }; + +using bit_mask::bit_mask_t; +using index_type = thrust::tuple; // `thrust::get<0>` indicates left/right side, `thrust::get<1>` indicates the row index + +/** + * @brief Merges the bits of two validity bitmasks. + * + * Merges the bits from two source bitmask into the destination bitmask + * according to `merged_indices` map such that bit `i` in `destination_mask` + * will be equal to bit `thrust::get<1>(merged_indices[i])` from `source_left_mask` + * if `thrust::get<0>(merged_indices[i])` equals `side::LEFT`; otherwise, + * from `source_right_mask`. + * + * `source_left_mask`, `source_right_mask` and `destination_mask` must not + * overlap. + * + * @tparam left_have_valids Indicates whether source_left_mask is null + * @tparam right_have_valids Indicates whether source_right_mask is null + * @param[in] source_left_mask The left mask whose bits will be merged + * @param[in] source_right_mask The right mask whose bits will be merged + * @param[out] destination_mask The output mask after merging the left and right masks + * @param[in] num_destination_rows The number of bits in the destination_mask + * @param[in] merged_indices The map that indicates from which input mask and which bit + * will be copied to the output. Length must be equal to `num_destination_rows` + */ +template +__global__ void materialize_merged_bitmask_kernel( + bit_mask_t const* const __restrict__ source_left_mask, + bit_mask_t const* const __restrict__ source_right_mask, + bit_mask_t* const destination_mask, + gdf_size_type const num_destination_rows, + index_type const* const __restrict__ merged_indices) { + + gdf_index_type destination_row = threadIdx.x + blockIdx.x * blockDim.x; + + auto active_threads = + __ballot_sync(0xffffffff, destination_row < num_destination_rows); + + while (destination_row < num_destination_rows) { + index_type const& merged_idx = merged_indices[destination_row]; + side const src_side = thrust::get<0>(merged_idx); + gdf_size_type const src_row = thrust::get<1>(merged_idx); + bool const from_left{src_side == side::LEFT}; + bool source_bit_is_valid{true}; + if (left_have_valids && from_left) { + source_bit_is_valid = bit_mask::is_valid(source_left_mask, src_row); + } + else if (right_have_valids && !from_left) { + source_bit_is_valid = bit_mask::is_valid(source_right_mask, src_row); + } + + // Use ballot to find all valid bits in this warp and create the output + // bitmask element + bit_mask_t const result_mask{ + __ballot_sync(active_threads, source_bit_is_valid)}; + + gdf_index_type const output_element = cudf::util::detail::bit_container_index(destination_row); + + // Only one thread writes output + if (0 == threadIdx.x % warpSize) { + destination_mask[output_element] = result_mask; + } + + destination_row += blockDim.x * gridDim.x; + active_threads = + __ballot_sync(active_threads, destination_row < num_destination_rows); + } +} + +void materialize_bitmask(gdf_column const* left_col, + gdf_column const* right_col, + gdf_column* out_col, + index_type const* merged_indices, + cudaStream_t stream) { + constexpr gdf_size_type BLOCK_SIZE{256}; + cudf::util::cuda::grid_config_1d grid_config {out_col->size, BLOCK_SIZE }; + + bit_mask_t* left_valid = reinterpret_cast(left_col->valid); + bit_mask_t* right_valid = 
reinterpret_cast(right_col->valid); + bit_mask_t* out_valid = reinterpret_cast(out_col->valid); + if (left_valid) { + if (right_valid) { + materialize_merged_bitmask_kernel + <<>> + (left_valid, right_valid, out_valid, out_col->size, merged_indices); + } else { + materialize_merged_bitmask_kernel + <<>> + (left_valid, right_valid, out_valid, out_col->size, merged_indices); + } + } else { + if (right_valid) { + materialize_merged_bitmask_kernel + <<>> + (left_valid, right_valid, out_valid, out_col->size, merged_indices); + } else { + materialize_merged_bitmask_kernel + <<>> + (left_valid, right_valid, out_valid, out_col->size, merged_indices); + } + } + + CHECK_STREAM(stream); +} + +rmm::device_vector +generate_merged_indices(device_table const& left_table, + device_table const& right_table, + rmm::device_vector const& asc_desc, + bool nulls_are_smallest, + cudaStream_t stream) { + + const gdf_size_type left_size = left_table.num_rows(); + const gdf_size_type right_size = right_table.num_rows(); + const gdf_size_type total_size = left_size + right_size; + + thrust::constant_iterator left_side(side::LEFT); + thrust::constant_iterator right_side(side::RIGHT); + + auto left_indices = thrust::make_counting_iterator(static_cast(0)); + auto right_indices = thrust::make_counting_iterator(static_cast(0)); + + auto left_begin_zip_iterator = thrust::make_zip_iterator(thrust::make_tuple(left_side, left_indices)); + auto right_begin_zip_iterator = thrust::make_zip_iterator(thrust::make_tuple(right_side, right_indices)); + + auto left_end_zip_iterator = thrust::make_zip_iterator(thrust::make_tuple(left_side + left_size, left_indices + left_size)); + auto right_end_zip_iterator = thrust::make_zip_iterator(thrust::make_tuple(right_side + right_size, right_indices + right_size)); + + rmm::device_vector merged_indices(total_size); + bool nullable = left_table.has_nulls() || right_table.has_nulls(); + if (nullable){ + auto ineq_op = row_inequality_comparator(right_table, left_table, nulls_are_smallest, asc_desc.data().get()); + thrust::merge(rmm::exec_policy(stream)->on(stream), + left_begin_zip_iterator, + left_end_zip_iterator, + right_begin_zip_iterator, + right_end_zip_iterator, + merged_indices.begin(), + [=] __device__ (thrust::tuple const & right_tuple, + thrust::tuple const & left_tuple) { + return ineq_op(thrust::get<1>(right_tuple), thrust::get<1>(left_tuple)); + }); + } else { + auto ineq_op = row_inequality_comparator(right_table, left_table, nulls_are_smallest, asc_desc.data().get()); + thrust::merge(rmm::exec_policy(stream)->on(stream), + left_begin_zip_iterator, + left_end_zip_iterator, + right_begin_zip_iterator, + right_end_zip_iterator, + merged_indices.begin(), + [=] __device__ (thrust::tuple const & right_tuple, + thrust::tuple const & left_tuple) { + return ineq_op(thrust::get<1>(right_tuple), thrust::get<1>(left_tuple)); + }); + } + + CHECK_STREAM(stream); + + return merged_indices; +} + +} // namespace + +namespace cudf { +namespace detail { + +table merge(table const& left_table, + table const& right_table, + std::vector const& key_cols, + std::vector const& asc_desc, + bool nulls_are_smallest, + cudaStream_t stream = 0) { + CUDF_EXPECTS(left_table.num_columns() == right_table.num_columns(), "Mismatched number of columns"); + if (left_table.num_columns() == 0) { + return cudf::empty_like(left_table); + } + + std::vector left_table_dtypes = cudf::column_dtypes(left_table); + std::vector right_table_dtypes = cudf::column_dtypes(right_table); + 
CUDF_EXPECTS(std::equal(left_table_dtypes.cbegin(), left_table_dtypes.cend(), right_table_dtypes.cbegin(), right_table_dtypes.cend()), "Mismatched column dtypes"); + CUDF_EXPECTS(key_cols.size() > 0, "Empty key_cols"); + CUDF_EXPECTS(key_cols.size() <= static_cast(left_table.num_columns()), "Too many values in key_cols"); + CUDF_EXPECTS(asc_desc.size() > 0, "Empty asc_desc"); + CUDF_EXPECTS(asc_desc.size() <= static_cast(left_table.num_columns()), "Too many values in asc_desc"); + CUDF_EXPECTS(key_cols.size() == asc_desc.size(), "Mismatched size between key_cols and asc_desc"); + + + auto gdf_col_deleter = [](gdf_column *col) { + gdf_column_free(col); + delete col; + }; + using gdf_col_ptr = typename std::unique_ptr; + std::vector temp_columns_to_free; + std::vector left_cols_sync(const_cast(left_table.begin()), const_cast(left_table.end())); + std::vector right_cols_sync(const_cast(right_table.begin()), const_cast(right_table.end())); + for (gdf_size_type i = 0; i < left_table.num_columns(); i++) { + gdf_column * left_col = const_cast(left_table.get_column(i)); + gdf_column * right_col = const_cast(right_table.get_column(i)); + + if (left_col->dtype != GDF_STRING_CATEGORY){ + continue; + } + + // If the inputs are nvcategory we need to make the dictionaries comparable + + temp_columns_to_free.push_back(gdf_col_ptr(new gdf_column{}, gdf_col_deleter)); + gdf_column * new_left_column_ptr = temp_columns_to_free.back().get(); + temp_columns_to_free.push_back(gdf_col_ptr(new gdf_column{}, gdf_col_deleter)); + gdf_column * new_right_column_ptr = temp_columns_to_free.back().get(); + + *new_left_column_ptr = allocate_like(*left_col, true, stream); + if (new_left_column_ptr->valid) { + CUDA_TRY( cudaMemcpyAsync(new_left_column_ptr->valid, left_col->valid, sizeof(gdf_valid_type)*gdf_num_bitmask_elements(left_col->size), cudaMemcpyDefault, stream) ); + new_left_column_ptr->null_count = left_col->null_count; + } + + *new_right_column_ptr = allocate_like(*right_col, true, stream); + if (new_right_column_ptr->valid) { + CUDA_TRY( cudaMemcpyAsync(new_right_column_ptr->valid, right_col->valid, sizeof(gdf_valid_type)*gdf_num_bitmask_elements(right_col->size), cudaMemcpyDefault, stream) ); + new_right_column_ptr->null_count = right_col->null_count; + } + + gdf_column * tmp_arr_input[2] = {left_col, right_col}; + gdf_column * tmp_arr_output[2] = {new_left_column_ptr, new_right_column_ptr}; + CUDF_TRY( sync_column_categories(tmp_arr_input, tmp_arr_output, 2) ); + + left_cols_sync[i] = new_left_column_ptr; + right_cols_sync[i] = new_right_column_ptr; + } + + table left_sync_table(left_cols_sync); + table right_sync_table(right_cols_sync); + + std::vector left_key_cols_vect(key_cols.size()); + std::transform(key_cols.cbegin(), key_cols.cend(), left_key_cols_vect.begin(), + [&] (gdf_index_type const index) { return left_sync_table.get_column(index); }); + + std::vector right_key_cols_vect(key_cols.size()); + std::transform(key_cols.cbegin(), key_cols.cend(), right_key_cols_vect.begin(), + [&] (gdf_index_type const index) { return right_sync_table.get_column(index); }); + + auto left_key_table = device_table::create(left_key_cols_vect.size(), left_key_cols_vect.data()); + auto right_key_table = device_table::create(right_key_cols_vect.size(), right_key_cols_vect.data()); + rmm::device_vector asc_desc_d(asc_desc); + + rmm::device_vector merged_indices = generate_merged_indices(*left_key_table, *right_key_table, asc_desc_d, nulls_are_smallest, stream); + + // Allocate output table + bool nullable = 
has_nulls(left_sync_table) || has_nulls(right_sync_table); + table destination_table(left_sync_table.num_rows() + right_sync_table.num_rows(), column_dtypes(left_sync_table), nullable, false, stream); + for (gdf_size_type i = 0; i < destination_table.num_columns(); i++) { + gdf_column const* left_col = left_sync_table.get_column(i); + gdf_column * out_col = destination_table.get_column(i); + + if (left_col->dtype != GDF_STRING_CATEGORY){ + continue; + } + + NVCategory * category = static_cast(left_col->dtype_info.category); + out_col->dtype_info.category = category->copy(); + } + + // Materialize + auto left_device_table_ptr = device_table::create(left_sync_table, stream); + auto right_device_table_ptr = device_table::create(right_sync_table, stream); + auto output_device_table_ptr = device_table::create(destination_table, stream); + auto& left_device_table = *left_device_table_ptr; + auto& right_device_table = *right_device_table_ptr; + auto& output_device_table = *output_device_table_ptr; + + auto index_start_it = thrust::make_zip_iterator(thrust::make_tuple( + thrust::make_counting_iterator(static_cast(0)), + merged_indices.begin())); + auto index_end_it = thrust::make_zip_iterator(thrust::make_tuple( + thrust::make_counting_iterator(static_cast(merged_indices.size())), + merged_indices.end())); + + thrust::for_each(rmm::exec_policy(stream)->on(stream), + index_start_it, + index_end_it, + [=] __device__ (auto const & idx_tuple){ + gdf_size_type dest_row = thrust::get<0>(idx_tuple); + index_type merged_idx = thrust::get<1>(idx_tuple); + side src_side = thrust::get<0>(merged_idx); + gdf_size_type src_row = thrust::get<1>(merged_idx); + device_table const & src_device_table = src_side == side::LEFT ? left_device_table : right_device_table; + copy_row(output_device_table, dest_row, src_device_table, src_row); + }); + + CHECK_STREAM(0); + + if (nullable) { + for (gdf_size_type i = 0; i < destination_table.num_columns(); i++) { + gdf_column const* left_col = left_sync_table.get_column(i); + gdf_column const* right_col = right_sync_table.get_column(i); + gdf_column* out_col = destination_table.get_column(i); + + materialize_bitmask(left_col, right_col, out_col, merged_indices.data().get(), stream); + + out_col->null_count = left_col->null_count + right_col->null_count; + } + } + + return destination_table; +} + +} // namespace detail + +table merge(table const& left_table, + table const& right_table, + std::vector const& key_cols, + std::vector const& asc_desc, + bool nulls_are_smallest) { + return detail::merge(left_table, right_table, key_cols, asc_desc, nulls_are_smallest); +} + +} // namespace cudf diff --git a/cuda_code/mg_gather_utils_2.cu b/cuda_code/mg_gather_utils_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..b71460e8f13ac3bab3c6ac7d4ebd24de62fb4006 --- /dev/null +++ b/cuda_code/mg_gather_utils_2.cu @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2022, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nbr_sampling_utils.cuh" +#include + +struct Prims_Usecase { + bool check_correctness{true}; +}; + +template +class Tests_MG_GatherEdges + : public ::testing::TestWithParam> { + public: + Tests_MG_GatherEdges() {} + static void SetupTestCase() {} + static void TearDownTestCase() {} + + virtual void SetUp() {} + virtual void TearDown() {} + + template + void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase) + { + using namespace cugraph::test; + // 1. initialize handle + + raft::handle_t handle{}; + HighResClock hr_clock{}; + + raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD); + auto& comm = handle.get_comms(); + auto const comm_size = comm.get_size(); + auto const comm_rank = comm.get_rank(); + + auto row_comm_size = static_cast(sqrt(static_cast(comm_size))); + while (comm_size % row_comm_size != 0) { + --row_comm_size; + } + cugraph::partition_2d::subcomm_factory_t + subcomm_factory(handle, row_comm_size); + + // 2. create MG graph + + if (cugraph::test::g_perf) { + RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement + handle.get_comms().barrier(); + hr_clock.start(); + } + + constexpr bool sort_adjacency_list = true; + + auto [mg_graph, mg_renumber_map_labels] = + cugraph::test::construct_graph( + handle, input_usecase, true, true, false, sort_adjacency_list); + + if (cugraph::test::g_perf) { + RAFT_CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement + handle.get_comms().barrier(); + double elapsed_time{0.0}; + hr_clock.stop(&elapsed_time); + std::cout << "MG construct_graph took " << elapsed_time * 1e-6 << " s.\n"; + } + + auto mg_graph_view = mg_graph.view(); + constexpr edge_t indices_per_source = 2; + constexpr vertex_t repetitions_per_vertex = 5; + constexpr vertex_t source_sample_count = 3; + + // 3. 
Gather mnmg call + // Generate random vertex ids in the range of current gpu + + auto [global_degree_offsets, global_out_degrees] = + cugraph::detail::get_global_degree_information(handle, mg_graph_view); + auto global_adjacency_list_offsets = cugraph::detail::get_global_adjacency_offset( + handle, mg_graph_view, global_degree_offsets, global_out_degrees); + + // Generate random sources to gather on + auto random_sources = random_vertex_ids(handle, + mg_graph_view.local_vertex_partition_range_first(), + mg_graph_view.local_vertex_partition_range_last(), + source_sample_count, + repetitions_per_vertex); + rmm::device_uvector random_source_gpu_ids(random_sources.size(), handle.get_stream()); + thrust::fill(handle.get_thrust_policy(), + random_source_gpu_ids.begin(), + random_source_gpu_ids.end(), + comm_rank); + + auto [active_sources, active_source_gpu_ids] = + cugraph::detail::gather_active_majors(handle, + mg_graph_view, + random_sources.cbegin(), + random_sources.cend(), + random_source_gpu_ids.cbegin()); + + // get source global out degrees to generate indices + auto active_source_degrees = cugraph::detail::get_active_major_global_degrees( + handle, mg_graph_view, active_sources, global_out_degrees); + + auto random_destination_indices = + generate_random_destination_indices(handle, + active_source_degrees, + mg_graph_view.number_of_vertices(), + mg_graph_view.number_of_edges(), + indices_per_source); + rmm::device_uvector input_destination_indices(random_destination_indices.size(), + handle.get_stream()); + raft::update_device(input_destination_indices.data(), + random_destination_indices.data(), + random_destination_indices.size(), + handle.get_stream()); + + auto [src, dst, gpu_ids, dst_map] = + cugraph::detail::gather_local_edges(handle, + mg_graph_view, + active_sources, + active_source_gpu_ids, + std::move(input_destination_indices), + indices_per_source, + global_degree_offsets, + global_adjacency_list_offsets); + + if (prims_usecase.check_correctness) { + // Gather outputs + auto mg_out_srcs = cugraph::test::device_gatherv(handle, src.data(), src.size()); + auto mg_out_dsts = cugraph::test::device_gatherv(handle, dst.data(), dst.size()); + + // Gather inputs + auto& col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name()); + auto const col_rank = col_comm.get_rank(); + auto sg_random_srcs = cugraph::test::device_gatherv( + handle, active_sources.data(), col_rank == 0 ? active_sources.size() : 0); + auto sg_random_dst_indices = + cugraph::test::device_gatherv(handle, + random_destination_indices.data(), + col_rank == 0 ? 
random_destination_indices.size() : 0); + + // Gather input graph edgelist + rmm::device_uvector sg_src(0, handle.get_stream()); + rmm::device_uvector sg_dst(0, handle.get_stream()); + std::tie(sg_src, sg_dst, std::ignore) = + mg_graph_view.decompress_to_edgelist(handle, std::nullopt); + + auto aggregated_sg_src = cugraph::test::device_gatherv(handle, sg_src.begin(), sg_src.size()); + auto aggregated_sg_dst = cugraph::test::device_gatherv(handle, sg_dst.begin(), sg_dst.size()); + + sort_coo(handle, mg_out_srcs, mg_out_dsts); + + if (handle.get_comms().get_rank() == int{0}) { + cugraph::graph_t sg_graph(handle); + auto aggregated_edge_iter = thrust::make_zip_iterator( + thrust::make_tuple(aggregated_sg_src.begin(), aggregated_sg_dst.begin())); + thrust::sort(handle.get_thrust_policy(), + aggregated_edge_iter, + aggregated_edge_iter + aggregated_sg_src.size()); + auto sg_graph_properties = + cugraph::graph_properties_t{mg_graph_view.is_symmetric(), mg_graph_view.is_multigraph()}; + + std::tie(sg_graph, std::ignore) = + cugraph::create_graph_from_edgelist( + handle, + std::nullopt, + std::move(aggregated_sg_src), + std::move(aggregated_sg_dst), + std::nullopt, + sg_graph_properties, + false); + auto sg_graph_view = sg_graph.view(); + // Call single gpu gather + auto [sg_out_srcs, sg_out_dsts] = sg_gather_edges(handle, + sg_graph_view, + sg_random_srcs.begin(), + sg_random_srcs.end(), + sg_random_dst_indices.begin(), + sg_graph_view.number_of_vertices(), + indices_per_source); + sort_coo(handle, sg_out_srcs, sg_out_dsts); + + auto passed = thrust::equal( + handle.get_thrust_policy(), sg_out_srcs.begin(), sg_out_srcs.end(), mg_out_srcs.begin()); + passed &= thrust::equal( + handle.get_thrust_policy(), sg_out_dsts.begin(), sg_out_dsts.end(), mg_out_dsts.begin()); + ASSERT_TRUE(passed); + } + } + } +}; + +using Tests_MG_GatherEdges_File = Tests_MG_GatherEdges; + +using Tests_MG_GatherEdges_Rmat = Tests_MG_GatherEdges; + +TEST_P(Tests_MG_GatherEdges_File, CheckInt32Int32Float) +{ + auto param = GetParam(); + run_current_test(std::get<0>(param), std::get<1>(param)); +} + +TEST_P(Tests_MG_GatherEdges_File, CheckInt32Int64Float) +{ + auto param = GetParam(); + run_current_test(std::get<0>(param), std::get<1>(param)); +} + +TEST_P(Tests_MG_GatherEdges_File, CheckInt64Int64Float) +{ + auto param = GetParam(); + run_current_test(std::get<0>(param), std::get<1>(param)); +} + +TEST_P(Tests_MG_GatherEdges_Rmat, CheckInt32Int32Float) +{ + auto param = GetParam(); + run_current_test(std::get<0>(param), std::get<1>(param)); +} + +TEST_P(Tests_MG_GatherEdges_Rmat, CheckInt32Int64Float) +{ + auto param = GetParam(); + run_current_test(std::get<0>(param), std::get<1>(param)); +} + +TEST_P(Tests_MG_GatherEdges_Rmat, CheckInt64Int64Float) +{ + auto param = GetParam(); + run_current_test(std::get<0>(param), std::get<1>(param)); +} + +INSTANTIATE_TEST_SUITE_P( + file_test, + Tests_MG_GatherEdges_File, + ::testing::Combine( + ::testing::Values(Prims_Usecase{true}), + ::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"), + cugraph::test::File_Usecase("test/datasets/web-Google.mtx"), + cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"), + cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx")))); + +INSTANTIATE_TEST_SUITE_P( + rmat_small_test, + Tests_MG_GatherEdges_Rmat, + ::testing::Combine(::testing::Values(Prims_Usecase{false}), + ::testing::Values(cugraph::test::Rmat_Usecase( + 10, 16, 0.57, 0.19, 0.19, 0, false, false, 0, true)))); + +INSTANTIATE_TEST_SUITE_P( + 
rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with + --gtest_filter to select only the rmat_benchmark_test with a specific + vertex & edge type combination) by command line arguments and do not + include more than one Rmat_Usecase that differ only in scale or edge + factor (to avoid running same benchmarks more than once) */ + Tests_MG_GatherEdges_Rmat, + ::testing::Combine(::testing::Values(Prims_Usecase{false}), + ::testing::Values(cugraph::test::Rmat_Usecase( + 20, 32, 0.57, 0.19, 0.19, 0, false, false, 0, true)))); + +CUGRAPH_MG_TEST_PROGRAM_MAIN() diff --git a/cuda_code/mish_kernel.cu b/cuda_code/mish_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..a444c61819a05b35d301197bd5059dbf7e8d678b --- /dev/null +++ b/cuda_code/mish_kernel.cu @@ -0,0 +1,138 @@ + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + + +#define EXP_THRESH 20. + +// kernel function for forward and backward +template +__global__ void MishForward(const int nthreads, + const scalar_t *feat, + scalar_t *activations) { + int tid = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + for (int i{tid}; i < nthreads; i+=stride) { + scalar_t val = feat[i]; + if (val > scalar_t(EXP_THRESH)) { + activations[i] = val * tanh(val); + } else { + activations[i] = val * tanh(log1p(exp(val))); + } + } +} + +template +__global__ void MishBackward(const int nthreads, + const scalar_t *feat, + const scalar_t *grad, + scalar_t *grad_feat) { + int tid = threadIdx.x + blockIdx.x * blockDim.x; + int stride = blockDim.x * gridDim.x; + const scalar_t one(1.); + const scalar_t two(2.); + for (int i{tid}; i < nthreads; i+=stride) { + scalar_t val = feat[i]; + scalar_t xtanh; + if (val > scalar_t(EXP_THRESH)) { + xtanh = tanh(val); + } else { + xtanh = tanh(log1p(exp(val))); + } + grad_feat[i] = grad[i] * (xtanh + val * (one - powf(xtanh, two)) * one / (one + exp(-val))); + } +} + + +// cuda forward and backward +at::Tensor Mish_forward_cuda(const at::Tensor &feat) { + // CHECK type and shape + AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda"); + + // allocate memory and cuda grid/block + auto activations = at::empty_like(feat); + + const int num_samples = feat.numel(); + dim3 grid(std::min( + THCCeilDiv((int64_t)num_samples, (int64_t)512), (int64_t)4096 + )); + dim3 block(512); + if (activations.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return activations; + } + + // call kernel + AT_DISPATCH_FLOATING_TYPES_AND_HALF(activations.scalar_type(), "mish forward", [&] { + MishForward<<>>( + num_samples, + feat.contiguous().data_ptr(), + activations.contiguous().data_ptr() + ); + }); + THCudaCheck(cudaGetLastError()); + return activations; +} + + +at::Tensor Mish_backward_cuda(const at::Tensor &grad, const at::Tensor &feat) { + // CHECK type and shape + AT_ASSERTM(grad.device().type() == c10::kCUDA, "grad should be cuda"); + AT_ASSERTM(feat.device().type() == c10::kCUDA, "feat should be cuda"); + + // allocate memory and cuda grid/block + auto grad_feat = at::empty_like(feat); + const int num_samples = feat.numel(); + dim3 grid(std::min( + THCCeilDiv((int64_t)num_samples, (int64_t)512), (int64_t)4096 + )); + dim3 block(512); + if (grad_feat.numel() == 0) { + THCudaCheck(cudaGetLastError()); + return grad_feat; + } + + // call kernel + AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_feat.scalar_type(), "mish backwrd", [&] { + MishBackward<<>>( + num_samples, + 
feat.contiguous().data_ptr(), + grad.contiguous().data_ptr(), + grad_feat.contiguous().data_ptr() + ); + }); + THCudaCheck(cudaGetLastError()); + return grad_feat; +} + +// python inferface +at::Tensor Mish_forward(const at::Tensor &feat) { + if (feat.device().type() != c10::kCUDA) { + AT_ERROR("this mish function only supports gpu mode\n"); + } + at::DeviceGuard guard(feat.device()); + return Mish_forward_cuda(feat); +} + +at::Tensor Mish_backward(const at::Tensor &grad, const at::Tensor &feat) { + // TODO: try AT_ASSERTM + if (feat.device().type() != c10::kCUDA) { + AT_ERROR("this mish function only supports gpu mode\n"); + } + at::DeviceGuard guard(feat.device()); + return Mish_backward_cuda(grad, feat); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("mish_forward", &Mish_forward, "mish forward"); + m.def("mish_backward", &Mish_backward, "mish backward"); +} diff --git a/cuda_code/mixed_tentusscher_myo_epi_2004_S2_19_1.cu b/cuda_code/mixed_tentusscher_myo_epi_2004_S2_19_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..757b4d1295b75dfe27d6353a6d8741e9739534bc --- /dev/null +++ b/cuda_code/mixed_tentusscher_myo_epi_2004_S2_19_1.cu @@ -0,0 +1,959 @@ +#include +#include +#include "model_gpu_utils.h" + +#include "mixed_tentusscher_myo_epi_2004_S2_19.h" + +extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) +{ + + print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n"); + + // execution configuration + const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; + + size_t size = num_volumes*sizeof(real); + + check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); + check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); + + // Get the mapping array + uint32_t *mapping = NULL; + uint32_t *mapping_device = NULL; + if(extra_data) + { + mapping = (uint32_t*)extra_data; + check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size)); + check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice)); + } + + kernel_set_model_inital_conditions <<>>(*sv, mapping_device, num_volumes); + + check_cuda_error( cudaPeekAtLastError() ); + cudaDeviceSynchronize(); + + check_cuda_error(cudaFree(mapping_device)); + + return pitch_h; + +} + +extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) +{ + + // execution configuration + const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; + + size_t stim_currents_size = sizeof(real)*num_cells_to_solve; + size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; + + real *stims_currents_device; + check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); + check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); + + + //the array cells to solve is passed when we are using and adapative mesh + uint32_t *cells_to_solve_device = NULL; + if(cells_to_solve != NULL) + { + check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); + check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); + } + + // Get the mapping array + uint32_t *mapping = NULL; + uint32_t *mapping_device = NULL; + if(extra_data) + { + mapping = (uint32_t*)extra_data; + check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size)); + check_cuda_error(cudaMemcpy(mapping_device, mapping, 
extra_data_bytes_size, cudaMemcpyHostToDevice)); + } + else + { + print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); + } + + solve_gpu <<>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps); + + check_cuda_error( cudaPeekAtLastError() ); + + check_cuda_error(cudaFree(stims_currents_device)); + if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); + if(mapping_device) check_cuda_error(cudaFree(mapping_device)); + +} + +__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes) +{ + int threadID = blockDim.x * blockIdx.x + threadIdx.x; + + if (threadID < num_volumes) + { + + // Initial conditions for TenTusscher 2004 myocardium + if (mapping[threadID] == 0) + { + // Default initial conditions + /* + *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt + *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M + *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H + *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J + *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 + *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 + *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs + *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S + *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R + *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D + *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F + *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa + *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G + *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai + *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR + *((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai + *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki + */ + // Elnaz's steady-state initial conditions + real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; + for (uint32_t i = 0; i < NEQ; i++) + *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; + } + // Initial conditions for TenTusscher 2004 epicardium + else + { + // Default initial conditions + /* + *((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt + *((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M + *((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H + *((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J + *((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1 + *((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2 + *((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs + *((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S + *((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R + *((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D + *((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F + *((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa + *((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G + *((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai + *((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR + *((real * 
)((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai + *((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki + */ + // Elnaz's steady-state initial conditions + real sv_sst[]={-86.4901203553897,0.00131177121243054,0.777786288577117,0.777647224146045,0.000176819820661720,0.484277066680072,0.00295670386892032,0.999998321645759,1.95882583997698e-08,1.91086959570826e-05,0.999769802784257,1.00742294542800,0.999998504302701,3.74218001174359e-05,1.41921088197810,10.0015161419689,139.208342414277}; + for (uint32_t i = 0; i < NEQ; i++) + *((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i]; + } + } +} + +// Solving the model for each cell in the tissue matrix ni x nj +__global__ void solve_gpu(real dt, real *sv, real* stim_currents, + uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve, + int num_steps) +{ + int threadID = blockDim.x * blockIdx.x + threadIdx.x; + int sv_id; + + // Each thread solves one cell model + if(threadID < num_cells_to_solve) + { + if(cells_to_solve) + sv_id = cells_to_solve[threadID]; + else + sv_id = threadID; + + real rDY[NEQ]; + + for (int n = 0; n < num_steps; ++n) + { + + if (mapping[sv_id] == 0) + { + RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt); + + for(int i = 0; i < NEQ; i++) + { + *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); + } + } + else + { + RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt); + + for (int i = 0; i < NEQ; i++) + { + *((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id); + } + } + + } + } +} + +inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) +{ + + // State variables + real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); + real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); + real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); + real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); + real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); + real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_); + real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); + real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); + real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); + real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); + real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); + real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); + real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); + real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); + real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); + real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); + real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); + + //External concentrations + real Ko=5.4; + real Cao=2.0; + real Nao=140.0; + + //Intracellular volumes + real Vc=0.016404; + real Vsr=0.001094; + + //Calcium dynamics + real Bufc=0.15f; + real Kbufc=0.001f; + real Bufsr=10.f; + real Kbufsr=0.3f; + real taufca=2.f; + real taug=2.f; + real Vmaxup=0.000425f; + real Kup=0.00025f; + + //Constants + const real R = 8314.472f; + const real F = 96485.3415f; + const real T =310.0f; + real RTONF =(R*T)/F; + + //Cellular capacitance + real CAPACITANCE=0.185; + + //Parameters for currents + //Parameters for IKr + real Gkr=0.096; + //Parameters for Iks + real pKNa=0.03; + +// [!] Myocardium cell + real Gks=0.062; +//Parameters for Ik1 + real GK1=5.405; +//Parameters for Ito +// [!] 
Myocardium cell + real Gto=0.294; +//Parameters for INa + real GNa=14.838; +//Parameters for IbNa + real GbNa=0.00029; +//Parameters for INaK + real KmK=1.0; + real KmNa=40.0; + real knak=1.362; +//Parameters for ICaL + real GCaL=0.000175; +//Parameters for IbCa + real GbCa=0.000592; +//Parameters for INaCa + real knaca=1000; + real KmNai=87.5; + real KmCa=1.38; + real ksat=0.1; + real n=0.35; +//Parameters for IpCa + real GpCa=0.825; + real KpCa=0.0005; +//Parameters for IpK; + real GpK=0.0146; + + + real IKr; + real IKs; + real IK1; + real Ito; + real INa; + real IbNa; + real ICaL; + real IbCa; + real INaCa; + real IpCa; + real IpK; + real INaK; + real Irel; + real Ileak; + + + real dNai; + real dKi; + real dCai; + real dCaSR; + + real A; +// real BufferFactorc; +// real BufferFactorsr; + real SERCA; + real Caisquare; + real CaSRsquare; + real CaCurrent; + real CaSRCurrent; + + + real fcaold; + real gold; + real Ek; + real Ena; + real Eks; + real Eca; + real CaCSQN; + real bjsr; + real cjsr; + real CaBuf; + real bc; + real cc; + real Ak1; + real Bk1; + real rec_iK1; + real rec_ipK; + real rec_iNaK; + real AM; + real BM; + real AH_1; + real BH_1; + real AH_2; + real BH_2; + real AJ_1; + real BJ_1; + real AJ_2; + real BJ_2; + real M_INF; + real H_INF; + real J_INF; + real TAU_M; + real TAU_H; + real TAU_J; + real axr1; + real bxr1; + real axr2; + real bxr2; + real Xr1_INF; + real Xr2_INF; + real TAU_Xr1; + real TAU_Xr2; + real Axs; + real Bxs; + real Xs_INF; + real TAU_Xs; + real R_INF; + real TAU_R; + real S_INF; + real TAU_S; + real Ad; + real Bd; + real Cd; + real TAU_D; + real D_INF; + real TAU_F; + real F_INF; + real FCa_INF; + real G_INF; + + real inverseVcF2=1/(2*Vc*F); + real inverseVcF=1./(Vc*F); + real Kupsquare=Kup*Kup; +// real BufcKbufc=Bufc*Kbufc; +// real Kbufcsquare=Kbufc*Kbufc; +// real Kbufc2=2*Kbufc; +// real BufsrKbufsr=Bufsr*Kbufsr; +// const real Kbufsrsquare=Kbufsr*Kbufsr; +// const real Kbufsr2=2*Kbufsr; + const real exptaufca=exp(-dt/taufca); + const real exptaug=exp(-dt/taug); + + real sItot; + + //Needed to compute currents + Ek=RTONF*(log((Ko/Ki))); + Ena=RTONF*(log((Nao/Nai))); + Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); + Eca=0.5*RTONF*(log((Cao/Cai))); + Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); + Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ + exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); + rec_iK1=Ak1/(Ak1+Bk1); + rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); + rec_ipK=1./(1.+exp((25-svolt)/5.98)); + + + //Compute currents + INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); + ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* + (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); + Ito=Gto*sr*ss*(svolt-Ek); + IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); + IKs=Gks*sxs*sxs*(svolt-Eks); + IK1=GK1*rec_iK1*(svolt-Ek); + INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* + (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* + (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- + exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); + INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; + IpCa=GpCa*Cai/(KpCa+Cai); + IpK=GpK*rec_ipK*(svolt-Ek); + IbNa=GbNa*(svolt-Ena); + IbCa=GbCa*(svolt-Eca); + + + //Determine total current + (sItot) = IKr + + IKs + + IK1 + + Ito + + INa + + IbNa + + ICaL + + IbCa + + INaK + + INaCa + + IpCa + + IpK + + stim_current; + + + //update concentrations + Caisquare=Cai*Cai; + CaSRsquare=CaSR*CaSR; + CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; + A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; + Irel=A*sd*sg; + 
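+
+    // Note on the calcium update below (derived from the expressions that follow):
+    // the free concentrations are recovered analytically from the buffered totals.
+    // With CaBuf = Buf*Ca/(Ca+K), the new free concentration x solves
+    // x^2 + b*x - c = 0 with b = Buf + K - Ca_total and c = K*Ca_total, whose
+    // positive root is (sqrt(b*b + 4*c) - b)/2 -- this is what the bjsr/cjsr and
+    // bc/cc expressions implement for CaSR and Cai. Nai and Ki are advanced in place
+    // with an explicit dt step (Nai += dt*dNai, Ki += dt*dKi), and the gate values
+    // further down are evaluated with the Rush-Larsen style exponential formula
+    // y_inf - (y_inf - y)*exp(-dt/tau).
+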
Ileak=0.00008f*(CaSR-Cai); + SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); + CaSRCurrent=SERCA-Irel-Ileak; + CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); + dCaSR=dt*(Vc/Vsr)*CaSRCurrent; + bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; + cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); + CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; + CaBuf=Bufc*Cai/(Cai+Kbufc); + dCai=dt*(CaCurrent-CaSRCurrent); + bc=Bufc-CaBuf-dCai-Cai+Kbufc; + cc=Kbufc*(CaBuf+dCai+Cai); + Cai=(sqrt(bc*bc+4*cc)-bc)/2; + + + + dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; + Nai+=dt*dNai; + + dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; + Ki+=dt*dKi; + + //compute steady state values and time constants + AM=1./(1.+exp((-60.-svolt)/5.)); + BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); + TAU_M=AM*BM; + M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); + if (svolt>=-40.) + { + AH_1=0.; + BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); + TAU_H= 1.0/(AH_1+BH_1); + } + else + { + AH_2=(0.057*exp(-(svolt+80.)/6.8)); + BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); + TAU_H=1.0/(AH_2+BH_2); + } + H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); + if(svolt>=-40.) + { + AJ_1=0.; + BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); + TAU_J= 1.0/(AJ_1+BJ_1); + } + else + { + AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* + exp(-0.04391*svolt))*(svolt+37.78)/ + (1.+exp(0.311*(svolt+79.23)))); + BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); + TAU_J= 1.0/(AJ_2+BJ_2); + } + J_INF=H_INF; + + Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); + axr1=450./(1.+exp((-45.-svolt)/10.)); + bxr1=6./(1.+exp((svolt-(-30.))/11.5)); + TAU_Xr1=axr1*bxr1; + Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); + axr2=3./(1.+exp((-60.-svolt)/20.)); + bxr2=1.12/(1.+exp((svolt-60.)/20.)); + TAU_Xr2=axr2*bxr2; + + Xs_INF=1./(1.+exp((-5.-svolt)/14.)); + Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); + Bxs=1./(1.+exp((svolt-60.)/20.)); + TAU_Xs=Axs*Bxs; + +// [!] 
Myocardium cell + R_INF=1./(1.+exp((20-svolt)/6.)); + S_INF=1./(1.+exp((svolt+20)/5.)); + TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; + TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; + + + D_INF=1./(1.+exp((-5-svolt)/7.5)); + Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; + Bd=1.4/(1.+exp((svolt+5)/5)); + Cd=1./(1.+exp((50-svolt)/20)); + TAU_D=Ad*Bd+Cd; + F_INF=1./(1.+exp((svolt+20)/7)); + //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); + TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML + + + FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ + 0.1/(1.+exp((Cai-0.0005)/0.0001))+ + 0.20/(1.+exp((Cai-0.00075)/0.0008))+ + 0.23 )/1.46; + if(Cai<0.00035) + G_INF=1./(1.+pow((Cai/0.00035),6)); + else + G_INF=1./(1.+pow((Cai/0.00035),16)); + + //Update gates + rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); + rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); + rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); + rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); + rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); + rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); + rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); + rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); + rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); + rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); + fcaold= sfca; + sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; + if(sfca>fcaold && (svolt)>-37.0) + sfca = fcaold; + gold = sg; + sg = G_INF-(G_INF-sg)*exptaug; + + if(sg>gold && (svolt)>-37.0) + sg=gold; + + //update voltage + rDY_[0] = svolt + dt*(-sItot); + rDY_[11] = sfca; + rDY_[12] = sg; + rDY_[13] = Cai; + rDY_[14] = CaSR; + rDY_[15] = Nai; + rDY_[16] = Ki; + +} + +inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt) +{ + // State variables + real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_); + real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_); + real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_); + real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_); + real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_); + real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_); + real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_); + real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_); + real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_); + real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_); + real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_); + real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_); + real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_); + real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_); + real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_); + real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_); + real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_); + + //External concentrations + real Ko=5.4; + real Cao=2.0; + real Nao=140.0; + + //Intracellular volumes + real Vc=0.016404; + real Vsr=0.001094; + + //Calcium dynamics + real Bufc=0.15f; + real Kbufc=0.001f; + real Bufsr=10.f; + real Kbufsr=0.3f; + real taufca=2.f; + real taug=2.f; + real Vmaxup=0.000425f; + real Kup=0.00025f; + + //Constants + const real R = 8314.472f; + const real F = 96485.3415f; + const real T =310.0f; + real RTONF =(R*T)/F; + + //Cellular capacitance + real CAPACITANCE=0.185; + + //Parameters for currents + //Parameters for IKr + real Gkr=0.096; + //Parameters for Iks + real pKNa=0.03; + // [!] 
Epicardium cell + real Gks=0.245; + //Parameters for Ik1 + real GK1=5.405; + //Parameters for Ito +// [!] Epicardium cell + real Gto=0.294; +//Parameters for INa + real GNa=14.838; +//Parameters for IbNa + real GbNa=0.00029; +//Parameters for INaK + real KmK=1.0; + real KmNa=40.0; + real knak=1.362; +//Parameters for ICaL + real GCaL=0.000175; +//Parameters for IbCa + real GbCa=0.000592; +//Parameters for INaCa + real knaca=1000; + real KmNai=87.5; + real KmCa=1.38; + real ksat=0.1; + real n=0.35; +//Parameters for IpCa + real GpCa=0.825; + real KpCa=0.0005; +//Parameters for IpK; + real GpK=0.0146; + + real parameters []={14.1722612334159,0.000259112383736700,0.000154841929841439,0.000361218133317334,0.277128455649609,0.153642408006870,0.209381667465666,4.20509839372909,0.0199270314805181,1.58059649007092,1098.43907813844,0.000639220600349527,0.0905927390261824,0.0181442296796367,0.00430751059648478,1.23911116806789e-05}; + + GNa=parameters[0]; + GbNa=parameters[1]; + GCaL=parameters[2]; + GbCa=parameters[3]; + Gto=parameters[4]; + Gkr=parameters[5]; + Gks=parameters[6]; + GK1=parameters[7]; + GpK=parameters[8]; + knak=parameters[9]; + knaca=parameters[10]; + Vmaxup=parameters[11]; + GpCa=parameters[12]; + real arel=parameters[13]; + real crel=parameters[14]; + real Vleak=parameters[15]; + + real IKr; + real IKs; + real IK1; + real Ito; + real INa; + real IbNa; + real ICaL; + real IbCa; + real INaCa; + real IpCa; + real IpK; + real INaK; + real Irel; + real Ileak; + + + real dNai; + real dKi; + real dCai; + real dCaSR; + + real A; +// real BufferFactorc; +// real BufferFactorsr; + real SERCA; + real Caisquare; + real CaSRsquare; + real CaCurrent; + real CaSRCurrent; + + + real fcaold; + real gold; + real Ek; + real Ena; + real Eks; + real Eca; + real CaCSQN; + real bjsr; + real cjsr; + real CaBuf; + real bc; + real cc; + real Ak1; + real Bk1; + real rec_iK1; + real rec_ipK; + real rec_iNaK; + real AM; + real BM; + real AH_1; + real BH_1; + real AH_2; + real BH_2; + real AJ_1; + real BJ_1; + real AJ_2; + real BJ_2; + real M_INF; + real H_INF; + real J_INF; + real TAU_M; + real TAU_H; + real TAU_J; + real axr1; + real bxr1; + real axr2; + real bxr2; + real Xr1_INF; + real Xr2_INF; + real TAU_Xr1; + real TAU_Xr2; + real Axs; + real Bxs; + real Xs_INF; + real TAU_Xs; + real R_INF; + real TAU_R; + real S_INF; + real TAU_S; + real Ad; + real Bd; + real Cd; + real TAU_D; + real D_INF; + real TAU_F; + real F_INF; + real FCa_INF; + real G_INF; + + real inverseVcF2=1/(2*Vc*F); + real inverseVcF=1./(Vc*F); + real Kupsquare=Kup*Kup; +// real BufcKbufc=Bufc*Kbufc; +// real Kbufcsquare=Kbufc*Kbufc; +// real Kbufc2=2*Kbufc; +// real BufsrKbufsr=Bufsr*Kbufsr; +// const real Kbufsrsquare=Kbufsr*Kbufsr; +// const real Kbufsr2=2*Kbufsr; + const real exptaufca=exp(-dt/taufca); + const real exptaug=exp(-dt/taug); + + real sItot; + + //Needed to compute currents + Ek=RTONF*(log((Ko/Ki))); + Ena=RTONF*(log((Nao/Nai))); + Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); + Eca=0.5*RTONF*(log((Cao/Cai))); + Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); + Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ + exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); + rec_iK1=Ak1/(Ak1+Bk1); + rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); + rec_ipK=1./(1.+exp((25-svolt)/5.98)); + + + //Compute currents + INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); + ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* + (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); + Ito=Gto*sr*ss*(svolt-Ek); + 
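// The remaining membrane currents follow: delayed rectifiers IKr/IKs, inward rectifier IK1, Na+/K+ pump, Na+/Ca2+ exchanger, and the plateau/background currents, all using the conductances overridden from the parameters[] array above. +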
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); + IKs=Gks*sxs*sxs*(svolt-Eks); + IK1=GK1*rec_iK1*(svolt-Ek); + INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* + (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* + (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- + exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); + INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; + IpCa=GpCa*Cai/(KpCa+Cai); + IpK=GpK*rec_ipK*(svolt-Ek); + IbNa=GbNa*(svolt-Ena); + IbCa=GbCa*(svolt-Eca); + + + //Determine total current + (sItot) = IKr + + IKs + + IK1 + + Ito + + INa + + IbNa + + ICaL + + IbCa + + INaK + + INaCa + + IpCa + + IpK + + stim_current; + + + //update concentrations + Caisquare=Cai*Cai; + CaSRsquare=CaSR*CaSR; + CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; + A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; + Irel=A*sd*sg; + Ileak=Vleak*(CaSR-Cai); + SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); + CaSRCurrent=SERCA-Irel-Ileak; + CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); + dCaSR=dt*(Vc/Vsr)*CaSRCurrent; + bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; + cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); + CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; + CaBuf=Bufc*Cai/(Cai+Kbufc); + dCai=dt*(CaCurrent-CaSRCurrent); + bc=Bufc-CaBuf-dCai-Cai+Kbufc; + cc=Kbufc*(CaBuf+dCai+Cai); + Cai=(sqrt(bc*bc+4*cc)-bc)/2; + + + + dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; + Nai+=dt*dNai; + + dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; + Ki+=dt*dKi; + + //compute steady state values and time constants + AM=1./(1.+exp((-60.-svolt)/5.)); + BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); + TAU_M=AM*BM; + M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); + if (svolt>=-40.) + { + AH_1=0.; + BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); + TAU_H= 1.0/(AH_1+BH_1); + } + else + { + AH_2=(0.057*exp(-(svolt+80.)/6.8)); + BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); + TAU_H=1.0/(AH_2+BH_2); + } + H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); + if(svolt>=-40.) 
+ { + AJ_1=0.; + BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); + TAU_J= 1.0/(AJ_1+BJ_1); + } + else + { + AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* + exp(-0.04391*svolt))*(svolt+37.78)/ + (1.+exp(0.311*(svolt+79.23)))); + BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); + TAU_J= 1.0/(AJ_2+BJ_2); + } + J_INF=H_INF; + + Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); + axr1=450./(1.+exp((-45.-svolt)/10.)); + bxr1=6./(1.+exp((svolt-(-30.))/11.5)); + TAU_Xr1=axr1*bxr1; + Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); + axr2=3./(1.+exp((-60.-svolt)/20.)); + bxr2=1.12/(1.+exp((svolt-60.)/20.)); + TAU_Xr2=axr2*bxr2; + + Xs_INF=1./(1.+exp((-5.-svolt)/14.)); + Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); + Bxs=1./(1.+exp((svolt-60.)/20.)); + TAU_Xs=Axs*Bxs; + + R_INF=1./(1.+exp((20-svolt)/6.)); + S_INF=1./(1.+exp((svolt+20)/5.)); + TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; + TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; + + + D_INF=1./(1.+exp((-5-svolt)/7.5)); + Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; + Bd=1.4/(1.+exp((svolt+5)/5)); + Cd=1./(1.+exp((50-svolt)/20)); + TAU_D=Ad*Bd+Cd; + F_INF=1./(1.+exp((svolt+20)/7)); + //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); + TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML + + + FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ + 0.1/(1.+exp((Cai-0.0005)/0.0001))+ + 0.20/(1.+exp((Cai-0.00075)/0.0008))+ + 0.23 )/1.46; + if(Cai<0.00035) + G_INF=1./(1.+pow((Cai/0.00035),6)); + else + G_INF=1./(1.+pow((Cai/0.00035),16)); + + //Update gates + rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); + rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); + rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); + rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); + rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); + rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); + rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); + rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); + rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); + rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); + fcaold= sfca; + sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; + if(sfca>fcaold && (svolt)>-37.0) + sfca = fcaold; + gold = sg; + sg = G_INF-(G_INF-sg)*exptaug; + + if(sg>gold && (svolt)>-37.0) + sg=gold; + + //update voltage + rDY_[0] = svolt + dt*(-sItot); + rDY_[11] = sfca; + rDY_[12] = sg; + rDY_[13] = Cai; + rDY_[14] = CaSR; + rDY_[15] = Nai; + rDY_[16] = Ki; + +} + diff --git a/cuda_code/mixed_vector_test_3.cu b/cuda_code/mixed_vector_test_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..8ea574b31cc5825cc2f788377d66c4c45e723898 --- /dev/null +++ b/cuda_code/mixed_vector_test_3.cu @@ -0,0 +1,106 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ +#include + +#include "glog/logging.h" +#include "gtest/gtest.h" +#include "paddle/fluid/framework/mixed_vector.h" +#include "paddle/fluid/platform/gpu_info.h" + +template +using vec = paddle::framework::Vector; + +TEST(mixed_vector, CPU_VECTOR) { + vec tmp; + for (int i = 0; i < 10; ++i) { + tmp.push_back(i); + } + ASSERT_EQ(tmp.size(), 10UL); + vec tmp2; + tmp2 = tmp; + ASSERT_EQ(tmp2.size(), 10UL); + for (int i = 0; i < 10; ++i) { + ASSERT_EQ(tmp2[i], i); + ASSERT_EQ(tmp2[i], tmp[i]); + } + int cnt = 0; + for (auto& t : tmp2) { + ASSERT_EQ(t, cnt); + ++cnt; + } +} + +static __global__ void multiply_10(int* ptr) { + for (int i = 0; i < 10; ++i) { + ptr[i] *= 10; + } +} + +cudaStream_t GetCUDAStream(paddle::platform::CUDAPlace place) { + return reinterpret_cast( + paddle::platform::DeviceContextPool::Instance().Get(place)) + ->stream(); +} + +TEST(mixed_vector, GPU_VECTOR) { + vec tmp; + for (int i = 0; i < 10; ++i) { + tmp.push_back(i); + } + ASSERT_EQ(tmp.size(), 10UL); + paddle::platform::CUDAPlace gpu(0); + + multiply_10<<<1, 1, 0, GetCUDAStream(gpu)>>>(tmp.MutableData(gpu)); + + for (int i = 0; i < 10; ++i) { + ASSERT_EQ(tmp[i], i * 10); + } +} + +TEST(mixed_vector, MultiGPU) { + if (paddle::platform::GetCUDADeviceCount() < 2) { + LOG(WARNING) << "Skip mixed_vector.MultiGPU since there are not multiple " + "GPUs in your machine."; + return; + } + + vec tmp; + for (int i = 0; i < 10; ++i) { + tmp.push_back(i); + } + ASSERT_EQ(tmp.size(), 10UL); + paddle::platform::CUDAPlace gpu0(0); + paddle::platform::SetDeviceId(0); + multiply_10<<<1, 1, 0, GetCUDAStream(gpu0)>>>(tmp.MutableData(gpu0)); + paddle::platform::CUDAPlace gpu1(1); + auto* gpu1_ptr = tmp.MutableData(gpu1); + paddle::platform::SetDeviceId(1); + multiply_10<<<1, 1, 0, GetCUDAStream(gpu1)>>>(gpu1_ptr); + for (int i = 0; i < 10; ++i) { + ASSERT_EQ(tmp[i], i * 100); + } +} + +TEST(mixed_vector, InitWithCount) { + paddle::framework::Vector vec(10, 10); + for (int i = 0; i < 10; ++i) { + ASSERT_EQ(vec[i], 10); + } +} + +TEST(mixed_vector, ForEach) { + vec tmp; + for (auto& v : tmp) { + } +} diff --git a/cuda_code/mult.output.cu b/cuda_code/mult.output.cu new file mode 100644 index 0000000000000000000000000000000000000000..7384d9c697cfc762d1e14e98c6751a0c579ccfd1 --- /dev/null +++ b/cuda_code/mult.output.cu @@ -0,0 +1,256 @@ +/***************************************** +Emitting C Generated Code +*******************************************/ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include "cudnn_header.h" +#include "nccl_header.h" +#include +#include +#include +#include "cuda_header.h" +#include +#include +#include "cublas_header.h" +#include +#include "mpi_header.h" +#include "scanner_header.h" +/************* Functions **************/ +__global__ void x17(float* x18, float x19, int x20) { + // begin generating kernel function for FILL of type Float + int x21 = gridDim.x * blockDim.x; + int x22 = threadIdx.x + blockIdx.x * blockDim.x; + while (x22 < x20) { + x18[x22] = x19; + x22 = x22 + x21; + } + // end generating kernel function for FILL of type Float +} +__global__ void x26(float* x27, float* x28, float* x29, int x30) { + // begin generating kernel function for MULT of type Float + int x31 = gridDim.x * blockDim.x; + int x32 = threadIdx.x + blockIdx.x * blockDim.x; + while (x32 < x30) { + int x33 = x32; + x29[x33] = x27[x33] * x28[x33]; + x32 = x32 + x31; + } + // end generating kernel function for MULT of type Float +} +__global__ void x35(float* x36, float* x37, int x38) { + // begin generating kernel function 
for ACCUM of type Float + int x39 = gridDim.x * blockDim.x; + int x40 = threadIdx.x + blockIdx.x * blockDim.x; + while (x40 < x38) { + int x41 = x40; + x36[x41] = x36[x41] + x37[x41]; + x40 = x40 + x39; + } + // end generating kernel function for ACCUM of type Float +} +__global__ void x42(float* x43, float* x44, float* x45, int x46) { + // begin generating kernel function for SGD of type Float + int x47 = gridDim.x * blockDim.x; + int x48 = threadIdx.x + blockIdx.x * blockDim.x; + while (x48 < x46) { + int x49 = x48; + float x50 = x45[x49] * 0.5 + x44[x49]; + x43[x49] = x43[x49] - x50 * 1.0E-4; + x45[x49] = x50; + x48 = x48 + x47; + } + // end generating kernel function for SGD of type Float +} +/**************** Snippet ****************/ +void Snippet(int x0) { + // begin setting up the MPI/NCCL environment + int x1 = 0; + int x2 = 0; + MPICHECK(MPI_Init(NULL, NULL)); + MPICHECK(MPI_Comm_rank(MPI_COMM_WORLD, &x2)); + MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &x1)); + MPICHECK(MPI_Barrier(MPI_COMM_WORLD)); + CUDA_CALL(cudaSetDevice(x2)); + ncclUniqueId x3; + NCCLCHECK(ncclGetUniqueId(&x3)); + MPICHECK(MPI_Bcast(&x3, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD)); + ncclComm_t x4; + NCCLCHECK(ncclCommInitRank(&x4, x1, x3, x2)); + cudaStream_t x5; + CUDA_CALL(cudaStreamCreateWithFlags(&x5, cudaStreamNonBlocking)); + // begin setting up the local MPI/NCCL environment + MPI_Comm x6; + int x7 = x2; + MPICHECK(MPI_Comm_split(MPI_COMM_WORLD, x7 / 2, x7, &x6)); + int x8 = 0; + int x9 = 0; + MPICHECK(MPI_Comm_rank(x6, &x9)); + MPICHECK(MPI_Comm_size(x6, &x8)); + ncclUniqueId x10; + NCCLCHECK(ncclGetUniqueId(&x10)); + MPICHECK(MPI_Bcast(&x10, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, x6)); + ncclComm_t x11; + NCCLCHECK(ncclCommInitRank(&x11, x8, x10, x9)); + int x12 = x2; + // end setting up the local MPI/NCCL environment + // end setting up the MPI/NCCL environment + if (x12 >= 0 && x12 < 2) { + int x13 = x9; + // begin initializing GPU array of size 512 and type Float + float* x14 = (float*)malloc(512 * sizeof(float)); + float* x15 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x15, (size_t)(512 * sizeof(float)))); + scan_float_array(x14, 512, "golden/weight_rank_%d.data", x13); + CUDA_CALL(cudaMemcpy(x15, x14, (size_t)(512 * sizeof(float)), cudaMemcpyHostToDevice)); + // end initializing GPU array of size 512 and type Float + // begin initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + float* x16 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x16, (size_t)(512 * sizeof(float)))); + x17<<>>(x16, 0, 512); + // end initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + // begin initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + float* x23 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x23, (size_t)(512 * sizeof(float)))); + x17<<>>(x23, 0, 512); + // end initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + // begin initializing fixed GPU array of size 1536 and type Float and device (pre-rename) x64 + float* x24 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x24, (size_t)(1536 * sizeof(float)))); + x17<<>>(x24, 0, 1536); + // end initializing fixed GPU array of size 1536 and type Float and device (pre-rename) x64 + int x25 = 0; + int x34 = x12 + 2; + while (x25 != 3) { + int x51 = 0; + while (x51 != 3) { + // begin initializing GPU array of size 512 and type Float + float* x52 = (float*)malloc(512 * sizeof(float)); + 
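// x52 is the host-side staging buffer; x53 below is the matching device buffer, filled from the golden input file via scan_float_array and cudaMemcpy before the MULT kernel runs and the result is sent to the partner rank with ncclSend. +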
float* x53 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x53, (size_t)(512 * sizeof(float)))); + scan_float_array(x52, 512, "golden/input1_rank_%d.data", x13); + CUDA_CALL(cudaMemcpy(x53, x52, (size_t)(512 * sizeof(float)), cudaMemcpyHostToDevice)); + // end initializing GPU array of size 512 and type Float + CUDA_CALL(cudaMemcpy(x24 + 512 * x51, x53, (size_t)(512 * sizeof(float)), cudaMemcpyDeviceToDevice)); + // begin computing MULT on GPU for size 512 and type Float at device (pre-rename) x64 with left_operand x162 and right_operand x75 + float* x54 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x54, (size_t)(512 * sizeof(float)))); + x26<<>>(x53, x15, x54, 512); + // end computing MULT on GPU for size 512 and type Float at device (pre-rename) x64 with left_operand x162 and right_operand x75 + NCCLCHECK(ncclSend(x54, (size_t)512, ncclFloat32, x34, x4, x5)); + CUDA_CALL(cudaStreamSynchronize(x5)); + x51 = x51 + 1; + } + int x55 = 0; + while (x55 != 3) { + // begin initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + float* x56 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x56, (size_t)(512 * sizeof(float)))); + x17<<>>(x56, 0, 512); + // end initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + NCCLCHECK(ncclRecv(x56, (size_t)512, ncclFloat32, x34, x4, x5)); + CUDA_CALL(cudaStreamSynchronize(x5)); + // begin computing MULT on GPU for size 512 and type Float at device (pre-rename) x64 with left_operand x244 and right_operand x246 + float* x57 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x57, (size_t)(512 * sizeof(float)))); + x26<<>>(x24 + 512 * x55, x56, x57, 512); + // end computing MULT on GPU for size 512 and type Float at device (pre-rename) x64 with left_operand x244 and right_operand x246 + // begin computing ACCUM on GPU for size 512 and type Float at device (pre-rename) x64 with base_operand x90 and addition_operand x263 + x35<<>>(x16, x57, 512); + // end computing ACCUM on GPU for size 512 and type Float at device (pre-rename) x64 with base_operand x90 and addition_operand x263 + x55 = x55 + 1; + } + // begin computing SGD on GPU for size 512 and type Float at device (pre-name) x64 with weight x75, grad x90, and momentum x128 + x42<<>>(x15, x16, x23, 512); + // end computing SGD on GPU for size 512 and type Float at device (pre-name) x64 with weight x75, grad x90, and momentum x128 + // begin checking GPU array of size 512 and type Float + float* x58 = (float*)malloc(512 * sizeof(float)); + CUDA_CALL(cudaMemcpy(x58, x16, (size_t)(512 * sizeof(float)), cudaMemcpyDeviceToHost)); + check_float_array_with_file(x58, 512, "golden/weight_grad_rank_%d.data", x13); + // end checking GPU array of size 512 and type Float + // begin checking GPU array of size 512 and type Float + float* x59 = (float*)malloc(512 * sizeof(float)); + CUDA_CALL(cudaMemcpy(x59, x15, (size_t)(512 * sizeof(float)), cudaMemcpyDeviceToHost)); + check_float_array_with_file(x59, 512, "golden/weight_rank_%d.data", x13); + // end checking GPU array of size 512 and type Float + x25 = x25 + 1; + } + } + if (x12 >= 2 && x12 < 4) { + int x13 = x9; + // begin initializing fixed GPU array of size 1536 and type Float and device (pre-rename) x64 + float* x60 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x60, (size_t)(1536 * sizeof(float)))); + x17<<>>(x60, 0, 1536); + // end initializing fixed GPU array of size 1536 and type Float and device (pre-rename) x64 + // begin initializing fixed 
GPU array of size 1536 and type Float and device (pre-rename) x64 + float* x61 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x61, (size_t)(1536 * sizeof(float)))); + x17<<>>(x61, 0, 1536); + // end initializing fixed GPU array of size 1536 and type Float and device (pre-rename) x64 + int x62 = 0; + int x63 = x12 - 2; + while (x62 != 3) { + int x64 = 0; + while (x64 != 3) { + // begin initializing GPU array of size 512 and type Float + float* x65 = (float*)malloc(512 * sizeof(float)); + float* x66 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x66, (size_t)(512 * sizeof(float)))); + scan_float_array(x65, 512, "golden/input2_rank_%d.data", x13); + CUDA_CALL(cudaMemcpy(x66, x65, (size_t)(512 * sizeof(float)), cudaMemcpyHostToDevice)); + // end initializing GPU array of size 512 and type Float + int x67 = 512 * x64; + CUDA_CALL(cudaMemcpy(x61 + x67, x66, (size_t)(512 * sizeof(float)), cudaMemcpyDeviceToDevice)); + // begin initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + float* x68 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x68, (size_t)(512 * sizeof(float)))); + x17<<>>(x68, 0, 512); + // end initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + NCCLCHECK(ncclRecv(x68, (size_t)512, ncclFloat32, x63, x4, x5)); + CUDA_CALL(cudaStreamSynchronize(x5)); + CUDA_CALL(cudaMemcpy(x60 + x67, x68, (size_t)(512 * sizeof(float)), cudaMemcpyDeviceToDevice)); + x64 = x64 + 1; + } + int x69 = 0; + while (x69 != 3) { + // begin initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + float* x70 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x70, (size_t)(512 * sizeof(float)))); + x17<<>>(x70, 0, 512); + // end initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + // begin initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + float* x71 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x71, (size_t)(512 * sizeof(float)))); + x17<<>>(x71, 1, 512); + // end initializing fixed GPU array of size 512 and type Float and device (pre-rename) x64 + // begin computing MULT on GPU for size 512 and type Float at device (pre-rename) x64 with left_operand x487 and right_operand x489 + float* x72 = (float*)malloc(0 * sizeof(float)); + CUDA_CALL(cudaMalloc(&x72, (size_t)(512 * sizeof(float)))); + x26<<>>(x61 + 512 * x69, x71, x72, 512); + // end computing MULT on GPU for size 512 and type Float at device (pre-rename) x64 with left_operand x487 and right_operand x489 + // begin computing ACCUM on GPU for size 512 and type Float at device (pre-rename) x64 with base_operand x476 and addition_operand x500 + x35<<>>(x70, x72, 512); + // end computing ACCUM on GPU for size 512 and type Float at device (pre-rename) x64 with base_operand x476 and addition_operand x500 + NCCLCHECK(ncclSend(x70, (size_t)512, ncclFloat32, x63, x4, x5)); + CUDA_CALL(cudaStreamSynchronize(x5)); + x69 = x69 + 1; + } + x62 = x62 + 1; + } + } + NCCLCHECK(ncclCommDestroy(x11)); + NCCLCHECK(ncclCommDestroy(x4)); + MPICHECK(MPI_Finalize()); +} +/***************************************** +End of C Generated Code +*******************************************/ +int main(int argc, char *argv[]) { + if (argc != 2) { + printf("usage: %s \n", argv[0]); + return 0; + } + Snippet(atoi(argv[1])); + return 0; +} diff --git a/cuda_code/multi_1.cu b/cuda_code/multi_1.cu new file mode 100644 index 
0000000000000000000000000000000000000000..85a197ce974f495c6ce26f145896ea5b72c1bc95 --- /dev/null +++ b/cuda_code/multi_1.cu @@ -0,0 +1,105 @@ +#include +#include +#include +#include "multShare.h" +// Thread block size +#define BLOCK_SIZE 16 + __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); +/* + * multShare.c + * + * Robert Hochberg + * January 24, 2012 + * + * Based nearly entirely on the code from the CUDA C Programming Guide + */ +// Matrix multiplication - Host code +// Matrix dimensions are assumed to be multiples of BLOCK_SIZE +void MatMul(const Matrix A, const Matrix B, Matrix C) { + // Load A and B to device memory + Matrix d_A; + d_A.width = d_A.stride = A.width; + d_A.height = A.height; + size_t size = A.width * A.height * sizeof(float); + cudaError_t err = cudaMalloc(&d_A.elements, size); + printf("CUDA malloc A: %s\n",cudaGetErrorString(err)); + cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); + Matrix d_B; + d_B.width = d_B.stride = B.width; + d_B.height = B.height; + size = B.width * B.height * sizeof(float); + err = cudaMalloc(&d_B.elements, size); + printf("CUDA malloc B: %s\n",cudaGetErrorString(err)); +/* 37 */ + cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); + // Allocate C in device memory + Matrix d_C; + d_C.width = d_C.stride = C.width; + d_C.height = C.height; + size = C.width * C.height * sizeof(float); + err = cudaMalloc(&d_C.elements, size); + printf("CUDA malloc C: %s\n",cudaGetErrorString(err)); + // Invoke kernel + dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); + dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); + MatMulKernel<<>>(d_A, d_B, d_C); + err = cudaThreadSynchronize(); + printf("Run kernel: %s\n", cudaGetErrorString(err)); + // Read C from device memory + err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); + printf("Copy C off of device: %s\n",cudaGetErrorString(err)); + // Free device memory + cudaFree(d_A.elements); + cudaFree(d_B.elements); + cudaFree(d_C.elements); +} +int main(int argc, char* argv[]){ + Matrix A, B, C; + int a1, a2, b1, b2; + +srand(1234); + a1 = atoi(argv[1]); /* Height of A */ + a2 = atoi(argv[2]); /* Width of A */ + b1 = a2; /* Height of B */ +/* 40 */ + +b2 = atoi(argv[3]); /* Width of B */ +A.height = a1; +A.width = a2; +A.elements = (float*)malloc(A.width * A.height * sizeof(float)); +B.height = b1; +B.width = b2; +B.elements = (float*)malloc(B.width * B.height * sizeof(float)); +C.height = A.height; +C.width = B.width; +C.elements = (float*)malloc(C.width * C.height * sizeof(float)); +for(int i = 0; i < A.height; i++) + for(int j = 0; j < A.width; j++) + A.elements[i*A.width + j] = (rand() % 10); +for(int i = 0; i < B.height; i++) + for(int j = 0; j < B.width; j++) + B.elements[i*B.width + j] = (rand() % 5); +MatMul(A, B, C); +for(int i = 0; i < min(10, A.height); i++){ + for(int j = 0; j < min(10, A.width); j++) + printf("%5.0f ", A.elements[i*A.width + j]); + printf("\n"); +} +printf("\n"); +for(int i = 0; i < min(10, B.height); i++){ + for(int j = 0; j < min(10, B.width); j++) + printf("%5.0f ", B.elements[i*B.width + j]); + printf("\n"); +} +printf("\n"); +/* 41 */ + +for(int i = 0; i < min(10, C.height); i++){ + for(int j = 0; j < min(10, C.width); j++) + printf("%5.0f ", C.elements[i*C.width + j]); + printf("\n"); +} + printf("\n"); +} + +/* 42 */ diff --git a/cuda_code/multi_tensor_scale_kernel.cu b/cuda_code/multi_tensor_scale_kernel.cu new file mode 100644 index 
0000000000000000000000000000000000000000..360485dcd02fbfc21a76a2bfa6dd6568b8909499 --- /dev/null +++ b/cuda_code/multi_tensor_scale_kernel.cu @@ -0,0 +1,125 @@ +#include +#include +#include +#include +// Another possibility: +// #include + +#include +// Stringstream is a big hammer, but I want to rely on operator<< for dtype. +#include + +#include "multi_tensor_apply.cuh" +#include "type_shim.h" + +#define BLOCK_SIZE 512 +#define ILP 4 + +template +__device__ __forceinline__ bool is_aligned(T *p) { + return ((uint64_t)p) % (ILP * sizeof(T)) == 0; +} + +template +__device__ __forceinline__ void load_store(T *dst, T *src, int dst_offset, + int src_offset) { + typedef + typename std::aligned_storage::type LT; + ((LT *)dst)[dst_offset] = ((LT *)src)[src_offset]; +} + +template +struct ScaleFunctor { + __device__ __forceinline__ void operator()(int chunk_size, + volatile int *noop_gmem, + TensorListMetadata<2> &tl, + float scale) { + // I'd like this kernel to propagate infs/nans. + // if(*noop_gmem == 1) + // return; + + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.sizes[tensor_loc]; + + in_t *in = (in_t *)tl.addresses[0][tensor_loc]; + in += chunk_idx * chunk_size; + + out_t *out = (out_t *)tl.addresses[1][tensor_loc]; + out += chunk_idx * chunk_size; + + n -= chunk_idx * chunk_size; + + bool finite = true; + in_t r_in[ILP]; + out_t r_out[ILP]; + + // to make things simple, we put aligned case in a different code path + if (n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(in) && + is_aligned(out)) { + for (int i_start = threadIdx.x; + i_start * ILP < n && i_start * ILP < chunk_size; + i_start += blockDim.x) { + // load + load_store(r_in, in, 0, i_start); +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + r_out[ii] = static_cast(r_in[ii]) * scale; + finite = finite && isfinite(r_in[ii]); + } + // store + load_store(out, r_out, i_start, 0); + } + } else { + // Non-divergent exit condition for __syncthreads, not necessary here + for (int i_start = 0; i_start < n && i_start < chunk_size; + i_start += blockDim.x * ILP) { +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + r_in[ii] = 0; + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) r_in[ii] = in[i]; + } + // note for clarification to future michael: + // From a pure memory dependency perspective, there's likely no point + // unrolling the write loop, since writes just fire off once their LDGs + // arrive. Put another way, the STGs are dependent on the LDGs, but not + // on each other. There is still compute ILP benefit from unrolling the + // loop though. +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + r_out[ii] = static_cast(r_in[ii]) * scale; + finite = finite && isfinite(r_in[ii]); + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) out[i] = r_out[ii]; + } + } + } + if (!finite) + *noop_gmem = + 1; // Blindly fire off a write. These will race but that's ok. + } +}; + +void multi_tensor_scale_cuda(int chunk_size, at::Tensor noop_flag, + std::vector> tensor_lists, + float scale) { + using namespace at; + // The output (downscaled) type is always float. + // If build times suffer, think about where to put this dispatch, + // and what logic should be moved out of multi_tensor_apply. 
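+ // The nested DISPATCH_FLOAT_AND_HALF below dispatches on the input and output tensor dtypes (float or half), so ScaleFunctor ends up instantiated for each (in_t, out_t) combination and handed to multi_tensor_apply together with the scale argument.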
+ + DISPATCH_FLOAT_AND_HALF( + tensor_lists[0][0].scalar_type(), 0, "multi_tensor_scale_cuda", + DISPATCH_FLOAT_AND_HALF( + tensor_lists[1][0].scalar_type(), 1, "multi_tensor_scale_cuda", + multi_tensor_apply<2>(BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, + ScaleFunctor(), + scale);)) + AT_CUDA_CHECK(cudaGetLastError()); + + // AT_CUDA_CHECK(cudaDeviceSynchronize()); +} \ No newline at end of file diff --git a/cuda_code/mymodule_kernels.cu b/cuda_code/mymodule_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..b5baeb89e056fa2235be3cb834f47a94ac344a31 --- /dev/null +++ b/cuda_code/mymodule_kernels.cu @@ -0,0 +1,17 @@ +#include "mymodule_kernels.hpp" + +#include + +/** + * \brief A basic CUDA kernel. + */ +__global__ void mymodule_kernel() { + printf("Hello world from CUDA kernel from thread %d\n", threadIdx.x); +} + +void mymodule_kernel_wrapper() { + printf("Will call kernel\n"); + mymodule_kernel<<<1,1>>>(); + cudaDeviceSynchronize(); + printf("Done calling kernel\n"); +} diff --git a/cuda_code/nbinormalization.cu b/cuda_code/nbinormalization.cu new file mode 100644 index 0000000000000000000000000000000000000000..4af53de6755dd382a2cd5ef31d3487ba1edba200 --- /dev/null +++ b/cuda_code/nbinormalization.cu @@ -0,0 +1,664 @@ +/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of NVIDIA CORPORATION nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include + +namespace amgx +{ + +template class NBinormalizationScaler; + +/********************************************************************** + * HOST FUNCTIONS + *********************************************************************/ +template +void computeBetaGammaHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *vals, + VectorType *x, VectorType *y, VectorType *beta, VectorType *gamma) +{ + for (int i = 0; i < rows; i++) { gamma[i] = 0.; } + + for (int i = 0; i < rows; i++) + { + VectorType bi = 0.; + + for (int jj = offsets[i]; jj < offsets[i + 1]; jj++) + { + int col = indices[jj]; + VectorType val = vals[jj]; + bi += (val * val) * y[col]; + gamma[col] += (val * val) * x[i]; + } + + beta[i] = bi; + } +} + +// compute Gamma on its own +template +void computeGammaHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *vals, + VectorType *x, VectorType *gamma) +{ + for (int i = 0; i < rows; i++) { gamma[i] = 0.; } + + for (int i = 0; i < rows; i++) + { + for (int jj = offsets[i]; jj < offsets[i + 1]; jj++) + { + int col = indices[jj]; + VectorType val = vals[jj]; + gamma[col] += (val * val) * x[i]; + } + } +} + +// compute Beta on its own +template +void computeBetaHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *vals, + VectorType *y, VectorType *beta) +{ + for (int i = 0; i < rows; i++) + { + VectorType bi = 0.; + + for (int jj = offsets[i]; jj < offsets[i + 1]; jj++) + { + int col = indices[jj]; + VectorType val = vals[jj]; + bi += (val * val) * y[col]; + } + + beta[i] = bi; + } +} + +template +void scaleMatrixHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *values, + VectorType *x, VectorType *y) +{ + for (int i = 0; i < rows; i++) + { + VectorType fi = sqrt(fabs(x[i])); + + for (int jj = offsets[i]; jj < offsets[i + 1]; jj++) + { + int j = indices[jj]; + VectorType gj = sqrt(fabs(y[j])); + values[jj] *= fi * gj; + } + } +} + +/********************************************************************** + * DEVICE FUNCTIONS + *********************************************************************/ + +// these warp reductions should be able to be replaced with amgx:: functions +template +__device__ __inline__ T warpReduceSum(T val) +{ + if (warpSize > 16) { val += utils::shfl_down(val, 16, warpSize); } + + utils::syncwarp(); + + if (warpSize > 8) { val += utils::shfl_down(val, 8, warpSize); } + + utils::syncwarp(); + + if (warpSize > 4) { val += utils::shfl_down(val, 4, warpSize); } + + utils::syncwarp(); + + if (warpSize > 2) { val += utils::shfl_down(val, 2, warpSize); } + + utils::syncwarp(); + + if (warpSize > 1) { val += utils::shfl_down(val, 1, warpSize); } + + return val; +} + +template +__device__ T warpReduceSumShared(volatile T *vals, const int lane_id) +{ + if (warpSize > 16) { vals[lane_id] += vals[lane_id + 16]; } + + if (warpSize > 8) { vals[lane_id] += vals[lane_id + 8]; } + + if (warpSize > 4) { vals[lane_id] += vals[lane_id + 4]; } + + if (warpSize > 2) { vals[lane_id] += vals[lane_id + 2]; } + + if (warpSize > 1) { vals[lane_id] += vals[lane_id + 1]; } + + return vals[lane_id]; +} + +// compute beta = B*y, gamma = C*x (B = A.^2, C = B^T) +template +__global__ +void computeBetaGammaDevice(IndexType rows, IndexType *offsets, IndexType *indices, MatrixValue *values, + VectorValue *x, VectorValue *y, VectorValue *beta, VectorValue *gamma) +{ + const int vectors_per_block = VectorsPerCTA; + const int vector_id = threadIdx.x / VectorSize; + const int lane_id = threadIdx.x % 
VectorSize; + + for (int i = vectors_per_block * blockIdx.x + vector_id; i < rows; i += vectors_per_block * gridDim.x) + { + // load start + end pointers + int row_tmp; + + if (lane_id < 2) + { + row_tmp = offsets[i + lane_id]; + } + + // distribute to all other threads in warp + int row_begin = utils::shfl(row_tmp, vector_id * VectorSize, warpSize, utils::activemask()); + int row_end = utils::shfl(row_tmp, vector_id * VectorSize + 1, warpSize, utils::activemask()); + VectorValue bi(0.); + + for (int jj = row_begin + lane_id; utils::any(jj < row_end, utils::activemask()); jj += VectorSize) + { + int col = -1; + VectorValue val(0.); + + if (jj < row_end) + { + col = indices[jj]; + val = values[jj]; + bi += (val * val) * y[col]; + utils::atomic_add(&gamma[col], (val * val) * x[i]); + } + } + + // reduce over bi + VectorValue bi_s = warpReduceSum(bi); + + if (lane_id == 0) + { + beta[i] = bi_s; + } + } +} + +// compute gamma = B^T*x (B = A.^2) +template +__global__ +void computeGammaDevice(int rows, IndexType *offsets, IndexType *indices, MatrixValue *values, + VectorValue *x, VectorValue *gamma) +{ + const int vectors_per_block = CTASize / VectorSize; + const int vector_id = threadIdx.x / VectorSize; + const int lane_id = threadIdx.x % VectorSize; + + for (int i = vectors_per_block * blockIdx.x + vector_id; i < rows; i += vectors_per_block * gridDim.x) + { + // load start + end pointers + int row_tmp; + + if (lane_id < 2) + { + row_tmp = offsets[i + lane_id]; + } + + // distribute to all other threads in warp + int row_begin = utils::shfl(row_tmp, vector_id * VectorSize, warpSize, utils::activemask()); + int row_end = utils::shfl(row_tmp, vector_id * VectorSize + 1, warpSize, utils::activemask()); + + for (int jj = row_begin + lane_id; utils::any(jj < row_end, utils::activemask()); jj += VectorSize) + { + int col = -1; + VectorValue val = 0.; + + if (jj < row_end) + { + col = indices[jj]; + val = values[jj]; + utils::atomic_add(&gamma[col], (val * val) * x[i]); + } + } + } +} + +// compute beta = B*y (B = A.^2) +template +__global__ +void computeBetaDevice(int rows, IndexType *offsets, IndexType *indices, MatrixValue *values, + VectorValue *y, VectorValue *beta) +{ + const int vectors_per_block = CTASize / VectorSize; + const int vector_id = threadIdx.x / VectorSize; + const int lane_id = threadIdx.x % VectorSize; + + for (int i = vectors_per_block * blockIdx.x + vector_id; i < rows; i += vectors_per_block * gridDim.x) + { + // load start + end pointers + int row_tmp; + + if (lane_id < 2) + { + row_tmp = offsets[i + lane_id]; + } + + // distribute to all other threads in warp + int row_begin = utils::shfl(row_tmp, vector_id * VectorSize, warpSize, utils::activemask()); + int row_end = utils::shfl(row_tmp, vector_id * VectorSize + 1, warpSize, utils::activemask()); + VectorValue bi = 0.; + + for (int jj = row_begin + lane_id; utils::any(jj < row_end, utils::activemask()); jj += VectorSize) + { + int col = -1; + VectorValue val = 0.; + + if (jj < row_end) + { + col = indices[jj]; + val = values[jj]; + bi += (val * val) * y[col]; + } + } + + // reduce over bi + VectorValue bi_s = warpReduceSum(bi); + + if (lane_id == 0) + { + beta[i] = bi_s; + } + } +} + +template +__global__ +void setOneOverVector(int N, ValueType *x, ValueType sum1, ValueType *beta) +{ + for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += gridDim.x * blockDim.x) + { + //x[i] = ( isNotCloseToZero(beta[i]) ? sum1 / beta[i] : sum1 / epsilon(beta[i]) ); + x[i] = ( isNotCloseToZero(beta[i]) ? 
(sum1 / beta[i]) : (ValueType)1. ); + } +} + +template +struct square_value : public unary_function +{ + __host__ __device__ T operator()(const T &x) const + { + return x * x; + } +}; + +// functor to generate stddev of vectors +template +struct std_f +{ + std_f(T x) : v(x) {}; + T v; + + __host__ __device__ + T operator()(const T &x1, const T &x2) const + { + return (x1 * x2 - v) * (x1 * x2 - v); + } +}; + +// scaled the matrix using diag(F)*A*diag(G), f = sqrt(fabs(x)), g = sqrt(fabs(y)) +template +__global__ +void scaleMatrixDevice(int rows, IndexType *offsets, IndexType *indices, MatrixType *values, + VectorType *x, VectorType *y) +{ + for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < rows; i += gridDim.x * blockDim.x) + { + VectorType fi = sqrt(fabs(x[i])); + + for (int jj = offsets[i]; jj < offsets[i + 1]; jj++) + { + int j = indices[jj]; + VectorType gj = sqrt(fabs(y[j])); + + // scale matrix value in place + if (direction == amgx::SCALE) + { + values[jj] *= fi * gj; + } + else + { + values[jj] /= fi * gj; + } + } + } +} + + +template +__global__ +void getColRowNorms(int rows, IndexType *offsets, IndexType *indices, MatrixType *values, + VectorType *rownorms, VectorType *colnorms) +{ + for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < rows; i += gridDim.x * blockDim.x) + { + for (int jj = offsets[i]; jj < offsets[i + 1]; jj++) + { + int j = indices[jj]; + VectorType curval = values[jj] * values[jj]; + rownorms[i] += curval; + utils::atomic_add(colnorms + j, curval); + } + } +} + +// vector constant scale operand +template +struct vmul_scale_const +{ + T _alpha; + + vmul_scale_const(T alpha): _alpha(alpha) {}; + + __host__ __device__ + T operator()(const T &vec) const + { + return vec * _alpha; + } +}; + + +// vector scale operand +template +struct vmul_scale +{ + + vmul_scale() {}; + + __host__ __device__ + T operator()(const T &vec, const T &alpha) const + { + return (vec * sqrt(fabs(alpha))); + } +}; + +// vector unscale operand +template +struct vmul_unscale +{ + + vmul_unscale() {}; + + __host__ __device__ + T operator()(const T &vec, const T &alpha) const + { + return (vec / sqrt(fabs(alpha))); + } +}; + + +// Setup on Device +template +void NBinormalizationScaler >::setup(Matrix_d &A) +{ + if (A.is_matrix_distributed()) + { + FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED); + } + + // move these out to config parameters? 
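+ // max_iters and tolerance below bound the alternating fixed-point (binormalization) iteration on A.^2 that equalizes row and column norms; the converged vectors are saved in left_scale/right_scale and later applied in scaleMatrix as diag(sqrt(|.|)) row/column scalings.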
+ const int max_iters = 50; + const ValueTypeB tolerance = 1e-10; + int rows = A.get_num_rows(), cols = A.get_num_cols(); + // temporary vectors + VVector x(rows, 1), y(cols, 1), beta(rows, 0), gamma(cols, 0); + // perform matvecs to get beta and gamma (spmv for beta, spmvT for gamma) + computeBetaGammaDevice<256, 8, 32> <<< 4096, 256>>>(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), + x.raw(), y.raw(), beta.raw(), gamma.raw()); + ValueTypeB sum1 = cols, sum2 = rows, std1, std2; + // calculate initial std1 and std2 + thrust::device_ptr x_ptr(x.raw()), y_ptr(y.raw()), beta_ptr(beta.raw()), gamma_ptr(gamma.raw()); + std1 = sqrt(thrust::inner_product(x_ptr, x_ptr + rows, beta_ptr, ValueTypeB(0.), thrust::plus(), std_f(sum1)) / rows) / sum1; + std2 = sqrt(thrust::inner_product(y_ptr, y_ptr + cols, gamma_ptr, ValueTypeB(0.), thrust::plus(), std_f(sum2)) / cols) / sum2; + ValueTypeB std = sqrt(std1 * std1 + std2 * std2); + + for (int t = 0; t < max_iters; t++) + { + if (std < tolerance) { break; } // finished + + // x = sum1 ./ beta + setOneOverVector <<< 4096, 256>>>(rows, x.raw(), sum1, beta.raw()); + // gamma = C*x := B'*x + thrust::fill(gamma.begin(), gamma.end(), ValueTypeB(0.)); + computeGammaDevice<256, 8> <<< 4096, 256>>>(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), x.raw(), gamma.raw()); + // gamma = 1 ./ beta + setOneOverVector <<< 4096, 256>>>(cols, y.raw(), sum2, gamma.raw()); + // beta = B*y + computeBetaDevice<256, 8> <<< 4096, 256>>>(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), y.raw(), beta.raw()); + //ValueTypeB std_old = std; + std = sqrt(thrust::inner_product(x_ptr, x_ptr + rows, beta_ptr, ValueTypeB(0.), thrust::plus(), std_f(sum1)) / rows) / sum1; + // print it #, current error, convergence rate + //printf("ITER: %d %.3e %.4lg\n",t, std, std / std_old); + } + + //Save scaling vectors for later user, setup complete + left_scale = VVector(beta); + right_scale = VVector(gamma); + this->scaled_before = false; +} + + +// Matrix Scaling on Device +template +void NBinormalizationScaler >::scaleMatrix(Matrix_d &A, ScaleDirection scaleOrUnscale) +{ + if (left_scale.size() != A.get_num_rows()) + { + FatalError("Must call setup(A) before binormalization scaling can scale matrix", AMGX_ERR_NOT_IMPLEMENTED); + } + + if (A.is_matrix_distributed()) + { + FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED); + } + + int nrows = A.get_num_rows(); + + if (scaleOrUnscale == amgx::SCALE) + { + /*VVector rownorms(nrows, 0.0); + VVector colnorms(nrows, 0.0); + getColRowNorms<<<4096,256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), rownorms.raw(), colnorms.raw()); + cudaCheckError(); + ValueTypeB row_max = *(thrust::max_element(rownorms.begin(), rownorms.end())); + ValueTypeB row_min = *(thrust::min_element(rownorms.begin(), rownorms.end())); + ValueTypeB col_max = *(thrust::max_element(colnorms.begin(), colnorms.end())); + ValueTypeB col_min = *(thrust::min_element(colnorms.begin(), colnorms.end())); + cudaCheckError(); + printf("Original Matrix: rowmax: %e, rowmin: %e, colmax: %e, colmin: %e\n", row_max, row_min, col_max, col_min);fflush(stdout);*/ + // A_scaled = F*A*G (f = diag(F) = sqrt(fabs(x)), g = diag(G) = sqrt(fabs(y)) + // A_ij = f_i * A_ij * g_j + scaleMatrixDevice <<< 4096, 256>>>(A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), left_scale.raw(), right_scale.raw()); + cudaCheckError(); + + if (!scaled_before) + { + this->norm_coef = 
sqrt(thrust::transform_reduce(A.values.begin(), A.values.begin() + A.get_num_nz() * A.get_block_size(), square_value(), 0., thrust::plus()) / A.get_num_rows()); + cudaCheckError(); + thrust::transform(A.values.begin(), A.values.begin() + A.get_num_nz()*A.get_block_size(), A.values.begin(), vmul_scale_const(1. / this->norm_coef) ); + thrust::transform(left_scale.begin(), left_scale.end(), left_scale.begin(), vmul_scale_const(sqrt(1. / this->norm_coef)) ); + thrust::transform(right_scale.begin(), right_scale.end(), right_scale.begin(), vmul_scale_const(sqrt(1. / this->norm_coef)) ); + cudaCheckError(); + /*thrust::fill(rownorms.begin(), rownorms.end(), 0.); + thrust::fill(colnorms.begin(), colnorms.end(), 0.); + getColRowNorms<<<4096,256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), rownorms.raw(), colnorms.raw()); + cudaCheckError(); + row_max = *(thrust::max_element(rownorms.begin(), rownorms.end())); + row_min = *(thrust::min_element(rownorms.begin(), rownorms.end())); + col_max = *(thrust::max_element(colnorms.begin(), colnorms.end())); + col_min = *(thrust::min_element(colnorms.begin(), colnorms.end())); + cudaCheckError(); + printf("Scaled Matrix: rowmax: %e, rowmin: %e, colmax: %e, colmin: %e\n", row_max, row_min, col_max, col_min);fflush(stdout);*/ + } + + this->scaled_before = true; + } + else + { + scaleMatrixDevice <<< 4096, 256>>>(A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), left_scale.raw(), right_scale.raw()); + cudaCheckError(); + } +} + +// Setup on Host +template +void NBinormalizationScaler >::setup(Matrix_h &A) +{ + if (A.is_matrix_distributed()) + { + FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED); + } + + // move these out to config parameters? + const int max_iters = 10; + const ValueTypeB tolerance = 1e-10; + int rows = A.get_num_rows(), cols = A.get_num_cols(); + // temporary vectors + VVector x(rows, 1), y(cols, 1), beta(rows, 0), gamma(cols, 0); + // perform matvecs to get beta and gamma (spmv for beta, spmvT for gamma) + computeBetaGammaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), + x.raw(), y.raw(), beta.raw(), gamma.raw()); + double std1 = 0., std2 = 0., sum1 = cols, sum2 = rows; + + // calculate initial std1 and std2 + for (int i = 0; i < rows; i++) + { + std1 += pow(x[i] * beta[i] - sum1, 2.0); + } + + std1 = sqrt(std1 / rows) / sum1; + + for (int i = 0; i < cols; i++) + { + std2 += pow(y[i] * gamma[i] - sum2, 2.0); + } + + std2 = sqrt(std2 / cols) / sum2; + //printf("std1: %lg, std2: %lg\n",std1, std2); + double std_initial = sqrt((std1 * std1) + (std2 * std2)); + double std = std_initial; + + for (int t = 0; t < max_iters; t++) + { + if (std < tolerance) { break; } // finished + + // x = sum1 ./ beta + for (int i = 0; i < rows; i++) { x[i] = ( isNotCloseToZero(beta[i]) ? sum1 / beta[i] : sum1 / epsilon(beta[i]) ); } + + // gamma = C*x + computeGammaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), x.raw(), gamma.raw()); + + // gamma = 1 ./ beta + for (int i = 0; i < cols; i++) { y[i] = ( isNotCloseToZero(gamma[i]) ? 
sum2 / gamma[i] : sum2 / epsilon(gamma[i]) ); } + + // beta = B*y + computeBetaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), y.raw(), beta.raw()); + //ValueTypeB std_old = std; + std = 0.; + + for (int i = 0; i < rows; i++) + { + std += pow(x[i] * beta[i] - sum1, 2.0); + } + + std = sqrt(std / rows) / sum1; + // print it #, current error, convergence rate + //printf("ITER: %d %.3e %.4lg\n",t, std, std / std_old); + } + + //Save scaling vectors for later user, setup complete + left_scale = VVector(beta); + right_scale = VVector(gamma); +} + + +template +void NBinormalizationScaler >::scaleMatrix(Matrix_h &A, ScaleDirection scaleOrUnscale) +{ + if (A.is_matrix_distributed()) + { + FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED); + } + + if (left_scale.size() != A.get_num_rows()) + { + FatalError("Must call setup(A) before binormalization scaling can scale matrix", AMGX_ERR_NOT_IMPLEMENTED); + } + + // A_scaled = F*A*G (f = diag(F) = sqrt(fabs(x)), g = diag(G) = sqrt(fabs(y)) + // A_ij = f_i * A_ij * g_j + scaleMatrixHost(A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), left_scale.raw(), right_scale.raw()); +} + + +template +void NBinormalizationScaler >::scaleVector(VVector &v, ScaleDirection scaleOrUnscale, ScaleSide leftOrRight) +{ + VVector *scale_vector = (leftOrRight == amgx::LEFT) ? &this->left_scale : &this->right_scale; + + //thrust::transform(v.begin(), v.end(), scale_vector->begin(), v.begin(), (scaleOrUnscale == amgx::SCALE) ? vmul_scale() : vmul_unscale() ); + if (scaleOrUnscale == amgx::SCALE) + { + thrust::transform(v.begin(), v.end(), scale_vector->begin(), v.begin(), vmul_scale() ); + } + else + { + thrust::transform(v.begin(), v.end(), scale_vector->begin(), v.begin(), vmul_unscale() ); + } +} + +template +void NBinormalizationScaler >::scaleVector(VVector &v, ScaleDirection scaleOrUnscale, ScaleSide leftOrRight) +{ + FatalError("4x4 block size not supported", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); +} + + +/**************************************** + * Explict instantiations + ***************************************/ +#define AMGX_CASE_LINE(CASE) template class NBinormalizationScaler_Base::Type>; +AMGX_FORALL_BUILDS(AMGX_CASE_LINE) +#undef AMGX_CASE_LINE + +#define AMGX_CASE_LINE(CASE) template class NBinormalizationScaler::Type>; +AMGX_FORALL_BUILDS(AMGX_CASE_LINE) +#undef AMGX_CASE_LINE + + +} // namespace amgx + diff --git a/cuda_code/nbody-unroll_1.cu b/cuda_code/nbody-unroll_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..f99bf07db2b4591b7f30cf3a27c8a982678b804d --- /dev/null +++ b/cuda_code/nbody-unroll_1.cu @@ -0,0 +1,100 @@ +#include +#include +#include +#include "timer.h" + +#define BLOCK_SIZE 256 +#define SOFTENING 1e-9f + +typedef struct { float4 *pos, *vel; } BodySystem; + +void randomizeBodies(float *data, int n) { + for (int i = 0; i < n; i++) { + data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; + } +} + +__global__ +void bodyForce(float4 *p, float4 *v, float dt, int n) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < n) { + float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; + + for (int tile = 0; tile < gridDim.x; tile++) { + __shared__ float3 spos[BLOCK_SIZE]; + float4 tpos = p[tile * blockDim.x + threadIdx.x]; + spos[threadIdx.x] = make_float3(tpos.x, tpos.y, tpos.z); + __syncthreads(); + + #pragma unroll + for (int j = 0; j < BLOCK_SIZE; j++) { + float dx = spos[j].x - p[i].x; + float dy = spos[j].y - p[i].y; + float 
dz = spos[j].z - p[i].z; + float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING; + float invDist = rsqrtf(distSqr); + float invDist3 = invDist * invDist * invDist; + + Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; + } + __syncthreads(); + } + + v[i].x += dt*Fx; v[i].y += dt*Fy; v[i].z += dt*Fz; + } +} + +int main(const int argc, const char** argv) { + + int nBodies = 30000; + if (argc > 1) nBodies = atoi(argv[1]); + + const float dt = 0.01f; // time step + const int nIters = 10; // simulation iterations + + int bytes = 2*nBodies*sizeof(float4); + float *buf = (float*)malloc(bytes); + BodySystem p = { (float4*)buf, ((float4*)buf) + nBodies }; + + randomizeBodies(buf, 8*nBodies); // Init pos / vel data + + float *d_buf; + cudaMalloc(&d_buf, bytes); + BodySystem d_p = { (float4*)d_buf, ((float4*)d_buf) + nBodies }; + + int nBlocks = (nBodies + BLOCK_SIZE - 1) / BLOCK_SIZE; + double totalTime = 0.0; + + for (int iter = 1; iter <= nIters; iter++) { + StartTimer(); + + cudaMemcpy(d_buf, buf, bytes, cudaMemcpyHostToDevice); + bodyForce<<>>(d_p.pos, d_p.vel, dt, nBodies); + cudaMemcpy(buf, d_buf, bytes, cudaMemcpyDeviceToHost); + + for (int i = 0 ; i < nBodies; i++) { // integrate position + p.pos[i].x += p.vel[i].x*dt; + p.pos[i].y += p.vel[i].y*dt; + p.pos[i].z += p.vel[i].z*dt; + } + + const double tElapsed = GetTimer() / 1000.0; + if (iter > 1) { // First iter is warm up + totalTime += tElapsed; + } +#ifndef SHMOO + printf("Iteration %d: %.3f seconds\n", iter, tElapsed); +#endif + } + double avgTime = totalTime / (double)(nIters-1); + +#ifdef SHMOO + printf("%d, %0.3f\n", nBodies, 1e-9 * nBodies * nBodies / avgTime); +#else + printf("Average rate for iterations 2 through %d: %.3f +- %.3f steps per second.\n", + nIters, rate); + printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, 1e-9 * nBodies * nBodies / avgTime); +#endif + free(buf); + cudaFree(d_buf); +} diff --git a/cuda_code/nbody_brute_force.cu b/cuda_code/nbody_brute_force.cu new file mode 100644 index 0000000000000000000000000000000000000000..89fe9dd32de451c7712021d0a0d570f980f3f638 --- /dev/null +++ b/cuda_code/nbody_brute_force.cu @@ -0,0 +1,209 @@ +/* +** nbody_brute_force.c - nbody simulation using the brute-force algorithm (O(n*n)) +** +**/ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef DISPLAY +#include +#include +#endif + +#include "ui.cuh" +#include "nbody.cuh" +#include "nbody_tools.cuh" + +FILE* f_out=NULL; + +int nparticles=10; /* number of particles */ +float T_FINAL=1.0; /* simulation end time */ +particle_t*particles; + +double sum_speed_sq = 0; +double max_acc = 0; +double max_speed = 0; + +void init() { + /* Nothing to do */ +} + +#ifdef DISPLAY +extern Display *theDisplay; /* These three variables are required to open the */ +extern GC theGC; /* particle plotting window. They are externally */ +extern Window theMain; /* declared in ui.h but are also required here. 
*/ +#endif + +/* compute the force that a particle with position (x_pos, y_pos) and mass 'mass' + * applies to particle p + */ +void compute_force(particle_t*p, double x_pos, double y_pos, double mass) { + double x_sep, y_sep, dist_sq, grav_base; + + x_sep = x_pos - p->x_pos; + y_sep = y_pos - p->y_pos; + dist_sq = MAX((x_sep*x_sep) + (y_sep*y_sep), 0.01); + + /* Use the 2-dimensional gravity rule: F = d * (GMm/d^2) */ + grav_base = GRAV_CONSTANT*(p->mass)*(mass)/dist_sq; + + p->x_force += grav_base*x_sep; + p->y_force += grav_base*y_sep; +} + +/* compute the new position/velocity */ +void move_particle(particle_t*p, double step) { + + p->x_pos += (p->x_vel)*step; + p->y_pos += (p->y_vel)*step; + double x_acc = p->x_force/p->mass; + double y_acc = p->y_force/p->mass; + p->x_vel += x_acc*step; + p->y_vel += y_acc*step; + + /* compute statistics */ + double cur_acc = (x_acc*x_acc + y_acc*y_acc); + cur_acc = sqrt(cur_acc); + double speed_sq = (p->x_vel)*(p->x_vel) + (p->y_vel)*(p->y_vel); + double cur_speed = sqrt(speed_sq); + + sum_speed_sq += speed_sq; + max_acc = MAX(max_acc, cur_acc); + max_speed = MAX(max_speed, cur_speed); +} + + +/* + Move particles one time step. + + Update positions, velocity, and acceleration. + Return local computations. +*/ +void all_move_particles(double step) +{ + /* First calculate force for particles. */ + int i; + for(i=0; ix_pos, p->y_pos, p->mass); + } + } + + /* then move all particles and return statistics */ + for(i=0; ix_pos, p->y_pos, p->x_vel, p->y_vel); + } +} + +void run_simulation() { + double t = 0.0, dt = 0.01; + while (t < T_FINAL && nparticles>0) { + /* Update time. */ + t += dt; + /* Move particles with the current and compute rms velocity. */ + all_move_particles(dt); + + /* Adjust dt based on maximum speed and acceleration--this + simple rule tries to insure that no velocity will change + by more than 10% */ + + dt = 0.1*max_speed/max_acc; + + /* Plot the movement of the particle */ +#if DISPLAY + clear_display(); + draw_all_particles(); + flush_display(); +#endif + } +} + +/* + Simulate the movement of nparticles particles. +*/ +int main(int argc, char**argv) +{ + if(argc >= 2) { + nparticles = atoi(argv[1]); + } + if(argc == 3) { + T_FINAL = atof(argv[2]); + } + + init(); + + /* Allocate global shared arrays for the particles data set. */ + particles = (particle_t*)malloc(sizeof(particle_t)*nparticles); + all_init_particles(nparticles, particles); + + /* Initialize thread data structures */ +#ifdef DISPLAY + /* Open an X window to display the particles */ + simple_init (100,100,DISPLAY_SIZE, DISPLAY_SIZE); +#endif + + struct timeval t1, t2; + gettimeofday(&t1, NULL); + + /* Main thread starts simulation ... 
*/ + run_simulation(); + + gettimeofday(&t2, NULL); + + double duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6); + +#ifdef DUMP_RESULT + FILE* f_out = fopen("particles.log", "w"); + assert(f_out); + print_all_particles(f_out); + fclose(f_out); +#endif + + printf("-----------------------------\n"); + printf("nparticles: %d\n", nparticles); + printf("T_FINAL: %f\n", T_FINAL); + printf("-----------------------------\n"); + printf("Simulation took %lf s to complete\n", duration); + +#ifdef DISPLAY + clear_display(); + draw_all_particles(); + flush_display(); + + printf("Hit return to close the window."); + + getchar(); + /* Close the X window used to display the particles */ + XCloseDisplay(theDisplay); +#endif + return 0; +} diff --git a/cuda_code/nccl_2.cu b/cuda_code/nccl_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..7af98a65876ed2c5591766bc9baed03c6ba1407e --- /dev/null +++ b/cuda_code/nccl_2.cu @@ -0,0 +1,360 @@ +/** + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "gloo/nccl/nccl.h" + +#include +#include + +#include "gloo/cuda_private.h" + +namespace gloo { +namespace nccl { + +// Allocate a set of per-device streams used to serialize NCCL op scheduling. +// These ensure concurrent NCCL ops are not interleaved across devices (i.e., +// through priority scheduling), resulting in deadlock. Use a function-scope +// static to avoid SIOF with the CUDA runtime. +static CudaDeviceStreams& getNcclStreams() { + static CudaDeviceStreams ncclStreams; + return ncclStreams; +} + +template +class NCCLContext { + public: + NCCLContext(const std::vector& devices) : devices(devices) { + // Initialze comms. Synchronize with conflicting CUDA and NCCL operations. + comms.resize(devices.size()); + std::lock_guard lock(CudaShared::getMutex()); + NCCL_CHECK(ncclCommInitAll(comms.data(), devices.size(), devices.data())); + } + ~NCCLContext() { + /* + * TODO(T30279827) Temporarily disable calling ncclCommDestroy + * Calling ncclCommDestroy while program exiting is undefined + * according to nvidia, and lead to segfault in NCCL 2 + * (whether it is called before or after the CUDA runtime destructor). + * Temporarily disable it in destructor to avoid segfault. + * Following up with Nvidia for long term solution. + */ + + /* + for (auto i = 0; i < devices.size(); ++i) { + CudaDeviceScope scope(devices[i]); + { + // Synchronize memory allocation with NCCL operations + std::lock_guard lock(CudaShared::getMutex()); + ncclCommDestroy(comms[i]); + } + } + */ + } + + // Instances cannot be copied or copy-assigned + NCCLContext(const NCCLContext&) = delete; + NCCLContext& operator=(const NCCLContext&) = delete; + + const std::vector devices; + std::vector comms; +}; + +// Initializing NCCL communications is expensive. Allocate context as needed per +// unique device set and cache for reuse. 
+template +static std::shared_ptr> getNcclContext( + const NCCLExecution& ex) { + static std::unordered_map>> + contexts; + const auto key = ex.getKey(); + { + static std::mutex m; + std::lock_guard lock(m); + if (!contexts[key]) { + contexts[key] = std::make_shared>(ex.getDevices()); + } + } + const auto context = contexts[key]; + GLOO_ENFORCE_NE(context.get(), (void*)nullptr); + return context; +} + +template +NCCLExecution::NCCLExecution(std::vector>&& elements) + : elements(std::move(elements)) { + // Allocate events to synchronize source, destination, and NCCL streams + ncclEvents.resize(this->elements.size()); + for (auto i = 0; i < this->elements.size(); i++) { + CudaDeviceScope scope(this->elements[i].device); + CUDA_CHECK(cudaEventCreateWithFlags( + &ncclEvents[i], cudaEventDefault | cudaEventDisableTiming)); + } +} + +template +NCCLExecution::~NCCLExecution() { + for (auto i = 0; i < this->elements.size(); i++) { + CudaDeviceScope scope(this->elements[i].device); + CUDA_CHECK(cudaEventDestroy(ncclEvents[i])); + } +} + +template +std::vector NCCLExecution::getDevices() const { + std::vector result; + result.reserve(elements.size()); + for (const auto& el : elements) { + GLOO_ENFORCE( + // Performing a linear search given small set of devices + std::find(result.begin(), result.end(), el.device) == result.end(), + "NCCL elements must map to unique devices"); + result.push_back(el.device); + } + return result; +} + +template +std::string NCCLExecution::getKey() const { + // Construct a key representing the order-dependent devices in this NCCL + // execution. This is used to index into the NCCL context map and allows an + // implicit association between elements[i].device and NCCLContext::comms[i] + std::string result; + for (const auto& el : elements) { + result += std::to_string(el.device) + ","; + } + return result; +} + +template +class ncclTypeWrapper; + +template <> +class ncclTypeWrapper { + public: + static const ncclDataType_t type = ncclChar; +}; + +template <> +class ncclTypeWrapper { + public: + static const ncclDataType_t type = ncclChar; +}; + +template <> +class ncclTypeWrapper { + public: + static const ncclDataType_t type = ncclInt; +}; + +template <> +class ncclTypeWrapper { + public: + static const ncclDataType_t type = ncclInt64; +}; + +template <> +class ncclTypeWrapper { + public: + static const ncclDataType_t type = ncclUint64; +}; + +template <> +class ncclTypeWrapper { + public: + static const ncclDataType_t type = ncclHalf; +}; + +template <> +class ncclTypeWrapper { + public: + static const ncclDataType_t type = ncclFloat; +}; + +template <> +class ncclTypeWrapper { + public: + static const ncclDataType_t type = ncclDouble; +}; + +template +NCCLOp::NCCLOp(NCCLExecution&& execution) + : execution_(std::move(execution)), context_(getNcclContext(execution_)) {} + +template +void NCCLOp::wait() { + auto& elements = execution_.elements; + for (auto i = 0; i < elements.size(); ++i) { + CudaDeviceScope scope(elements[i].device); + elements[i].dstStream.wait(); + } +} + +template +template +void NCCLOp::runNCCL(F&& f) { + const auto& elements = execution_.elements; + const auto& ncclEvents = execution_.ncclEvents; + const auto& comms = context_->comms; + + // Synchronize memory allocation with NCCL operations + std::lock_guard lock(CudaShared::getMutex()); + +#if NCCL_VERSION_MIN(2,0,0) + NCCL_CHECK(ncclGroupStart()); +#endif + // Kick off the NCCL operation on each device + for (auto i = 0; i < elements.size(); i++) { + const auto& element = elements[i]; + const 
auto& srcStream = element.srcStream.getStream(); + const auto& dstStream = element.dstStream.getStream(); + const auto& ncclStream = getNcclStreams()[element.device]; + const auto& srcEvent = element.srcStream.getEvent(); + const auto& dstEvent = element.dstStream.getEvent(); + + CudaDeviceScope scope(element.device); + // Synchronize the source and destination with the NCCL stream. Record + // events in the source and destination streams, and wait on these in the + // NCCL streams. + CUDA_CHECK(cudaEventRecord(srcEvent, srcStream)); + CUDA_CHECK(cudaStreamWaitEvent(ncclStream, srcEvent, 0)); + if (srcStream != dstStream) { + CUDA_CHECK(cudaEventRecord(dstEvent, dstStream)); + CUDA_CHECK(cudaStreamWaitEvent(ncclStream, dstEvent, 0)); + } + // Run the operation + f(element, comms[i], ncclStream); + } +#if NCCL_VERSION_MIN(2,0,0) + NCCL_CHECK(ncclGroupEnd()); +#endif + for (auto i = 0; i < elements.size(); ++i) { + const auto& element = elements[i]; + const auto& ncclStream = getNcclStreams()[element.device]; + const auto& dstStream = element.dstStream.getStream(); + const auto& dstEvent = element.dstStream.getEvent(); + + CudaDeviceScope scope(element.device); + // Record an event in the NCCL stream signaling the operation is complete. + // Synchronize with the destination stream. + CUDA_CHECK(cudaEventRecord(ncclEvents[i], ncclStream)); + CUDA_CHECK(cudaStreamWaitEvent(dstStream, ncclEvents[i], 0)); + CUDA_CHECK(cudaEventRecord(dstEvent, dstStream)); + } +} + +template +void ReduceOp::runAsync() { + const auto op = op_; + const auto root = root_; + this->runNCCL([op, root]( + const NCCLElement& element, ncclComm_t comm, cudaStream_t stream) { + NCCL_CHECK(ncclReduce( + *element.src, + *element.dst, + element.src.getCount(), + ncclTypeWrapper::type, + op, + root, + comm, + stream)); + }); +} + +template +void AllreduceOp::runAsync() { + const auto op = op_; + this->runNCCL([op]( + const NCCLElement& element, ncclComm_t comm, cudaStream_t stream) { + NCCL_CHECK(ncclAllReduce( + *element.src, + *element.dst, + element.src.getCount(), + ncclTypeWrapper::type, + op, + comm, + stream)); + }); +} + +template +void ReduceScatterOp::runAsync() { + const auto op = op_; + this->runNCCL([op]( + const NCCLElement& element, ncclComm_t comm, cudaStream_t stream) { + NCCL_CHECK(ncclReduceScatter( + *element.src, + *element.dst, + element.dst.getCount(), + ncclTypeWrapper::type, + op, + comm, + stream)); + }); +} + +template +void BroadcastOp::runAsync() { + const int root = root_; + this->runNCCL([root]( + const NCCLElement& element, ncclComm_t comm, cudaStream_t stream) { + NCCL_CHECK(ncclBcast( + *element.dst, + element.dst.getCount(), + ncclTypeWrapper::type, + root, + comm, + stream)); + }); +} + +template +void AllgatherOp::runAsync() { + this->runNCCL([]( + const NCCLElement& element, ncclComm_t comm, cudaStream_t stream) { +#if NCCL_VERSION_MIN(2,0,0) + NCCL_CHECK(ncclAllGather( + *element.src, + *element.dst, + element.src.getCount(), + ncclTypeWrapper::type, + comm, + stream)); +#else + NCCL_CHECK(ncclAllGather( + *element.src, + element.src.getCount(), + ncclTypeWrapper::type, + *element.dst, + comm, + stream)); +#endif + }); +} + +#define DEFINE_NCCL_TYPES_AND_OPS(T) \ +template class NCCLExecution; \ +template class NCCLContext; \ +template class NCCLOp; \ + \ +template class ReduceOp; \ +template class AllreduceOp; \ +template class ReduceScatterOp; \ +template class BroadcastOp; \ +template class AllgatherOp; + +DEFINE_NCCL_TYPES_AND_OPS(int8_t); +DEFINE_NCCL_TYPES_AND_OPS(uint8_t); 
+DEFINE_NCCL_TYPES_AND_OPS(int32_t); +DEFINE_NCCL_TYPES_AND_OPS(int64_t); +DEFINE_NCCL_TYPES_AND_OPS(uint64_t); +DEFINE_NCCL_TYPES_AND_OPS(float16); +DEFINE_NCCL_TYPES_AND_OPS(float); +DEFINE_NCCL_TYPES_AND_OPS(double); + +} // namespace nccl +} // namespace gloo diff --git a/cuda_code/nearest_neighborhood.cu b/cuda_code/nearest_neighborhood.cu new file mode 100644 index 0000000000000000000000000000000000000000..ff7828689d549f54f4eda1e292a09d5f3cd66291 --- /dev/null +++ b/cuda_code/nearest_neighborhood.cu @@ -0,0 +1,167 @@ +#include +#include +#include +#include +#include +#include + +#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } + +void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) +{ + if (code != cudaSuccess) + { + fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); + if (abort) exit(code); + } +} + +int infTwoExp(int val) +{ + int inf=1; + while(val>inf) inf<<=1; + return inf; +} + +void getGPULayout( + int dim0,int dim1,int dim2, + int* bdim0,int* bdim1,int* bdim2, + int* tdim0,int* tdim1,int* tdim2 +) +{ + (*tdim2)=64; + if(dim2<(*tdim2)) (*tdim2)=infTwoExp(dim2); + (*bdim2)=dim2/(*tdim2); + if(dim2%(*tdim2)>0) (*bdim2)++; + + (*tdim1)=1024/(*tdim2); + if(dim1<(*tdim1)) (*tdim1)=infTwoExp(dim1); + (*bdim1)=dim1/(*tdim1); + if(dim1%(*tdim1)>0) (*bdim1)++; + + (*tdim0)=1024/((*tdim1)*(*tdim2)); + if(dim0<(*tdim0)) (*tdim0)=infTwoExp(dim0); + (*bdim0)=dim0/(*tdim0); + if(dim0%(*tdim0)>0) (*bdim0)++; +} + +__global__ +void findNearestPoint3DIdxKernel( + float* ref_pts, // [b,pn1,3] + float* que_pts, // [b,pn2,3] + int* idxs, // [b,pn2] + int b, + int pn1, + int pn2, + int exclude_self +) +{ + int bi = threadIdx.x + blockIdx.x*blockDim.x; + int p2i = threadIdx.y + blockIdx.y*blockDim.y; + if(p2i>=pn2||bi>=b) return; + + float x2=que_pts[bi*pn2*3+p2i*3]; + float y2=que_pts[bi*pn2*3+p2i*3+1]; + float z2=que_pts[bi*pn2*3+p2i*3+2]; + float min_dist=FLT_MAX; + int min_idx=0; + for(int p1i=0;p1i=pn2||bi>=b) return; + + float x2=que_pts[bi*pn2*2+p2i*2]; + float y2=que_pts[bi*pn2*2+p2i*2+1]; + float min_dist=FLT_MAX; + int min_idx=0; + for(int p1i=0;p1i>>(ref_pts_dev,que_pts_dev,idxs_dev,b,pn1,pn2,exclude_self); + else + findNearestPoint2DIdxKernel<<>>(ref_pts_dev,que_pts_dev,idxs_dev,b,pn1,pn2,exclude_self); + gpuErrchk(cudaGetLastError()) + + gpuErrchk(cudaMemcpy(idxs,idxs_dev,b*pn2*sizeof(int),cudaMemcpyDeviceToHost)) + gpuErrchk(cudaFree(ref_pts_dev)) + gpuErrchk(cudaFree(que_pts_dev)) + gpuErrchk(cudaFree(idxs_dev)) + +} + +#ifdef __cplusplus +} +#endif diff --git a/cuda_code/neighbor_list_new_impl.cu b/cuda_code/neighbor_list_new_impl.cu new file mode 100644 index 0000000000000000000000000000000000000000..e9b1cd06133c04fbf3e06e37885cf7d2125f604a --- /dev/null +++ b/cuda_code/neighbor_list_new_impl.cu @@ -0,0 +1,457 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "backend/kernel_compiler/gpu/cuda_impl/sponge/neighbor_list/neighbor_list_new_impl.cuh" +#include +#include + +__device__ __host__ VECTOR operator-(const VECTOR &vecb) { + VECTOR vec; + vec.x = -vecb.x; + vec.y = -vecb.y; + vec.z = -vecb.z; + return vec; +} + +__device__ __host__ VECTOR Get_Periodic_Displacement(const VECTOR vec_a, const VECTOR vec_b, const VECTOR box_length) { + VECTOR dr; + // dr = vec_a - vec_b; + dr.x = vec_a.x - vec_b.x; + dr.y = vec_a.y - vec_b.y; + dr.x = vec_a.z - vec_b.z; + + dr.x = dr.x - floorf(dr.x / box_length.x + 0.5) * box_length.x; + dr.y = dr.y - floorf(dr.y / box_length.y + 0.5) * box_length.y; + dr.z = dr.z - floorf(dr.z / box_length.z + 0.5) * box_length.z; + return dr; +} + +__global__ void Copy_List(const int element_numbers, const int *origin_list, int *list) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < element_numbers) { + list[i] = origin_list[i]; + } +} +__global__ void Copy_List(const int element_numbers, const float *origin_list, float *list) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < element_numbers) { + list[i] = origin_list[i]; + } +} + +__global__ void Crd_To_Uint_Crd(const int atom_numbers, float *scale_factor, const VECTOR *crd, + UNSIGNED_INT_VECTOR *uint_crd) { + int atom_i = blockDim.x * blockIdx.x + threadIdx.x; + if (atom_i < atom_numbers) { + INT_VECTOR tempi; + VECTOR temp = crd[atom_i]; + + temp.x *= scale_factor[0]; + temp.y *= scale_factor[1]; + temp.z *= scale_factor[2]; + + tempi.int_x = temp.x; + tempi.int_y = temp.y; + tempi.int_z = temp.z; + + uint_crd[atom_i].uint_x = (tempi.int_x << 2); + uint_crd[atom_i].uint_y = (tempi.int_y << 2); + uint_crd[atom_i].uint_z = (tempi.int_z << 2); + } +} + +__global__ void Vector_Translation(const int vector_numbers, VECTOR *vec_list, const VECTOR translation_vec) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < vector_numbers) { + vec_list[i].x = vec_list[i].x + translation_vec.x; + vec_list[i].y = vec_list[i].y + translation_vec.y; + vec_list[i].z = vec_list[i].z + translation_vec.z; + } +} +__global__ void Vector_Translation(const int vector_numbers, VECTOR *vec_list, const VECTOR *translation_vec) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < vector_numbers) { + vec_list[i].x = vec_list[i].x + translation_vec[0].x; + vec_list[i].y = vec_list[i].y + translation_vec[0].y; + vec_list[i].z = vec_list[i].z + translation_vec[0].z; + } +} +__global__ void Crd_Periodic_Map(const int atom_numbers, VECTOR *crd, const float *box_length) { + int atom_i = blockDim.x * blockIdx.x + threadIdx.x; + if (atom_i < atom_numbers) { + if (crd[atom_i].x >= 0) { + if (crd[atom_i].x < box_length[0]) { + } else { + crd[atom_i].x = crd[atom_i].x - box_length[0]; + } + } else { + crd[atom_i].x = crd[atom_i].x + box_length[0]; + } + + if (crd[atom_i].y >= 0) { + if (crd[atom_i].y < box_length[1]) { + } else { + crd[atom_i].y = crd[atom_i].y - box_length[1]; + } + } else { + crd[atom_i].y = crd[atom_i].y + box_length[1]; + } + if (crd[atom_i].z >= 0) { + if (crd[atom_i].z < box_length[2]) { + } else { + crd[atom_i].z = crd[atom_i].z - box_length[2]; + } + } else { + crd[atom_i].z = crd[atom_i].z + box_length[2]; + } + } +} + +__global__ void Clear_Grid_Bucket(const int grid_numbers, int *atom_numbers_in_grid_bucket, GRID_BUCKET *bucket) { + int grid_serial = blockDim.x * blockIdx.x + threadIdx.x; + if (grid_serial < grid_numbers) { + GRID_BUCKET bucket_i = bucket[grid_serial]; + for (int i = 0; i < atom_numbers_in_grid_bucket[grid_serial]; 
i = i + 1) { + bucket_i.atom_serial[i] = -1; + } + atom_numbers_in_grid_bucket[grid_serial] = 0; + } +} + +__global__ void Find_Atom_In_Grid_Serial(const int atom_numbers, const float *grid_length_inverse, const VECTOR *crd, + const int *grid_N, const int gridxy, int *atom_in_grid_serial) { + int atom_i = blockDim.x * blockIdx.x + threadIdx.x; + if (atom_i < atom_numbers) { + int Nx = static_cast(crd[atom_i].x) * grid_length_inverse[0]; // crd.x must < boxlength.x + int Ny = static_cast(crd[atom_i].y) * grid_length_inverse[1]; + int Nz = static_cast(crd[atom_i].z) * grid_length_inverse[2]; + Nx = Nx & ((Nx - grid_N[0]) >> 31); + Ny = Ny & ((Ny - grid_N[1]) >> 31); + Nz = Nz & ((Nz - grid_N[2]) >> 31); + atom_in_grid_serial[atom_i] = Nz * gridxy + Ny * grid_N[0] + Nx; + } +} + +__global__ void Put_Atom_In_Grid_Bucket(const int atom_numbers, const int *atom_in_grid_serial, GRID_BUCKET *bucket, + int *atom_numbers_in_grid_bucket) { + int atom_i = blockDim.x * blockIdx.x + threadIdx.x; + if (atom_i < atom_numbers) { + int grid_serial = atom_in_grid_serial[atom_i]; + GRID_BUCKET bucket_i = bucket[grid_serial]; + int a = atom_numbers_in_grid_bucket[grid_serial]; + atomicCAS(&bucket_i.atom_serial[a], -1, atom_i); + if (bucket_i.atom_serial[a] != atom_i) { + while (true) { + a = a + 1; + atomicCAS(&bucket_i.atom_serial[a], -1, atom_i); + if (bucket_i.atom_serial[a] == atom_i) { + atomicAdd(&atom_numbers_in_grid_bucket[grid_serial], 1); + break; + } + } + } else { + atomicAdd(&atom_numbers_in_grid_bucket[grid_serial], 1); + } + } +} +__global__ void Find_atom_neighbors(const int atom_numbers, const UNSIGNED_INT_VECTOR *uint_crd, + const float *uint_dr_to_dr_cof, const int *atom_in_grid_serial, + const GRID_POINTER *gpointer, const GRID_BUCKET *bucket, + const int *atom_numbers_in_grid_bucket, NEIGHBOR_LIST *nl, + const float cutoff_skin_square) { + int atom_i = blockDim.x * blockIdx.x + threadIdx.x; + if (atom_i < atom_numbers) { + int grid_serial = atom_in_grid_serial[atom_i]; + int grid_serial2; + int atom_numbers_in_nl_lin = 0; + int atom_j; + int int_x; + int int_y; + int int_z; + UNSIGNED_INT_VECTOR uint_crd_i = uint_crd[atom_i]; + NEIGHBOR_LIST nl_i = nl[atom_i]; + GRID_POINTER gpointer_i = gpointer[grid_serial]; + VECTOR dr; + float dr2; + for (int grid_cycle = 0; grid_cycle < 125; grid_cycle = grid_cycle + 1) { + grid_serial2 = gpointer_i.grid_serial[grid_cycle]; + GRID_BUCKET bucket_i = bucket[grid_serial2]; + for (int i = 0; i < atom_numbers_in_grid_bucket[grid_serial2]; i = i + 1) { + atom_j = bucket_i.atom_serial[i]; + if (atom_j > atom_i) { + int_x = uint_crd[atom_j].uint_x - uint_crd_i.uint_x; + int_y = uint_crd[atom_j].uint_y - uint_crd_i.uint_y; + int_z = uint_crd[atom_j].uint_z - uint_crd_i.uint_z; + dr.x = uint_dr_to_dr_cof[0] * int_x; + dr.y = uint_dr_to_dr_cof[1] * int_y; + dr.z = uint_dr_to_dr_cof[2] * int_z; + dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z; + if (dr2 < cutoff_skin_square) { + nl_i.atom_serial[atom_numbers_in_nl_lin] = atom_j; + atom_numbers_in_nl_lin = atom_numbers_in_nl_lin + 1; + } + } + } + } // 124 grid cycle + nl[atom_i].atom_numbers = atom_numbers_in_nl_lin; + } +} + +__global__ void Is_need_refresh_neighbor_list_cuda(const int atom_numbers, const VECTOR *crd, const VECTOR *old_crd, + const float half_skin_square, int *need_refresh_flag) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < atom_numbers) { + VECTOR r1 = crd[i]; + VECTOR r2 = old_crd[i]; + r1.x = r1.x - r2.x; + r1.y = r1.y - r2.y; + r1.z = r1.z - r2.z; + float r1_2 = r1.x * r1.x + 
r1.y * r1.y + r1.z * r1.z; + if (r1_2 > half_skin_square) { + atomicExch(&need_refresh_flag[0], 1); + } + } +} + +__global__ void Is_need_refresh_neighbor_list_cuda(const int atom_numbers, const VECTOR *crd, const VECTOR *old_crd, + const VECTOR *box_length, const float half_skin_square, + int *need_refresh_flag) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < atom_numbers) { + VECTOR r1 = crd[i]; + VECTOR r2 = old_crd[i]; + r1 = Get_Periodic_Displacement(r1, r2, box_length[0]); + float r1_2 = r1.x * r1.x + r1.y * r1.y + r1.z * r1.z; + if (r1_2 > half_skin_square) { + atomicExch(&need_refresh_flag[0], 1); + } + } +} + +__global__ void Delete_Excluded_Atoms_Serial_In_Neighbor_List(const int atom_numbers, NEIGHBOR_LIST *nl, + const int *excluded_list_start, const int *excluded_list, + const int *excluded_atom_numbers) { + int atom_i = blockDim.x * blockIdx.x + threadIdx.x; + if (atom_i < atom_numbers) { + int excluded_number = excluded_atom_numbers[atom_i]; + if (excluded_number > 0) { + int list_start = excluded_list_start[atom_i]; + int atom_min = excluded_list[list_start]; + int list_end = list_start + excluded_number; + int atom_max = excluded_list[list_end - 1]; + NEIGHBOR_LIST nl_i = nl[atom_i]; + int atomnumbers_in_nl_lin = nl_i.atom_numbers; + int atom_j; + int excluded_atom_numbers_lin = list_end - list_start; + int excluded_atom_numbers_count = 0; + for (int i = 0; i < atomnumbers_in_nl_lin; i = i + 1) { + atom_j = nl_i.atom_serial[i]; + if (atom_j < atom_min || atom_j > atom_max) { + continue; + } else { + for (int j = list_start; j < list_end; j = j + 1) { + if (atom_j == excluded_list[j]) { + atomnumbers_in_nl_lin = atomnumbers_in_nl_lin - 1; + nl_i.atom_serial[i] = nl_i.atom_serial[atomnumbers_in_nl_lin]; + excluded_atom_numbers_count = excluded_atom_numbers_count + 1; + i = i - 1; + } + } + if (excluded_atom_numbers_count < excluded_atom_numbers_lin) { + } else { + break; + } // break + } // in the range of excluded min to max + } // cycle for neighbors + nl[atom_i].atom_numbers = atomnumbers_in_nl_lin; + } // if need excluded + } +} + +void Refresh_Neighbor_List(int *refresh_sign, const int thread, const int atom_numbers, VECTOR *crd, VECTOR *old_crd, + UNSIGNED_INT_VECTOR *uint_crd, float *crd_to_uint_crd_cof, float *uint_dr_to_dr_cof, + int *atom_in_grid_serial, const float skin, float *box_length, const GRID_POINTER *gpointer, + GRID_BUCKET *bucket, int *atom_numbers_in_grid_bucket, NEIGHBOR_LIST *d_nl, + int *excluded_list_start, int *excluded_list, int *excluded_numbers, + float cutoff_skin_square, int grid_numbers, float *grid_length_inverse, int *grid_N, int Nxy, + cudaStream_t stream) { + std::vector h_refresh_sign(1); + cudaMemcpyAsync(h_refresh_sign.data(), refresh_sign, sizeof(int), cudaMemcpyDeviceToHost, stream); + if (h_refresh_sign[0] == 1) { + Clear_Grid_Bucket<<(grid_numbers) / thread), thread, 0, stream>>>( + grid_numbers, atom_numbers_in_grid_bucket, bucket); + + Crd_Periodic_Map<<(atom_numbers) / thread), thread, 0, stream>>>(atom_numbers, crd, + box_length); + + Find_Atom_In_Grid_Serial<<(atom_numbers) / thread), thread, 0, stream>>>( + atom_numbers, grid_length_inverse, crd, grid_N, Nxy, atom_in_grid_serial); + + Copy_List<<(3. 
* atom_numbers) / thread), thread, 0, stream>>>( + 3 * atom_numbers, reinterpret_cast(crd), reinterpret_cast(old_crd)); + + Put_Atom_In_Grid_Bucket<<(atom_numbers) / thread), thread, 0, stream>>>( + atom_numbers, atom_in_grid_serial, bucket, atom_numbers_in_grid_bucket); + + Crd_To_Uint_Crd<<(atom_numbers) / thread), thread, 0, stream>>>( + atom_numbers, crd_to_uint_crd_cof, crd, uint_crd); + + Find_atom_neighbors<<(atom_numbers) / thread), thread, 0, stream>>>( + atom_numbers, uint_crd, uint_dr_to_dr_cof, atom_in_grid_serial, gpointer, bucket, atom_numbers_in_grid_bucket, + d_nl, cutoff_skin_square); + + Delete_Excluded_Atoms_Serial_In_Neighbor_List<<(atom_numbers) / thread), thread, 0, + stream>>>(atom_numbers, d_nl, excluded_list_start, excluded_list, + excluded_numbers); + h_refresh_sign[0] = 0; + } +} + +__global__ void construct_neighbor_list_kernel(int atom_numbers, int max_neighbor_numbers, int *nl_atom_numbers, + int *nl_atom_serial, NEIGHBOR_LIST *nl) { + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) { + nl[i].atom_numbers = nl_atom_numbers[i]; + nl[i].atom_serial = nl_atom_serial + i * max_neighbor_numbers; + } +} + +void Construct_Neighbor_List(int atom_numbers, int max_neighbor_numbers, int *nl_atom_numbers, int *nl_atom_serial, + NEIGHBOR_LIST *nl, cudaStream_t stream) { + construct_neighbor_list_kernel<<(atom_numbers) / 128), 128, 0, stream>>>( + atom_numbers, max_neighbor_numbers, nl_atom_numbers, nl_atom_serial, nl); +} + +__global__ void copy_neighbor_list_atom_number(int atom_numbers, int max_neighbor_numbers, NEIGHBOR_LIST *nl, + int *nl_atom_numbers, int *nl_atom_serial) { + int i, j; + for (i = blockIdx.x * blockDim.x + threadIdx.x; i < atom_numbers; i += gridDim.x * blockDim.x) { + nl_atom_numbers[i] = nl[i].atom_numbers; + for (j = blockIdx.y * blockDim.y + threadIdx.y; j < max_neighbor_numbers; j += gridDim.y * blockDim.y) { + if (j < nl_atom_numbers[i]) { + nl_atom_serial[i * max_neighbor_numbers + j] = nl[i].atom_serial[j]; + } else { + nl_atom_serial[i * max_neighbor_numbers + j] = 0; + } + } + } +} + +__global__ void Reset_List(const int element_numbers, int *list, const int replace_element) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < element_numbers) { + list[i] = replace_element; + } +} + +__global__ void Reset_List(const int element_numbers, float *list, const float replace_element) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + if (i < element_numbers) { + list[i] = replace_element; + } +} + +void CopyNeighborListAtomNumber(int atom_numbers, int max_neighbor_numbers, NEIGHBOR_LIST *nl, int *nl_atom_numbers, + int *nl_atom_serial, cudaStream_t stream) { + copy_neighbor_list_atom_number<<(atom_numbers) / 128), 128, 0, stream>>>( + atom_numbers, max_neighbor_numbers, nl, nl_atom_numbers, nl_atom_serial); +} + +void Refresh_Neighbor_List_No_Check(int grid_numbers, int atom_numbers, float skin, int Nxy, float cutoff_skin_square, + int *grid_N, float *box_length, int *atom_numbers_in_grid_bucket, + float *grid_length_inverse, int *atom_in_grid_serial, GRID_BUCKET *bucket, + VECTOR *crd, VECTOR *old_crd, float *crd_to_uint_crd_cof, + UNSIGNED_INT_VECTOR *uint_crd, float *uint_dr_to_dr_cof, GRID_POINTER *gpointer, + NEIGHBOR_LIST *d_nl, int *excluded_list_start, int *excluded_list, + int *excluded_numbers, cudaStream_t stream) { + Clear_Grid_Bucket<<(grid_numbers) / 32), 32, 0, stream>>>( + grid_numbers, atom_numbers_in_grid_bucket, bucket); + + Crd_Periodic_Map<<(atom_numbers) / 32), 
32, 0, stream>>>(atom_numbers, crd, box_length); + + Find_Atom_In_Grid_Serial<<(atom_numbers) / 32), 32, 0, stream>>>( + atom_numbers, grid_length_inverse, crd, grid_N, Nxy, atom_in_grid_serial); + cudaMemcpyAsync(old_crd, crd, sizeof(VECTOR) * atom_numbers, cudaMemcpyDeviceToDevice, stream); + + Put_Atom_In_Grid_Bucket<<(atom_numbers) / 32), 32, 0, stream>>>( + atom_numbers, atom_in_grid_serial, bucket, atom_numbers_in_grid_bucket); + + Crd_To_Uint_Crd<<(atom_numbers) / 32), 32, 0, stream>>>(atom_numbers, crd_to_uint_crd_cof, + crd, uint_crd); + + Find_atom_neighbors<<(atom_numbers) / 32), 32, 0, stream>>>( + atom_numbers, uint_crd, uint_dr_to_dr_cof, atom_in_grid_serial, gpointer, bucket, atom_numbers_in_grid_bucket, d_nl, + cutoff_skin_square); + + Delete_Excluded_Atoms_Serial_In_Neighbor_List<<(atom_numbers) / 32), 32, 0, stream>>>( + atom_numbers, d_nl, excluded_list_start, excluded_list, excluded_numbers); +} + +__global__ void Mul_half(float *src, float *dst) { + int index = threadIdx.x; + if (index < 3) { + dst[index] = src[index] * 0.5; + } +} + +__global__ void Mul_quarter(float *src, float *dst) { + int index = threadIdx.x; + if (index < 3) { + dst[index] = src[index] * 0.25; + } +} + +int refresh_count = 0; + +void Neighbor_List_Update_New(int grid_numbers, int atom_numbers, int *d_refresh_count, int refresh_interval, + int not_first_time, float skin, int Nxy, float cutoff_square, + float cutoff_with_skin_square, int *grid_N, float *box_length, + int *atom_numbers_in_grid_bucket, float *grid_length_inverse, int *atom_in_grid_serial, + GRID_BUCKET *bucket, float *crd, float *old_crd, float *crd_to_uint_crd_cof, + float *half_crd_to_uint_crd_cof, unsigned int *uint_crd, float *uint_dr_to_dr_cof, + GRID_POINTER *gpointer, NEIGHBOR_LIST *d_nl, int *excluded_list_start, int *excluded_list, + int *excluded_numbers, float half_skin_square, int *is_need_refresh_neighbor_list, + int forced_update, int forced_check, cudaStream_t stream) { + if (forced_update) { + Mul_quarter<<<1, 3, 0, stream>>>(crd_to_uint_crd_cof, half_crd_to_uint_crd_cof); + Refresh_Neighbor_List_No_Check( + grid_numbers, atom_numbers, skin, Nxy, cutoff_square, grid_N, box_length, atom_numbers_in_grid_bucket, + grid_length_inverse, atom_in_grid_serial, bucket, reinterpret_cast(crd), + reinterpret_cast(old_crd), half_crd_to_uint_crd_cof, reinterpret_cast(uint_crd), + uint_dr_to_dr_cof, gpointer, d_nl, excluded_list_start, excluded_list, excluded_numbers, stream); + + } else if (refresh_interval > 0 && !forced_check) { + if (refresh_count % refresh_interval == 0) { + Mul_quarter<<<1, 3, 0, stream>>>(crd_to_uint_crd_cof, half_crd_to_uint_crd_cof); + Refresh_Neighbor_List_No_Check(grid_numbers, atom_numbers, skin, Nxy, cutoff_square, grid_N, box_length, + atom_numbers_in_grid_bucket, grid_length_inverse, atom_in_grid_serial, bucket, + reinterpret_cast(crd), reinterpret_cast(old_crd), + half_crd_to_uint_crd_cof, reinterpret_cast(uint_crd), + uint_dr_to_dr_cof, gpointer, d_nl, excluded_list_start, excluded_list, + excluded_numbers, stream); + } + refresh_count += 1; + } else { + Is_need_refresh_neighbor_list_cuda<<(atom_numbers) / 128), 128, 0, stream>>>( + atom_numbers, reinterpret_cast(crd), reinterpret_cast(old_crd), + reinterpret_cast(box_length), half_skin_square, is_need_refresh_neighbor_list); + Mul_quarter<<<1, 3, 0, stream>>>(crd_to_uint_crd_cof, half_crd_to_uint_crd_cof); + Refresh_Neighbor_List(is_need_refresh_neighbor_list, 32, atom_numbers, reinterpret_cast(crd), + reinterpret_cast(old_crd), 
reinterpret_cast(uint_crd), + half_crd_to_uint_crd_cof, uint_dr_to_dr_cof, atom_in_grid_serial, skin, box_length, gpointer, + bucket, atom_numbers_in_grid_bucket, d_nl, excluded_list_start, excluded_list, + excluded_numbers, cutoff_with_skin_square, grid_numbers, grid_length_inverse, grid_N, Nxy, + stream); + } +} diff --git a/cuda_code/network_13.cu b/cuda_code/network_13.cu new file mode 100644 index 0000000000000000000000000000000000000000..6d29e7d7d4dce5330e73fd90ebacb69af3d59234 --- /dev/null +++ b/cuda_code/network_13.cu @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are permitted + * provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, this list of + * conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used + * to endorse or promote products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *//* + */ + +/** @file network.cu + * @author Thomas Müller, NVIDIA + * @brief API interface of a neural network implementation + */ + +#include +#include + +#include +#include + +#if TCNN_MIN_GPU_ARCH >= 70 +#include +#endif + + +TCNN_NAMESPACE_BEGIN + +Activation string_to_activation(std::string activation_name) { + if (equals_case_insensitive(activation_name, "None")) { + return Activation::None; + } else if (equals_case_insensitive(activation_name, "ReLU")) { + return Activation::ReLU; + } else if (equals_case_insensitive(activation_name, "Exponential")) { + return Activation::Exponential; + } else if (equals_case_insensitive(activation_name, "Sigmoid")) { + return Activation::Sigmoid; + } else if (equals_case_insensitive(activation_name, "Sine")) { + return Activation::Sine; + } else if (equals_case_insensitive(activation_name, "Squareplus")) { + return Activation::Squareplus; + } else if (equals_case_insensitive(activation_name, "Softplus")) { + return Activation::Softplus; + } + + throw std::runtime_error{std::string{"Invalid activation name: "} + activation_name}; +} + +template +void extract_dimension_pos_neg(cudaStream_t stream, const uint32_t num_elements, const uint32_t dim, const uint32_t fan_in, const uint32_t fan_out, const T* encoded, float* output) { + linear_kernel(extract_dimension_pos_neg_kernel, 0, stream, num_elements, dim, fan_in, fan_out, encoded, output); +} + +template void extract_dimension_pos_neg(cudaStream_t stream, const uint32_t num_elements, const uint32_t dim, const uint32_t fan_in, const uint32_t fan_out, const network_precision_t* encoded, float* output); + + +template +Network* create_network(const json& network) { + std::string network_type = network.value("otype", "MLP"); + + bool wantFullyFusedMlp = equals_case_insensitive(network_type, "MegakernelMLP") || equals_case_insensitive(network_type, "FullyFusedMLP"); + bool wantCutlassMlp = equals_case_insensitive(network_type, "MLP") || equals_case_insensitive(network_type, "CutlassMLP"); + + // If the GPU architecture is insufficient for + if (MIN_GPU_ARCH <= 70 || std::is_same::value) { + if (wantFullyFusedMlp && MIN_GPU_ARCH <= 70) { + std::cout + << "Warning: FullyFusedMLP is not supported for the selected architecture " << MIN_GPU_ARCH << ". " + << "Falling back to CutlassMLP. For maximum performance, raise the target GPU architecture to 75+." + << std::endl; + } + + wantCutlassMlp |= wantFullyFusedMlp; + wantFullyFusedMlp = false; + } + + if (wantFullyFusedMlp) { + if (!std::is_same::value) { + throw std::runtime_error{"FullyFusedMLP can only be used if the network precision is set to __half."}; + } else { +#if TCNN_MIN_GPU_ARCH >= 70 +# define TCNN_FULLY_FUSED_PARAMS \ + network["n_input_dims"], \ + network["n_output_dims"], \ + network.value("n_hidden_layers", 5u), \ + network.value("feedback_alignment", false), \ + string_to_activation(network.value("activation", "ReLU")), \ + string_to_activation(network.value("output_activation", "None")), + + uint32_t n_neurons = network.value("n_neurons", 128u); + switch (n_neurons) { + case 16: return new FullyFusedMLP{TCNN_FULLY_FUSED_PARAMS}; + case 32: return new FullyFusedMLP{TCNN_FULLY_FUSED_PARAMS}; + case 64: return new FullyFusedMLP{TCNN_FULLY_FUSED_PARAMS}; + case 128: return new FullyFusedMLP{TCNN_FULLY_FUSED_PARAMS}; + default: throw std::runtime_error{std::string{"FullyFusedMLP only supports 16, 32, 64, and 128 neurons, but got "} + std::to_string(n_neurons) + ". 
Use CutlassMLP instead if this is a requirement."}; + } +# undef TCNN_FULLY_FUSED_PARAMS +#else //TCNN_MIN_GPU_ARCH >= 70 + throw std::runtime_error{"FullyFusedMLP was not compiled due to insufficient GPU arch of <70."}; +#endif //TCNN_MIN_GPU_ARCH >= 70 + } + } else if (wantCutlassMlp) { + return new CutlassMLP{ + network["n_input_dims"], + network.value("n_neurons", 128u), + network["n_output_dims"], + network.value("n_hidden_layers", 5u), + string_to_activation(network.value("activation", "ReLU")), + string_to_activation(network.value("output_activation", "None")), + }; + } else if (equals_case_insensitive(network_type, "ResNet") || equals_case_insensitive(network_type, "CutlassResNet")) { + return new CutlassResNet{ + network["n_input_dims"], + network.value("n_neurons", 128u), + network["n_output_dims"], + network.value("n_blocks", 2u), + network.value("n_matrices_per_block", 2u), + string_to_activation(network.value("output_activation", "None")), + }; + } + + throw std::runtime_error{std::string{"Invalid network type: "} + network_type}; +} + +template Network* create_network(const json& network); + +TCNN_NAMESPACE_END diff --git a/cuda_code/network_kernels_5.cu b/cuda_code/network_kernels_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..462d85e6356ae4dc50efd1b03df2bc094f5773ad --- /dev/null +++ b/cuda_code/network_kernels_5.cu @@ -0,0 +1,233 @@ +#include "cuda_runtime.h" +#include "curand.h" +#include "cublas_v2.h" + +extern "C" { +#include +#include +#include + +#include "network.h" +//#include "image.h" +//#include "data.h" +#include "utils.h" +#include "parser.h" + +#include "crop_layer.h" +#include "connected_layer.h" +#include "rnn_layer.h" +#include "gru_layer.h" +#include "crnn_layer.h" +#include "detection_layer.h" +#include "convolutional_layer.h" +#include "activation_layer.h" +#include "deconvolutional_layer.h" +#include "maxpool_layer.h" +#include "avgpool_layer.h" +#include "normalization_layer.h" +#include "batchnorm_layer.h" +#include "cost_layer.h" +#include "local_layer.h" +#include "softmax_layer.h" +#include "dropout_layer.h" +#include "route_layer.h" +#include "shortcut_layer.h" +#include "blas.h" +} + +float * get_network_output_gpu_layer(network net, int i); +float * get_network_delta_gpu_layer(network net, int i); +float * get_network_output_gpu(network net); + +void forward_network_gpu(network net, network_state state) +{ + state.workspace = net.workspace; + int i; + for(i = 0; i < net.n; ++i){ + state.index = i; + layer l = net.layers[i]; + if(l.delta_gpu){ + fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1); + } + if(l.type == CONVOLUTIONAL){ + forward_convolutional_layer_gpu(l, state); + } else if(l.type == DECONVOLUTIONAL){ + forward_deconvolutional_layer_gpu(l, state); + } else if(l.type == ACTIVE){ + forward_activation_layer_gpu(l, state); + } else if(l.type == LOCAL){ + forward_local_layer_gpu(l, state); + } else if(l.type == DETECTION){ + forward_detection_layer_gpu(l, state); + } else if(l.type == CONNECTED){ + forward_connected_layer_gpu(l, state); + } else if(l.type == RNN){ + forward_rnn_layer_gpu(l, state); + } else if(l.type == GRU){ + forward_gru_layer_gpu(l, state); + } else if(l.type == CRNN){ + forward_crnn_layer_gpu(l, state); + } else if(l.type == CROP){ + forward_crop_layer_gpu(l, state); + } else if(l.type == COST){ + forward_cost_layer_gpu(l, state); + } else if(l.type == SOFTMAX){ + forward_softmax_layer_gpu(l, state); + } else if(l.type == NORMALIZATION){ + forward_normalization_layer_gpu(l, state); + } else 
if(l.type == BATCHNORM){ + forward_batchnorm_layer_gpu(l, state); + } else if(l.type == MAXPOOL){ + forward_maxpool_layer_gpu(l, state); + } else if(l.type == AVGPOOL){ + forward_avgpool_layer_gpu(l, state); + } else if(l.type == DROPOUT){ + forward_dropout_layer_gpu(l, state); + } else if(l.type == ROUTE){ + forward_route_layer_gpu(l, net); + } else if(l.type == SHORTCUT){ + forward_shortcut_layer_gpu(l, state); + } + state.input = l.output_gpu; + } +} + +void backward_network_gpu(network net, network_state state) +{ + state.workspace = net.workspace; + int i; + float * original_input = state.input; + float * original_delta = state.delta; + for(i = net.n-1; i >= 0; --i){ + state.index = i; + layer l = net.layers[i]; + if(i == 0){ + state.input = original_input; + state.delta = original_delta; + }else{ + layer prev = net.layers[i-1]; + state.input = prev.output_gpu; + state.delta = prev.delta_gpu; + } + if(l.type == CONVOLUTIONAL){ + backward_convolutional_layer_gpu(l, state); + } else if(l.type == DECONVOLUTIONAL){ + backward_deconvolutional_layer_gpu(l, state); + } else if(l.type == ACTIVE){ + backward_activation_layer_gpu(l, state); + } else if(l.type == LOCAL){ + backward_local_layer_gpu(l, state); + } else if(l.type == MAXPOOL){ + if(i != 0) backward_maxpool_layer_gpu(l, state); + } else if(l.type == AVGPOOL){ + if(i != 0) backward_avgpool_layer_gpu(l, state); + } else if(l.type == DROPOUT){ + backward_dropout_layer_gpu(l, state); + } else if(l.type == DETECTION){ + backward_detection_layer_gpu(l, state); + } else if(l.type == NORMALIZATION){ + backward_normalization_layer_gpu(l, state); + } else if(l.type == BATCHNORM){ + backward_batchnorm_layer_gpu(l, state); + } else if(l.type == SOFTMAX){ + if(i != 0) backward_softmax_layer_gpu(l, state); + } else if(l.type == CONNECTED){ + backward_connected_layer_gpu(l, state); + } else if(l.type == RNN){ + backward_rnn_layer_gpu(l, state); + } else if(l.type == GRU){ + backward_gru_layer_gpu(l, state); + } else if(l.type == CRNN){ + backward_crnn_layer_gpu(l, state); + } else if(l.type == COST){ + backward_cost_layer_gpu(l, state); + } else if(l.type == ROUTE){ + backward_route_layer_gpu(l, net); + } else if(l.type == SHORTCUT){ + backward_shortcut_layer_gpu(l, state); + } + } +} + +void update_network_gpu(network net) +{ + int i; + int update_batch = net.batch*net.subdivisions; + float rate = get_current_rate(net); + for(i = 0; i < net.n; ++i){ + layer l = net.layers[i]; + if(l.type == CONVOLUTIONAL){ + update_convolutional_layer_gpu(l, update_batch, rate, net.momentum, net.decay); + } else if(l.type == DECONVOLUTIONAL){ + update_deconvolutional_layer_gpu(l, rate, net.momentum, net.decay); + } else if(l.type == CONNECTED){ + update_connected_layer_gpu(l, update_batch, rate, net.momentum, net.decay); + } else if(l.type == GRU){ + update_gru_layer_gpu(l, update_batch, rate, net.momentum, net.decay); + } else if(l.type == RNN){ + update_rnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay); + } else if(l.type == CRNN){ + update_crnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay); + } else if(l.type == LOCAL){ + update_local_layer_gpu(l, update_batch, rate, net.momentum, net.decay); + } + } +} + +float train_network_datum_gpu(network net, float *x, float *y) +{ + network_state state; + state.index = 0; + state.net = net; + int x_size = get_network_input_size(net)*net.batch; + int y_size = get_network_output_size(net)*net.batch; + if(net.layers[net.n-1].type == DETECTION) y_size = net.layers[net.n-1].truths*net.batch; + 
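+    // Lazily create persistent device buffers for the input and truth data on the
+    // first call; later calls reuse them and just push the new host batch.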
if(!*net.input_gpu){ + *net.input_gpu = cuda_make_array(x, x_size); + *net.truth_gpu = cuda_make_array(y, y_size); + }else{ + cuda_push_array(*net.input_gpu, x, x_size); + cuda_push_array(*net.truth_gpu, y, y_size); + } + state.input = *net.input_gpu; + state.delta = 0; + state.truth = *net.truth_gpu; + state.train = 1; + forward_network_gpu(net, state); + backward_network_gpu(net, state); + float error = get_network_cost(net); + if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net); + + return error; +} + +float *get_network_output_layer_gpu(network net, int i) +{ + layer l = net.layers[i]; + cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch); + return l.output; +} + +float *get_network_output_gpu(network net) +{ + int i; + for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break; + return get_network_output_layer_gpu(net, i); +} + +float *network_predict_gpu(network net, float *input) +{ + int size = get_network_input_size(net) * net.batch; + network_state state; + state.index = 0; + state.net = net; + state.input = cuda_make_array(input, size); + state.truth = 0; + state.train = 0; + state.delta = 0; + forward_network_gpu(net, state); + float *out = get_network_output_gpu(net); + cuda_free(state.input); + return out; +} + diff --git a/cuda_code/new-forward_4.cu b/cuda_code/new-forward_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..cb1a6b49775f0b3217a51abdd493ec7e71b2cfd2 --- /dev/null +++ b/cuda_code/new-forward_4.cu @@ -0,0 +1,200 @@ +#include +#include +#include "gpu-new-forward.h" +#define TILE_WIDTH 16 + +const int constMemSize = 16*4*7*7; //M*C*K*K +__constant__ float Kc[constMemSize];//filter-bank + +__global__ void shared_mem_kernel(float *y, const float *x, const int B, const int M, const int C, const int H, const int W, const int K) +{ + + const int H_out = H - K + 1; + const int W_out = W - K + 1; + +#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0] +#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0] +#define k4d(i3, i2, i1, i0) Kc[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0] + + // Insert your GPU convolution kernel code here + int W_grid = ceil(W_out*1.0 / TILE_WIDTH); //# of tiles in width + int n, m, h, w, c, p, q; + n = blockIdx.x; //current channel + m = blockIdx.y; //current ouput feature map + h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y; + w = (blockIdx.z % W_grid) * TILE_WIDTH + threadIdx.x; + + float acc = 0.; + if(h < H_out && w < W_out){ + for(c=0; c>>(device_y, device_x, B, M, C, H, W, K); + } + else { + dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1); + dim3 gridDim(ceil(H_out*W_out/(1.0*TILE_WIDTH)), ceil(M/(1.0*TILE_WIDTH)), B); + combined_unroll_mm_kernel<<>>(device_y,device_x,device_k,B,C,H,K,W,M); + + } + + + // Copy the output back to host + cudaMemcpy(host_y, device_y, B*M*(H-K+1)*(W-K+1)*sizeof(float), cudaMemcpyDeviceToHost); + + // Free device memory + cudaFree(device_y); + cudaFree(device_x); + cudaFree(device_k); + + // Useful snippet for error checking + // cudaError_t error = cudaGetLastError(); + // if(error != cudaSuccess) + // { + // std::cout<<"CUDA error: "< +void Memcpy(ep::Stream* stream, void* dst, const void* src, size_t sz) { + if (dst == src) { return; } + OF_CUDA_CHECK(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDefault, + stream->As()->cuda_stream())); +} + +template<> +void Memset(ep::Stream* stream, void* dst, const char value, size_t sz) { + 
OF_CUDA_CHECK(cudaMemsetAsync(dst, value, sz, stream->As()->cuda_stream())); +} + +} // namespace oneflow diff --git a/cuda_code/new_resize_3.cu b/cuda_code/new_resize_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..43885cb9cdf9c7d25483153f5239bd3ca380b6ad --- /dev/null +++ b/cuda_code/new_resize_3.cu @@ -0,0 +1,622 @@ +// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +#include "dali/pipeline/operators/resize/new_resize.h" +#include +#include +#include +#include +#include + +namespace dali { + +// Greatest Common Factor +int gcf(int a, int b) { + int t; + if (b > a) { + t = a; + a = b; + b = t; + } + + while (b) { + t = a % b; + a = b; + b = t; + } + + return a; +} + +// Least Common Multiplier +int lcm(int a, int b) { + return a / gcf(a, b) * b; +} + +void DataDependentSetupCPU(const Tensor &input, + Tensor *output, const char *pOpName, + const uint8 **ppInRaster, uint8 **ppOutRaster, + vector *pSizes, const DALISize *out_size) { + DALI_ENFORCE(input.ndim() == 3); + DALI_ENFORCE(IsType(input.type()), "Expects input data in uint8."); + + const vector &shape = input.shape(); + const int C = shape[2]; + DALI_ENFORCE(C == 1 || C == 3, + string(pOpName ? pOpName : "Operation") + + " supports only hwc rgb & grayscale inputs."); + + if (out_size) + output->Resize({out_size->height, out_size->width, C}); + else + output->Resize(shape); + + output->set_type(input.type()); + + if (!ppInRaster) + return; + + *ppInRaster = input.template data(); + if (ppOutRaster) + *ppOutRaster = static_cast(output->raw_mutable_data()); + + if (pSizes) { + (*pSizes)[0].height = shape[0]; + (*pSizes)[0].width = shape[1]; + } +} + +bool DataDependentSetupGPU(const TensorList &input, TensorList *output, + size_t batch_size, bool reshapeBatch, vector *inPtrs, + vector *outPtrs, vector *pSizes, + ResizeParamDescr *pResizeDescr) { + DALI_ENFORCE(IsType(input.type()), + "Expected input data stored in uint8."); + + auto pResize = pResizeDescr ? pResizeDescr->pResize_ : NULL; + auto pResizeParam = pResizeDescr ? pResizeDescr->pResizeParam_ : NULL; + auto pMirroring = pResizeDescr ? pResizeDescr->pMirroring_ : NULL; + auto pTotalSize = pResizeDescr ? 
pResizeDescr->pTotalSize_ : NULL; + + // Set all elements to 0, if we will use them + if (pTotalSize) + memset(pTotalSize, 0, pResizeDescr->nBatchSlice_ * sizeof(pTotalSize[0])); + + bool newResize = false; + vector output_shape(batch_size); + for (size_t i = 0; i < batch_size; ++i) { + // Verify the inputs + const auto &input_shape = input.tensor_shape(i); + DALI_ENFORCE(input_shape.size() == 3, + "Expects 3-dimensional image input."); + + DALI_ENFORCE(input_shape[2] == 1 || input_shape[2] == 3, + "Not valid color type argument (1 or 3)"); + + // Collect the output shapes + if (pResize) { + // We are resizing + const auto input_size = pResize->size(input_t, i); + const auto out_size = pResize->size(output_t, i); + pResize->SetSize(input_size, input_shape, pResize->newSizes(i), out_size); + + if (pResizeParam) { + // NewResize is used + const int H0 = input_size->height; + const int W0 = input_size->width; + const int H1 = out_size->height; + const int W1 = out_size->width; + + int cropY, cropX; + const bool doingCrop = pResize->CropNeeded(*out_size); + if (doingCrop) + pResize->DefineCrop(out_size, &cropX, &cropY); + else + cropY = cropX = 0; + + auto resizeParam = pResizeParam + i * (pMirroring ? N_GRID_PARAMS : 1); + if (pMirroring) { + const int lcmH = lcm(H0, H1); + const int lcmW = lcm(W0, W1); + + const int sy0 = lcmH / H0; + const int sy1 = lcmH / H1; + const int sx0 = lcmW / W0; + const int sx1 = lcmW / W1; + + if (!newResize) { + newResize = resizeParam[0].x != sx0 || resizeParam[0].y != sy0 || + resizeParam[1].x != sx1 || resizeParam[1].y != sy1 || + resizeParam[2].x != cropX || resizeParam[2].y != cropY; + } + + if (newResize) { + resizeParam[0] = {sx0, sy0}; + resizeParam[1] = {sx1, sy1}; + resizeParam[2] = {cropX, cropY}; + } + + if (pTotalSize) { + // We need to check for overflow + const size_t idx = i % pResizeDescr->nBatchSlice_; + if (pTotalSize[idx] < UINT_MAX - sx0 * sy0) + pTotalSize[idx] += sx0 * sy0; + else + pTotalSize[idx] = UINT_MAX; + } + + if (pMirroring) + pResize->MirrorNeeded(pMirroring + i); + } else { + resizeParam[0] = {W1, H1}; + } + } + + // Collect the output shapes + output_shape[i] = {out_size->height, out_size->width, input_shape[2]}; + } else { + output_shape[i] = input_shape; + } + + if (pSizes) { + (*pSizes)[i].height = input_shape[0]; + (*pSizes)[i].width = input_shape[1]; + if (reshapeBatch) { + // When batch is reshaped: only one "image" will be used + (*pSizes)[i].height *= batch_size; + pSizes = NULL; + } + } + } + + // Resize the output + output->Resize(output_shape); + output->set_type(input.type()); + + CollectPointersForExecution(reshapeBatch ? 
1 : batch_size, input, inPtrs, output, outPtrs); + return newResize; +} + +void CollectPointersForExecution(size_t batch_size, + const TensorList &input, vector *inPtrs, + TensorList *output, vector *outPtrs) { + if (!inPtrs || !outPtrs) + return; + + // Collect the pointers for execution + for (size_t i = 0; i < batch_size; ++i) { + (*inPtrs)[i] = input.template tensor(i); + (*outPtrs)[i] = output->template mutable_tensor(i); + } +} + +typedef void (*allocMemoryFunction)(ResizeMappingPixDescrCPU *pntr, size_t nElem); + +typedef void (*assignElemFunction)(ResizeMappingPixDescrCPU *pntr, size_t nElem, + uint32_t addr, uint32_t area); + +static void resizeVector(ResizeMappingPixDescrCPU *pntr, size_t nElem) { + pntr->resize(nElem); +} + +static void assignVectorElement(ResizeMappingPixDescrCPU *pntr, size_t elemIdx, + uint32_t addr, uint32_t area) { + PixMapping &elem = PIX_MAPPING_CPU(*pntr)[elemIdx]; + elem.pixAddr = addr; + elem.pixArea = area; +} + +class PixMappingHelper { + public: + CC PixMappingHelper(uint32_t len, ResizeMapping *pMapping, MappingInfo *pMapInfo, + uint32_t resizedArea, ResizeMappingPixDescrCPU *pPixMapping = NULL, + allocMemoryFunction allocMemFunc = NULL, + assignElemFunction assignFunc = NULL); + + void CC constructTable(int C, int W0, size_t sx0, size_t sy0, size_t sx1, size_t sy1, + int stepW = 1, int stepH = 1, int startW = 0, int startH = 0); + + inline uint32_t CC numUsed() const { return numPixMapUsed_; } + + private: + void CC AddPixel(uint32_t addr, uint32_t area, int crdX, int crdY); + + void CC UpdateMapping(int shift, int centerX, int centerY); + + inline float CC distance(float x, float y) const { return x * x + y * y; } + + uint32_t numPixMapMax_; // length of the allocated PixMapping array + uint32_t numPixMapUsed_; // number of already used elements of pPixMapping + ResizeMappingPixDescrCPU *pPixMapping_; + ResizeMapping *const pMappingBase_; + ResizeMapping *pMapping_; + MappingInfo *const pMappingClosestBase_; + MappingInfo *pMappingClosest_; + + const allocMemoryFunction allocMemFunc_; + const assignElemFunction assignFunc_; + + const uint32_t area_; + const uint32_t resizedArea_; + float closestDist_; + float centerX_, centerY_; +}; + +__global__ void InitiateResizeTables(int nTable, const ResizeGridParam *resizeDescr, + MappingInfo *mapPntr[], MappingInfo **mappingMem, size_t step) { + size_t idx = blockIdx.x; + mapPntr[idx] = mappingMem[idx]; + if (mapPntr[idx]) { + for (size_t i = idx + step; i < nTable; idx = i, i += step) + mapPntr[i] = mapPntr[idx] + + resizeDescr[idx * N_GRID_PARAMS].x * resizeDescr[idx * N_GRID_PARAMS].y; + } else { + // No resize tables for that batch slice will be constructed + for (size_t i = idx + step; i < nTable; i += step) + mapPntr[i] = NULL; + } +} + +__global__ void ConstructResizeTables(int C, const ResizeGridParam *resizeDescr, + const DALISize *in_sizes, int W0, MappingInfo *pResizeMapping[]) { + const int imagIdx = blockIdx.x; + auto resizeParam = resizeDescr + N_GRID_PARAMS * imagIdx; + if (in_sizes) + W0 = in_sizes[imagIdx].width; + + SET_RESIZE_PARAM(); + + PixMappingHelper helper(sx0 * sy0, NULL, pResizeMapping[imagIdx], area); + helper.constructTable(C, W0, sx0, sy0, sx1, sy1, + blockDim.x, blockDim.y, threadIdx.x, threadIdx.y); +} + +#define RESIZE_GPU_PREAMBLE() // empty macro +#define RESIZE_GPU_CORE(C) RESIZE_CORE(C) + +#define RESIZE_GPU_N_PREAMBLE() // empty macro +#define RESIZE_GPU_N_CORE(C) RESIZE_N_CORE(C) + +__global__ void BatchedCongenericResizeKernel( + int H0, int W0, const uint8 *img_in, 
int H, int W, uint8 *img_out, int C, + const ResizeGridParam *resizeParam, const MirroringInfo *pMirrorInfo, + MappingInfo *const ppMapping[], const ResizeMapping *pResizeMapping, + const PixMapping *pPixMapping) { + const int imagIdx = blockIdx.x; + const bool mirrorHor = pMirrorInfo && (pMirrorInfo + imagIdx)->x != 0; + const bool mirrorVert = pMirrorInfo && (pMirrorInfo + imagIdx)->y != 0; + RESIZE_PREPARE(); + if (ppMapping || pResizeMapping) { + const MappingInfo *pMapping = ppMapping ? ppMapping[0] : NULL; + AUGMENT_RESIZE_GPU_CONGENERIC(H, W, C, img_in, img_out, RESIZE_GPU_N); + } else { + AUGMENT_RESIZE_GPU_CONGENERIC(H, W, C, img_in, img_out, RESIZE_GPU); + } +} + +DALIError_t BatchedCongenericResize(int N, const dim3 &gridDim, cudaStream_t stream, int C, + const DALISize &sizeIn, const uint8 *in_batch, const DALISize &sizeOut, + uint8 *out_batch, + const ResizeGridParam *resizeDescr, const MirroringInfo *pMirrorInfo, + MappingInfo *ppMapping[], MappingInfo **mapMem, + const ResizeMapping *pResizeMapping, const PixMapping *pPixMapping, + bool newMapping) { + if (ppMapping && newMapping) { + CHECK_RESIZE_DESCR(); + + InitiateResizeTables << < 1, 1, 0, stream >> > + (1, resizeDescr, ppMapping, mapMem, 1); + + ConstructResizeTables << < 1, gridDim, 0, stream >> > + (C, resizeDescr, NULL, sizeIn.width, ppMapping); + } + + BatchedCongenericResizeKernel << < N, gridDim, 0, stream >> > + (sizeIn.height, sizeIn.width, in_batch, sizeOut.height, sizeOut.width, out_batch, C, + resizeDescr, pMirrorInfo, ppMapping, pResizeMapping, pPixMapping); + + return DALISuccess; +} + +__global__ void BatchedResizeKernel(int C, const ResizeGridParam *resizeDescr, + MappingInfo *const ppMapping[], const MirroringInfo *pMirrorInfo, + const DALISize *in_sizes, const uint8 *const imgs_in[], + const DALISize *out_sizes, uint8 *const imgs_out[]) { + const int imagIdx = blockIdx.x; + auto resizeParam = resizeDescr + N_GRID_PARAMS * imagIdx; + const int W0 = in_sizes[imagIdx].width; + const int H0 = in_sizes[imagIdx].height; + const int W = out_sizes[imagIdx].width; + const int H = out_sizes[imagIdx].height; + const bool mirrorHor = pMirrorInfo && (pMirrorInfo + imagIdx)->x != 0; + const bool mirrorVert = pMirrorInfo && (pMirrorInfo + imagIdx)->y != 0; + + RESIZE_PREPARE(); + if (ppMapping) { + const ResizeMapping *pResizeMapping = NULL; + const PixMapping *pPixMapping = NULL; + const MappingInfo *pMapping = ppMapping[imagIdx]; + AUGMENT_RESIZE_GPU_GENERIC(H, W, C, imgs_in[imagIdx], imgs_out[imagIdx], RESIZE_GPU_N); + } else { + AUGMENT_RESIZE_GPU_GENERIC(H, W, C, imgs_in[imagIdx], imgs_out[imagIdx], RESIZE_GPU); + } +} + +DALIError_t BatchedResize(int N, const dim3 &gridDim, cudaStream_t stream, int C, + const ResizeGridParam *resizeDescr, const ImgSizeDescr sizes[], + const ImgRasterDescr raster[], MappingInfo *ppMapping[], + MappingInfo **mapMem, size_t nBatchSlice) { + auto in_sizes = IMG_SIZES(sizes[input_t]); + auto out_sizes = IMG_SIZES(sizes[output_t]); + if (ppMapping) { + InitiateResizeTables << < nBatchSlice, 1, 0, stream >> > + (N, resizeDescr, ppMapping, mapMem, nBatchSlice); + + ConstructResizeTables << < N, gridDim, 0, stream >> > + (C, resizeDescr, in_sizes, 0, ppMapping); + } + + const uint8 *const *in = IMG_RASTERS(raster[input_t]); + uint8 *const *out = IMG_RASTERS(raster[output_t]); + + const MirroringInfo *pMirrorInfo = resizeDescr + N_GRID_PARAMS * N; + BatchedResizeKernel << < N, gridDim, 0, stream >> > + (C, resizeDescr, ppMapping, pMirrorInfo, in_sizes, in, out_sizes, out); + + return 
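/* Hedged sketch (illustrative only; the real kernels above go through DALI's
   AUGMENT_RESIZE_GPU_* macros): the batched launches use one CUDA block per
   image, so blockIdx.x selects the sample and the 2-D thread block strides
   over that sample's output raster.  A minimal nearest-neighbour kernel in
   the same one-block-per-image style, with assumed names: */
__global__ void BatchedNNResizeSketch(const DALISize *in_sizes, const DALISize *out_sizes,
                                      int C, const uint8 *const in[], uint8 *const out[]) {
  const int img = blockIdx.x;                          // one block per image
  const int W0 = in_sizes[img].width,  H0 = in_sizes[img].height;
  const int W  = out_sizes[img].width, H  = out_sizes[img].height;
  for (int y = threadIdx.y; y < H; y += blockDim.y) {
    const int ySrc = y * H0 / H;                       // nearest source row
    for (int x = threadIdx.x; x < W; x += blockDim.x) {
      const int xSrc = x * W0 / W;                     // nearest source column
      for (int c = 0; c < C; ++c)
        out[img][(y * W + x) * C + c] = in[img][(ySrc * W0 + xSrc) * C + c];
    }
  }
}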
DALISuccess; +} + +PixMappingHelper::PixMappingHelper(uint32_t area, ResizeMapping *pMapping, MappingInfo *pMapInfo, + uint32_t resizedArea, ResizeMappingPixDescrCPU *pPixMapping, + allocMemoryFunction allocMemFunc, assignElemFunction assignFunc) : + area_(area), resizedArea_(resizedArea), allocMemFunc_(allocMemFunc), + assignFunc_(assignFunc), pMappingBase_(pMapping), + pMappingClosestBase_(pMapInfo) { + numPixMapMax_ = 1; + numPixMapUsed_ = 0; + + if (resizedArea == 0 && allocMemFunc_) + (*allocMemFunc_)(pPixMapping_ = pPixMapping, numPixMapMax_ = 2 * area); + else + pPixMapping_ = NULL; +} + +void PixMappingHelper::AddPixel(uint32_t addr, uint32_t area, int crdX, int crdY) { + assert(area != 0); + if (pPixMapping_) { + if (numPixMapUsed_ == numPixMapMax_) { + // Previously allocated array needs to be extended + (*allocMemFunc_)(pPixMapping_, numPixMapMax_ <<= 1); + } + + pMapping_->nPixels++; + (*assignFunc_)(pPixMapping_, numPixMapUsed_++, addr, area); + } else { + const float newDist = distance((crdX << 1) - centerX_, (crdY << 1) - centerY_); + if (closestDist_ > newDist) { + closestDist_ = newDist; + *pMappingClosest_ = addr; + } + } +} + +void PixMappingHelper::UpdateMapping(int shift, int centerX, int centerY) { + if (pPixMapping_) { + (pMapping_ = pMappingBase_ + shift)->intersectInfoAddr = numUsed(); + } else { + pMappingClosest_ = pMappingClosestBase_ + shift; + centerX_ = centerX; + centerY_ = centerY; + closestDist_ = FLT_MAX; + } +} + +#define RUN_CHECK_1 0 + +void PixMappingHelper::constructTable(int C, int W0, size_t sx0, size_t sy0, size_t sx1, + size_t sy1, int stepW, int stepH, int startW, int startH) { + // (x, y) pixel coordinate of PIX in resized image + // 0 <= x < W1; 0 <= y < H1 + + for (size_t y = startH; y < sy0; y += stepH) { + for (size_t x = startW; x < sx0; x += stepW) { + const size_t nX = x * sx1; + const size_t nY = y * sy1; + // The indices of the top-left pixel of the initial image, intersecting with PIX + const size_t begIdx[2] = {nX / sx0, nY / sy0}; + + // The indices of the bottom-right pixel of the initial image, intersecting with PIX + size_t endIdx[2] = {(nX + sx1) / sx0, (nY + sy1) / sy0}; + + // Intersection of the right (bottom) pixels with the PIX (could be equal to 0) + const size_t extra[2] = {min((nX + sx1) % sx0, sx1), min((nY + sy1) % sy0, sy1)}; + + // Length of the left (top) pixels intersecting with the PIX + const size_t lenFirst[2] = {(sx0 - nX % sx0), (sy0 - nY % sy0)}; + + // Doubled (x,y) coordinates of the pixel's center + const size_t lenX = endIdx[0] + begIdx[0] - (extra[0] || endIdx[0] == begIdx[0]? 0 : 1); + const size_t lenY = endIdx[1] + begIdx[1] - (extra[1] || endIdx[1] == begIdx[1]? 0 : 1); + + // Relative address to the first intersecting pixels + UpdateMapping(((y * sy1) % sy0) * sx0 + (x * sx1) % sx0, lenX, lenY); + + endIdx[0] -= begIdx[0]; + endIdx[1] -= begIdx[1]; +#if RUN_CHECK_1 + size_t check = 0; +#endif + size_t rowMult = endIdx[1]? lenFirst[1] : extra[1]; + size_t y0 = 0; + while (true) { + size_t x0 = endIdx[0]; + + // Relative address of the last pixel in row y0, intersecting with PIX + uint32_t pixAddr = ((y0 * W0) + x0) * C; + if (extra[0]) + AddPixel(pixAddr, extra[0] * rowMult, x0, y0); + + if (x0) { + while (--x0 > 0) + AddPixel(pixAddr -= C, sx0 * rowMult, x0, y0); + + AddPixel(pixAddr -= C, lenFirst[0] * rowMult, x0, y0); + } + +#if RUN_CHECK_1 + check += rowMult * ((endIdx[0]? 
sx0 * (endIdx[0] - 1) + lenFirst[0] : 0) + extra[0]); +#endif + if (++y0 >= endIdx[1]) { + if (y0 > endIdx[1] || !(rowMult = extra[1])) + break; + } else { + rowMult = sy0; + } + } + +#if RUN_CHECK_1 + assert(check == sx1 * sy1); +#endif + } + } +} + +void ResizeMappingTable::initTable(int H0, int W0, int H1, int W1, int C, + uint16_t xSize, uint16_t ySize, bool use_NN) { + io_size[0] = {W0, H0}; + io_size[1] = {W1, H1}; + C_ = C; + + if (use_NN) + resizeMappingSimpleCPU.resize({xSize * ySize}); + else + resizeMappingCPU.resize({xSize * ySize}); +} + +void ResizeMappingTable::constructTable(int H0, int W0, int H1, int W1, int C, int resizeType) { + // The table, which contains the information about correspondence of pixels of the initial + // image to the pixels of the resized one. + + // Resizing from (H0, W0) to (H1, W1) + // Main equations are: + // H0 * sy0 = H1 * sy1 + // W0 * sx0 = W1 * sx1 + const size_t lcmH = lcm(H0, H1); + const size_t lcmW = lcm(W0, W1); + + const size_t sy0 = lcmH / H0; + const size_t sy1 = lcmH / H1; + const size_t sx0 = lcmW / W0; + const size_t sx1 = lcmW / W1; + + const bool use_NN = resizeType == DALI_INTERP_NN; + initTable(H0, W0, H1, W1, C, sx0, sy0, use_NN); + + PixMappingHelper helper(sx0 * sy0, RESIZE_MAPPING_CPU(resizeMappingCPU), + RESIZE_MAPPING_CPU(resizeMappingSimpleCPU), use_NN ? sx1 * sy1 : 0, + &pixMappingCPU, resizeVector, assignVectorElement); + + helper.constructTable(C, W0, sx0, sy0, sx1, sy1); + if (!use_NN) + pixMappingCPU.resize(helper.numUsed()); +} + +template <> +void NewResize::RunImpl(DeviceWorkspace *ws, const int idx) { + const auto &input = ws->Input(idx); + const auto &output = ws->Output(idx); + const bool use_NN = type_ == DALI_INTERP_NN; + + size_t resizeMemory[BATCH_SLICE_NUMB]; + ResizeGridParam *pResizeGrid = resizeParam_.data(); + MirroringInfo *pMirror = pResizeGrid + N_GRID_PARAMS * batch_size_; + ResizeParamDescr resizeDescr(this, pResizeGrid, pMirror, + use_NN ? 
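/* Hedged sketch: BatchIsCongeneric() (used just below in RunImpl) selects a
   fast path in which one mapping table serves the whole batch.  Conceptually
   it only needs every sample to share the same geometry; an illustrative
   stand-alone check with assumed names, not the DALI implementation: */
static bool AllSamplesSameGeometrySketch(const std::vector<DALISize> &inSizes,
                                         const std::vector<DALISize> &outSizes) {
  for (size_t i = 1; i < inSizes.size(); ++i) {
    if (inSizes[i].width  != inSizes[0].width  || inSizes[i].height  != inSizes[0].height ||
        outSizes[i].width != outSizes[0].width || outSizes[i].height != outSizes[0].height)
      return false;   // geometries differ: per-image tables are required
  }
  return true;        // a single shared resize table is enough
}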
resizeMemory : NULL, BATCH_SLICE_NUMB); + + const bool newMapping = DataDependentSetupGPU(input, output, batch_size_, false, + inputImages(), outputImages(), NULL, + &resizeDescr); + + const int C = input.shape()[0][2]; + + const auto sizeIn = size(input_t, 0); + const auto sizeOut = size(output_t, 0); + cudaStream_t s = ws->stream(); + + const bool congenericBatch = BatchIsCongeneric(sizeIn, sizeOut, C); + MappingInfo **mapPntr = NULL; + if (use_NN) { + if (newMapping) { + if (congenericBatch) + mapPntr = CopyResizeTableToGPU(resizeMemory, s); + else + mapPntr = CopyResizeTableToGPU(resizeMemory, s, batch_size_, BATCH_SLICE_NUMB); + } else { + mapPntr = mappingPntr_; + } + } + + if (congenericBatch) { + if (newMapping) { + // Copying the descriptor of operation into GPU + resizeParamGPU_.Copy(vector( + resizeParam_.begin(), resizeParam_.begin() + N_GRID_PARAMS), s); + } + + mirrorParamGPU_.Copy(vector( + resizeParam_.begin() + N_GRID_PARAMS * batch_size_, resizeParam_.end()), s); + + const ResizeMapping *pResizeMapping = NULL; + const PixMapping *pPixMapping = NULL; +#if USE_RESIZE_TABLE_GPU + if (!use_NN) { + if (newMapping || !resizeTbl_.IsValid(sizeIn, sizeOut, C)) { + resizeTbl_.constructTable(sizeIn.height, sizeIn.width, + sizeOut.height, sizeOut.width, C, type_); + resizeTbl_.copyToGPU(s); + } + + pResizeMapping = RESIZE_MAPPING_GPU(resizeTbl_.resizeMappingGPU); + pPixMapping = PIX_MAPPING_GPU(resizeTbl_.pPixMappingGPU); + } +#endif + + DALI_CALL(BatchedCongenericResize(batch_size_, dim3(32, 32), s, C, + *sizeIn, input.template data(), + *sizeOut, static_cast(output->raw_mutable_data()), + RESIZE_PARAM(resizeParamGPU_), MIRRORING_PARAM(mirrorParamGPU_), + mapPntr, mapMemGPU_, pResizeMapping, pPixMapping, newMapping)); + } else { + resizeParamGPU_.Copy(resizeParam_, s); + + vector *raster[] = {(vector *)(inputImages()), outputImages()}; + + for (int i = input_t; i <= output_t; i++) { + TENSOR_COPY(sizesGPU_[i], sizes(static_cast(i)), s); + TENSOR_COPY(imgsGPU_[i], *(raster[i]), s); + } + + DALI_CALL(BatchedResize(batch_size_, dim3(32, 32), s, C, RESIZE_PARAM(resizeParamGPU_), + sizesGPU_, imgsGPU_, mapPntr, mapMemGPU_, _countof(mapMem_))); + } +} + +template<> +void NewResize::SetupSharedSampleParams(DeviceWorkspace* ws) { + Resize::SetupSharedSampleParams(ws); +} + +} // namespace dali + diff --git a/cuda_code/new_test.cu b/cuda_code/new_test.cu new file mode 100644 index 0000000000000000000000000000000000000000..a8909ca87bbafcb6f2765ef7d0d7a332699e233c --- /dev/null +++ b/cuda_code/new_test.cu @@ -0,0 +1,12 @@ +__global__ void f(int * A, int *B) { + + int tid = threadIdx.x; + int diff = (B - A); + + int x = B[tid]; + int y = A[tid + diff - 1]; + + B[tid] = x + y; + +} + diff --git a/cuda_code/nlm_kernel.cu b/cuda_code/nlm_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..d3d9810c8a26df36f3cdd846410c86c632ad723f --- /dev/null +++ b/cuda_code/nlm_kernel.cu @@ -0,0 +1,331 @@ +/************************************************************** +* Implementation of non local means using gpu, +* In this implementation shared memory isn't used. +* The algorithm follows the matlab code was given. 
+**************************************************************/ + + +#include +#include +#include + +#define PSIZE 3 + + +//declaration of functions + +//****host functions**** +void write_image2(char *d_path , float *im, int im_size); +float **read_image(char *im_path, int im_size); +float **add_padding(float **image, int im_size, int p_size, int *pad_size); +float *gaussian_filter(int patch_size, float sigma); +float *to_rowmajor(float **image, int im_size); +void checkCuda(cudaError_t result); + +//****device functions**** +__global__ void filter_kernel(float *image, float *denoised_image, float *gauss_kernel, int p_size, int im_size, float f_s ); +__device__ void find_patch(float *pixel_patch, int current_pixel,int im_size,int p_size,float *image); +__device__ float compute_weight(float *pixel_patch,float *temp_patch,float *g_kernel, int p_size, float f_sigma); + + +int main(int argc, char *argv[]){ + + //pass all parameters needed for computation. + //patch size now is defined because must be known in compile time + if(argc !=5){ + printf("Give correct arguments:1)source file(.csv format), 2) image size, 3)filter sigma, 4)gaussian filter sigma\n"); + exit(1); + } + + char *image_path = argv[1]; + int image_size = atoi(argv[2]); + float f_sigma = atof(argv[3]); + float g_sigma = atof(argv[4]); + int patch_size = PSIZE; + float *den_image = (float *)malloc(image_size*image_size*sizeof(float)); + + //read 2-D image + float **I = read_image(image_path,image_size); + + //add padding + int padded_size; + float **D=add_padding(I,image_size,patch_size,&padded_size); + + //the initial 2-D image is now useless + for(int i=0;i>>(dev_padded_image,dev_denoised_image,dev_g_kernel,patch_size,padded_size,f_sigma); + cudaEventRecord(stop); + /***************************************************************/ + + checkCuda(cudaMemcpy(den_image,dev_denoised_image,image_size*image_size*sizeof(float),cudaMemcpyDeviceToHost)); + + checkCuda(cudaEventSynchronize(stop)); + float milliseconds = 0; + checkCuda(cudaEventElapsedTime(&milliseconds,start,stop)); + + + checkCuda(cudaEventDestroy(start)); + checkCuda(cudaEventDestroy(stop)); + checkCuda(cudaFree(dev_g_kernel)); + checkCuda(cudaFree(dev_padded_image)); + checkCuda(cudaFree(dev_denoised_image)); + + //some print messages + printf("*************************************************************************************\n"); + printf("gpu execution (without use of shared memory) finished total time is %f msec\n",milliseconds); + printf("image size is %d\n", image_size); + printf("patch size is %d\n", patch_size); + printf("filtes sigma is %f and gauss kernel sigma is %f\n",f_sigma,g_sigma); + printf("**************************************************************************************\n"); + + char denoised_path[200]; + snprintf(denoised_path,sizeof(denoised_path),"../data/gpu_denoised_image_%d_%d.csv",image_size,patch_size); + write_image2(denoised_path,den_image,image_size); + + free(GKERNEL); + free(den_image); + free(padded_I); +} + + + +/***********host functions****************/ + +float **add_padding(float **image , int im_size, int p_size, int *pad_size){ + + int padding = (p_size-1)/2; + int padded_size = im_size + padding*2; + *pad_size = padded_size; + + //allocate memomry for padded image + float **padded_image =(float **)malloc(padded_size*sizeof(float *)); + for(int i=0;i=padding+im_size)padded_image[row][col] = image[row-padding][im_size-1-(col-padding-im_size)]; + else padded_image[row][col] = image[row-padding][col-padding]; + } + } + + 
//mirroring first and last rows. + for(int row=0;rowmax) max = kernel[x*patch_size+y]; + } + } + + //devide also with the max value + for(int x=0;x +#include + +#define CUDA_CHECK(condition) \ + /* Code block avoids redefinition of cudaError_t error */ \ + do { \ + cudaError_t error = condition; \ + if (error != cudaSuccess) { \ + std::cout << cudaGetErrorString(error) << std::endl; \ + } \ + } while (0) + +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) +int const threadsPerBlock = sizeof(unsigned long long) * 8; + +__device__ inline float devIoU(float const * const a, float const * const b) { + float left = max(a[0], b[0]), right = min(a[2], b[2]); + float top = max(a[1], b[1]), bottom = min(a[3], b[3]); + float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); + float interS = width * height; + float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); + float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); + return interS / (Sa + Sb - interS); +} + +__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, + const float *dev_boxes, unsigned long long *dev_mask) { + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + __shared__ float block_boxes[threadsPerBlock * 5]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 5 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; + block_boxes[threadIdx.x * 5 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; + block_boxes[threadIdx.x * 5 + 2] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; + block_boxes[threadIdx.x * 5 + 3] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; + block_boxes[threadIdx.x * 5 + 4] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const float *cur_box = dev_boxes + cur_box_idx * 5; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { + t |= 1ULL << i; + } + } + const int col_blocks = DIVUP(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +void _set_device(int device_id) { + int current_device; + CUDA_CHECK(cudaGetDevice(¤t_device)); + if (current_device == device_id) { + return; + } + // The call to cudaSetDevice must come before any calls to Get, which + // may perform initialization using the GPU. 
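/* Hedged sketch (CPU reference with illustrative names): the bitmask kernel
   above together with the host loop in _nms() implements ordinary greedy NMS
   on boxes the caller has already sorted by score.  The same selection rule,
   written naively on the host for comparison: */
static float iouHostSketch(const float *a, const float *b) {
  const float left   = a[0] > b[0] ? a[0] : b[0];
  const float top    = a[1] > b[1] ? a[1] : b[1];
  const float right  = a[2] < b[2] ? a[2] : b[2];
  const float bottom = a[3] < b[3] ? a[3] : b[3];
  const float w = right - left + 1 > 0.f ? right - left + 1 : 0.f;
  const float h = bottom - top + 1 > 0.f ? bottom - top + 1 : 0.f;
  const float inter = w * h;
  const float sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  const float sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return inter / (sa + sb - inter);
}

static int greedyNmsReferenceSketch(const float *boxes,  // [n][5], sorted by descending score
                                    int n, float thresh, int *keep_out) {
  std::vector<char> suppressed(n, 0);
  int kept = 0;
  for (int i = 0; i < n; ++i) {
    if (suppressed[i]) continue;
    keep_out[kept++] = i;                                 // keep the best remaining box
    for (int j = i + 1; j < n; ++j)
      if (!suppressed[j] && iouHostSketch(boxes + i * 5, boxes + j * 5) > thresh)
        suppressed[j] = 1;                                // suppress heavy overlaps
  }
  return kept;
}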
+ CUDA_CHECK(cudaSetDevice(device_id)); +} + +void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, + int boxes_dim, float nms_overlap_thresh, int device_id) { + _set_device(device_id); + + float* boxes_dev = NULL; + unsigned long long* mask_dev = NULL; + + const int col_blocks = DIVUP(boxes_num, threadsPerBlock); + + CUDA_CHECK(cudaMalloc(&boxes_dev, + boxes_num * boxes_dim * sizeof(float))); + CUDA_CHECK(cudaMemcpy(boxes_dev, + boxes_host, + boxes_num * boxes_dim * sizeof(float), + cudaMemcpyHostToDevice)); + + CUDA_CHECK(cudaMalloc(&mask_dev, + boxes_num * col_blocks * sizeof(unsigned long long))); + + dim3 blocks(DIVUP(boxes_num, threadsPerBlock), + DIVUP(boxes_num, threadsPerBlock)); + dim3 threads(threadsPerBlock); + nms_kernel<<>>(boxes_num, + nms_overlap_thresh, + boxes_dev, + mask_dev); + + std::vector mask_host(boxes_num * col_blocks); + CUDA_CHECK(cudaMemcpy(&mask_host[0], + mask_dev, + sizeof(unsigned long long) * boxes_num * col_blocks, + cudaMemcpyDeviceToHost)); + + std::vector remv(col_blocks); + memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); + + int num_to_keep = 0; + for (int i = 0; i < boxes_num; i++) { + int nblock = i / threadsPerBlock; + int inblock = i % threadsPerBlock; + + if (!(remv[nblock] & (1ULL << inblock))) { + keep_out[num_to_keep++] = i; + unsigned long long *p = &mask_host[0] + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv[j] |= p[j]; + } + } + } + *num_out = num_to_keep; + + CUDA_CHECK(cudaFree(boxes_dev)); + CUDA_CHECK(cudaFree(mask_dev)); +} diff --git a/cuda_code/nms_kernel_68.cu b/cuda_code/nms_kernel_68.cu new file mode 100644 index 0000000000000000000000000000000000000000..986fc347a3f853531f90724a46fddff2d1068d78 --- /dev/null +++ b/cuda_code/nms_kernel_68.cu @@ -0,0 +1,137 @@ +// ------------------------------------------------------------------ +// R-3D +// Copyright (c) 2017 Boston University +// Licensed under The MIT License [see LICENSE for details] +// Modified by Lu He +// ------------------------------------------------------------------ + +#include "gpu_nms.hpp" +#include +#include + +#define CUDA_CHECK(condition) \ + /* Code block avoids redefinition of cudaError_t error */ \ + do { \ + cudaError_t error = condition; \ + if (error != cudaSuccess) { \ + std::cout << cudaGetErrorString(error) << std::endl; \ + } \ + } while (0) + +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) +int const threadsPerBlock = sizeof(unsigned long long) * 8; + +__device__ inline float devIoU(float const * const a, float const * const b) { + float left = max(a[0], b[0]), right = min(a[1], b[1]); + float interS = max(right - left + 1, 0.f); + float Sa = a[1] - a[0] + 1; + float Sb = b[1] - b[0] + 1; + return interS / (Sa + Sb - interS); +} + +__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, + const float *dev_boxes, unsigned long long *dev_mask) { + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + __shared__ float block_boxes[threadsPerBlock * 3]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 3 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 3 + 0]; + block_boxes[threadIdx.x * 3 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 3 + 1]; + block_boxes[threadIdx.x * 3 + 2] = + 
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 3 + 2]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const float *cur_box = dev_boxes + cur_box_idx * 3; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (devIoU(cur_box, block_boxes + i * 3) > nms_overlap_thresh) { + t |= 1ULL << i; + } + } + const int col_blocks = DIVUP(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +void _set_device(int device_id) { + int current_device; + CUDA_CHECK(cudaGetDevice(¤t_device)); + if (current_device == device_id) { + return; + } + // The call to cudaSetDevice must come before any calls to Get, which + // may perform initialization using the GPU. + CUDA_CHECK(cudaSetDevice(device_id)); +} + +void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, + int boxes_dim, float nms_overlap_thresh, int device_id) { + _set_device(device_id); + + float* boxes_dev = NULL; + unsigned long long* mask_dev = NULL; + + const int col_blocks = DIVUP(boxes_num, threadsPerBlock); + + CUDA_CHECK(cudaMalloc(&boxes_dev, + boxes_num * boxes_dim * sizeof(float))); + CUDA_CHECK(cudaMemcpy(boxes_dev, + boxes_host, + boxes_num * boxes_dim * sizeof(float), + cudaMemcpyHostToDevice)); + + CUDA_CHECK(cudaMalloc(&mask_dev, + boxes_num * col_blocks * sizeof(unsigned long long))); + + dim3 blocks(DIVUP(boxes_num, threadsPerBlock), + DIVUP(boxes_num, threadsPerBlock)); + dim3 threads(threadsPerBlock); + nms_kernel<<>>(boxes_num, + nms_overlap_thresh, + boxes_dev, + mask_dev); + + std::vector mask_host(boxes_num * col_blocks); + CUDA_CHECK(cudaMemcpy(&mask_host[0], + mask_dev, + sizeof(unsigned long long) * boxes_num * col_blocks, + cudaMemcpyDeviceToHost)); + + std::vector remv(col_blocks); + memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); + + int num_to_keep = 0; + for (int i = 0; i < boxes_num; i++) { + int nblock = i / threadsPerBlock; + int inblock = i % threadsPerBlock; + + if (!(remv[nblock] & (1ULL << inblock))) { + keep_out[num_to_keep++] = i; + unsigned long long *p = &mask_host[0] + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv[j] |= p[j]; + } + } + } + *num_out = num_to_keep; + + CUDA_CHECK(cudaFree(boxes_dev)); + CUDA_CHECK(cudaFree(mask_dev)); +} diff --git a/cuda_code/nms_layer.cu b/cuda_code/nms_layer.cu new file mode 100644 index 0000000000000000000000000000000000000000..e82100aac2e8ff6d025ab50651ab4763266104c0 --- /dev/null +++ b/cuda_code/nms_layer.cu @@ -0,0 +1,135 @@ +#include "caffe/ultinous/nms_layer.hpp" + +namespace caffe +{ +namespace ultinous +{ + +#define BLOCK_DIM_X 32 +#define BLOCK_DIM_Y 16 + + +template +static +__global__ void nmsFilterKernel(Dtype *dst, Dtype const *src, int const width, int const height, int const channels) +{ + int const blockStartX = blockDim.x * blockIdx.x; + int const blockStartY = blockDim.y * blockIdx.y; + + int const x = blockStartX + threadIdx.x; + int const y = blockStartY + threadIdx.y; + int const c = blockIdx.z % channels; + int const ind = blockIdx.z / channels; + int const channelStart = ind * width * height * channels + c * width * height; + + int const shmHeight = BLOCK_DIM_Y + 2 * R; + int const shmWidth = BLOCK_DIM_X + 2 * R; + + __shared__ Dtype shm[shmWidth * shmHeight]; + +#pragma unroll + for(int j = threadIdx.y; j < shmHeight; j += blockDim.y) + { +#pragma unroll + 
for(int i = threadIdx.x; i < shmWidth; i += blockDim.x) + { + int const locX = max(min(blockStartX + i - R, int(width - 1) ), 0); + int const locY = max(min(blockStartY + j - R, int(height - 1) ), 0); + shm[j * shmWidth + i] = src[channelStart + locY * width + locX]; + } + } + + __syncthreads(); + + int const locX = threadIdx.x + R; + int const locY = threadIdx.y + R; + + Dtype const val = shm[locY * shmWidth + locX]; + Dtype maxVal = val; + +#pragma unroll + for(int j = locY - R; j < locY + R + 1; ++j) +#pragma unroll + for(int i = locX - R; i < locX + R + 1; ++i ) + maxVal = fmax(shm[j * shmWidth + i], maxVal); + + if(y < height && x < width) + dst[channelStart + y * width + x] = (val == maxVal)? val : 0.f; +} + + + +template +static inline +void nmsFilterKernelWrapper(Blob const *src, Blob *dst) +{ + Dtype const *src_data = src->gpu_data(); + Dtype *dst_data = dst->mutable_gpu_data(); + + auto const num_ = src->num(); + auto const channels_ = src->channels(); + auto const height_ = src->height(); + auto const width_ = src->width(); + + dim3 const block(BLOCK_DIM_X, BLOCK_DIM_Y); + dim3 const grid( + static_cast((width_ + BLOCK_DIM_X - 1) / BLOCK_DIM_X) + , static_cast((height_ + BLOCK_DIM_Y - 1) / BLOCK_DIM_Y) + , static_cast(channels_ * num_) + ); + + nmsFilterKernel<<>>(dst_data, src_data, width_, height_, channels_); +} + + + +template +static inline +void expandNMSFilterKernel(Blob const *src, Blob *dst, uint radius) +{ + if( radius < MAX ) + { + expandNMSFilterKernel(src, dst, radius); + } + else + nmsFilterKernelWrapper(src, dst); +} + + +template <> +void expandNMSFilterKernel(Blob const *src, Blob *dst, uint) +{ + NOT_IMPLEMENTED; +} + +template <> +void expandNMSFilterKernel(Blob const *src, Blob *dst, uint) +{ + NOT_IMPLEMENTED; +} + +template +void NMSLayer::Forward_gpu(const vector *> &bottom, const vector *> &top) +{ + constexpr uint MAX_ITER = 8; + uint radius = kernel_size / 2; + + CHECK_LT(radius, MAX_ITER); + CHECK_GE(radius, 1); + + expandNMSFilterKernel(bottom[0], top[0], radius); + + CUDA_POST_KERNEL_CHECK; +} + +template +void NMSLayer::Backward_gpu(const vector *> &top, const vector &propagate_down, + const vector *> &bottom) +{ + // not implemented +} + +INSTANTIATE_LAYER_GPU_FUNCS(NMSLayer); + +} // namespace ultinous +} // namespace caffe diff --git a/cuda_code/nn_resize_op_kernel.cu b/cuda_code/nn_resize_op_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..d10b315837db93d8f936058a1165ac9b23b0d12e --- /dev/null +++ b/cuda_code/nn_resize_op_kernel.cu @@ -0,0 +1,219 @@ +#ifdef WITH_CUDA + +#include "core/context_cuda.h" +#include "utils/op_kernel.h" + +namespace dragon { + +namespace kernel { + +/*! 
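/* Hedged sketch (illustrative, mirrors the expandNMSFilterKernel() recursion
   above): a small runtime radius is turned into a compile-time template
   argument by linear recursion, so the kernel can size its window and unroll
   its loops statically.  A 1-D analogue with assumed names: */
template <int R>
__global__ void windowedMaxSketch(const float *src, float *dst, int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  float m = src[i];
#pragma unroll
  for (int d = -R; d <= R; ++d) {                 // 2R+1 taps, unrolled at compile time
    const int j = min(max(i + d, 0), n - 1);      // clamp at the borders
    m = fmaxf(m, src[j]);
  }
  dst[i] = (src[i] == m) ? src[i] : 0.f;          // keep only local maxima
}

template <int MAX>
void dispatchRadiusSketch(const float *src, float *dst, int n, int radius) {
  if (radius < MAX)
    dispatchRadiusSketch<MAX - 1>(src, dst, n, radius);   // walk down to the matching R
  else
    windowedMaxSketch<MAX><<<(n + 255) / 256, 256>>>(src, dst, n);
}

template <>
void dispatchRadiusSketch<0>(const float *, float *, int, int) {}  // radius 0: nothing to do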
NNResize */ + +template +__global__ void _NNResize_NCHW( + const int nthreads, + const int C, + const int H, + const int W, + const int out_h, + const int out_w, + const float scale_h, + const float scale_w, + const T* x, + T* y) { + CUDA_1D_KERNEL_LOOP(y_idx, nthreads) { + const int w = y_idx % out_w; + const int h = (y_idx / out_w) % out_h; + const int c = (y_idx / out_w / out_h) % C; + const int n = y_idx / out_w / out_h / C; + const int h_in = min(int(floorf(h * scale_h)), H - 1); + const int w_in = min(int(floorf(w * scale_w)), W - 1); +#if __CUDA_ARCH__ >= 350 + y[y_idx] = __ldg(x + (((n * C + c) * H + h_in) * W + w_in)); +#else + y[y_idx] = x[((n * C + c) * H + h_in) * W + w_in]; +#endif + } +} + +template +__global__ void _NNResize_NHWC( + const int nthreads, + const int C, + const int H, + const int W, + const int out_h, + const int out_w, + const float scale_h, + const float scale_w, + const T* x, + T* y) { + CUDA_1D_KERNEL_LOOP(y_idx, nthreads) { + const int c = y_idx % C; + const int w = (y_idx / C) % out_w; + const int h = (y_idx / C / out_w) % out_h; + const int n = y_idx / C / out_w / out_h; + const int h_in = min(int(floorf(h * scale_h)), H - 1); + const int w_in = min(int(floorf(w * scale_w)), W - 1); +#if __CUDA_ARCH__ >= 350 + y[y_idx] = __ldg(x + (((n * H + h_in) * W + w_in) * C + c)); +#else + y[y_idx] = x[((n * H + h_in) * W + w_in) * C + c]; +#endif + } +} + +/*! NNResize */ + +template <> void NNResize( + const int N, + const int C, + const int H, + const int W, + const int out_h, + const int out_w, + const string& data_format, + const float* x, + float* y, + CUDAContext* ctx) { + auto nthreads = N * C * out_h * out_w; + const float scale_h = (float)H / out_h; + const float scale_w = (float)W / out_w; + if (data_format == "NCHW") { + _NNResize_NCHW + << < CUDA_BLOCKS(nthreads), CUDA_THREADS, + 0, ctx->cuda_stream() >> > + (nthreads, C, H, W, out_h, out_w, + scale_h, scale_w, x, y); + } else if(data_format == "NHWC") { + _NNResize_NHWC + << < CUDA_BLOCKS(nthreads), CUDA_THREADS, + 0, ctx->cuda_stream() >> > + (nthreads, C, H, W, out_h, out_w, + scale_h, scale_w, x, y); + } else LOG(FATAL) << "Unknown data format: " << data_format; +} + +/*! NNResize */ + +template <> void NNResize( + const int N, + const int C, + const int H, + const int W, + const int out_h, + const int out_w, + const string& data_format, + const float16* x, + float16* y, + CUDAContext* ctx) { + auto nthreads = N * C * out_h * out_w; + const float scale_h = (float)H / out_h; + const float scale_w = (float)W / out_w; + if (data_format == "NCHW") { + _NNResize_NCHW + << < CUDA_BLOCKS(nthreads), CUDA_THREADS, + 0, ctx->cuda_stream() >> > + (nthreads, C, H, W, out_h, out_w, scale_h, scale_w, + reinterpret_cast(x), + reinterpret_cast(y)); + } else if(data_format == "NHWC") { + _NNResize_NHWC + << < CUDA_BLOCKS(nthreads), CUDA_THREADS, + 0, ctx->cuda_stream() >> > + (nthreads, C, H, W, out_h, out_w, scale_h, scale_w, + reinterpret_cast(x), + reinterpret_cast(y)); + } else LOG(FATAL) << "Unknown data format: " << data_format; +} + +/*! 
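/* Hedged sketch (host reference for the nearest-neighbour kernels above):
   every output element (n, c, h, w) copies the source element whose row and
   column are floor(h * H / out_h) and floor(w * W / out_w), clamped to the
   image.  The helper name is illustrative. */
static inline int NNSourceIndexSketch(int n, int c, int h, int w,
                                      int C, int H, int W, int out_h, int out_w) {
  int h_in = (int)((float)h * H / out_h);   // floor, since all operands are non-negative
  int w_in = (int)((float)w * W / out_w);
  if (h_in > H - 1) h_in = H - 1;           // clamp to the source image
  if (w_in > W - 1) w_in = W - 1;
  return ((n * C + c) * H + h_in) * W + w_in;   // flat NCHW offset
}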
NNResizeGrad */ + +template + __global__ void _NNResizeGrad_NCHW( + const int nthreads, + const int C, + const int H, + const int W, + const int out_h, + const int out_w, + const float scale_h, + const float scale_w, + const T* dy, + T* dx) { + CUDA_1D_KERNEL_LOOP(y_idx, nthreads) { + const int w = y_idx % out_w; + const int h = (y_idx / out_w) % out_h; + const int c = (y_idx / out_w / out_h) % C; + const int n = y_idx / out_w / out_h / C; + const int h_in = min(int(floorf(h * scale_h)), H - 1); + const int w_in = min(int(floorf(w * scale_w)), W - 1); +#if __CUDA_ARCH__ >= 350 + atomicAdd(&dx[((n * C + c) * H + h_in) * W + w_in], __ldg(dy + y_idx)); +#else + atomicAdd(&dx[((n * C + c) * H + h_in) * W + w_in], dy[y_idx]); +#endif + } +} + +template +__global__ void _NNResizeGrad_NHWC( + const int nthreads, + const int C, + const int H, + const int W, + const int out_h, + const int out_w, + const float scale_h, + const float scale_w, + const T* dy, + T* dx) { + CUDA_1D_KERNEL_LOOP(y_idx, nthreads) { + const int c = y_idx % C; + const int w = (y_idx / C) % out_w; + const int h = (y_idx / C / out_w) % out_h; + const int n = y_idx / C / out_w / out_h; + const int h_in = min(int(floorf(h * scale_h)), H - 1); + const int w_in = min(int(floorf(w * scale_w)), W - 1); +#if __CUDA_ARCH__ >= 350 + atomicAdd(&dx[((n * H + h_in) * W + w_in) * C + c], __ldg(dy + y_idx)); +#else + atomicAdd(&dx[((n * H + h_in) * W + w_in) * C + c], dy[y_idx]); +#endif + } +} + +template <> void NNResizeGrad( + const int N, + const int C, + const int H, + const int W, + const int out_h, + const int out_w, + const string& data_format, + const float* dy, + float* dx, + CUDAContext* ctx) { + auto nthreads = N * C * out_h * out_w; + const float scale_h = (float)H / out_h; + const float scale_w = (float)W / out_w; + if (data_format == "NCHW") { + _NNResizeGrad_NCHW + << < CUDA_BLOCKS(nthreads), CUDA_THREADS, + 0, ctx->cuda_stream() >> > + (nthreads, C, H, W, out_h, out_w, + scale_h, scale_w, dy, dx); + } else if(data_format == "NHWC") { + _NNResizeGrad_NHWC + << < CUDA_BLOCKS(nthreads), CUDA_THREADS, + 0, ctx->cuda_stream() >> > + (nthreads, C, H, W, out_h, out_w, + scale_h, scale_w, dy, dx); + } else LOG(FATAL) << "Unknown data format: " << data_format; +} + +} // namespace kernel + +} // namepsace dragon + +#endif // WITH_CUDA \ No newline at end of file diff --git a/cuda_code/noPBCForce_1.cu b/cuda_code/noPBCForce_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..948248c81f90076c08cd23a76acc4c678cc5f0c2 --- /dev/null +++ b/cuda_code/noPBCForce_1.cu @@ -0,0 +1,88 @@ +extern "C" __global__ void calcNoPBCEnForces( + mixed* __restrict__ energyBuffer, + const real4* __restrict__ posq, + unsigned long long* __restrict__ forceBuffers, + real* __restrict__ dedq, + const real4* __restrict__ parameters, + const int* __restrict__ pairidx0, + const int* __restrict__ pairidx1, + int numParticles, + int paddedNumAtoms +) { + int totpair = numParticles * (numParticles - 1) / 2; + for (int npair = blockIdx.x*blockDim.x+threadIdx.x; npair < totpair; npair += blockDim.x*gridDim.x) { + int ii = pairidx0[npair]; + int jj = pairidx1[npair]; + real3 delta = make_real3(posq[jj].x-posq[ii].x,posq[jj].y-posq[ii].y,posq[jj].z-posq[ii].z); + real R2 = delta.x * delta.x + delta.y * delta.y + delta.z * delta.z; + real inverseR = RSQRT(R2); + real c1c2 = posq[ii].w * posq[jj].w; + + real sig = parameters[ii].y + parameters[jj].y; + real sig2 = inverseR * sig; + sig2 *= sig2; + real sig6 = sig2 * sig2 * sig2; + real epssig6 = 
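/* Hedged sketch (host side, illustrative; not part of this plugin's source):
   calcNoPBCEnForces above walks a flat list of all i < j particle pairs
   through pairidx0/pairidx1.  One way a caller could fill those arrays: */
static void buildPairListsSketch(int numParticles, int *idx0, int *idx1) {
  int n = 0;
  for (int i = 0; i < numParticles; ++i) {
    for (int j = i + 1; j < numParticles; ++j) {   // each unordered pair exactly once
      idx0[n] = i;
      idx1[n] = j;
      ++n;
    }
  }
  // n == numParticles * (numParticles - 1) / 2, matching totpair in the kernel
}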
parameters[ii].z * parameters[jj].z * sig6; + + real ener = ONE_4PI_EPS0 * c1c2 * inverseR + epssig6 * (sig6 - 1); + + atomicAdd(&energyBuffer[ii], ener); + real dEdRdR = ONE_4PI_EPS0 * c1c2 * inverseR + epssig6 * (12 * sig6 - 6); + dEdRdR *= inverseR * inverseR; + real3 force = - dEdRdR * delta; + atomicAdd(&forceBuffers[ii], static_cast((long long) (force.x*0x100000000))); + atomicAdd(&forceBuffers[ii+paddedNumAtoms], static_cast((long long) (force.y*0x100000000))); + atomicAdd(&forceBuffers[ii+2*paddedNumAtoms], static_cast((long long) (force.z*0x100000000))); + atomicAdd(&forceBuffers[jj], static_cast((long long) (-force.x*0x100000000))); + atomicAdd(&forceBuffers[jj+paddedNumAtoms], static_cast((long long) (-force.y*0x100000000))); + atomicAdd(&forceBuffers[jj+2*paddedNumAtoms], static_cast((long long) (-force.z*0x100000000))); + + atomicAdd(&dedq[ii], ONE_4PI_EPS0*posq[jj].w*inverseR); + atomicAdd(&dedq[jj], ONE_4PI_EPS0*posq[ii].w*inverseR); + } +} + +extern "C" __global__ void calcNoPBCExclusions( + mixed* __restrict__ energyBuffer, + const real4* __restrict__ posq, + unsigned long long* __restrict__ forceBuffers, + real* __restrict__ dedq, + const real4* __restrict__ parameters, + const int* __restrict__ expairidx0, + const int* __restrict__ expairidx1, + const int totpair, + const int numParticles, + const int paddedNumAtoms) { + for (int npair = blockIdx.x*blockDim.x+threadIdx.x; npair < totpair; npair += blockDim.x*gridDim.x) { + int ii = expairidx0[npair]; + int jj = expairidx1[npair]; + real3 delta = make_real3(posq[jj].x-posq[ii].x,posq[jj].y-posq[ii].y,posq[jj].z-posq[ii].z); + real R2 = delta.x * delta.x + delta.y * delta.y + delta.z * delta.z; + real inverseR = RSQRT(R2); + real c1c2 = posq[ii].w * posq[jj].w; + + real sig = parameters[ii].y + parameters[jj].y; + real sig2 = inverseR * sig; + sig2 *= sig2; + real sig6 = sig2 * sig2 * sig2; + real epssig6 = parameters[ii].z * parameters[jj].z * sig6; + + real ener = ONE_4PI_EPS0 * c1c2 * inverseR + epssig6 * (sig6 - 1); + + energyBuffer[npair] -= ener; + + real dEdRdR = ONE_4PI_EPS0 * c1c2 * inverseR + epssig6 * (12 * sig6 - 6); + dEdRdR *= inverseR * inverseR; + + real3 force = dEdRdR * delta; + atomicAdd(&forceBuffers[ii], static_cast((long long) (force.x*0x100000000))); + atomicAdd(&forceBuffers[ii+paddedNumAtoms], static_cast((long long) (force.y*0x100000000))); + atomicAdd(&forceBuffers[ii+2*paddedNumAtoms], static_cast((long long) (force.z*0x100000000))); + atomicAdd(&forceBuffers[jj], static_cast((long long) (-force.x*0x100000000))); + atomicAdd(&forceBuffers[jj+paddedNumAtoms], static_cast((long long) (-force.y*0x100000000))); + atomicAdd(&forceBuffers[jj+2*paddedNumAtoms], static_cast((long long) (-force.z*0x100000000))); + + atomicAdd(&dedq[ii], -ONE_4PI_EPS0*posq[jj].w*inverseR); + atomicAdd(&dedq[jj], -ONE_4PI_EPS0*posq[ii].w*inverseR); + } +} \ No newline at end of file diff --git a/cuda_code/nonoverlapping_stencil.cu b/cuda_code/nonoverlapping_stencil.cu new file mode 100644 index 0000000000000000000000000000000000000000..66a489f2cf2dbf5fb50ab129e3e5f64195c712c3 --- /dev/null +++ b/cuda_code/nonoverlapping_stencil.cu @@ -0,0 +1,242 @@ +#define DAWN_GENERATED 1 +#undef DAWN_BACKEND_T +#define DAWN_BACKEND_T CUDA +#ifndef BOOST_RESULT_OF_USE_TR1 +#define BOOST_RESULT_OF_USE_TR1 1 +#endif +#ifndef BOOST_NO_CXX11_DECLTYPE +#define BOOST_NO_CXX11_DECLTYPE 1 +#endif +#ifndef GRIDTOOLS_DAWN_HALO_EXTENT +#define GRIDTOOLS_DAWN_HALO_EXTENT 3 +#endif +#ifndef BOOST_PP_VARIADICS +#define BOOST_PP_VARIADICS 1 +#endif 
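/* Hedged sketch (illustrative helpers, not part of either file): the force
   kernels in noPBCForce_1.cu above accumulate per-atom forces into unsigned
   long long buffers as 32.32 fixed-point values (scale 0x100000000), which
   keeps integer atomicAdd results independent of thread ordering.  Encoding
   and decoding look like: */
__device__ inline unsigned long long encodeFixedPointSketch(float f) {
  return static_cast<unsigned long long>(static_cast<long long>(f * 0x100000000));
}

__host__ __device__ inline float decodeFixedPointSketch(unsigned long long v) {
  return static_cast<long long>(v) / static_cast<float>(0x100000000);   // back to float
}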
+#ifndef BOOST_FUSION_DONT_USE_PREPROCESSED_FILES +#define BOOST_FUSION_DONT_USE_PREPROCESSED_FILES 1 +#endif +#ifndef BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS +#define BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS 1 +#endif +#ifndef GT_VECTOR_LIMIT_SIZE +#define GT_VECTOR_LIMIT_SIZE 30 +#endif +#ifndef BOOST_FUSION_INVOKE_MAX_ARITY +#define BOOST_FUSION_INVOKE_MAX_ARITY GT_VECTOR_LIMIT_SIZE +#endif +#ifndef FUSION_MAX_VECTOR_SIZE +#define FUSION_MAX_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE +#endif +#ifndef FUSION_MAX_MAP_SIZE +#define FUSION_MAX_MAP_SIZE GT_VECTOR_LIMIT_SIZE +#endif +#ifndef BOOST_MPL_LIMIT_VECTOR_SIZE +#define BOOST_MPL_LIMIT_VECTOR_SIZE GT_VECTOR_LIMIT_SIZE +#endif +#include +using namespace gridtools::dawn; + +namespace dawn_generated { +namespace cuda { +__global__ void __launch_bounds__(128) + generated_stencil59_ms58_kernel(const int isize, const int jsize, const int ksize, + const int stride_111_1, const int stride_111_2, + ::dawn::float_type* const in, ::dawn::float_type* const out) { + + // Start kernel + const unsigned int nx = isize; + const unsigned int ny = jsize; + const int block_size_i = (blockIdx.x + 1) * 32 < nx ? 32 : nx - blockIdx.x * 32; + const int block_size_j = (blockIdx.y + 1) * 4 < ny ? 4 : ny - blockIdx.y * 4; + + // computing the global position in the physical domain + + // In a typical cuda block we have the following regions + + // aa bbbbbbbb cc + + // aa bbbbbbbb cc + + // hh dddddddd ii + + // hh dddddddd ii + + // hh dddddddd ii + + // hh dddddddd ii + + // ee ffffffff gg + + // ee ffffffff gg + + // Regions b,d,f have warp (or multiple of warp size) + + // Size of regions a, c, h, i, e, g are determined by max_extent_t + + // Regions b,d,f are easily executed by dedicated warps (one warp for each line) + + // Regions (a,h,e) and (c,i,g) are executed by two specialized warp + int iblock = 0 - 1; + int jblock = 0 - 1; + if(threadIdx.y < +4) { + iblock = threadIdx.x; + jblock = (int)threadIdx.y + 0; + } + // initialized iterators + int idx111 = (blockIdx.x * 32 + iblock) * 1 + (blockIdx.y * 4 + jblock) * stride_111_1; + + // Pre-fill of kcaches + for(int k = 0 + 0; k <= 10 + 0; ++k) { + + // Head fill of kcaches + if(iblock >= 0 && iblock <= block_size_i - 1 + 0 && jblock >= 0 && + jblock <= block_size_j - 1 + 0) { + ::dawn::float_type dx; + { + out[idx111] = + (((int)-4 * (__ldg(&(in[idx111])) + (__ldg(&(in[idx111 + 1 * 1])) + + (__ldg(&(in[idx111 + 1 * -1])) + + (__ldg(&(in[idx111 + stride_111_1 * -1])) + + __ldg(&(in[idx111 + stride_111_1 * 1]))))))) / + (dx * dx)); + } + } + // Flush of kcaches + + // Flush of kcaches + + // Slide kcaches + + // increment iterators + idx111 += stride_111_2; + } + // Final flush of kcaches + + // Final flush of kcaches + + // Final flush of kcaches + + // jump iterators to match the beginning of next interval + idx111 += stride_111_2 * (4); + + // Pre-fill of kcaches + for(int k = 15 + 0; k <= ksize - 1 + 0 + 0; ++k) { + + // Head fill of kcaches + if(iblock >= 0 && iblock <= block_size_i - 1 + 0 && jblock >= 0 && + jblock <= block_size_j - 1 + 0) { + { + out[idx111] = (int)10; + } + } + // Flush of kcaches + + // Flush of kcaches + + // Slide kcaches + + // increment iterators + idx111 += stride_111_2; + } + // Final flush of kcaches + + // Final flush of kcaches + + // Final flush of kcaches +} + +class generated { +public: + struct sbase : public timer_cuda { + + sbase(std::string name) : timer_cuda(name) {} + + double get_time() { return total_time(); } + }; + + struct stencil_59 : public sbase { + + // Members + + // 
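/* Hedged sketch (hand-written, for orientation only; names and the grid
   spacing argument are assumptions): the generated stencil kernel below
   reaches its neighbours through a linear index plus strides.  A conventional
   5-point Laplacian written in that same strided-index style: */
__global__ void laplacian5ptSketch(const float *in, float *out,
                                   int nx, int ny, int stride_j, float dx) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i < 1 || j < 1 || i >= nx - 1 || j >= ny - 1) return;   // skip the halo
  const int idx = j * stride_j + i;                           // linear index into the field
  out[idx] = (in[idx + 1] + in[idx - 1] +
              in[idx + stride_j] + in[idx - stride_j] -
              4.0f * in[idx]) / (dx * dx);
}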
Temporary storage typedefs + using tmp_halo_t = gridtools::halo<0, 0, 0, 0, 0>; + using tmp_meta_data_t = storage_traits_t::storage_info_t<0, 5, tmp_halo_t>; + using tmp_storage_t = storage_traits_t::data_store_t<::dawn::float_type, tmp_meta_data_t>; + const gridtools::dawn::domain m_dom; + + public: + stencil_59(const gridtools::dawn::domain& dom_, int rank, int xcols, int ycols) + : sbase("stencil_59"), m_dom(dom_) {} + static constexpr ::dawn::driver::cartesian_extent in_extent = {-1, 1, -1, 1, 0, 0}; + static constexpr ::dawn::driver::cartesian_extent out_extent = {0, 0, 0, 0, 0, 0}; + + void run(storage_ijk_t in_ds, storage_ijk_t out_ds) { + + // starting timers + start(); + { + ; + gridtools::data_view in = gridtools::make_device_view(in_ds); + gridtools::data_view out = gridtools::make_device_view(out_ds); + const unsigned int nx = m_dom.isize() - m_dom.iminus() - m_dom.iplus(); + const unsigned int ny = m_dom.jsize() - m_dom.jminus() - m_dom.jplus(); + const unsigned int nz = m_dom.ksize() - m_dom.kminus() - m_dom.kplus(); + dim3 threads(32, 4 + 0, 1); + const unsigned int nbx = (nx + 32 - 1) / 32; + const unsigned int nby = (ny + 4 - 1) / 4; + const unsigned int nbz = 1; + dim3 blocks(nbx, nby, nbz); + generated_stencil59_ms58_kernel<<>>( + nx, ny, nz, in_ds.strides()[1], in_ds.strides()[2], + (in.data() + in_ds.get_storage_info_ptr()->index(in.begin<0>(), in.begin<1>(), 0)), + (out.data() + out_ds.get_storage_info_ptr()->index(out.begin<0>(), out.begin<1>(), 0))); + }; + + // stopping timers + pause(); + } + }; + static constexpr const char* s_name = "generated"; + stencil_59 m_stencil_59; + +public: + generated(const generated&) = delete; + + // Members + + // Stencil-Data + + generated(const gridtools::dawn::domain& dom, int rank = 1, int xcols = 1, int ycols = 1) + : m_stencil_59(dom, rank, xcols, ycols) {} + + template + void sync_storages(S field) { + field.sync(); + } + + template + void sync_storages(S0 f0, S... 
fields) { + f0.sync(); + sync_storages(fields...); + } + + void run(storage_ijk_t in, storage_ijk_t out) { + sync_storages(in, out); + m_stencil_59.run(in, out); + ; + sync_storages(in, out); + } + + std::string get_name() const { return std::string(s_name); } + + void reset_meters() { m_stencil_59.reset(); } + + double get_total_time() { + double res = 0; + res += m_stencil_59.get_time(); + return res; + } +}; +} // namespace cuda +} // namespace dawn_generated diff --git a/cuda_code/noop.cu b/cuda_code/noop.cu new file mode 100644 index 0000000000000000000000000000000000000000..7e6506566664e932290c82aaee997df58ca3f71a --- /dev/null +++ b/cuda_code/noop.cu @@ -0,0 +1,47 @@ +#include "noop.h" + +void crossbowKernelNoop (void *args) { + + /* GEMM variables */ + float *A, *B, *C; + int M, N, K; + float alpha, beta; + + crossbowStreamP s = (crossbowStreamP) args; + + int offset, length; + crossbowDataBufferP unit = crossbowModelVariable (s->model, s->op->kernel->id, 1, &offset, &length); + + crossbowDataBufferP input = crossbowStreamGetCurrentInput (s); + crossbowDataBufferP output = crossbowStreamGetCurrentOutput (s); + + /* Get kernel configuration parameter */ + int axis = crossbowKernelConfigParamGetIntValue ((crossbowKernelConfigParamP) crossbowArrayListGet(s->op->kernel->parameters, 0)); + + /* Set device buffers */ + A = (float *) (input->dev); + B = (float *) (unit->dev); + C = (float *) (output->dev); + M = crossbowVariableSchemaCountElementsInRange (s->examples->schema, 0, axis); + K = crossbowVariableSchemaCountElementsFrom (s->examples->schema, axis); + N = K; + alpha = 1; + beta = 0; + +#ifndef CUBLAS_NOOP + checkCublasStatus(cublasSgemm (s->cublasHandle[s->op->branch], CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, A, M, B, K, &beta, C, M)); +#else + /* Subterfuge unused parameter warnings */ + UNUSED (M); + UNUSED (N); + UNUSED (K); + UNUSED (alpha); + UNUSED (A); + UNUSED (B); + UNUSED (beta); + UNUSED (C); +#endif + + /* Store output in stream */ + crossbowListAppend(s->outputs[s->op->id], output); +} diff --git a/cuda_code/nop_4.cu b/cuda_code/nop_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..f47755722559a910249edf36b28e9a72ed5ebdd3 --- /dev/null +++ b/cuda_code/nop_4.cu @@ -0,0 +1,49 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/*! \file + \brief Basic CUDA file for testing compiler flags. +*/ + +__device__ int inner() +{ + return -1; +} + +__global__ void test() +{ + inner(); +} + +int main() +{ + test<<<1,1>>>(); + return 0; +} diff --git a/cuda_code/norace_intrawarp_none-blkatom.cu b/cuda_code/norace_intrawarp_none-blkatom.cu new file mode 100644 index 0000000000000000000000000000000000000000..8c454a8af01a13c12ac84b9e41a2ccfa94b5e6d8 --- /dev/null +++ b/cuda_code/norace_intrawarp_none-blkatom.cu @@ -0,0 +1,33 @@ +#include + +#define NBLOCKS 1 +#define TPERBLK 1 + +#define NTHREADS (NBLOCKS * TPERBLK) + +void errCheck() +{ + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("Error %d: %s\n", err, cudaGetErrorString(err)); + exit(1); + } +} + +__device__ int flag = 0; + +__global__ void kmain(volatile unsigned int *data) +{ + atomicExch_block((unsigned int *)&data[0], 1); + data[0] = 2; +} + +int main() +{ + unsigned int *d_data; + cudaMalloc(&d_data, sizeof(unsigned int)); + kmain<<>>(d_data); + errCheck(); + return 0; +} + diff --git a/cuda_code/normal_distribution_1.cu b/cuda_code/normal_distribution_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..19150081ebc6774d9c1f233a3da2951f87af0e69 --- /dev/null +++ b/cuda_code/normal_distribution_1.cu @@ -0,0 +1,68 @@ +/* +Copyright 2020 The OneFlow Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +#include "oneflow/user/kernels/distributions/normal_distribution.h" +#include "oneflow/core/common/data_type.h" + +namespace oneflow { + +namespace { + +template +__device__ T GenNormal(curandState* state, const T mean, const T std); + +template<> +__device__ float GenNormal(curandState* state, const float mean, const float std) { + return (curand_normal(state) + mean) / std; +} + +template<> +__device__ double GenNormal(curandState* state, const double mean, const double std) { + return (curand_normal_double(state) + mean) / std; +} + +template +__global__ void GenerateGpu(curandState* state, const int64_t elem_cnt, T* dptr, const T mean, + const T std) { + const int id = blockIdx.x * blockDim.x + threadIdx.x; + curandState localState = state[id]; + if (id < elem_cnt) { dptr[id] = GenNormal(&localState, mean, std); } + state[id] = localState; +} + +} // namespace + +template +void NormalDistribution::operator()( + DeviceCtx* device_ctx, const int64_t elem_cnt, T* dptr, + const std::shared_ptr& generator) const { + CHECK_GE(elem_cnt, 0); + auto gen = CHECK_JUST(generator->Get()); + int32_t block_num = gen->max_block_num(); + int32_t thread_num = gen->max_thread_num(); + auto* curand_states = gen->curand_states(); + GenerateGpu<<cuda_stream()>>>(curand_states, elem_cnt, + dptr, mean_, std_); +} + +#define INITIATE_GPU_NORMAL_DISTRIBUTION(T, typeproto) \ + template void NormalDistribution::operator()( \ + DeviceCtx* device_ctx, const int64_t elem_cnt, T* dptr, \ + const std::shared_ptr& generator) const; + +OF_PP_FOR_EACH_TUPLE(INITIATE_GPU_NORMAL_DISTRIBUTION, FLOATING_DATA_TYPE_SEQ) + +} // namespace oneflow \ No newline at end of file diff --git a/cuda_code/normalize_kernels.cu b/cuda_code/normalize_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..3e61d4e35832cb9b2f90ebc9b64d8398f6d6b9e6 --- /dev/null +++ b/cuda_code/normalize_kernels.cu @@ -0,0 +1,1169 @@ +#include "block_reduce.h" +#include "kernels.h" +#include + +namespace cg = cooperative_groups; +const float LN_EPSILON = 1e-8f; +#define TILE_DIM 32 + +template __forceinline__ __device__ T add_eps(T x) { + return fabsf(x) > LN_EPSILON ? x : (x < 0 ? -LN_EPSILON : LN_EPSILON); +} + +/** +@brief: ker_layer_norm +Standard layer normalization. +It will not only output the layer norm result, + but also outputs variance. + may also output means, depends on whether + the means argument is nullptr + +@thread +gridDim.x = batch_size * seq_len +blockDim.x = hidden_size + +@param +ln_res: [batch_size* seq_len, hidden_size], ln result. +vars: [batch_size* seq_len], variance per token +means: [batch_size* seq_len], means per token, can be nullput +inp: [batch_size * seq_len, hidden_size], ln input. +scale: [hidden_size], ln scale +bias: [hidden_size], ln bias +*/ +template +__global__ void ker_layer_norm(T *ln_res, T *vars, T *means, const T *inp, + const T *scale, const T *bias, int hidden_size) { + // step 0. compute local sum + float l_sum = 0; + float l_square_sum = 0; + const float4 *inp_f4 = (const float4 *)inp + blockIdx.x * hidden_size; + for (uint idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) { + float4 val = inp_f4[idx]; + l_sum += val.x + val.y + val.z + val.w; + l_square_sum += + val.x * val.x + val.y * val.y + val.z * val.z + val.w * val.w; + } + + // step 1. 
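/* Hedged sketch (self-contained and independent of oneflow's generator
   plumbing in normal_distribution_1.cu above; kernel names are assumptions):
   a common pattern is one curandState per thread, a grid-stride loop over the
   buffer, and the affine transform mean + std * z on a standard normal z. */
#include <curand_kernel.h>   // only needed if this sketch is compiled on its own

__global__ void initStatesSketch(curandState *states, unsigned long long seed) {
  const int id = blockIdx.x * blockDim.x + threadIdx.x;
  curand_init(seed, /*subsequence=*/id, /*offset=*/0, &states[id]);
}

__global__ void fillNormalSketch(curandState *states, float *out, int64_t n,
                                 float mean, float std) {
  const int id = blockIdx.x * blockDim.x + threadIdx.x;
  curandState local = states[id];                    // work on a register copy
  for (int64_t i = id; i < n; i += blockDim.x * gridDim.x) {
    out[i] = mean + std * curand_normal(&local);     // z ~ N(0, 1)
  }
  states[id] = local;                                // persist the advanced state
}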
compute reduce sum + float mean_dim = float(hidden_size) * 4.f; + float reduce_val[2] = {l_sum, l_square_sum}; + blockReduce(reduce_val); + __shared__ float s_mean, s_var; + if (threadIdx.x == 0) { + s_mean = reduce_val[0] / mean_dim; + if (means != nullptr) { + means[blockIdx.x] = s_mean; + } + s_var = reduce_val[1] / mean_dim - s_mean * s_mean + LN_EPSILON; + vars[blockIdx.x] = s_var; + s_var = rsqrtf(s_var); + } + __syncthreads(); + + // step 2. layer norm result + float4 *output_f4 = (float4 *)ln_res + blockIdx.x * hidden_size; + for (uint idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) { + float4 vscale = __ldg((const float4 *)scale + idx); + float4 vbias = __ldg((const float4 *)bias + idx); + float4 val = inp_f4[idx]; + val.x = (val.x - s_mean) * s_var * vscale.x + vbias.x; + val.y = (val.y - s_mean) * s_var * vscale.y + vbias.y; + val.z = (val.z - s_mean) * s_var * vscale.z + vbias.z; + val.w = (val.w - s_mean) * s_var * vscale.w + vbias.w; + output_f4[idx] = val; + } +} + +template <> +__global__ void ker_layer_norm<__half>(__half *ln_res, __half *vars, + __half *means, const __half *inp, + const __half *scale, const __half *bias, + int hidden_size) { + // step 0. compute local sum + float l_sum = 0; + float l_square_sum = 0; + const float4 *inp_f4 = (const float4 *)inp + blockIdx.x * hidden_size; + for (uint idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) { + float4 val_f4 = inp_f4[idx]; + __half2 *val_h2 = (__half2 *)(&val_f4); +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 val_f2 = __half22float2(val_h2[i]); + l_sum += val_f2.x + val_f2.y; + l_square_sum += val_f2.x * val_f2.x + val_f2.y * val_f2.y; + } + } + + // step 1. compute reduce sum + float mean_dim = float(hidden_size) * 8.f; + float reduce_val[2] = {l_sum, l_square_sum}; + blockReduce(reduce_val); + __shared__ float s_mean, s_var; + if (threadIdx.x == 0) { + s_mean = reduce_val[0] / mean_dim; + if (means != nullptr) { + means[blockIdx.x] = s_mean; + } + s_var = reduce_val[1] / mean_dim - s_mean * s_mean + LN_EPSILON; + vars[blockIdx.x] = s_var; + s_var = rsqrtf(s_var); + } + __syncthreads(); + + // step 2. layer norm result + float4 *output_f4 = (float4 *)ln_res + blockIdx.x * hidden_size; + for (uint idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) { + // load scale, bias, input + float4 scale_f4 = __ldg((const float4 *)scale + idx); + __half2 *scale_h2 = (__half2 *)(&scale_f4); + float4 bias_f4 = __ldg((const float4 *)bias + idx); + __half2 *bias_h2 = (__half2 *)(&bias_f4); + float4 val_f4 = inp_f4[idx]; + __half2 *val_h2 = (__half2 *)(&val_f4); + +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 scale_f2 = __half22float2(scale_h2[i]); + float2 bias_f2 = __half22float2(bias_h2[i]); + float2 val_f2 = __half22float2(val_h2[i]); + val_f2.x = (val_f2.x - s_mean) * s_var * scale_f2.x + bias_f2.x; + val_f2.y = (val_f2.y - s_mean) * s_var * scale_f2.y + bias_f2.y; + val_h2[i] = __float22half2_rn(val_f2); + } + output_f4[idx] = val_f4; + } +} + +// __global__ void ker_layer_norm_x2(__half *ln_res, __half *vars, +// __half *means, const __half *inp, +// const __half *scale, const __half +// *bias, int hidden_size) { +// // step 0. 
compute local sum +// float l_sum = 0; +// float l_square_sum = 0; +// const float4 *inp_f4 = (const float4 *)inp + blockIdx.x * 2 * hidden_size; +// for (uint idx = 2 * threadIdx.x; idx < hidden_size * 2; idx += blockDim.x * +// 2) { +// float4 val_f4 = inp_f4[idx]; +// float4 val_f4_1 = inp_f4[idx+1]; +// __half2 *val_h2 = (__half2 *)(&val_f4); +// __half2 *val_h2_1 = (__half2 *)(&val_f4_1); +// #pragma unroll +// for (int i = 0; i < 4; i++) { +// float2 val_f2 = __half22float2(val_h2[i]); +// float2 val_f2_1 = __half22float2(val_h2_1[i]); +// l_sum += val_f2.x + val_f2.y + val_f2_1.x + val_f2_1.y; +// l_square_sum += val_f2.x * val_f2.x + val_f2.y * val_f2.y + val_f2_1.x +// * val_f2_1.x + val_f2_1.y * val_f2_1.y; +// } +// } + +// // step 1. compute reduce sum +// float mean_dim = float(hidden_size) * 8.f * 2; +// float reduce_val[2] = {l_sum, l_square_sum}; +// blockReduce(reduce_val); +// __shared__ float s_mean, s_var; +// if (threadIdx.x == 0) { +// s_mean = reduce_val[0] / mean_dim; +// if (means != nullptr) { +// means[blockIdx.x] = s_mean; +// } +// s_var = reduce_val[1] / mean_dim - s_mean * s_mean + LN_EPSILON; +// vars[blockIdx.x] = s_var; +// s_var = rsqrtf(s_var); +// } +// __syncthreads(); + +// // step 2. layer norm result +// float4 *output_f4 = (float4 *)ln_res + blockIdx.x * hidden_size * 2; +// for (uint idx = 2 * threadIdx.x; idx < hidden_size * 2; idx += blockDim.x * +// 2) { +// // load scale, bias, input +// float4 scale_f4 = __ldg((const float4 *)scale + idx); +// __half2 *scale_h2 = (__half2 *)(&scale_f4); +// float4 scale_f4_1 = __ldg((const float4 *)scale + idx + 1); +// __half2 *scale_h2_1 = (__half2 *)(&scale_f4_1); +// float4 bias_f4 = __ldg((const float4 *)bias + idx); +// __half2 *bias_h2 = (__half2 *)(&bias_f4); +// float4 bias_f4_1 = __ldg((const float4 *)bias + idx + 1); +// __half2 *bias_h2_1 = (__half2 *)(&bias_f4_1); +// float4 val_f4 = inp_f4[idx]; +// __half2 *val_h2 = (__half2 *)(&val_f4); +// float4 val_f4_1 = inp_f4[idx+1]; +// __half2 *val_h2_1 = (__half2 *)(&val_f4_1); + +// #pragma unroll +// for (int i = 0; i < 4; i++) { +// float2 scale_f2 = __half22float2(scale_h2[i]); +// float2 scale_f2_1 = __half22float2(scale_h2_1[i]); +// float2 bias_f2 = __half22float2(bias_h2[i]); +// float2 bias_f2_1 = __half22float2(bias_h2_1[i]); +// float2 val_f2 = __half22float2(val_h2[i]); +// float2 val_f2_1 = __half22float2(val_h2_1[i]); +// val_f2.x = (val_f2.x - s_mean) * s_var * scale_f2.x + bias_f2.x; +// val_f2.y = (val_f2.y - s_mean) * s_var * scale_f2.y + bias_f2.y; +// val_h2[i] = __float22half2_rn(val_f2); +// val_f2_1.x = (val_f2_1.x - s_mean) * s_var * scale_f2_1.x + +// bias_f2_1.x; val_f2_1.y = (val_f2_1.y - s_mean) * s_var * scale_f2_1.y +// + bias_f2_1.y; val_h2_1[i] = __float22half2_rn(val_f2_1); +// } +// output_f4[idx] = val_f4; +// output_f4[idx+1] = val_f4_1; +// } +// } + +// __global__ void ker_layer_norm_x4(__half *ln_res, __half *vars, +// __half *means, const __half *inp, +// const __half *scale, const __half +// *bias, int hidden_size) { +// // step 0. 
compute local sum +// float l_sum = 0; +// float l_square_sum = 0; +// const float4 *inp_f4 = (const float4 *)inp + blockIdx.x * hidden_size * 4; +// for (uint idx = 4 * threadIdx.x; idx < hidden_size * 4; idx += blockDim.x * +// 4) { +// float4 val_f4 = inp_f4[idx]; +// float4 val_f4_1 = inp_f4[idx+1]; +// float4 val_f4_2 = inp_f4[idx+2]; +// float4 val_f4_3 = inp_f4[idx+3]; +// __half2 *val_h2 = (__half2 *)(&val_f4); +// __half2 *val_h2_1 = (__half2 *)(&val_f4_1); +// __half2 *val_h2_2 = (__half2 *)(&val_f4_2); +// __half2 *val_h2_3 = (__half2 *)(&val_f4_3); +// #pragma unroll +// for (int i = 0; i < 4; i++) { +// float2 val_f2 = __half22float2(val_h2[i]); +// float2 val_f2_1 = __half22float2(val_h2_1[i]); +// float2 val_f2_2 = __half22float2(val_h2_2[i]); +// float2 val_f2_3 = __half22float2(val_h2_3[i]); +// l_sum += val_f2.x + val_f2.y + val_f2_1.x + val_f2_1.y + val_f2_2.x + +// val_f2_2.y + val_f2_3.x + val_f2_3.y; l_square_sum += val_f2.x * +// val_f2.x + val_f2.y * val_f2.y; l_square_sum += val_f2_1.x * val_f2_1.x +// + val_f2_1.y * val_f2_1.y; l_square_sum += val_f2_2.x * val_f2_2.x + +// val_f2_2.y * val_f2_2.y; l_square_sum += val_f2_3.x * val_f2_3.x + +// val_f2_3.y * val_f2_3.y; +// } +// } + +// // step 1. compute reduce sum +// float mean_dim = float(hidden_size) * 8.f * 4; +// float reduce_val[2] = {l_sum, l_square_sum}; +// blockReduce(reduce_val); +// __shared__ float s_mean, s_var; +// if (threadIdx.x == 0) { +// s_mean = reduce_val[0] / mean_dim; +// if (means != nullptr) { +// means[blockIdx.x] = s_mean; +// } +// s_var = reduce_val[1] / mean_dim - s_mean * s_mean + LN_EPSILON; +// vars[blockIdx.x] = s_var; +// s_var = rsqrtf(s_var); +// } +// __syncthreads(); + +// // step 2. layer norm result +// float4 *output_f4 = (float4 *)ln_res + blockIdx.x * hidden_size * 4; +// for (uint idx = 4 * threadIdx.x; idx < hidden_size * 4; idx += blockDim.x * +// 4) { +// // load scale, bias, input +// float4 scale_f4 = __ldg((const float4 *)scale + idx); +// __half2 *scale_h2 = (__half2 *)(&scale_f4); +// float4 scale_f4_1 = __ldg((const float4 *)scale + idx + 1); +// __half2 *scale_h2_1 = (__half2 *)(&scale_f4_1); +// float4 scale_f4_2 = __ldg((const float4 *)scale + idx + 2); +// __half2 *scale_h2_2 = (__half2 *)(&scale_f4_2); +// float4 scale_f4_3 = __ldg((const float4 *)scale + idx + 3); +// __half2 *scale_h2_3 = (__half2 *)(&scale_f4_3); +// float4 bias_f4 = __ldg((const float4 *)bias + idx); +// __half2 *bias_h2 = (__half2 *)(&bias_f4); +// float4 bias_f4_1 = __ldg((const float4 *)bias + idx + 1); +// __half2 *bias_h2_1 = (__half2 *)(&bias_f4_1); +// float4 bias_f4_2 = __ldg((const float4 *)bias + idx + 2); +// __half2 *bias_h2_2 = (__half2 *)(&bias_f4_2); +// float4 bias_f4_3 = __ldg((const float4 *)bias + idx + 3); +// __half2 *bias_h2_3 = (__half2 *)(&bias_f4_3); +// float4 val_f4 = inp_f4[idx]; +// __half2 *val_h2 = (__half2 *)(&val_f4); +// float4 val_f4_1 = inp_f4[idx+1]; +// __half2 *val_h2_1 = (__half2 *)(&val_f4_1); +// float4 val_f4_2 = inp_f4[idx+2]; +// __half2 *val_h2_2 = (__half2 *)(&val_f4_2); +// float4 val_f4_3 = inp_f4[idx+3]; +// __half2 *val_h2_3 = (__half2 *)(&val_f4_3); + +// #pragma unroll +// for (int i = 0; i < 4; i++) { +// float2 scale_f2 = __half22float2(scale_h2[i]); +// float2 scale_f2_1 = __half22float2(scale_h2_1[i]); +// float2 scale_f2_2 = __half22float2(scale_h2_2[i]); +// float2 scale_f2_3 = __half22float2(scale_h2_3[i]); +// float2 bias_f2 = __half22float2(bias_h2[i]); +// float2 bias_f2_1 = __half22float2(bias_h2_1[i]); +// float2 
bias_f2_2 = __half22float2(bias_h2_2[i]); +// float2 bias_f2_3 = __half22float2(bias_h2_3[i]); +// float2 val_f2 = __half22float2(val_h2[i]); +// float2 val_f2_1 = __half22float2(val_h2_1[i]); +// float2 val_f2_2 = __half22float2(val_h2_2[i]); +// float2 val_f2_3 = __half22float2(val_h2_3[i]); +// val_f2.x = (val_f2.x - s_mean) * s_var * scale_f2.x + bias_f2.x; +// val_f2.y = (val_f2.y - s_mean) * s_var * scale_f2.y + bias_f2.y; +// val_f2_1.x = (val_f2_1.x - s_mean) * s_var * scale_f2_1.x + +// bias_f2_1.x; val_f2_1.y = (val_f2_1.y - s_mean) * s_var * scale_f2_1.y +// + bias_f2_1.y; val_f2_2.x = (val_f2_2.x - s_mean) * s_var * +// scale_f2_2.x + bias_f2_2.x; val_f2_2.y = (val_f2_2.y - s_mean) * s_var +// * scale_f2_2.y + bias_f2_2.y; val_f2_3.x = (val_f2_3.x - s_mean) * +// s_var * scale_f2_3.x + bias_f2_3.x; val_f2_3.y = (val_f2_3.y - s_mean) +// * s_var * scale_f2_3.y + bias_f2_3.y; val_h2[i] = +// __float22half2_rn(val_f2); val_h2_1[i] = __float22half2_rn(val_f2_1); +// val_h2_2[i] = __float22half2_rn(val_f2_2); +// val_h2_3[i] = __float22half2_rn(val_f2_3); +// } +// output_f4[idx] = val_f4; +// output_f4[idx+1] = val_f4_1; +// output_f4[idx+2] = val_f4_2; +// output_f4[idx+3] = val_f4_3; +// } +// } + +template <> +void launch_layer_norm(float *ln_res, float *vars, float *means, + const float *inp, const float *scale, + const float *bias, int batch_size, int hidden_dim, + cudaStream_t stream) { + if (hidden_dim % 4 != 0) { + throw std::runtime_error("violate hidden_dim % 4 = 0"); + } + hidden_dim >>= 2; + int nthread = min(((hidden_dim + 31) / 32) * 32, MAX_THREADS); + dim3 grid_dim(batch_size); + dim3 block_dim(nthread); + + ker_layer_norm<<>>( + ln_res, vars, means, inp, scale, bias, hidden_dim); +} + +template <> +void launch_layer_norm<__half>(__half *ln_res, __half *vars, __half *means, + const __half *inp, const __half *scale, + const __half *bias, int batch_size, + int hidden_dim, cudaStream_t stream) { + if (hidden_dim % 8 != 0) { + throw std::runtime_error("violate hidden_dim % 8 = 0"); + } + hidden_dim >>= 3; + int nthread = min(((hidden_dim + 31) / 32) * 32, MAX_THREADS); + dim3 grid_dim(batch_size); + dim3 block_dim(nthread); + + ker_layer_norm<__half><<>>( + ln_res, vars, means, inp, scale, bias, hidden_dim); + // if (hidden_dim % 8 != 0) { + // throw std::runtime_error("violate hidden_dim % 8 = 0"); + // } + // hidden_dim >>= 3; + + // if (hidden_dim * 8 < 8192) { + // int nthread = min(((hidden_dim + 31) / 32) * 32, MAX_THREADS); + // dim3 grid_dim(batch_size); + // dim3 block_dim(nthread); + // ker_layer_norm<__half><<>>( + // ln_res, vars, means, inp, scale, bias, hidden_dim); + // } else if (hidden_dim * 8 >= 8192 && hidden_dim * 8 <= 8192 * 2) { + // hidden_dim >>= 1; + // int nthread = min(((hidden_dim + 31) / 32) * 32, MAX_THREADS); + // dim3 grid_dim(batch_size); + // dim3 block_dim(nthread); + // ker_layer_norm_x2<<>>( + // ln_res, vars, means, inp, scale, bias, hidden_dim); + // } else if (hidden_dim * 8 > 8192 * 2 && hidden_dim * 8 <= 8192 * 4) { + // hidden_dim >>= 2; + // int nthread = min(((hidden_dim + 31) / 32) * 32, MAX_THREADS); + // dim3 grid_dim(batch_size); + // dim3 block_dim(nthread); + // ker_layer_norm_x4<<>>( + // ln_res, vars, means, inp, scale, bias, hidden_dim); + // } else { + // throw std::runtime_error("hidden_dim % 4 != 0 || hidden_dim > 32768"); + // } +} + +/** +@brief: ker_ln_bw_dgamma_dbetta +Layer norm backword kernel, compute the gradient of gamma and betta. 
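+
+For reference, the two reductions below are equivalent to the following
+single-threaded sketch (an illustration only, not the kernel itself; it reuses
+the parameter names of this kernel, assumes a row-major [rows, width] layout,
+and the loop variables are hypothetical):
+
+  for (int col = 0; col < width; ++col) {
+    float db = 0.f, dg = 0.f;
+    for (int row = 0; row < rows; ++row) {
+      int k = row * width + col;
+      // xhat from ln input (means given) or from ln output (betta/gamma given)
+      float xhat = means ? (inp_or_out[k] - means[row]) *
+                               (1.f / sqrtf(vars[row] + LN_EPSILON))
+                         : (inp_or_out[k] - betta[col]) / add_eps(gamma[col]);
+      db += out_grad[k];
+      dg += xhat * out_grad[k];
+    }
+    betta_grad[col] = db;
+    gamma_grad[col] = dg;
+  }
+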
+dbetta = sum(dout, dim=0) +dgamma = sum(xhat * dout, dim=0) +xhat = (input - mean) * rsqrt(var) or + (output - betta) / gamma + + +@thread +gridDim.x = hidden_size / 32 +blockDim.x = 32 +blockDim.y = 32 + +@param +gamma_grad: [hidden_size], gradient of gamma +betta_grad: [hidden_size], gradient of betta +out_grad: [batch_size * seq_len, hidden_size], gradient of betta ln output +inp_or_out: [batch_size * seq_len, hidden_size], ln output if means is nullptr + ln input if means is not nullptr +gamma: [hidden_size], gamma of ln, + used to compute xhat, maybe nullptr +betta: [hidden_size], betta of ln, + used to compute xhat, maybe nullptr +vars: [batch_size * seq_len], variance of ln forward, + used to compute xhat, maybe nullptr +means: [batch_size * seq_len], mean of ln forward, + used to compute xhat, maybe nullptr +(gamma && betta) ^ (vars && means) should be true +*/ +template +__global__ void +ker_ln_bw_dgamma_dbetta(T *gamma_grad, T *betta_grad, const T *out_grad, + const T *inp_or_out, const T *gamma, const T *betta, + const T *vars, const T *means, int rows, int width) { + __shared__ float betta_buffer[TILE_DIM][TILE_DIM]; + __shared__ float gamma_buffer[TILE_DIM][TILE_DIM]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int offset = threadIdx.y * width + idx; + int y_stride = width * TILE_DIM; + + // Loop across inp height + float dbetta = 0; + float dgamma = 0; + float dout, val; + if (idx < width) { + if (means == nullptr) { + float vbetta = (float)betta[idx]; + float vgamma = (float)gamma[idx]; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + dout = (float)out_grad[offset]; + // inp_or_out is output + val = (float)inp_or_out[offset]; + dbetta += dout; + dgamma += ((val - vbetta) / add_eps(vgamma) * dout); + offset += y_stride; + } + } else { + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + dout = (float)out_grad[offset]; + // inp_or_out is input + val = (float)inp_or_out[offset]; + dbetta += dout; + dgamma += ((val - (float)means[r]) * + rsqrtf((float)vars[r] + LN_EPSILON) * dout); + offset += y_stride; + } + } + } + + // Sum the shared buffer. + betta_buffer[threadIdx.x][threadIdx.y] = dbetta; + gamma_buffer[threadIdx.x][threadIdx.y] = dgamma; + __syncthreads(); + float s1 = betta_buffer[threadIdx.y][threadIdx.x]; + float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; + __syncthreads(); + + for (int i = 1; i < TILE_DIM; i <<= 1) { + s1 += g.shfl_down(s1, i); + s2 += g.shfl_down(s2, i); + } + + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + if (threadIdx.x == 0 && idx < width) { + betta_grad[pos] = s1; + gamma_grad[pos] = s2; + } +} + +/** +@brief: ker_ln_bw_dinp +Layer norm backword kernel, compute the gradient of input. 
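+
+For reference, the per-token update described below is equivalent to the
+following single-threaded sketch (an illustration only, not the kernel itself;
+it reuses the parameter names of this kernel, takes rows = batch_size * seq_len
+tokens in a row-major [rows, hidden_dim] layout counted in elements, and the
+temporaries xhat/dxhat are hypothetical):
+
+  std::vector<float> xhat(hidden_dim), dxhat(hidden_dim);
+  for (int row = 0; row < rows; ++row) {
+    float rstd = 1.f / sqrtf(vars[row] + LN_EPSILON);
+    float sum_dxhat = 0.f, sum_dxhat_xhat = 0.f;
+    for (int col = 0; col < hidden_dim; ++col) {
+      int k = row * hidden_dim + col;
+      xhat[col] = means ? (inp_or_out[k] - means[row]) * rstd
+                        : (inp_or_out[k] - betta[col]) / add_eps(gamma[col]);
+      dxhat[col] = out_grad[k] * gamma[col];
+      sum_dxhat += dxhat[col];
+      sum_dxhat_xhat += dxhat[col] * xhat[col];
+    }
+    for (int col = 0; col < hidden_dim; ++col) {
+      int k = row * hidden_dim + col;
+      inp_grad[k] = (dxhat[col] -
+                     (sum_dxhat + xhat[col] * sum_dxhat_xhat) / hidden_dim) *
+                    rstd;
+      if (residual_grad) inp_grad[k] += residual_grad[k];
+    }
+  }
+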
+dinp = (dxhat - (sum(dxhat) + xhat * sum(dxhat * xhat)) / hidden_dim) + * rsqrt(var) +xhat = (input - mean) * rsqrt(var) if mean is not nullptr + (output - betta) / gamma if mean is nullptr +dxhat = dout * gamma + + +@thread +gridDim.x = batch_size * seq_len +blockDim.x = hidden_size + +@param +inp_grad: [batch_size * seq_len, hidden_size], gradient of betta ln output +out_grad: [batch_size * seq_len, hidden_size], gradient of betta ln output +residual_grad: [batch_size * seq_len, hidden_size], gradient of residual input, + usually appear in pre-layer-norm for transformer layer, maybe nullptr +inp_or_out: [batch_size * seq_len, hidden_size], ln output if means is nullptr + ln input if means is not nullptr +gamma: [hidden_size], gamma of ln, + used to compute xhat and dxhat +betta: [hidden_size], betta of ln, + used to compute xhat, maybe nullptr +vars: [batch_size * seq_len], variance of ln forward, + used to compute xhat and dinp +means: [batch_size * seq_len], mean of ln forward, + used to compute xhat, maybe nullptr +*/ +template +__global__ void ker_ln_bw_dinp(T *inp_grad, const T *out_grad, + const T *residual_grad, const T *inp_or_out, + const T *gamma, const T *betta, const T *vars, + const T *means, int hidden_dim) { + int offset = blockIdx.x * hidden_dim + threadIdx.x; + float4 dxhat, xhat; + float var_rsqrt; + + if (threadIdx.x < hidden_dim) { + // step 0. dxhat = dout * gamma + dxhat = ((const float4 *)out_grad)[offset]; + float4 vgamma = ((const float4 *)gamma)[threadIdx.x]; + dxhat.x *= vgamma.x; + dxhat.y *= vgamma.y; + dxhat.z *= vgamma.z; + dxhat.w *= vgamma.w; + + /* + step 1. xhat = (output - betta) / gamma or + (input - mean) * rsqrtf(var) + */ + xhat = ((const float4 *)inp_or_out)[offset]; + var_rsqrt = rsqrtf((float)vars[blockIdx.x] + LN_EPSILON); + if (means == nullptr) { + // inp_or_out is output, xhat = (output - betta) / gamma + float4 vbetta = ((const float4 *)betta)[threadIdx.x]; + xhat.x = (xhat.x - vbetta.x) / add_eps(vgamma.x); + xhat.y = (xhat.y - vbetta.y) / add_eps(vgamma.y); + xhat.z = (xhat.z - vbetta.z) / add_eps(vgamma.z); + xhat.w = (xhat.w - vbetta.w) / add_eps(vgamma.w); + } else { + // inp_or_out is input, xhat = (input - mean) * rsqrtf(var) + float fmean = (float)means[blockIdx.x]; + xhat.x = (xhat.x - fmean) * var_rsqrt; + xhat.y = (xhat.y - fmean) * var_rsqrt; + xhat.z = (xhat.z - fmean) * var_rsqrt; + xhat.w = (xhat.w - fmean) * var_rsqrt; + } + } + + /* step2. block reduce sum for dxhat and dxhat*xhat */ + float reduce_val[2] = {0.f, 0.f}; + if (threadIdx.x < hidden_dim) { + reduce_val[0] = dxhat.x + dxhat.y + dxhat.z + dxhat.w; + reduce_val[1] = dxhat.x * xhat.x + dxhat.y * xhat.y + dxhat.z * xhat.z + + dxhat.w * xhat.w; + } + blockReduce(reduce_val); + __shared__ float s_sum_dxhat, s_sum_dxhat_xhat; + if (threadIdx.x == 0) { + float mean_dim = hidden_dim * 4; + s_sum_dxhat = reduce_val[0] / mean_dim; + s_sum_dxhat_xhat = reduce_val[1] / mean_dim; + } + __syncthreads(); + + /* + step3. 
compute input gradient + (dxhat - (sum(dxhat) + xhat * sum(dxhat * xhat)) / mean_dim) * rsqrt(var) + */ + if (threadIdx.x >= hidden_dim) { + return; + } + dxhat.x = (dxhat.x - s_sum_dxhat - xhat.x * s_sum_dxhat_xhat) * var_rsqrt; + dxhat.y = (dxhat.y - s_sum_dxhat - xhat.y * s_sum_dxhat_xhat) * var_rsqrt; + dxhat.z = (dxhat.z - s_sum_dxhat - xhat.z * s_sum_dxhat_xhat) * var_rsqrt; + dxhat.w = (dxhat.w - s_sum_dxhat - xhat.w * s_sum_dxhat_xhat) * var_rsqrt; + if (residual_grad) { + // Add the residual grad, + // usually in pre-layer-norm for transformer layer + float4 dresidual = ((const float4 *)residual_grad)[offset]; + dxhat.x += dresidual.x; + dxhat.y += dresidual.y; + dxhat.z += dresidual.z; + dxhat.w += dresidual.w; + } + ((float4 *)inp_grad)[offset] = dxhat; +} + +template <> +__global__ void ker_ln_bw_dinp<__half>(__half *inp_grad, const __half *out_grad, + const __half *residual_grad, + const __half *inp_or_out, + const __half *gamma, const __half *betta, + const __half *vars, const __half *means, + int hidden_dim) { + int offset = blockIdx.x * hidden_dim + threadIdx.x; + + float2 dxhat[4], xhat[4]; + float var_rsqrt; + float4 vtmp; + __half2 *tmp_h2; + float reduce_val[2] = {0.f, 0.f}; + + if (threadIdx.x < hidden_dim) { + // step 0. dxhat = dout * gamma + vtmp = ((const float4 *)out_grad)[offset]; + tmp_h2 = reinterpret_cast<__half2 *>(&vtmp); + float4 gamma_f4 = ((const float4 *)gamma)[threadIdx.x]; + __half2 *gamma_h2 = reinterpret_cast<__half2 *>(&gamma_f4); +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 vdout = __half22float2(tmp_h2[i]); + float2 vgamma = __half22float2(gamma_h2[i]); + dxhat[i].x = vdout.x * vgamma.x; + dxhat[i].y = vdout.y * vgamma.y; + reduce_val[0] += dxhat[i].x + dxhat[i].y; + } + + /* + step 1. xhat = (output - betta) / gamma or + (input - mean) * rsqrtf(var) + */ + vtmp = ((const float4 *)inp_or_out)[offset]; + var_rsqrt = rsqrtf((float)vars[blockIdx.x] + LN_EPSILON); + if (means == nullptr) { + // inp_or_out is output, xhat = (output - betta) / gamma + float4 vbetta = ((const float4 *)betta)[threadIdx.x]; + __half2 *betta_h2 = reinterpret_cast<__half2 *>(&vbetta); +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 vout = __half22float2(tmp_h2[i]); + float2 vgamma = __half22float2(gamma_h2[i]); + float2 vbetta = __half22float2(betta_h2[i]); + xhat[i].x = (vout.x - vbetta.x) / add_eps(vgamma.x); + xhat[i].y = (vout.y - vbetta.y) / add_eps(vgamma.y); + reduce_val[1] += xhat[i].x * dxhat[i].x + xhat[i].y * dxhat[i].y; + } + } else { + // inp_or_out is input, xhat = (input - mean) * rsqrtf(var) + float fmean = (float)means[blockIdx.x]; +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 vinp = __half22float2(tmp_h2[i]); + xhat[i].x = (vinp.x - fmean) * var_rsqrt; + xhat[i].y = (vinp.y - fmean) * var_rsqrt; + reduce_val[1] += xhat[i].x * dxhat[i].x + xhat[i].y * dxhat[i].y; + } + } + } + + /* step2. block reduce sum for dxhat and dxhat*xhat */ + blockReduce(reduce_val); + __shared__ float s_sum_dxhat, s_sum_dxhat_xhat; + if (threadIdx.x == 0) { + float mean_dim = hidden_dim * 8; + s_sum_dxhat = reduce_val[0] / mean_dim; + s_sum_dxhat_xhat = reduce_val[1] / mean_dim; + } + __syncthreads(); + + /* + step3. 
compute input gradient + (dxhat - (sum(dxhat) + xhat * sum(dxhat * xhat)) / mean_dim) * rsqrt(var) + */ + if (threadIdx.x >= hidden_dim) { + return; + } + if (residual_grad) { + // Add the residual grad, + // usually in pre-layer-norm for transformer layer + float4 dresidual = ((const float4 *)residual_grad)[offset]; + __half *hdres = reinterpret_cast<__half *>(&dresidual); +#pragma unroll + for (int i = 0; i < 4; i++) { + tmp_h2[i].x = __float2half( + (dxhat[i].x - s_sum_dxhat - xhat[i].x * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres[2 * i])); + tmp_h2[i].y = __float2half( + (dxhat[i].y - s_sum_dxhat - xhat[i].y * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres[2 * i + 1])); + } + } else { +#pragma unroll + for (int i = 0; i < 4; i++) { + tmp_h2[i].x = __float2half( + (dxhat[i].x - s_sum_dxhat - xhat[i].x * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2[i].y = __float2half( + (dxhat[i].y - s_sum_dxhat - xhat[i].y * s_sum_dxhat_xhat) * + var_rsqrt); + } + } + ((float4 *)inp_grad)[offset] = vtmp; +} + +__global__ void ker_ln_bw_dinp_x2(__half *inp_grad, const __half *out_grad, + const __half *residual_grad, + const __half *inp_or_out, const __half *gamma, + const __half *betta, const __half *vars, + const __half *means, int hidden_dim) { + int offset = blockIdx.x * hidden_dim * 2 + threadIdx.x * 2; + + float2 dxhat[4], xhat[4]; + float2 dxhat_1[4], xhat_1[4]; + float var_rsqrt; + float4 vtmp, vtmp_1; + __half2 *tmp_h2; + __half2 *tmp_h2_1; + float reduce_val[2] = {0.f, 0.f}; + + if (threadIdx.x < hidden_dim) { + // step 0. dxhat = dout * gamma + vtmp = ((const float4 *)out_grad)[offset]; + vtmp_1 = ((const float4 *)out_grad)[offset + 1]; + tmp_h2 = reinterpret_cast<__half2 *>(&vtmp); + tmp_h2_1 = reinterpret_cast<__half2 *>(&vtmp_1); + float4 gamma_f4 = ((const float4 *)gamma)[threadIdx.x * 2]; + float4 gamma_f4_1 = ((const float4 *)gamma)[threadIdx.x * 2 + 1]; + __half2 *gamma_h2 = reinterpret_cast<__half2 *>(&gamma_f4); + __half2 *gamma_h2_1 = reinterpret_cast<__half2 *>(&gamma_f4_1); +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 vdout = __half22float2(tmp_h2[i]); + float2 vdout_1 = __half22float2(tmp_h2_1[i]); + float2 vgamma = __half22float2(gamma_h2[i]); + float2 vgamma_1 = __half22float2(gamma_h2_1[i]); + dxhat[i].x = vdout.x * vgamma.x; + dxhat[i].y = vdout.y * vgamma.y; + dxhat_1[i].x = vdout_1.x * vgamma_1.x; + dxhat_1[i].y = vdout_1.y * vgamma_1.y; + reduce_val[0] += dxhat[i].x + dxhat[i].y + dxhat_1[i].x + dxhat_1[i].y; + } + + /* + step 1. 
xhat = (output - betta) / gamma or + (input - mean) * rsqrtf(var) + */ + vtmp = ((const float4 *)inp_or_out)[offset]; + vtmp_1 = ((const float4 *)inp_or_out)[offset + 1]; + var_rsqrt = rsqrtf((float)vars[blockIdx.x] + LN_EPSILON); + if (means == nullptr) { + // inp_or_out is output, xhat = (output - betta) / gamma + float4 vbetta = ((const float4 *)betta)[2 * threadIdx.x]; + float4 vbetta_1 = ((const float4 *)betta)[2 * threadIdx.x + 1]; + __half2 *betta_h2 = reinterpret_cast<__half2 *>(&vbetta); + __half2 *betta_h2_1 = reinterpret_cast<__half2 *>(&vbetta_1); +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 vout = __half22float2(tmp_h2[i]); + float2 vout_1 = __half22float2(tmp_h2_1[i]); + float2 vgamma = __half22float2(gamma_h2[i]); + float2 vgamma_1 = __half22float2(gamma_h2_1[i]); + float2 vbetta = __half22float2(betta_h2[i]); + float2 vbetta_1 = __half22float2(betta_h2_1[i]); + xhat[i].x = (vout.x - vbetta.x) / add_eps(vgamma.x); + xhat_1[i].x = (vout_1.x - vbetta_1.x) / add_eps(vgamma_1.x); + xhat[i].y = (vout.y - vbetta.y) / add_eps(vgamma.y); + xhat_1[i].y = (vout_1.y - vbetta_1.y) / add_eps(vgamma_1.y); + reduce_val[1] += xhat[i].x * dxhat[i].x + xhat[i].y * dxhat[i].y; + reduce_val[1] += + xhat_1[i].x * dxhat_1[i].x + xhat_1[i].y * dxhat_1[i].y; + } + } else { + // inp_or_out is input, xhat = (input - mean) * rsqrtf(var) + float fmean = (float)means[blockIdx.x]; +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 vinp = __half22float2(tmp_h2[i]); + float2 vinp_1 = __half22float2(tmp_h2_1[i]); + xhat[i].x = (vinp.x - fmean) * var_rsqrt; + xhat_1[i].x = (vinp_1.x - fmean) * var_rsqrt; + xhat[i].y = (vinp.y - fmean) * var_rsqrt; + xhat_1[i].y = (vinp_1.y - fmean) * var_rsqrt; + reduce_val[1] += xhat[i].x * dxhat[i].x + xhat[i].y * dxhat[i].y; + reduce_val[1] += + xhat_1[i].x * dxhat_1[i].x + xhat_1[i].y * dxhat_1[i].y; + } + } + } + + /* step2. block reduce sum for dxhat and dxhat*xhat */ + blockReduce(reduce_val); + __shared__ float s_sum_dxhat, s_sum_dxhat_xhat; + if (threadIdx.x == 0) { + float mean_dim = hidden_dim * 8 * 2; + s_sum_dxhat = reduce_val[0] / mean_dim; + s_sum_dxhat_xhat = reduce_val[1] / mean_dim; + } + __syncthreads(); + + /* + step3. 
compute input gradient + (dxhat - (sum(dxhat) + xhat * sum(dxhat * xhat)) / mean_dim) * rsqrt(var) + */ + if (threadIdx.x >= hidden_dim) { + return; + } + if (residual_grad) { + // Add the residual grad, + // usually in pre-layer-norm for transformer layer + float4 dresidual = ((const float4 *)residual_grad)[offset]; + float4 dresidual_1 = ((const float4 *)residual_grad)[offset + 1]; + __half *hdres = reinterpret_cast<__half *>(&dresidual); + __half *hdres_1 = reinterpret_cast<__half *>(&dresidual_1); +#pragma unroll + for (int i = 0; i < 4; i++) { + tmp_h2[i].x = __float2half( + (dxhat[i].x - s_sum_dxhat - xhat[i].x * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres[2 * i])); + tmp_h2_1[i].x = __float2half( + (dxhat_1[i].x - s_sum_dxhat - xhat_1[i].x * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres_1[2 * i])); + tmp_h2[i].y = __float2half( + (dxhat[i].y - s_sum_dxhat - xhat[i].y * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres[2 * i + 1])); + tmp_h2_1[i].y = __float2half( + (dxhat_1[i].y - s_sum_dxhat - xhat_1[i].y * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres_1[2 * i + 1])); + } + } else { +#pragma unroll + for (int i = 0; i < 4; i++) { + tmp_h2[i].x = __float2half( + (dxhat[i].x - s_sum_dxhat - xhat[i].x * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2_1[i].x = __float2half( + (dxhat_1[i].x - s_sum_dxhat - xhat_1[i].x * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2[i].y = __float2half( + (dxhat[i].y - s_sum_dxhat - xhat[i].y * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2_1[i].y = __float2half( + (dxhat_1[i].y - s_sum_dxhat - xhat_1[i].y * s_sum_dxhat_xhat) * + var_rsqrt); + } + } + ((float4 *)inp_grad)[offset] = vtmp; + ((float4 *)inp_grad)[offset + 1] = vtmp_1; +} + +__global__ void ker_ln_bw_dinp_x4(__half *inp_grad, const __half *out_grad, + const __half *residual_grad, + const __half *inp_or_out, const __half *gamma, + const __half *betta, const __half *vars, + const __half *means, int hidden_dim) { + int offset = blockIdx.x * hidden_dim * 4 + threadIdx.x * 4; + + float2 dxhat[4], xhat[4]; + float2 dxhat_1[4], xhat_1[4]; + float2 dxhat_2[4], xhat_2[4]; + float2 dxhat_3[4], xhat_3[4]; + float var_rsqrt; + float4 vtmp, vtmp_1, vtmp_2, vtmp_3; + __half2 *tmp_h2; + __half2 *tmp_h2_1; + __half2 *tmp_h2_2; + __half2 *tmp_h2_3; + float reduce_val[2] = {0.f, 0.f}; + + if (threadIdx.x < hidden_dim) { + // step 0. 
dxhat = dout * gamma + vtmp = ((const float4 *)out_grad)[offset]; + vtmp_1 = ((const float4 *)out_grad)[offset + 1]; + vtmp_2 = ((const float4 *)out_grad)[offset + 2]; + vtmp_3 = ((const float4 *)out_grad)[offset + 3]; + tmp_h2 = reinterpret_cast<__half2 *>(&vtmp); + tmp_h2_1 = reinterpret_cast<__half2 *>(&vtmp_1); + tmp_h2_2 = reinterpret_cast<__half2 *>(&vtmp_2); + tmp_h2_3 = reinterpret_cast<__half2 *>(&vtmp_3); + float4 gamma_f4 = ((const float4 *)gamma)[threadIdx.x * 4]; + float4 gamma_f4_1 = ((const float4 *)gamma)[threadIdx.x * 4 + 1]; + float4 gamma_f4_2 = ((const float4 *)gamma)[threadIdx.x * 4 + 2]; + float4 gamma_f4_3 = ((const float4 *)gamma)[threadIdx.x * 4 + 3]; + __half2 *gamma_h2 = reinterpret_cast<__half2 *>(&gamma_f4); + __half2 *gamma_h2_1 = reinterpret_cast<__half2 *>(&gamma_f4_1); + __half2 *gamma_h2_2 = reinterpret_cast<__half2 *>(&gamma_f4_2); + __half2 *gamma_h2_3 = reinterpret_cast<__half2 *>(&gamma_f4_3); +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 vdout = __half22float2(tmp_h2[i]); + float2 vdout_1 = __half22float2(tmp_h2_1[i]); + float2 vdout_2 = __half22float2(tmp_h2_2[i]); + float2 vdout_3 = __half22float2(tmp_h2_3[i]); + float2 vgamma = __half22float2(gamma_h2[i]); + float2 vgamma_1 = __half22float2(gamma_h2_1[i]); + float2 vgamma_2 = __half22float2(gamma_h2_2[i]); + float2 vgamma_3 = __half22float2(gamma_h2_3[i]); + dxhat[i].x = vdout.x * vgamma.x; + dxhat[i].y = vdout.y * vgamma.y; + dxhat_1[i].x = vdout_1.x * vgamma_1.x; + dxhat_1[i].y = vdout_1.y * vgamma_1.y; + dxhat_2[i].x = vdout_2.x * vgamma_2.x; + dxhat_2[i].y = vdout_2.y * vgamma_2.y; + dxhat_3[i].x = vdout_3.x * vgamma_3.x; + dxhat_3[i].y = vdout_3.y * vgamma_3.y; + reduce_val[0] += dxhat[i].x + dxhat[i].y + dxhat_1[i].x + dxhat_1[i].y + + dxhat_2[i].x + dxhat_2[i].y + dxhat_3[i].x + + dxhat_3[i].y; + } + + /* + step 1. 
xhat = (output - betta) / gamma or + (input - mean) * rsqrtf(var) + */ + vtmp = ((const float4 *)inp_or_out)[offset]; + vtmp_1 = ((const float4 *)inp_or_out)[offset + 1]; + vtmp_2 = ((const float4 *)inp_or_out)[offset + 2]; + vtmp_3 = ((const float4 *)inp_or_out)[offset + 3]; + var_rsqrt = rsqrtf((float)vars[blockIdx.x] + LN_EPSILON); + if (means == nullptr) { + // inp_or_out is output, xhat = (output - betta) / gamma + float4 vbetta = ((const float4 *)betta)[4 * threadIdx.x]; + float4 vbetta_1 = ((const float4 *)betta)[4 * threadIdx.x + 1]; + float4 vbetta_2 = ((const float4 *)betta)[4 * threadIdx.x + 2]; + float4 vbetta_3 = ((const float4 *)betta)[4 * threadIdx.x + 3]; + __half2 *betta_h2 = reinterpret_cast<__half2 *>(&vbetta); + __half2 *betta_h2_1 = reinterpret_cast<__half2 *>(&vbetta_1); + __half2 *betta_h2_2 = reinterpret_cast<__half2 *>(&vbetta_2); + __half2 *betta_h2_3 = reinterpret_cast<__half2 *>(&vbetta_3); +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 vout = __half22float2(tmp_h2[i]); + float2 vout_1 = __half22float2(tmp_h2_1[i]); + float2 vout_2 = __half22float2(tmp_h2_2[i]); + float2 vout_3 = __half22float2(tmp_h2_3[i]); + float2 vgamma = __half22float2(gamma_h2[i]); + float2 vgamma_1 = __half22float2(gamma_h2_1[i]); + float2 vgamma_2 = __half22float2(gamma_h2_2[i]); + float2 vgamma_3 = __half22float2(gamma_h2_3[i]); + float2 vbetta = __half22float2(betta_h2[i]); + float2 vbetta_1 = __half22float2(betta_h2_1[i]); + float2 vbetta_2 = __half22float2(betta_h2_2[i]); + float2 vbetta_3 = __half22float2(betta_h2_3[i]); + xhat[i].x = (vout.x - vbetta.x) / add_eps(vgamma.x); + xhat_1[i].x = (vout_1.x - vbetta_1.x) / add_eps(vgamma_1.x); + xhat_2[i].x = (vout_2.x - vbetta_2.x) / add_eps(vgamma_2.x); + xhat_3[i].x = (vout_3.x - vbetta_3.x) / add_eps(vgamma_3.x); + xhat[i].y = (vout.y - vbetta.y) / add_eps(vgamma.y); + xhat_1[i].y = (vout_1.y - vbetta_1.y) / add_eps(vgamma_1.y); + xhat_2[i].y = (vout_2.y - vbetta_2.y) / add_eps(vgamma_2.y); + xhat_3[i].y = (vout_3.y - vbetta_3.y) / add_eps(vgamma_3.y); + reduce_val[1] += xhat[i].x * dxhat[i].x + xhat[i].y * dxhat[i].y; + reduce_val[1] += + xhat_1[i].x * dxhat_1[i].x + xhat_1[i].y * dxhat_1[i].y; + reduce_val[1] += + xhat_2[i].x * dxhat_2[i].x + xhat_2[i].y * dxhat_2[i].y; + reduce_val[1] += + xhat_3[i].x * dxhat_3[i].x + xhat_3[i].y * dxhat_3[i].y; + } + } else { + // inp_or_out is input, xhat = (input - mean) * rsqrtf(var) + float fmean = (float)means[blockIdx.x]; +#pragma unroll + for (int i = 0; i < 4; i++) { + float2 vinp = __half22float2(tmp_h2[i]); + float2 vinp_1 = __half22float2(tmp_h2_1[i]); + float2 vinp_2 = __half22float2(tmp_h2_2[i]); + float2 vinp_3 = __half22float2(tmp_h2_3[i]); + xhat[i].x = (vinp.x - fmean) * var_rsqrt; + xhat_1[i].x = (vinp_1.x - fmean) * var_rsqrt; + xhat_2[i].x = (vinp_2.x - fmean) * var_rsqrt; + xhat_3[i].x = (vinp_3.x - fmean) * var_rsqrt; + xhat[i].y = (vinp.y - fmean) * var_rsqrt; + xhat_1[i].y = (vinp_1.y - fmean) * var_rsqrt; + xhat_2[i].y = (vinp_2.y - fmean) * var_rsqrt; + xhat_3[i].y = (vinp_3.y - fmean) * var_rsqrt; + reduce_val[1] += xhat[i].x * dxhat[i].x + xhat[i].y * dxhat[i].y; + reduce_val[1] += + xhat_1[i].x * dxhat_1[i].x + xhat_1[i].y * dxhat_1[i].y; + reduce_val[1] += + xhat_2[i].x * dxhat_2[i].x + xhat_2[i].y * dxhat_2[i].y; + reduce_val[1] += + xhat_3[i].x * dxhat_3[i].x + xhat_3[i].y * dxhat_3[i].y; + } + } + } + + /* step2. 
block reduce sum for dxhat and dxhat*xhat */ + blockReduce(reduce_val); + __shared__ float s_sum_dxhat, s_sum_dxhat_xhat; + if (threadIdx.x == 0) { + float mean_dim = hidden_dim * 8 * 4; + s_sum_dxhat = reduce_val[0] / mean_dim; + s_sum_dxhat_xhat = reduce_val[1] / mean_dim; + } + __syncthreads(); + + /* + step3. compute input gradient + (dxhat - (sum(dxhat) + xhat * sum(dxhat * xhat)) / mean_dim) * rsqrt(var) + */ + if (threadIdx.x >= hidden_dim) { + return; + } + if (residual_grad) { + // Add the residual grad, + // usually in pre-layer-norm for transformer layer + float4 dresidual = ((const float4 *)residual_grad)[offset]; + float4 dresidual_1 = ((const float4 *)residual_grad)[offset + 1]; + float4 dresidual_2 = ((const float4 *)residual_grad)[offset + 2]; + float4 dresidual_3 = ((const float4 *)residual_grad)[offset + 3]; + __half *hdres = reinterpret_cast<__half *>(&dresidual); + __half *hdres_1 = reinterpret_cast<__half *>(&dresidual_1); + __half *hdres_2 = reinterpret_cast<__half *>(&dresidual_2); + __half *hdres_3 = reinterpret_cast<__half *>(&dresidual_3); +#pragma unroll + for (int i = 0; i < 4; i++) { + tmp_h2[i].x = __float2half( + (dxhat[i].x - s_sum_dxhat - xhat[i].x * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres[2 * i])); + tmp_h2_1[i].x = __float2half( + (dxhat_1[i].x - s_sum_dxhat - xhat_1[i].x * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres_1[2 * i])); + tmp_h2_2[i].x = __float2half( + (dxhat_2[i].x - s_sum_dxhat - xhat_2[i].x * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres_2[2 * i])); + tmp_h2_3[i].x = __float2half( + (dxhat_3[i].x - s_sum_dxhat - xhat_3[i].x * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres_3[2 * i])); + tmp_h2[i].y = __float2half( + (dxhat[i].y - s_sum_dxhat - xhat[i].y * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres[2 * i + 1])); + tmp_h2_1[i].y = __float2half( + (dxhat_1[i].y - s_sum_dxhat - xhat_1[i].y * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres_1[2 * i + 1])); + tmp_h2_2[i].y = __float2half( + (dxhat_2[i].y - s_sum_dxhat - xhat_2[i].y * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres_1[2 * i + 1])); + tmp_h2_3[i].y = __float2half( + (dxhat_3[i].y - s_sum_dxhat - xhat_3[i].y * s_sum_dxhat_xhat) * + var_rsqrt + + __half2float(hdres_1[2 * i + 1])); + } + } else { +#pragma unroll + for (int i = 0; i < 4; i++) { + tmp_h2[i].x = __float2half( + (dxhat[i].x - s_sum_dxhat - xhat[i].x * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2_1[i].x = __float2half( + (dxhat_1[i].x - s_sum_dxhat - xhat_1[i].x * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2_2[i].x = __float2half( + (dxhat_2[i].x - s_sum_dxhat - xhat_2[i].x * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2_3[i].x = __float2half( + (dxhat_3[i].x - s_sum_dxhat - xhat_3[i].x * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2[i].y = __float2half( + (dxhat[i].y - s_sum_dxhat - xhat[i].y * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2_1[i].y = __float2half( + (dxhat_1[i].y - s_sum_dxhat - xhat_1[i].y * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2_2[i].y = __float2half( + (dxhat_2[i].y - s_sum_dxhat - xhat_2[i].y * s_sum_dxhat_xhat) * + var_rsqrt); + tmp_h2_3[i].y = __float2half( + (dxhat_3[i].y - s_sum_dxhat - xhat_3[i].y * s_sum_dxhat_xhat) * + var_rsqrt); + } + } + ((float4 *)inp_grad)[offset] = vtmp; + ((float4 *)inp_grad)[offset + 1] = vtmp_1; + ((float4 *)inp_grad)[offset + 2] = vtmp_2; + ((float4 *)inp_grad)[offset + 3] = vtmp_3; +} + +/** +Layer norm backword, + compute the gradient of gamma, betta and input. 
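+
+A typical call site might look like the sketch below (an illustration only:
+the buffer pointers, batch_size, seq_len and hidden_size are hypothetical
+names, allocation and error checking are omitted, and betta is passed as
+nullptr because the means saved by the forward pass are supplied instead).
+The gradient formulas implemented by the launched kernels are listed right
+after this sketch.
+
+  cudaStream_t streams[2];
+  cudaStreamCreate(&streams[0]);
+  cudaStreamCreate(&streams[1]);
+  launch_ln_bw<float>(gamma_grad, betta_grad, inp_grad, out_grad,
+                      nullptr,  // no residual gradient for a post-ln layer
+                      ln_input, gamma, nullptr, vars, means,
+                      batch_size * seq_len, hidden_size, streams);
+  cudaStreamSynchronize(streams[0]);
+  cudaStreamSynchronize(streams[1]);
+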
+dbetta = sum(dout, dim=0)
+xhat = (input - mean) * rsqrt(var) if mean is not nullptr
+       (output - betta) / gamma if mean is nullptr
+dgamma = sum(xhat * dout, dim=0)
+dxhat = dout * gamma
+dinp = (dxhat - (sum(dxhat, 1) + xhat * sum(dxhat * xhat, 1)) / hidden_dim)
+       * rsqrt(var)
+
+residual_grad, means, betta can be nullptr.
+residual_grad will be added to dinp if it is not nullptr
+  which is useful in transformer layer when pre-ln
+means and betta are only used to compute xhat,
+  (means == nullptr) ^ (betta == nullptr) should be true
+*/
+template <>
+void launch_ln_bw<float>(float *gamma_grad, float *betta_grad, float *inp_grad,
+                         const float *out_grad, const float *residual_grad,
+                         const float *inp_or_out, const float *gamma,
+                         const float *betta, const float *vars,
+                         const float *means, int batch, int hidden_dim,
+                         cudaStream_t stream[2]) {
+  // compute grad of gamma and betta
+  dim3 grid_dim(((hidden_dim + TILE_DIM - 1) / TILE_DIM) * TILE_DIM);
+  dim3 block_dim(TILE_DIM, TILE_DIM);
+  ker_ln_bw_dgamma_dbetta<float><<<grid_dim, block_dim, 0, stream[0]>>>(
+      gamma_grad, betta_grad, out_grad, inp_or_out, gamma, betta, vars, means,
+      batch, hidden_dim);
+
+  // compute grad of input
+  if (hidden_dim % 4 != 0 || hidden_dim > 4096) {
+    throw std::runtime_error("hidden_dim % 4 != 0 || hidden_dim > 4096");
+  }
+  hidden_dim >>= 2;
+  int nthread = min(((hidden_dim + 31) / 32) * 32, MAX_THREADS);
+  ker_ln_bw_dinp<float><<<batch, nthread, 0, stream[1]>>>(
+      inp_grad, out_grad, residual_grad, inp_or_out, gamma, betta, vars, means,
+      hidden_dim);
+}
+
+template <>
+void launch_ln_bw<__half>(__half *gamma_grad, __half *betta_grad,
+                          __half *inp_grad, const __half *out_grad,
+                          const __half *residual_grad, const __half *inp_or_out,
+                          const __half *gamma, const __half *betta,
+                          const __half *vars, const __half *means, int batch,
+                          int hidden_dim, cudaStream_t stream[2]) {
+  // compute grad of gamma and betta
+  dim3 grid_dim(((hidden_dim + TILE_DIM - 1) / TILE_DIM) * TILE_DIM);
+  dim3 block_dim(TILE_DIM, TILE_DIM);
+  ker_ln_bw_dgamma_dbetta<__half><<<grid_dim, block_dim, 0, stream[0]>>>(
+      gamma_grad, betta_grad, out_grad, inp_or_out, gamma, betta, vars, means,
+      batch, hidden_dim);
+
+  // compute grad of input
+  if (hidden_dim % 8 != 0) {
+    throw std::runtime_error("hidden_dim % 8 != 0");
+  }
+  hidden_dim >>= 3;
+
+  if (hidden_dim * 8 <= 8192) {
+    int nthread = min(((hidden_dim + 31) / 32) * 32, MAX_THREADS);
+    ker_ln_bw_dinp<__half><<<batch, nthread, 0, stream[1]>>>(
+        inp_grad, out_grad, residual_grad, inp_or_out, gamma, betta, vars,
+        means, hidden_dim);
+  } else if (hidden_dim * 8 > 8192 && hidden_dim * 8 <= 8192 * 2) {
+    hidden_dim >>= 1;
+    int nthread = min(((hidden_dim + 31) / 32) * 32, MAX_THREADS);
+    ker_ln_bw_dinp_x2<<<batch, nthread, 0, stream[1]>>>(
+        inp_grad, out_grad, residual_grad, inp_or_out, gamma, betta, vars,
+        means, hidden_dim);
+  } else if (hidden_dim * 8 > 2 * 8192 && hidden_dim * 8 <= 8192 * 4) {
+    hidden_dim >>= 2;
+    int nthread = min(((hidden_dim + 31) / 32) * 32, MAX_THREADS);
+    ker_ln_bw_dinp_x4<<<batch, nthread, 0, stream[1]>>>(
+        inp_grad, out_grad, residual_grad, inp_or_out, gamma, betta, vars,
+        means, hidden_dim);
+  } else {
+    throw std::runtime_error("hidden_dim % 4 != 0 || hidden_dim > 32768");
+  }
+}
diff --git a/cuda_code/normalize_op.cu b/cuda_code/normalize_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..af056af28356212ef56ef6c87a9340540482e020 --- /dev/null +++ b/cuda_code/normalize_op.cu @@ -0,0 +1,161 @@
+#include 
+
+#include "caffe2/core/context_gpu.h"
+#include "caffe2/operators/normalize_op.h"
+
+namespace caffe2 {
+
+__global__ void
+NormalizeKernel(const int M, const int N, const float* data_in, float* out) {
+  typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS>
BlockReduce; + __shared__ BlockReduce::TempStorage temp_storage; + for (int i = blockIdx.x; i < M; i += gridDim.x) { + float sum_squares = 0.0; + __shared__ float row_sum_squares; + for (int j = threadIdx.x; j < N; j += blockDim.x) { + const float x_ij = data_in[i * N + j]; + sum_squares += x_ij * x_ij; + } + float reduce_result = BlockReduce(temp_storage).Sum(sum_squares); + + if (threadIdx.x == 0) { + row_sum_squares = reduce_result; + } + __syncthreads(); + for (int j = threadIdx.x; j < N; j += blockDim.x) { + const int index = i * N + j; + out[index] = data_in[index] / sqrt(row_sum_squares); + } + } +} + +__global__ void NormalizeGradientKernel( + const int M, + const int N, + const float* in_mat, + const float* grad_out_mat, + float* grad_mat) { + typedef cub::BlockReduce BlockReduce; + __shared__ BlockReduce::TempStorage temp_storage_sum; + __shared__ BlockReduce::TempStorage temp_storage_norm; + for (int i = blockIdx.x; i < M; i += gridDim.x) { + float sum = 0.0; + float norm = 0.0; + __shared__ float row_sum; + __shared__ float row_norm; + __shared__ float row_norm_3; + for (int j = threadIdx.x; j < N; j += blockDim.x) { + const int index = i * N + j; + sum += in_mat[index] * grad_out_mat[index]; + norm += in_mat[index] * in_mat[index]; + } + float reduce_result = BlockReduce(temp_storage_sum).Sum(sum); + float reduce_norm = BlockReduce(temp_storage_norm).Sum(norm); + + if (threadIdx.x == 0) { + row_sum = reduce_result; + row_norm = sqrt(reduce_norm); + row_norm_3 = pow(row_norm, 3); + } + __syncthreads(); + for (int j = threadIdx.x; j < N; j += blockDim.x) { + const int index = i * N + j; + const float x_ij = in_mat[index]; + const float dy_ij = grad_out_mat[index]; + grad_mat[index] = (dy_ij / row_norm) - ((x_ij / row_norm_3) * row_sum); + } + } +} + +template <> +bool NormalizeOp::RunOnDevice() { + auto& X = Input(0); + auto* Y = Output(0); + Y->ResizeLike(X); + int N = X.dim32(X.ndim() - 1); + int M = X.size() / N; + NormalizeKernel<<< + min(M, CAFFE_MAXIMUM_NUM_BLOCKS), + CAFFE_CUDA_NUM_THREADS, + 0, + context_.cuda_stream()>>>( + M, N, X.data(), Y->mutable_data()); + return true; +} + +template <> +bool NormalizeGradientOp::RunOnDevice() { + const auto& X = Input(0); + const auto& dY = Input(1); + auto* dX = Output(0); + dX->ResizeLike(X); + int N = X.dim32(X.ndim() - 1); + int M = X.size() / N; + NormalizeGradientKernel<<< + min(M, CAFFE_MAXIMUM_NUM_BLOCKS), + CAFFE_CUDA_NUM_THREADS, + 0, + context_.cuda_stream()>>>( + M, + N, + X.data(), + dY.data(), + dX->mutable_data()); + return true; +} + +namespace { +__global__ void NormalizeL1Kernel( + const int m, + const int n, + const int sf, + const float* xData, + float* yData) { + typedef cub::BlockReduce BlockReduce; + __shared__ BlockReduce::TempStorage temp_storage; + + for (int i = blockIdx.x; i < n; i += gridDim.x) { + auto base = (i / sf) * sf * m + (i % sf); + + float sum = 0.0; + __shared__ float norm; + for (int j = threadIdx.x; j < m; j += blockDim.x) { + const auto x_ij = xData[base + j * sf]; + sum += abs(x_ij); + } + float reduce_result = BlockReduce(temp_storage).Sum(sum); + + if (threadIdx.x == 0) { + norm = reduce_result; + } + __syncthreads(); + if (norm != 0) { + for (int j = threadIdx.x; j < m; j += blockDim.x) { + const auto index = base + j * sf; + yData[index] = xData[index] / norm; + } + } + } +} +} // namespace + +template <> +void NormalizeL1Op::DoNormalize( + const float* xData, + float* yData, + const int m, + const int n, + const int sf) { + NormalizeL1Kernel<<< + min(n, CAFFE_MAXIMUM_NUM_BLOCKS), + 
CAFFE_CUDA_NUM_THREADS, + 0, + context_.cuda_stream()>>>(m, n, sf, xData, yData); +} + +REGISTER_CUDA_OPERATOR(Normalize, NormalizeOp); +REGISTER_CUDA_OPERATOR( + NormalizeGradient, + NormalizeGradientOp); +REGISTER_CUDA_OPERATOR(NormalizeL1, NormalizeL1Op); +} // namespace diff --git a/cuda_code/np_matrix_op_12.cu b/cuda_code/np_matrix_op_12.cu new file mode 100644 index 0000000000000000000000000000000000000000..10ff0eac2c29a06816968f0dd73ef97b125d44cf --- /dev/null +++ b/cuda_code/np_matrix_op_12.cu @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2019 by Contributors + * \file np_matrix_op.cu + * \brief GPU Implementation of numpy matrix operations + */ + +#include "./np_matrix_op-inl.h" +#include "../nn/concat-inl.h" + +namespace mxnet { +namespace op { + +NNVM_REGISTER_OP(_np_transpose) +.set_attr("FCompute", NumpyTranspose); + +NNVM_REGISTER_OP(_np_reshape) +.set_attr("FCompute", UnaryOp::IdentityCompute); + +NNVM_REGISTER_OP(_np_squeeze) +.set_attr("FCompute", UnaryOp::IdentityCompute); + +NNVM_REGISTER_OP(_npi_concatenate) +.set_attr("FCompute", NumpyConcatenateForward); + +NNVM_REGISTER_OP(_backward_np_concat) +.set_attr("FCompute", NumpyConcatenateBackward); + +NNVM_REGISTER_OP(_npi_stack) +.set_attr("FCompute", StackOpForward); + +NNVM_REGISTER_OP(_npi_vstack) +.set_attr("FCompute", NumpyVstackForward); + +NNVM_REGISTER_OP(_backward_np_vstack) +.set_attr("FCompute", NumpyVstackBackward); + +NNVM_REGISTER_OP(_npi_dstack) +.set_attr("FCompute", DStackCompute); + +NNVM_REGISTER_OP(_backward_np_dstack) +.set_attr("FCompute", DStackGradCompute); + +NNVM_REGISTER_OP(_npi_column_stack) +.set_attr("FCompute", NumpyColumnStackForward); + +NNVM_REGISTER_OP(_backward_np_column_stack) +.set_attr("FCompute", NumpyColumnStackBackward); + +NNVM_REGISTER_OP(_np_roll) +.set_attr("FCompute", NumpyRollCompute); + +template<> +void NumpyFlipForwardImpl(const OpContext& ctx, + const std::vector& inputs, + const std::vector& outputs, + const std::vector& stride_, + const std::vector& trailing_, + const index_t& flip_index) { + mshadow::Stream *s = ctx.get_stream(); + mshadow::Tensor workspace = + ctx.requested[0].get_space_typed( + mshadow::Shape1(flip_index * sizeof(index_t) * 2), s); + + auto stride_workspace = workspace.dptr_; + auto trailing_workspace = workspace.dptr_ + flip_index * sizeof(index_t); + + cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()), + stride_.size() * sizeof(index_t), + cudaMemcpyHostToDevice, mshadow::Stream::GetStream(s)); + cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()), + trailing_.size() * sizeof(index_t), + cudaMemcpyHostToDevice, mshadow::Stream::GetStream(s)); + + MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, 
{ + mxnet_op::Kernel::Launch(s, inputs[0].Size(), flip_index, + inputs[0].dptr(), outputs[0].dptr(), + reinterpret_cast(stride_workspace), reinterpret_cast(trailing_workspace)); + }); +} + +NNVM_REGISTER_OP(_npi_flip) +.set_attr("FCompute", NumpyFlipForward); + +NNVM_REGISTER_OP(_backward_npi_flip) +.set_attr("FCompute", NumpyFlipForward); + +NNVM_REGISTER_OP(_np_moveaxis) +.set_attr("FCompute", NumpyMoveaxisCompute); + +NNVM_REGISTER_OP(_npi_rot90) +.set_attr("FCompute", NumpyRot90Compute); + +NNVM_REGISTER_OP(_npi_hsplit) +.set_attr("FCompute", HSplitOpForward); + +NNVM_REGISTER_OP(_npi_hsplit_backward) +.set_attr("FCompute", HSplitOpBackward); + +NNVM_REGISTER_OP(_npx_reshape) +.set_attr("FCompute", UnaryOp::IdentityCompute); + +NNVM_REGISTER_OP(_np_diag) +.set_attr("FCompute", NumpyDiagOpForward); + +NNVM_REGISTER_OP(_backward_np_diag) +.set_attr("FCompute", NumpyDiagOpBackward); + +NNVM_REGISTER_OP(_np_diagonal) +.set_attr("FCompute", NumpyDiagonalOpForward); + +NNVM_REGISTER_OP(_backward_np_diagonal) +.set_attr("FCompute", NumpyDiagonalOpBackward); + +NNVM_REGISTER_OP(_np_diagflat) +.set_attr("FCompute", NumpyDiagflatOpForward); + +NNVM_REGISTER_OP(_backward_np_diagflat) +.set_attr("FCompute", NumpyDiagflatOpBackward); + +} // namespace op +} // namespace mxnet diff --git a/cuda_code/np_matrix_op_7.cu b/cuda_code/np_matrix_op_7.cu new file mode 100644 index 0000000000000000000000000000000000000000..be161287887e26b6254103a01b19a44e968cd2df --- /dev/null +++ b/cuda_code/np_matrix_op_7.cu @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! 
+ * Copyright (c) 2019 by Contributors + * \file np_matrix_op.cu + * \brief GPU Implementation of numpy matrix operations + */ + +#include "./np_matrix_op-inl.h" +#include "../nn/concat-inl.h" + +namespace mxnet { +namespace op { + +NNVM_REGISTER_OP(_np_transpose) +.set_attr("FCompute", NumpyTranspose); + +NNVM_REGISTER_OP(_np_reshape) +.set_attr("FCompute", UnaryOp::IdentityCompute); + +NNVM_REGISTER_OP(_np_squeeze) +.set_attr("FCompute", UnaryOp::IdentityCompute); + +NNVM_REGISTER_OP(_npi_concatenate) +.set_attr("FCompute", NumpyConcatenateForward); + +NNVM_REGISTER_OP(_backward_np_concat) +.set_attr("FCompute", NumpyConcatenateBackward); + +NNVM_REGISTER_OP(_npi_stack) +.set_attr("FCompute", StackOpForward); + +NNVM_REGISTER_OP(_npi_vstack) +.set_attr("FCompute", NumpyVstackForward); + +NNVM_REGISTER_OP(_backward_np_vstack) +.set_attr("FCompute", NumpyVstackBackward); + +NNVM_REGISTER_OP(_npi_hstack) +.set_attr("FCompute", HStackCompute); + +NNVM_REGISTER_OP(_backward_np_hstack) +.set_attr("FCompute", HStackGradCompute); + +NNVM_REGISTER_OP(_npi_dstack) +.set_attr("FCompute", DStackCompute); + +NNVM_REGISTER_OP(_backward_np_dstack) +.set_attr("FCompute", DStackGradCompute); + +NNVM_REGISTER_OP(_npi_column_stack) +.set_attr("FCompute", NumpyColumnStackForward); + +NNVM_REGISTER_OP(_backward_np_column_stack) +.set_attr("FCompute", NumpyColumnStackBackward); + +NNVM_REGISTER_OP(_np_roll) +.set_attr("FCompute", NumpyRollCompute); + +template<> +void NumpyFlipForwardImpl(const OpContext& ctx, + const std::vector& inputs, + const std::vector& outputs, + const std::vector& stride_, + const std::vector& trailing_, + const index_t& flip_index) { + mshadow::Stream *s = ctx.get_stream(); + mshadow::Tensor workspace = + ctx.requested[0].get_space_typed( + mshadow::Shape1(flip_index * sizeof(index_t) * 2), s); + + auto stride_workspace = workspace.dptr_; + auto trailing_workspace = workspace.dptr_ + flip_index * sizeof(index_t); + + cudaMemcpyAsync(stride_workspace, thrust::raw_pointer_cast(stride_.data()), + stride_.size() * sizeof(index_t), + cudaMemcpyHostToDevice, mshadow::Stream::GetStream(s)); + cudaMemcpyAsync(trailing_workspace, thrust::raw_pointer_cast(trailing_.data()), + trailing_.size() * sizeof(index_t), + cudaMemcpyHostToDevice, mshadow::Stream::GetStream(s)); + + MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { + mxnet_op::Kernel::Launch(s, inputs[0].Size(), flip_index, + inputs[0].dptr(), outputs[0].dptr(), + reinterpret_cast(stride_workspace), reinterpret_cast(trailing_workspace)); + }); +} + +NNVM_REGISTER_OP(_npi_flip) +.set_attr("FCompute", NumpyFlipForward); + +NNVM_REGISTER_OP(_backward_npi_flip) +.set_attr("FCompute", NumpyFlipForward); + +NNVM_REGISTER_OP(_np_moveaxis) +.set_attr("FCompute", NumpyMoveaxisCompute); + +NNVM_REGISTER_OP(_npi_rot90) +.set_attr("FCompute", NumpyRot90Compute); + +NNVM_REGISTER_OP(_npi_hsplit) +.set_attr("FCompute", HSplitOpForward); + +NNVM_REGISTER_OP(_npi_hsplit_backward) +.set_attr("FCompute", HSplitOpBackward); + +NNVM_REGISTER_OP(_npx_reshape) +.set_attr("FCompute", UnaryOp::IdentityCompute); + +NNVM_REGISTER_OP(_np_diag) +.set_attr("FCompute", NumpyDiagOpForward); + +NNVM_REGISTER_OP(_backward_np_diag) +.set_attr("FCompute", NumpyDiagOpBackward); + +NNVM_REGISTER_OP(_np_diagonal) +.set_attr("FCompute", NumpyDiagonalOpForward); + +NNVM_REGISTER_OP(_backward_np_diagonal) +.set_attr("FCompute", NumpyDiagonalOpBackward); + +NNVM_REGISTER_OP(_np_diagflat) +.set_attr("FCompute", NumpyDiagflatOpForward); + +NNVM_REGISTER_OP(_backward_np_diagflat) 
+.set_attr("FCompute", NumpyDiagflatOpBackward); + +} // namespace op +} // namespace mxnet diff --git a/cuda_code/nstream-device-thrust.cu b/cuda_code/nstream-device-thrust.cu new file mode 100644 index 0000000000000000000000000000000000000000..fb60e94ff6ba1e90a8d406fd17dd922b5138169e --- /dev/null +++ b/cuda_code/nstream-device-thrust.cu @@ -0,0 +1,181 @@ +/// +/// Copyright (c) 2017, Intel Corporation +/// +/// Redistribution and use in source and binary forms, with or without +/// modification, are permitted provided that the following conditions +/// are met: +/// +/// * Redistributions of source code must retain the above copyright +/// notice, this list of conditions and the following disclaimer. +/// * Redistributions in binary form must reproduce the above +/// copyright notice, this list of conditions and the following +/// disclaimer in the documentation and/or other materials provided +/// with the distribution. +/// * Neither the name of Intel Corporation nor the names of its +/// contributors may be used to endorse or promote products +/// derived from this software without specific prior written +/// permission. +/// +/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +/// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +/// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +/// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +/// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +/// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +/// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +/// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +/// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +/// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +/// POSSIBILITY OF SUCH DAMAGE. + +////////////////////////////////////////////////////////////////////// +/// +/// NAME: nstream +/// +/// PURPOSE: To compute memory bandwidth when adding a vector of a given +/// number of double precision values to the scalar multiple of +/// another vector of the same length, and storing the result in +/// a third vector. +/// +/// USAGE: The program takes as input the number +/// of iterations to loop over the triad vectors, the length of the +/// vectors, and the offset between vectors +/// +/// <# iterations> +/// +/// The output consists of diagnostics to make sure the +/// algorithm worked, and of timing statistics. +/// +/// NOTES: Bandwidth is determined as the number of words read, plus the +/// number of words written, times the size of the words, divided +/// by the execution time. For a vector length of N, the total +/// number of words read and written is 4*N*sizeof(double). +/// +/// +/// HISTORY: This code is loosely based on the Stream benchmark by John +/// McCalpin, but does not follow all the Stream rules. Hence, +/// reported results should not be associated with Stream in +/// external publications +/// +/// Converted to C++11 by Jeff Hammond, November 2017. 
+/// +////////////////////////////////////////////////////////////////////// + +#include "prk_util.h" +#include "prk_cuda.h" +#include "prk_thrust.h" + +int main(int argc, char * argv[]) +{ + std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; + std::cout << "C++11/Thrust STREAM triad: A = B + scalar * C" << std::endl; + + ////////////////////////////////////////////////////////////////////// + /// Read and test input parameters + ////////////////////////////////////////////////////////////////////// + + int iterations, offset; + size_t length; + try { + if (argc < 3) { + throw "Usage: <# iterations> "; + } + + iterations = std::atoi(argv[1]); + if (iterations < 1) { + throw "ERROR: iterations must be >= 1"; + } + + length = std::atol(argv[2]); + if (length <= 0) { + throw "ERROR: vector length must be positive"; + } + + offset = (argc>3) ? std::atoi(argv[3]) : 0; + if (length <= 0) { + throw "ERROR: offset must be nonnegative"; + } + } + catch (const char * e) { + std::cout << e << std::endl; + return 1; + } + + std::cout << "Number of iterations = " << iterations << std::endl; + std::cout << "Vector length = " << length << std::endl; + std::cout << "Offset = " << offset << std::endl; + + ////////////////////////////////////////////////////////////////////// + // Allocate space and perform the computation + ////////////////////////////////////////////////////////////////////// + + auto nstream_time = 0.0; + + thrust::device_vector A(length); + thrust::device_vector B(length); + thrust::device_vector C(length); + + double scalar(3); + { + thrust::fill(thrust::device, A.begin(), A.end(), 0.0); + thrust::fill(thrust::device, B.begin(), B.end(), 2.0); + thrust::fill(thrust::device, C.begin(), C.end(), 2.0); + + auto nstream = [=] __host__ __device__ (thrust::tuple t) { + thrust::get<0>(t) += thrust::get<1>(t) + scalar * thrust::get<2>(t); + }; + + for (int iter = 0; iter<=iterations; iter++) { + + if (iter==1) nstream_time = prk::wtime(); + + thrust::for_each( thrust::device, + thrust::make_zip_iterator(thrust::make_tuple(A.begin(), B.begin(), C.begin())), + thrust::make_zip_iterator(thrust::make_tuple(A.end() , B.end() , C.end())), + nstream); + prk::CUDA::check( cudaDeviceSynchronize() ); + } + nstream_time = prk::wtime() - nstream_time; + } + + ////////////////////////////////////////////////////////////////////// + /// Analyze and output results + ////////////////////////////////////////////////////////////////////// + + double ar(0); + double br(2); + double cr(2); + for (int i=0; i<=iterations; i++) { + ar += br + scalar * cr; + } + + ar *= length; + + //double asum = thrust::reduce(A.begin(), A.end(), 0.0, thrust::plus()); + double asum = thrust::transform_reduce(A.begin(), + A.end(), + [=] __host__ __device__ (double x) -> double { return fabs(x); }, + 0.0, + thrust::plus()); + + double epsilon(1.e-8); + if (prk::abs(ar-asum)/asum > epsilon) { + std::cout << "Failed Validation on output array\n" + << std::setprecision(16) + << " Expected checksum: " << ar << "\n" + << " Observed checksum: " << asum << std::endl; + std::cout << "ERROR: solution did not validate" << std::endl; + return 1; + } else { + std::cout << "Solution validates" << std::endl; + double avgtime = nstream_time/iterations; + double nbytes = 4.0 * length * sizeof(double); + std::cout << "Rate (MB/s): " << 1.e-6*nbytes/avgtime + << " Avg time (s): " << avgtime << std::endl; + } + + return 0; +} + + diff --git a/cuda_code/nufft_1d_3_1_test.cu b/cuda_code/nufft_1d_3_1_test.cu new file mode 100644 
index 0000000000000000000000000000000000000000..5e92231427164a583a0709e17baa78c7a13eaf43 --- /dev/null +++ b/cuda_code/nufft_1d_3_1_test.cu @@ -0,0 +1,250 @@ +#include +#include +#include +#include +#include +#include +//#include +using namespace thrust; + + +#include "ragridder_plan.h" +#include "conv_interp_invoker.h" + +#include "cuft.h" +#include "deconv.h" +#include "cugridder.h" +#include "precomp.h" +#include "utils.h" + + +int main(int argc, char *argv[]) +{ + /* Input: M, N1, N2, epsilon method + method - conv method + M - number of randomly distributed points + N1, N2 - output size + epsilon - tolerance + */ + + // issue related to accuary - how to set sigma, epsilon, number of plane, beta and kw. the number of w plane may need to increase. + int ier = 0; + int N = 100; + PCS sigma = 2; // upsampling factor + int M = 100; + + + PCS epsilon = 1e-6; + + int kerevalmeth = 0; + + int method=0; + + //gpu_method == 0, nupts driven + + //int ier; + PCS *u; + CPX *c; + u = (PCS *)malloc(M * sizeof(PCS)); //Allocates page-locked memory on the host. + c = (CPX *)malloc(M * sizeof(CPX)); + PCS *d_u; + CUCPX *d_c, *d_fk; + CUCPX *d_fw; + checkCudaErrors(cudaMalloc(&d_u, M * sizeof(PCS))); + checkCudaErrors(cudaMalloc(&d_c, M * sizeof(CUCPX))); + /// pixel size + // generating data + for (int i = 0; i < M; i++) + { + u[i] = 2.0 + i*PI/(double)M; //xxxxx + c[i].real(randm11()*1000); + c[i].imag(i); + // wgt[i] = 1; + } + + PCS *k = (PCS*) malloc(sizeof(PCS)*N*10); + // PCS pixelsize = 0.01; + for (int i = 0; i < N; i++) + { + /* code */ + // k[i] = (int)i-N/2; + k[i] = -(double)i/(double)N; + // k[i] = i/(double)N; + // k[i] = i-N/2 + randm11(); + printf("%.10lf ",k[i]); + + } + printf("\n"); + + //data transfer + checkCudaErrors(cudaMemcpy(d_u, u, M * sizeof(PCS), cudaMemcpyHostToDevice)); //u + checkCudaErrors(cudaMemcpy(d_c, c, M * sizeof(CUCPX), cudaMemcpyHostToDevice)); + + /* ----------Step2: plan setting------------*/ + curafft_plan *plan; + + plan = new curafft_plan(); + memset(plan, 0, sizeof(curafft_plan)); + + + PCS *d_k; + checkCudaErrors(cudaMalloc((void**)&d_k,sizeof(PCS)*N)); + checkCudaErrors(cudaMemcpy(d_k,k,sizeof(PCS)*N,cudaMemcpyHostToDevice)); + plan->d_x = d_k; + int direction = 1; //inverse + + cunufft_setting(N,1,1,M,kerevalmeth,method,direction,epsilon,sigma,3,1,d_u,NULL,NULL,d_c,plan); + int nf1 = plan->nf1; + printf("conv info printing, sigma %lf, kw %d, beta %lf, nf1 %d\n",plan->copts.upsampfac,plan->copts.kw,plan->copts.ES_beta, nf1); + + // // fourier_series_appro_invoker(d_fwkerhalf,plan->copts,nf1/2+1); + PCS *fwkerhalf = (PCS *)malloc(sizeof(PCS)*(N)); + checkCudaErrors(cudaMemcpy(fwkerhalf, plan->fwkerhalf1, sizeof(PCS)*(N), cudaMemcpyDeviceToHost)); + + //fourier_series(fwkerhalf,k,plan->copts,N,nf1/2+1); +#ifdef DEBUG + printf("correction factor printing method1...\n"); + for (int i = 0; i < N; i++) + { + /* code */ + printf("%lf ",fwkerhalf[i]); + } + printf("\n"); +#endif + // fw (conv res set) + checkCudaErrors(cudaMalloc((void**)&d_fw,sizeof(CUCPX)*plan->nf1)); + checkCudaErrors(cudaMemset(d_fw, 0, sizeof(CUCPX)*plan->nf1)); + plan->fw = d_fw; + // fk malloc and set + checkCudaErrors(cudaMalloc((void**)&d_fk,sizeof(CUCPX)*N)); + plan->fk = d_fk; + + // calulating result + curafft_conv(plan); + CPX *fw = (CPX *)malloc(sizeof(CPX)*plan->nf1); + cudaMemcpy(fw,plan->fw,sizeof(CUCPX)*plan->nf1,cudaMemcpyDeviceToHost); +#ifdef DEBUG + printf("conv result printing...\n"); + + for (int i = 0; i < nf1; i++) + { + printf("%lf ",fw[i].real()); + } + printf("\n"); + 
+#endif + PCS *kp = (PCS *) malloc(sizeof(PCS)*N); + checkCudaErrors(cudaMemcpy(kp,plan->d_x,sizeof(PCS)*N,cudaMemcpyDeviceToHost)); + + CPX *fk = (CPX *)malloc(sizeof(CPX)*N); + memset(fk,0,sizeof(CPX)*N); + // dft + for (int i = 0; i < N; i++) + { + /* code */ + for (int j = 0; j < plan->nf1; j++) + { + if(jnf1/2; + // temp1 = (double)j/(double)nf1; + // if(j>=nf1/2){ + // temp1 = temp1 - 1.00000000; + // idx -= nf1; + // } + // temp1 *=PI * 2.0000000000; + // temp1 *= k[i]; + // fk[i] = fk[i] + fw[idx]*exp((double)temp1*IMA); + + // + // fk[i].real( temp2 ); + // fk[i].imag( temp3 ); + // if(jta.i_center[0],plan->ta.o_center[0]); + for(int i=0; ita.o_center[0])*plan->ta.i_center[0]*IMA); + } + + + // result printing + + printf("final result printing...\n"); + for(int i=0; i<10; i++){ + printf("%.10lf ",fk[i].real()); + + } + printf("\n"); + CPX *truth = (CPX *) malloc(sizeof(CPX)*N); + printf("ground truth printing...\n"); + for (int i = 0; i < N; i++) + { + truth[i] = 0; + for (int j = 0; j < M; j++) + { + truth[i] += c[j] * exp(k[i] * u[j] * IMA); + } + } + + for (int i = 0; i < 10; i++) + { + printf("%.10lf ", truth[i].real()); + } + printf("\n"); + + + // double fk_max = 0; + // for(int i=0; ifk_max)fk_max = abs(fk[i].real()); + // } + // printf("fk max %lf\n",fk_max); + CPX diff; + double err=0; + double nrm=0; + for(int i=0; i +#include +#include +#include +#include +#include +//#include +using namespace thrust; + + +#include "ragridder_plan.h" +#include "conv_interp_invoker.h" +#include "cuft.h" +#include "deconv.h" +#include "cugridder.h" +#include "precomp.h" +#include "utils.h" + + +int main(int argc, char *argv[]) +{ + /* Input: M, N1, N2, epsilon method + method - conv method + M - number of randomly distributed points + N1, N2 - output size + epsilon - tolerance + */ + int ier = 0; + if (argc < 4) + { + fprintf(stderr, + "Usage: W Stacking\n" + "Arguments:\n" + " N1, N2 : image size.\n" + " M: The number of randomly distributed points.\n" + " epsilon: NUFFT tolerance (default 1e-6).\n" + " kerevalmeth: Kernel evaluation method; one of\n" + " 0: Exponential of square root (default), or\n" + " 1: Horner evaluation.\n" + " method: One of\n" + " 0: nupts driven (default),\n" + " 2: sub-problem, or\n"); + return 1; + } + int N1, N2; + PCS sigma = 2.0; // upsampling factor + int M; + + double inp; + sscanf(argv[1], "%d", &N1); + sscanf(argv[2], "%d", &N2); + sscanf(argv[3], "%d", &M); + PCS epsilon = 1e-6; + if(argc>4){ + sscanf(argv[4], "%lf", &inp); + epsilon = inp; + } + int kerevalmeth = 0; + if(argc>5)sscanf(argv[5], "%d", &kerevalmeth); + int method=0; + if(argc>6)sscanf(argv[6], "%d", &method); + + //gpu_method == 0, nupts driven + + //int ier; + PCS *u, *v; + CPX *c; + u = (PCS *)malloc(M * sizeof(PCS)); //Allocates page-locked memory on the host. 
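+    // Note (added clarification): plain malloc() above returns pageable host memory;
+    // a page-locked (pinned) allocation would require cudaMallocHost() or cudaHostAlloc().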
+ v = (PCS *)malloc(M * sizeof(PCS)); + PCS *d_u, *d_v; + CUCPX *d_c, *d_fk; + CUCPX *d_fw; + checkCudaErrors(cudaMalloc(&d_u, M * sizeof(PCS))); + checkCudaErrors(cudaMalloc(&d_v, M * sizeof(PCS))); + checkCudaErrors(cudaMalloc(&d_c, M * sizeof(CUCPX))); + checkCudaErrors(cudaMalloc(&d_fk, N1*N2*sizeof(CUCPX))); + // generating data + for (int i = 0; i < M; i++) + { + u[i] = randm11()*PI; //xxxxx + printf("%lf ",u[i]); + v[i] = randm11()*PI; + // wgt[i] = 1; + } + printf("\n"); + CPX *fk = (CPX*) malloc(sizeof(CPX)*N1*N2); + for(int i=0; iopts.gpu_device_id = 0; + plan->opts.upsampfac = sigma; + plan->opts.gpu_sort = 1; + plan->opts.gpu_binsizex = -1; + plan->opts.gpu_binsizey = -1; + plan->opts.gpu_binsizez = -1; + plan->opts.gpu_kerevalmeth = kerevalmeth; + plan->opts.gpu_conv_only = 0; + plan->opts.gpu_gridder_method = method; + plan->type = 2; + + ier = setup_conv_opts(plan->copts, epsilon, sigma, 1, direction, kerevalmeth); //check the arguements + + if(ier!=0)printf("setup_error\n"); + + // plan setting + // cuda stream malloc in setup_plan + + + int nf1 = get_num_cells(N1,plan->copts); + int nf2 = get_num_cells(N2,plan->copts); + + plan->dim = 2; + setup_plan(nf1, nf2, 1, M, d_u, d_v, NULL, d_c, plan); + + plan->ms = N1; + plan->mt = N2; + plan->mu = 1; + plan->execute_flow = 1; + int iflag = -1; + int fftsign = (iflag>=0) ? 1 : -1; + + plan->iflag = fftsign; //may be useless| conflict with direction + plan->batchsize = 1; + + plan->copts.direction = direction; // related to type + + // // fw allocation + // checkCudaErrors(cudaMalloc((void**)&plan->fw,sizeof(CUCPX)*nf1*nf2*nf3)); + + // PCS *fwkerhalf1 = (PCS*)malloc(sizeof(PCS)*(plan->nf1/2+1)); + // onedim_fseries_kernel_seq(plan->nf1, fwkerhalf1, plan->copts); // used for correction + + // PCS *fwkerhalf2 = (PCS*)malloc(sizeof(PCS)*(plan->nf2/2+1)); + // onedim_fseries_kernel_seq(plan->nf2, fwkerhalf2, plan->copts); + + fourier_series_appro_invoker(plan->fwkerhalf1, plan->copts, plan->nf1/2+1); + printf("correction factor printing...\n"); + PCS *corr = (PCS*) malloc(sizeof(PCS)*(plan->nf1/2+1)); + checkCudaErrors(cudaMemcpy(corr,plan->fwkerhalf1,sizeof(PCS)*(plan->nf1/2+1),cudaMemcpyDeviceToHost)); + for(int i=0; i<10; i++){ + printf("%.3lf ",corr[i]); + } + printf("\n"); + fourier_series_appro_invoker(plan->fwkerhalf2, plan->copts, plan->nf2/2+1); + +#ifdef DEBUG + printf("nf1, nf2 %d %d\n",plan->nf1,plan->nf2); + printf("copts info printing...\n"); + printf("kw: %d, direction: %d, pirange: %d, upsampfac: %lf, \nbeta: %lf, halfwidth: %lf, c: %lf\n", + plan->copts.kw, + plan->copts.direction, + plan->copts.pirange, + plan->copts.upsampfac, + plan->copts.ES_beta, + plan->copts.ES_halfwidth, + plan->copts.ES_c); + + PCS *fwkerhalf1 = (PCS*)malloc(sizeof(PCS)*(plan->nf1/2+1)); + PCS *fwkerhalf2 = (PCS*)malloc(sizeof(PCS)*(plan->nf2/2+1)); + + checkCudaErrors(cudaMemcpy(fwkerhalf1,plan->fwkerhalf1,(plan->nf1/2+1)* + sizeof(PCS),cudaMemcpyDeviceToHost)); + + checkCudaErrors(cudaMemcpy(fwkerhalf2,plan->fwkerhalf2,(plan->nf2/2+1)* + sizeof(PCS),cudaMemcpyDeviceToHost)); + + printf("correction factor print...\n"); + for(int i=0; ifwkerhalf1,fwkerhalf1,(plan->nf1/2+1)* + // sizeof(PCS),cudaMemcpyHostToDevice)); + + // checkCudaErrors(cudaMemcpy(plan->fwkerhalf2,fwkerhalf2,(plan->nf2/2+1)* + // sizeof(PCS),cudaMemcpyHostToDevice)); + + // cufft plan setting + cufftHandle fftplan; + int n[] = {plan->nf2, plan->nf1}; + int inembed[] = {plan->nf2, plan->nf1}; + int onembed[] = {plan->nf2, plan->nf1}; + + // cufftCreate(&fftplan); + // 
cufftPlan2d(&fftplan,n[0],n[1],CUFFT_TYPE); + // the bach size sets as the num of w when memory is sufficent. Alternative way, set as a smaller number when memory is insufficient. + // and handle this piece by piece + cufftPlanMany(&fftplan,2,n,inembed,1,inembed[0]*inembed[1], + onembed,1,onembed[0]*onembed[1],CUFFT_TYPE,plan->nf3); //need to check and revise (the partial conv will be differnt) + plan->fftplan = fftplan; + + // set up bin size +++ (for other methods) and related malloc based on gpu method + // assign memory for index after sorting (can be done in setup_plan) + // bin sorting (for other methods) + + if(ier == 1){ + printf("errors in gridder setting\n"); + return ier; + } + // fw (conv res set) + checkCudaErrors(cudaMalloc((void**)&d_fw,sizeof(CUCPX)*nf1*nf2)); + checkCudaErrors(cudaMemset(d_fw, 0, sizeof(CUCPX)*nf1*nf2)); + plan->fw = d_fw; + // fk malloc and set + // checkCudaErrors(cudaMalloc((void**)&d_fk,sizeof(CUCPX)*N1*N2)); + plan->fk = d_fk; + + // calulating result + curafft_deconv(plan); + printf("deconv result printing...\n"); + CPX *fw = (CPX *)malloc(sizeof(CPX)*nf1*nf2); + cudaMemcpy(fw,plan->fw,sizeof(CUCPX)*nf1*nf2,cudaMemcpyDeviceToHost); + for(int i=0; ifw,sizeof(CUCPX)*nf1*nf2,cudaMemcpyDeviceToHost); + for(int i=0; ifftplan, plan->fw, plan->fw, plan->iflag); +// #ifdef DEBUG + printf("fft result printing...\n"); + // CPX *fw = (CPX *)malloc(sizeof(CPX)*nf1*nf2); + cudaMemcpy(fw,plan->fw,sizeof(CUCPX)*nf1*nf2,cudaMemcpyDeviceToHost); + for(int i=0; id_c,sizeof(CUCPX)*M, cudaMemcpyDeviceToHost)); + + // result printing + printf("final result printing...\n"); + for(int i=0; ifk_max)fk_max = abs(c[i].real()); + } + printf("fk max %lf\n",fk_max); + for(int i=0; imax) max = temp; + if(temp/fk_max > l2_max) l2_max = temp/fk_max; + } + printf("maximal abs error %.5g, maximal l2 error %.5g\n",max,l2_max); + + //free + curafft_free(plan); + free(fk); + free(u); + free(v); + free(c); + + return ier; +} \ No newline at end of file diff --git a/cuda_code/nulls_14.cu b/cuda_code/nulls_14.cu new file mode 100644 index 0000000000000000000000000000000000000000..5ce1a7af4ad4386dde95b7e7aafc58260db63fdb --- /dev/null +++ b/cuda_code/nulls_14.cu @@ -0,0 +1,486 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +namespace { // anonymous + +static constexpr int BLOCK_SIZE = 256; + +template +__global__ void replace_nulls_strings(cudf::column_device_view input, + cudf::column_device_view replacement, + cudf::bitmask_type* output_valid, + cudf::size_type* offsets, + char* chars, + cudf::size_type* valid_counter) +{ + cudf::size_type nrows = input.size(); + cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; + + uint32_t active_mask = 0xffffffff; + active_mask = __ballot_sync(active_mask, i < nrows); + auto const lane_id{threadIdx.x % cudf::detail::warp_size}; + uint32_t valid_sum{0}; + + while (i < nrows) { + bool input_is_valid = input.is_valid_nocheck(i); + bool output_is_valid = true; + + if (replacement_has_nulls && !input_is_valid) { + output_is_valid = replacement.is_valid_nocheck(i); + } + + cudf::string_view out; + if (input_is_valid) { + out = input.element(i); + } else if (output_is_valid) { + out = replacement.element(i); + } + + bool nonzero_output = (input_is_valid || output_is_valid); + + if (phase == 0) { + offsets[i] = nonzero_output ? out.size_bytes() : 0; + uint32_t bitmask = __ballot_sync(active_mask, output_is_valid); + if (0 == lane_id) { + output_valid[cudf::word_index(i)] = bitmask; + valid_sum += __popc(bitmask); + } + } else if (phase == 1) { + if (nonzero_output) std::memcpy(chars + offsets[i], out.data(), out.size_bytes()); + } + + i += blockDim.x * gridDim.x; + active_mask = __ballot_sync(active_mask, i < nrows); + } + + // Compute total valid count for this block and add it to global count + uint32_t block_valid_count = cudf::detail::single_lane_block_sum_reduce(valid_sum); + // one thread computes and adds to output_valid_count + if (threadIdx.x == 0) { atomicAdd(valid_counter, block_valid_count); } +} + +template +__global__ void replace_nulls(cudf::column_device_view input, + cudf::column_device_view replacement, + cudf::mutable_column_device_view output, + cudf::size_type* output_valid_count) +{ + cudf::size_type nrows = input.size(); + cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; + + uint32_t active_mask = 0xffffffff; + active_mask = __ballot_sync(active_mask, i < nrows); + auto const lane_id{threadIdx.x % cudf::detail::warp_size}; + uint32_t valid_sum{0}; + + while (i < nrows) { + bool input_is_valid = input.is_valid_nocheck(i); + bool output_is_valid = true; + if (input_is_valid) { + output.data()[i] = input.element(i); + } else { + if (replacement_has_nulls) { output_is_valid = replacement.is_valid_nocheck(i); } + output.data()[i] = replacement.element(i); + } + + /* output valid counts calculations*/ + if (replacement_has_nulls) { + uint32_t bitmask = __ballot_sync(active_mask, output_is_valid); + if (0 == lane_id) { + output.set_mask_word(cudf::word_index(i), bitmask); + valid_sum += __popc(bitmask); + } + } + + i += blockDim.x * gridDim.x; + active_mask = __ballot_sync(active_mask, i < nrows); + } + if (replacement_has_nulls) { + // Compute total valid count for this block and add it to global count + uint32_t block_valid_count = + cudf::detail::single_lane_block_sum_reduce(valid_sum); + // one thread computes and adds to output_valid_count + if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); } + } +} + +/** + * @brief Functor 
called by the `type_dispatcher` in order to invoke and instantiate + * `replace_nulls` with the appropriate data types. + */ +struct replace_nulls_column_kernel_forwarder { + template ()>* = nullptr> + std::unique_ptr operator()(cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + cudf::size_type nrows = input.size(); + cudf::detail::grid_1d grid{nrows, BLOCK_SIZE}; + + auto output = + cudf::detail::allocate_like(input, + input.size(), + replacement.has_nulls() ? cudf::mask_allocation_policy::ALWAYS + : cudf::mask_allocation_policy::NEVER, + stream, + mr); + + auto output_view = output->mutable_view(); + + auto replace = replace_nulls; + if (output_view.nullable()) replace = replace_nulls; + + auto device_in = cudf::column_device_view::create(input); + auto device_out = cudf::mutable_column_device_view::create(output_view); + auto device_replacement = cudf::column_device_view::create(replacement); + + rmm::device_scalar valid_counter(0, stream); + cudf::size_type* valid_count = valid_counter.data(); + + replace<<>>( + *device_in, *device_replacement, *device_out, valid_count); + + if (output_view.nullable()) { + output->set_null_count(output->size() - valid_counter.value(stream)); + } + + return output; + } + + template ()>* = nullptr> + std::unique_ptr operator()(cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + CUDF_FAIL("No specialization exists for the given type."); + } +}; + +template <> +std::unique_ptr replace_nulls_column_kernel_forwarder::operator()( + cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + rmm::device_scalar valid_counter(0, stream); + cudf::size_type* valid_count = valid_counter.data(); + + auto replace_first = replace_nulls_strings<0, false>; + auto replace_second = replace_nulls_strings<1, false>; + if (replacement.has_nulls()) { + replace_first = replace_nulls_strings<0, true>; + replace_second = replace_nulls_strings<1, true>; + } + + // Create new offsets column to use in kernel + std::unique_ptr sizes = cudf::make_numeric_column( + cudf::data_type(cudf::type_id::INT32), input.size(), cudf::mask_state::UNALLOCATED, stream); + + auto sizes_view = sizes->mutable_view(); + auto device_in = cudf::column_device_view::create(input); + auto device_replacement = cudf::column_device_view::create(replacement); + + rmm::device_buffer valid_bits = + cudf::detail::create_null_mask(input.size(), cudf::mask_state::UNINITIALIZED, stream, mr); + + // Call first pass kernel to get sizes in offsets + cudf::detail::grid_1d grid{input.size(), BLOCK_SIZE, 1}; + replace_first<<>>( + *device_in, + *device_replacement, + reinterpret_cast(valid_bits.data()), + sizes_view.begin(), + nullptr, + valid_count); + + std::unique_ptr offsets = cudf::strings::detail::make_offsets_child_column( + sizes_view.begin(), sizes_view.end(), stream, mr); + auto offsets_view = offsets->mutable_view(); + + int32_t size; + CUDA_TRY(cudaMemcpyAsync( + &size, offsets_view.end() - 1, sizeof(int32_t), cudaMemcpyDefault, stream.value())); + + // Allocate chars array and output null mask + cudf::size_type null_count = input.size() - valid_counter.value(stream); + std::unique_ptr output_chars = + cudf::strings::detail::create_chars_child_column(input.size(), null_count, size, stream, mr); + + auto output_chars_view = 
output_chars->mutable_view(); + + replace_second<<>>( + *device_in, + *device_replacement, + reinterpret_cast(valid_bits.data()), + offsets_view.begin(), + output_chars_view.data(), + valid_count); + + return cudf::make_strings_column(input.size(), + std::move(offsets), + std::move(output_chars), + input.size() - valid_counter.value(stream), + std::move(valid_bits), + stream, + mr); +} + +template <> +std::unique_ptr replace_nulls_column_kernel_forwarder::operator()( + cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + cudf::dictionary_column_view dict_input(input); + cudf::dictionary_column_view dict_repl(replacement); + return cudf::dictionary::detail::replace_nulls(dict_input, dict_repl, stream, mr); +} + +template +struct replace_nulls_functor { + T* value_it; + replace_nulls_functor(T* _value_it) : value_it(_value_it) {} + __device__ T operator()(T input, bool is_valid) { return is_valid ? input : *value_it; } +}; + +/** + * @brief Functor called by the `type_dispatcher` in order to invoke and instantiate + * `replace_nulls` with the appropriate data types. + */ +struct replace_nulls_scalar_kernel_forwarder { + template ()>* = nullptr> + std::unique_ptr operator()(cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch"); + std::unique_ptr output = + cudf::allocate_like(input, cudf::mask_allocation_policy::NEVER, mr); + auto output_view = output->mutable_view(); + + using Type = cudf::device_storage_type_t; + using ScalarType = cudf::scalar_type_t; + auto s1 = static_cast(replacement); + auto device_in = cudf::column_device_view::create(input); + + auto func = replace_nulls_functor{s1.data()}; + thrust::transform(rmm::exec_policy(stream), + input.data(), + input.data() + input.size(), + cudf::detail::make_validity_iterator(*device_in), + output_view.data(), + func); + return output; + } + + template ()>* = nullptr> + std::unique_ptr operator()(cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + CUDF_FAIL("No specialization exists for the given type."); + } +}; + +template <> +std::unique_ptr replace_nulls_scalar_kernel_forwarder::operator()( + cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch"); + cudf::strings_column_view input_s(input); + const cudf::string_scalar& repl = static_cast(replacement); + return cudf::strings::detail::replace_nulls(input_s, repl, stream, mr); +} + +template <> +std::unique_ptr replace_nulls_scalar_kernel_forwarder::operator()( + cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + cudf::dictionary_column_view dict_input(input); + return cudf::dictionary::detail::replace_nulls(dict_input, replacement, stream, mr); +} + +/** + * @brief Functor used by `inclusive_scan` to determine the index to gather from in + * the result column. When current row in input column is NULL, return previous + * accumulated index, otherwise return the current index. The second element in + * the return tuple is discarded. 
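+ * Illustrative example (not from the original source): for a column whose
+ * validity mask is [1, 0, 0, 1, 0], a forward (PRECEDING) scan with this
+ * functor yields the gather map [0, 0, 0, 3, 3]; each null row gathers from
+ * the most recent preceding valid row.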
+ */ +struct replace_policy_functor { + __device__ thrust::tuple operator()( + thrust::tuple const& lhs, + thrust::tuple const& rhs) + { + return thrust::get<1>(rhs) ? thrust::make_tuple(thrust::get<0>(rhs), true) + : thrust::make_tuple(thrust::get<0>(lhs), true); + } +}; + +/** + * @brief Function used by replace_nulls policy + */ + +std::unique_ptr replace_nulls_policy_impl(cudf::column_view const& input, + cudf::replace_policy const& replace_policy, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + auto device_in = cudf::column_device_view::create(input); + auto index = thrust::make_counting_iterator(0); + auto valid_it = cudf::detail::make_validity_iterator(*device_in); + auto in_begin = thrust::make_zip_iterator(thrust::make_tuple(index, valid_it)); + + rmm::device_vector gather_map(input.size()); + auto gm_begin = thrust::make_zip_iterator( + thrust::make_tuple(gather_map.begin(), thrust::make_discard_iterator())); + + auto func = replace_policy_functor(); + if (replace_policy == cudf::replace_policy::PRECEDING) { + thrust::inclusive_scan( + rmm::exec_policy(stream), in_begin, in_begin + input.size(), gm_begin, func); + } else { + auto in_rbegin = thrust::make_reverse_iterator(in_begin + input.size()); + auto gm_rbegin = thrust::make_reverse_iterator(gm_begin + gather_map.size()); + thrust::inclusive_scan( + rmm::exec_policy(stream), in_rbegin, in_rbegin + input.size(), gm_rbegin, func); + } + + auto output = cudf::detail::gather(cudf::table_view({input}), + gather_map.begin(), + gather_map.end(), + cudf::out_of_bounds_policy::DONT_CHECK); + + return std::move(output->release()[0]); +} + +} // end anonymous namespace + +namespace cudf { +namespace detail { +std::unique_ptr replace_nulls(cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch"); + CUDF_EXPECTS(replacement.size() == input.size(), "Column size mismatch"); + + if (input.is_empty()) { return cudf::empty_like(input); } + + if (!input.has_nulls()) { return std::make_unique(input); } + + return cudf::type_dispatcher( + input.type(), replace_nulls_column_kernel_forwarder{}, input, replacement, stream, mr); +} + +std::unique_ptr replace_nulls(cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (input.is_empty()) { return cudf::empty_like(input); } + + if (!input.has_nulls() || !replacement.is_valid()) { + return std::make_unique(input, stream, mr); + } + + return cudf::type_dispatcher( + input.type(), replace_nulls_scalar_kernel_forwarder{}, input, replacement, stream, mr); +} + +std::unique_ptr replace_nulls(cudf::column_view const& input, + cudf::replace_policy const& replace_policy, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (input.is_empty()) { return cudf::empty_like(input); } + + if (!input.has_nulls()) { return std::make_unique(input, stream, mr); } + + return replace_nulls_policy_impl(input, replace_policy, stream, mr); +} + +} // namespace detail + +std::unique_ptr replace_nulls(cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return cudf::detail::replace_nulls(input, replacement, rmm::cuda_stream_default, mr); +} + +std::unique_ptr replace_nulls(cudf::column_view const& input, + cudf::scalar const& replacement, + 
rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return cudf::detail::replace_nulls(input, replacement, rmm::cuda_stream_default, mr); +} + +std::unique_ptr replace_nulls(column_view const& input, + replace_policy const& replace_policy, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return cudf::detail::replace_nulls(input, replace_policy, rmm::cuda_stream_default, mr); +} + +} // namespace cudf diff --git a/cuda_code/nulls_4.cu b/cuda_code/nulls_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..39fa62c99b09ed1055e56075b7bad013e7039511 --- /dev/null +++ b/cuda_code/nulls_4.cu @@ -0,0 +1,407 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace { // anonymous + +static constexpr int BLOCK_SIZE = 256; + +template +__global__ void replace_nulls_strings(cudf::column_device_view input, + cudf::column_device_view replacement, + cudf::bitmask_type* output_valid, + cudf::size_type* offsets, + char* chars, + cudf::size_type* valid_counter) +{ + cudf::size_type nrows = input.size(); + cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; + + uint32_t active_mask = 0xffffffff; + active_mask = __ballot_sync(active_mask, i < nrows); + auto const lane_id{threadIdx.x % cudf::detail::warp_size}; + uint32_t valid_sum{0}; + + while (i < nrows) { + bool input_is_valid = input.is_valid_nocheck(i); + bool output_is_valid = true; + + if (replacement_has_nulls && !input_is_valid) { + output_is_valid = replacement.is_valid_nocheck(i); + } + + cudf::string_view out; + if (input_is_valid) { + out = input.element(i); + } else if (output_is_valid) { + out = replacement.element(i); + } + + bool nonzero_output = (input_is_valid || output_is_valid); + + if (phase == 0) { + offsets[i] = nonzero_output ? 
out.size_bytes() : 0; + uint32_t bitmask = __ballot_sync(active_mask, output_is_valid); + if (0 == lane_id) { + output_valid[cudf::word_index(i)] = bitmask; + valid_sum += __popc(bitmask); + } + } else if (phase == 1) { + if (nonzero_output) std::memcpy(chars + offsets[i], out.data(), out.size_bytes()); + } + + i += blockDim.x * gridDim.x; + active_mask = __ballot_sync(active_mask, i < nrows); + } + + // Compute total valid count for this block and add it to global count + uint32_t block_valid_count = cudf::detail::single_lane_block_sum_reduce(valid_sum); + // one thread computes and adds to output_valid_count + if (threadIdx.x == 0) { atomicAdd(valid_counter, block_valid_count); } +} + +template +__global__ void replace_nulls(cudf::column_device_view input, + cudf::column_device_view replacement, + cudf::mutable_column_device_view output, + cudf::size_type* output_valid_count) +{ + cudf::size_type nrows = input.size(); + cudf::size_type i = blockIdx.x * blockDim.x + threadIdx.x; + + uint32_t active_mask = 0xffffffff; + active_mask = __ballot_sync(active_mask, i < nrows); + auto const lane_id{threadIdx.x % cudf::detail::warp_size}; + uint32_t valid_sum{0}; + + while (i < nrows) { + bool input_is_valid = input.is_valid_nocheck(i); + bool output_is_valid = true; + if (input_is_valid) { + output.data()[i] = input.element(i); + } else { + if (replacement_has_nulls) { output_is_valid = replacement.is_valid_nocheck(i); } + output.data()[i] = replacement.element(i); + } + + /* output valid counts calculations*/ + if (replacement_has_nulls) { + uint32_t bitmask = __ballot_sync(active_mask, output_is_valid); + if (0 == lane_id) { + output.set_mask_word(cudf::word_index(i), bitmask); + valid_sum += __popc(bitmask); + } + } + + i += blockDim.x * gridDim.x; + active_mask = __ballot_sync(active_mask, i < nrows); + } + if (replacement_has_nulls) { + // Compute total valid count for this block and add it to global count + uint32_t block_valid_count = + cudf::detail::single_lane_block_sum_reduce(valid_sum); + // one thread computes and adds to output_valid_count + if (threadIdx.x == 0) { atomicAdd(output_valid_count, block_valid_count); } + } +} + +/** + * @brief Functor called by the `type_dispatcher` in order to invoke and instantiate + * `replace_nulls` with the appropriate data types. + */ +struct replace_nulls_column_kernel_forwarder { + template ()>* = nullptr> + std::unique_ptr operator()(cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + cudf::size_type nrows = input.size(); + cudf::detail::grid_1d grid{nrows, BLOCK_SIZE}; + + auto output = + cudf::detail::allocate_like(input, + input.size(), + replacement.has_nulls() ? 
cudf::mask_allocation_policy::ALWAYS + : cudf::mask_allocation_policy::NEVER, + stream, + mr); + + auto output_view = output->mutable_view(); + + auto replace = replace_nulls; + if (output_view.nullable()) replace = replace_nulls; + + auto device_in = cudf::column_device_view::create(input); + auto device_out = cudf::mutable_column_device_view::create(output_view); + auto device_replacement = cudf::column_device_view::create(replacement); + + rmm::device_scalar valid_counter(0, stream); + cudf::size_type* valid_count = valid_counter.data(); + + replace<<>>( + *device_in, *device_replacement, *device_out, valid_count); + + if (output_view.nullable()) { + output->set_null_count(output->size() - valid_counter.value(stream)); + } + + return output; + } + + template ()>* = nullptr> + std::unique_ptr operator()(cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + CUDF_FAIL("No specialization exists for the given type."); + } +}; + +template <> +std::unique_ptr replace_nulls_column_kernel_forwarder::operator()( + cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + rmm::device_scalar valid_counter(0, stream); + cudf::size_type* valid_count = valid_counter.data(); + + auto replace_first = replace_nulls_strings<0, false>; + auto replace_second = replace_nulls_strings<1, false>; + if (replacement.has_nulls()) { + replace_first = replace_nulls_strings<0, true>; + replace_second = replace_nulls_strings<1, true>; + } + + // Create new offsets column to use in kernel + std::unique_ptr sizes = cudf::make_numeric_column( + cudf::data_type(cudf::type_id::INT32), input.size(), cudf::mask_state::UNALLOCATED, stream); + + auto sizes_view = sizes->mutable_view(); + auto device_in = cudf::column_device_view::create(input); + auto device_replacement = cudf::column_device_view::create(replacement); + + rmm::device_buffer valid_bits = + cudf::detail::create_null_mask(input.size(), cudf::mask_state::UNINITIALIZED, stream, mr); + + // Call first pass kernel to get sizes in offsets + cudf::detail::grid_1d grid{input.size(), BLOCK_SIZE, 1}; + replace_first<<>>( + *device_in, + *device_replacement, + reinterpret_cast(valid_bits.data()), + sizes_view.begin(), + nullptr, + valid_count); + + std::unique_ptr offsets = cudf::strings::detail::make_offsets_child_column( + sizes_view.begin(), sizes_view.end(), stream, mr); + auto offsets_view = offsets->mutable_view(); + + int32_t size; + CUDA_TRY(cudaMemcpyAsync( + &size, offsets_view.end() - 1, sizeof(int32_t), cudaMemcpyDefault, stream.value())); + + // Allocate chars array and output null mask + cudf::size_type null_count = input.size() - valid_counter.value(stream); + std::unique_ptr output_chars = + cudf::strings::detail::create_chars_child_column(input.size(), null_count, size, stream, mr); + + auto output_chars_view = output_chars->mutable_view(); + + replace_second<<>>( + *device_in, + *device_replacement, + reinterpret_cast(valid_bits.data()), + offsets_view.begin(), + output_chars_view.data(), + valid_count); + + return cudf::make_strings_column(input.size(), + std::move(offsets), + std::move(output_chars), + input.size() - valid_counter.value(stream), + std::move(valid_bits), + stream, + mr); +} + +template <> +std::unique_ptr replace_nulls_column_kernel_forwarder::operator()( + cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::cuda_stream_view 
stream, + rmm::mr::device_memory_resource* mr) +{ + cudf::dictionary_column_view dict_input(input); + cudf::dictionary_column_view dict_repl(replacement); + return cudf::dictionary::detail::replace_nulls(dict_input, dict_repl, stream, mr); +} + +template +struct replace_nulls_functor { + T* value_it; + replace_nulls_functor(T* _value_it) : value_it(_value_it) {} + __device__ T operator()(T input, bool is_valid) { return is_valid ? input : *value_it; } +}; + +/** + * @brief Functor called by the `type_dispatcher` in order to invoke and instantiate + * `replace_nulls` with the appropriate data types. + */ +struct replace_nulls_scalar_kernel_forwarder { + template ()>* = nullptr> + std::unique_ptr operator()(cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch"); + std::unique_ptr output = + cudf::allocate_like(input, cudf::mask_allocation_policy::NEVER, mr); + auto output_view = output->mutable_view(); + + using Type = cudf::device_storage_type_t; + using ScalarType = cudf::scalar_type_t; + auto s1 = static_cast(replacement); + auto device_in = cudf::column_device_view::create(input); + + auto func = replace_nulls_functor{s1.data()}; + thrust::transform(rmm::exec_policy(stream)->on(stream.value()), + input.data(), + input.data() + input.size(), + cudf::detail::make_validity_iterator(*device_in), + output_view.data(), + func); + return output; + } + + template ()>* = nullptr> + std::unique_ptr operator()(cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + CUDF_FAIL("No specialization exists for the given type."); + } +}; + +template <> +std::unique_ptr replace_nulls_scalar_kernel_forwarder::operator()( + cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch"); + cudf::strings_column_view input_s(input); + const cudf::string_scalar& repl = static_cast(replacement); + return cudf::strings::detail::replace_nulls(input_s, repl, stream, mr); +} + +template <> +std::unique_ptr replace_nulls_scalar_kernel_forwarder::operator()( + cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + cudf::dictionary_column_view dict_input(input); + return cudf::dictionary::detail::replace_nulls(dict_input, replacement, stream, mr); +} + +} // end anonymous namespace + +namespace cudf { +namespace detail { +std::unique_ptr replace_nulls(cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + CUDF_EXPECTS(input.type() == replacement.type(), "Data type mismatch"); + CUDF_EXPECTS(replacement.size() == input.size(), "Column size mismatch"); + + if (input.is_empty()) { return cudf::empty_like(input); } + + if (!input.has_nulls()) { return std::make_unique(input); } + + return cudf::type_dispatcher( + input.type(), replace_nulls_column_kernel_forwarder{}, input, replacement, stream, mr); +} + +std::unique_ptr replace_nulls(cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) +{ + if (input.is_empty()) { return cudf::empty_like(input); } + + if 
(!input.has_nulls() || !replacement.is_valid()) { + return std::make_unique(input, stream, mr); + } + + return cudf::type_dispatcher( + input.type(), replace_nulls_scalar_kernel_forwarder{}, input, replacement, stream, mr); +} + +} // namespace detail + +std::unique_ptr replace_nulls(cudf::column_view const& input, + cudf::column_view const& replacement, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return cudf::detail::replace_nulls(input, replacement, rmm::cuda_stream_default, mr); +} + +std::unique_ptr replace_nulls(cudf::column_view const& input, + cudf::scalar const& replacement, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return cudf::detail::replace_nulls(input, replacement, rmm::cuda_stream_default, mr); +} +} // namespace cudf diff --git a/cuda_code/nv_kernel2_1.cu b/cuda_code/nv_kernel2_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..84b182e56fd118738ec30d59cd2724b4cf4ab83d --- /dev/null +++ b/cuda_code/nv_kernel2_1.cu @@ -0,0 +1,1726 @@ +// +// Experimental Kernel for Kepler (Compute 3.5) devices +// code submitted by nVidia performance engineer Alexey Panteleev +// with modifications by Christian Buchner +// +// for Compute 3.5 +// NOTE: compile this .cu module for compute_35,sm_35 with --maxrregcount=80 +// for Compute 3.0 +// NOTE: compile this .cu module for compute_30,sm_30 with --maxrregcount=63 +// + +#include + +#include "cuda_runtime.h" +#include "miner.h" + +#include "salsa_kernel.h" +#include "nv_kernel2.h" +#include "cuda_helper.h" + +#define THREADS_PER_WU 1 // single thread per hash + +#if __CUDA_ARCH__ < 350 + // Kepler (Compute 3.0) + #define __ldg(x) (*(x)) +#endif + +// grab lane ID +static __device__ __inline__ unsigned int __laneId() { unsigned int laneId; asm( "mov.u32 %0, %%laneid;" : "=r"( laneId ) ); return laneId; } + +// forward references +template __global__ void nv2_scrypt_core_kernelA(uint32_t *g_idata, int begin, int end); +template __global__ void nv2_scrypt_core_kernelB(uint32_t *g_odata, int begin, int end); +template __global__ void nv2_scrypt_core_kernelA_LG(uint32_t *g_idata, int begin, int end, unsigned int LOOKUP_GAP); +template __global__ void nv2_scrypt_core_kernelB_LG(uint32_t *g_odata, int begin, int end, unsigned int LOOKUP_GAP); + +// scratchbuf constants (pointers to scratch buffer for each work unit) +__constant__ uint32_t* c_V[TOTAL_WARP_LIMIT]; + +// iteration count N +__constant__ uint32_t c_N; +__constant__ uint32_t c_N_1; // N - 1 +__constant__ uint32_t c_spacing; // (N+LOOKUP_GAP-1)/LOOKUP_GAP + + +NV2Kernel::NV2Kernel() : KernelInterface() +{ +} + +void NV2Kernel::set_scratchbuf_constants(int MAXWARPS, uint32_t** h_V) +{ + checkCudaErrors(cudaMemcpyToSymbol(c_V, h_V, MAXWARPS*sizeof(uint32_t*), 0, cudaMemcpyHostToDevice)); +} + +bool NV2Kernel::run_kernel(dim3 grid, dim3 threads, int WARPS_PER_BLOCK, int thr_id, cudaStream_t stream, uint32_t* d_idata, uint32_t* d_odata, unsigned int N, unsigned int LOOKUP_GAP, bool interactive, bool benchmark, int texture_cache) +{ + bool success = true; + + // make some constants available to kernel, update only initially and when changing + static int prev_N[MAX_DEVICES] = {0}; + if (N != prev_N[thr_id]) { + uint32_t h_N = N; + uint32_t h_N_1 = N-1; + uint32_t h_spacing = (N+LOOKUP_GAP-1)/LOOKUP_GAP; + + cudaMemcpyToSymbolAsync(c_N, &h_N, sizeof(uint32_t), 0, cudaMemcpyHostToDevice, stream); + cudaMemcpyToSymbolAsync(c_N_1, &h_N_1, sizeof(uint32_t), 0, cudaMemcpyHostToDevice, stream); + cudaMemcpyToSymbolAsync(c_spacing, 
&h_spacing, sizeof(uint32_t), 0, cudaMemcpyHostToDevice, stream); + + prev_N[thr_id] = N; + } + + // First phase: Sequential writes to scratchpad. + const int batch = device_batchsize[thr_id]; + unsigned int pos = 0; + + do + { + if (LOOKUP_GAP == 1) { + if (IS_SCRYPT()) nv2_scrypt_core_kernelA <<< grid, threads, 0, stream >>>(d_idata, pos, min(pos+batch, N)); + if (IS_SCRYPT_JANE()) nv2_scrypt_core_kernelA<<< grid, threads, 0, stream >>>(d_idata, pos, min(pos+batch, N)); + } else { + if (IS_SCRYPT()) nv2_scrypt_core_kernelA_LG <<< grid, threads, 0, stream >>>(d_idata, pos, min(pos+batch, N), LOOKUP_GAP); + if (IS_SCRYPT_JANE()) nv2_scrypt_core_kernelA_LG<<< grid, threads, 0, stream >>>(d_idata, pos, min(pos+batch, N), LOOKUP_GAP); + } + pos += batch; + } while (pos < N); + + // Second phase: Random read access from scratchpad. + pos = 0; + do + { + if (LOOKUP_GAP == 1) { + if (IS_SCRYPT()) nv2_scrypt_core_kernelB <<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N)); + if (IS_SCRYPT_JANE()) nv2_scrypt_core_kernelB <<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N)); + } else { + if (IS_SCRYPT()) nv2_scrypt_core_kernelB_LG <<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N), LOOKUP_GAP); + if (IS_SCRYPT_JANE()) nv2_scrypt_core_kernelB_LG <<< grid, threads, 0, stream >>>(d_odata, pos, min(pos+batch, N), LOOKUP_GAP); + } + + pos += batch; + } while (pos < N); + + return success; +} + +//static __device__ uint4& operator^=(uint4& left, const uint4& right) +//{ +// left.x ^= right.x; +// left.y ^= right.y; +// left.z ^= right.z; +// left.w ^= right.w; +// return left; +//} + +__device__ __forceinline__ uint4 __shfl(const uint4 val, unsigned int lane, unsigned int width) +{ + return make_uint4( + (unsigned int)__shfl((int)val.x, lane, width), + (unsigned int)__shfl((int)val.y, lane, width), + (unsigned int)__shfl((int)val.z, lane, width), + (unsigned int)__shfl((int)val.w, lane, width)); +} + +__device__ __forceinline__ void __transposed_write_BC(uint4 (&B)[4], uint4 (&C)[4], uint4 *D, int spacing) +{ + unsigned int laneId = __laneId(); + + unsigned int lane8 = laneId&7; + unsigned int tile = laneId/8; + + uint4 T1[8], T2[8]; + + /* Source matrix, A-H are threads, 0-7 are data items, thread A is marked with `*`: + + *A0 B0 C0 D0 E0 F0 G0 H0 + *A1 B1 C1 D1 E1 F1 G1 H1 + *A2 B2 C2 D2 E2 F2 G2 H2 + *A3 B3 C3 D3 E3 F3 G3 H3 + *A4 B4 C4 D4 E4 F4 G4 H4 + *A5 B5 C5 D5 E5 F5 G5 H5 + *A6 B6 C6 D6 E6 F6 G6 H6 + *A7 B7 C7 D7 E7 F7 G7 H7 + */ + + // rotate rows + T1[0] = B[0]; + T1[1] = __shfl(B[1], lane8 + 7, 8); + T1[2] = __shfl(B[2], lane8 + 6, 8); + T1[3] = __shfl(B[3], lane8 + 5, 8); + T1[4] = __shfl(C[0], lane8 + 4, 8); + T1[5] = __shfl(C[1], lane8 + 3, 8); + T1[6] = __shfl(C[2], lane8 + 2, 8); + T1[7] = __shfl(C[3], lane8 + 1, 8); + + /* Matrix after row rotates: + + *A0 B0 C0 D0 E0 F0 G0 H0 + H1 *A1 B1 C1 D1 E1 F1 G1 + G2 H2 *A2 B2 C2 D2 E2 F2 + F3 G3 H3 *A3 B3 C3 D3 E3 + E4 F4 G4 H4 *A4 B4 C4 D4 + D5 E5 F5 G5 H5 *A5 B5 C5 + C6 D6 E6 F6 G6 H6 *A6 B6 + B7 C7 D7 E7 F7 G7 H7 *A7 + */ + + // rotate columns up using a barrel shifter simulation + // column X is rotated up by (X+1) items +#pragma unroll 8 + for (int n = 0; n < 8; n++) T2[n] = ((lane8 + 1) & 1) ? T1[(n + 1) &7] : T1[n]; +#pragma unroll 8 + for (int n = 0; n < 8; n++) T1[n] = ((lane8 + 1) & 2) ? T2[(n + 2) &7] : T2[n]; +#pragma unroll 8 + for (int n = 0; n < 8; n++) T2[n] = ((lane8 + 1) & 4) ? 
T1[(n + 4) & 7] : T1[n]; + + /* Matrix after column rotates: + + H1 H2 H3 H4 H5 H6 H7 H0 + G2 G3 G4 G5 G6 G7 G0 G1 + F3 F4 F5 F6 F7 F0 F1 F2 + E4 E5 E6 E7 E0 E1 E2 E3 + D5 D6 D7 D0 D1 D2 D3 D4 + C6 C7 C0 C1 C2 C3 C4 C5 + B7 B0 B1 B2 B3 B4 B5 B6 + *A0 *A1 *A2 *A3 *A4 *A5 *A6 *A7 + */ + + // rotate rows again using address math and write to D, in reverse row order + D[spacing*2*(32*tile )+ lane8 ] = T2[7]; + D[spacing * 2 * (32 * tile + 4) + (lane8 + 7) & 7] = T2[6]; + D[spacing * 2 * (32 * tile + 8) + (lane8 + 6) & 7] = T2[5]; + D[spacing * 2 * (32 * tile + 12) + (lane8 + 5) & 7] = T2[4]; + D[spacing * 2 * (32 * tile + 16) + (lane8 + 4) & 7] = T2[3]; + D[spacing * 2 * (32 * tile + 20) + (lane8 + 3) & 7] = T2[2]; + D[spacing * 2 * (32 * tile + 24) + (lane8 + 2) & 7] = T2[1]; + D[spacing * 2 * (32 * tile + 28) + (lane8 + 1) & 7] = T2[0]; +} + +__device__ __forceinline__ void __transposed_read_BC(const uint4 *S, uint4 (&B)[4], uint4 (&C)[4], int spacing, int row) +{ + unsigned int laneId = __laneId(); + + unsigned int lane8 = laneId & 7; + unsigned int tile = laneId/8; + + // Perform the same transposition as in __transposed_write_BC, but in reverse order. + // See the illustrations in comments for __transposed_write_BC. + + // read and rotate rows, in reverse row order + uint4 T1[8], T2[8]; + T1[7] = __ldg(&S[(spacing*2*(32*tile ) + lane8 + 8*__shfl(row, 0, 8))]); + T1[6] = __ldg(&S[(spacing * 2 * (32 * tile + 4) + (lane8 + 7) & 7 + 8 * __shfl(row, 1, 8))]); + T1[5] = __ldg(&S[(spacing * 2 * (32 * tile + 8) + (lane8 + 6) & 7 + 8 * __shfl(row, 2, 8))]); + T1[4] = __ldg(&S[(spacing * 2 * (32 * tile + 12) + (lane8 + 5) & 7 + 8 * __shfl(row, 3, 8))]); + T1[3] = __ldg(&S[(spacing * 2 * (32 * tile + 16) + (lane8 + 4) & 7 + 8 * __shfl(row, 4, 8))]); + T1[2] = __ldg(&S[(spacing * 2 * (32 * tile + 20) + (lane8 + 3) & 7 + 8 * __shfl(row, 5, 8))]); + T1[1] = __ldg(&S[(spacing * 2 * (32 * tile + 24) + (lane8 + 2) & 7 + 8 * __shfl(row, 6, 8))]); + T1[0] = __ldg(&S[(spacing * 2 * (32 * tile + 28) + (lane8 + 1) & 7 + 8 * __shfl(row, 7, 8))]); + + // rotate columns down using a barrel shifter simulation + // column X is rotated down by (X+1) items, or up by (8-(X+1)) = (7-X) items +#pragma unroll 8 + for (int n = 0; n < 8; n++) T2[n] = ((7 - lane8) & 1) ? T1[(n + 1) & 7] : T1[n]; +#pragma unroll 8 + for (int n = 0; n < 8; n++) T1[n] = ((7 - lane8) & 2) ? T2[(n + 2) &7] : T2[n]; +#pragma unroll 8 + for (int n = 0; n < 8; n++) T2[n] = ((7 - lane8) & 4) ? 
T1[(n + 4) & 7] : T1[n]; + + // rotate rows + B[0] = T2[0]; + B[1] = __shfl(T2[1], lane8 + 1, 8); + B[2] = __shfl(T2[2], lane8 + 2, 8); + B[3] = __shfl(T2[3], lane8 + 3, 8); + C[0] = __shfl(T2[4], lane8 + 4, 8); + C[1] = __shfl(T2[5], lane8 + 5, 8); + C[2] = __shfl(T2[6], lane8 + 6, 8); + C[3] = __shfl(T2[7], lane8 + 7, 8); + +} + +__device__ __forceinline__ void __transposed_xor_BC(const uint4 *S, uint4 (&B)[4], uint4 (&C)[4], int spacing, int row) +{ + uint4 BT[4], CT[4]; + __transposed_read_BC(S, BT, CT, spacing, row); + +#pragma unroll 4 + for(int n = 0; n < 4; n++) + { + B[n] ^= BT[n]; + C[n] ^= CT[n]; + } +} + +#if __CUDA_ARCH__ < 350 + // Kepler (Compute 3.0) + #define ROTL(a, b) ((a)<<(b))|((a)>>(32-(b))) +#else + // Kepler (Compute 3.5) + #define ROTL(a, b) __funnelshift_l( a, a, b ); +#endif + + + +#if 0 + +#define QUARTER(a,b,c,d) \ + a += b; d ^= a; d = ROTL(d,16); \ + c += d; b ^= c; b = ROTL(b,12); \ + a += b; d ^= a; d = ROTL(d,8); \ + c += d; b ^= c; b = ROTL(b,7); + +static __device__ void xor_chacha8(uint4 *B, uint4 *C) +{ + uint32_t x[16]; + x[0]=(B[0].x ^= C[0].x); + x[1]=(B[0].y ^= C[0].y); + x[2]=(B[0].z ^= C[0].z); + x[3]=(B[0].w ^= C[0].w); + x[4]=(B[1].x ^= C[1].x); + x[5]=(B[1].y ^= C[1].y); + x[6]=(B[1].z ^= C[1].z); + x[7]=(B[1].w ^= C[1].w); + x[8]=(B[2].x ^= C[2].x); + x[9]=(B[2].y ^= C[2].y); + x[10]=(B[2].z ^= C[2].z); + x[11]=(B[2].w ^= C[2].w); + x[12]=(B[3].x ^= C[3].x); + x[13]=(B[3].y ^= C[3].y); + x[14]=(B[3].z ^= C[3].z); + x[15]=(B[3].w ^= C[3].w); + + /* Operate on columns. */ + QUARTER( x[0], x[4], x[ 8], x[12] ) + QUARTER( x[1], x[5], x[ 9], x[13] ) + QUARTER( x[2], x[6], x[10], x[14] ) + QUARTER( x[3], x[7], x[11], x[15] ) + + /* Operate on diagonals */ + QUARTER( x[0], x[5], x[10], x[15] ) + QUARTER( x[1], x[6], x[11], x[12] ) + QUARTER( x[2], x[7], x[ 8], x[13] ) + QUARTER( x[3], x[4], x[ 9], x[14] ) + + /* Operate on columns. */ + QUARTER( x[0], x[4], x[ 8], x[12] ) + QUARTER( x[1], x[5], x[ 9], x[13] ) + QUARTER( x[2], x[6], x[10], x[14] ) + QUARTER( x[3], x[7], x[11], x[15] ) + + /* Operate on diagonals */ + QUARTER( x[0], x[5], x[10], x[15] ) + QUARTER( x[1], x[6], x[11], x[12] ) + QUARTER( x[2], x[7], x[ 8], x[13] ) + QUARTER( x[3], x[4], x[ 9], x[14] ) + + /* Operate on columns. */ + QUARTER( x[0], x[4], x[ 8], x[12] ) + QUARTER( x[1], x[5], x[ 9], x[13] ) + QUARTER( x[2], x[6], x[10], x[14] ) + QUARTER( x[3], x[7], x[11], x[15] ) + + /* Operate on diagonals */ + QUARTER( x[0], x[5], x[10], x[15] ) + QUARTER( x[1], x[6], x[11], x[12] ) + QUARTER( x[2], x[7], x[ 8], x[13] ) + QUARTER( x[3], x[4], x[ 9], x[14] ) + + /* Operate on columns. 
*/ + QUARTER( x[0], x[4], x[ 8], x[12] ) + QUARTER( x[1], x[5], x[ 9], x[13] ) + QUARTER( x[2], x[6], x[10], x[14] ) + QUARTER( x[3], x[7], x[11], x[15] ) + + /* Operate on diagonals */ + QUARTER( x[0], x[5], x[10], x[15] ) + QUARTER( x[1], x[6], x[11], x[12] ) + QUARTER( x[2], x[7], x[ 8], x[13] ) + QUARTER( x[3], x[4], x[ 9], x[14] ) + + B[0].x += x[0]; B[0].y += x[1]; B[0].z += x[2]; B[0].w += x[3]; B[1].x += x[4]; B[1].y += x[5]; B[1].z += x[6]; B[1].w += x[7]; + B[2].x += x[8]; B[2].y += x[9]; B[2].z += x[10]; B[2].w += x[11]; B[3].x += x[12]; B[3].y += x[13]; B[3].z += x[14]; B[3].w += x[15]; +} + +#else + +#define ADD4(d1,d2,d3,d4,s1,s2,s3,s4) \ + d1 += s1; d2 += s2; d3 += s3; d4 += s4; + +#define XOR4(d1,d2,d3,d4,s1,s2,s3,s4) \ + d1 ^= s1; d2 ^= s2; d3 ^= s3; d4 ^= s4; + +#define ROTL4(d1,d2,d3,d4,amt) \ + d1 = ROTL(d1, amt); d2 = ROTL(d2, amt); d3 = ROTL(d3, amt); d4 = ROTL(d4, amt); + +#define QROUND(a1,a2,a3,a4, b1,b2,b3,b4, c1,c2,c3,c4, amt) \ + ADD4 (a1,a2,a3,a4, c1,c2,c3,c4) \ + XOR4 (b1,b2,b3,b4, a1,a2,a3,a4) \ + ROTL4(b1,b2,b3,b4, amt) + +static __device__ void xor_chacha8(uint4 *B, uint4 *C) +{ + uint32_t x[16]; + x[0]=(B[0].x ^= C[0].x); + x[1]=(B[0].y ^= C[0].y); + x[2]=(B[0].z ^= C[0].z); + x[3]=(B[0].w ^= C[0].w); + x[4]=(B[1].x ^= C[1].x); + x[5]=(B[1].y ^= C[1].y); + x[6]=(B[1].z ^= C[1].z); + x[7]=(B[1].w ^= C[1].w); + x[8]=(B[2].x ^= C[2].x); + x[9]=(B[2].y ^= C[2].y); + x[10]=(B[2].z ^= C[2].z); + x[11]=(B[2].w ^= C[2].w); + x[12]=(B[3].x ^= C[3].x); + x[13]=(B[3].y ^= C[3].y); + x[14]=(B[3].z ^= C[3].z); + x[15]=(B[3].w ^= C[3].w); + + /* Operate on columns. */ + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[12],x[13],x[14],x[15], x[ 4],x[ 5],x[ 6],x[ 7], 16); + QROUND(x[ 8],x[ 9],x[10],x[11], x[ 4],x[ 5],x[ 6],x[ 7], x[12],x[13],x[14],x[15], 12); + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[12],x[13],x[14],x[15], x[ 4],x[ 5],x[ 6],x[ 7], 8); + QROUND(x[ 8],x[ 9],x[10],x[11], x[ 4],x[ 5],x[ 6],x[ 7], x[12],x[13],x[14],x[15], 7); + + /* Operate on diagonals */ + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[15],x[12],x[13],x[14], x[ 5],x[ 6],x[ 7],x[ 4], 16); + QROUND(x[10],x[11],x[ 8],x[ 9], x[ 5],x[ 6],x[ 7],x[ 4], x[15],x[12],x[13],x[14], 12); + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[15],x[12],x[13],x[14], x[ 5],x[ 6],x[ 7],x[ 4], 8); + QROUND(x[10],x[11],x[ 8],x[ 9], x[ 5],x[ 6],x[ 7],x[ 4], x[15],x[12],x[13],x[14], 7); + + /* Operate on columns. */ + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[12],x[13],x[14],x[15], x[ 4],x[ 5],x[ 6],x[ 7], 16); + QROUND(x[ 8],x[ 9],x[10],x[11], x[ 4],x[ 5],x[ 6],x[ 7], x[12],x[13],x[14],x[15], 12); + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[12],x[13],x[14],x[15], x[ 4],x[ 5],x[ 6],x[ 7], 8); + QROUND(x[ 8],x[ 9],x[10],x[11], x[ 4],x[ 5],x[ 6],x[ 7], x[12],x[13],x[14],x[15], 7); + + /* Operate on diagonals */ + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[15],x[12],x[13],x[14], x[ 5],x[ 6],x[ 7],x[ 4], 16); + QROUND(x[10],x[11],x[ 8],x[ 9], x[ 5],x[ 6],x[ 7],x[ 4], x[15],x[12],x[13],x[14], 12); + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[15],x[12],x[13],x[14], x[ 5],x[ 6],x[ 7],x[ 4], 8); + QROUND(x[10],x[11],x[ 8],x[ 9], x[ 5],x[ 6],x[ 7],x[ 4], x[15],x[12],x[13],x[14], 7); + + /* Operate on columns. 
*/ + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[12],x[13],x[14],x[15], x[ 4],x[ 5],x[ 6],x[ 7], 16); + QROUND(x[ 8],x[ 9],x[10],x[11], x[ 4],x[ 5],x[ 6],x[ 7], x[12],x[13],x[14],x[15], 12); + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[12],x[13],x[14],x[15], x[ 4],x[ 5],x[ 6],x[ 7], 8); + QROUND(x[ 8],x[ 9],x[10],x[11], x[ 4],x[ 5],x[ 6],x[ 7], x[12],x[13],x[14],x[15], 7); + + /* Operate on diagonals */ + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[15],x[12],x[13],x[14], x[ 5],x[ 6],x[ 7],x[ 4], 16); + QROUND(x[10],x[11],x[ 8],x[ 9], x[ 5],x[ 6],x[ 7],x[ 4], x[15],x[12],x[13],x[14], 12); + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[15],x[12],x[13],x[14], x[ 5],x[ 6],x[ 7],x[ 4], 8); + QROUND(x[10],x[11],x[ 8],x[ 9], x[ 5],x[ 6],x[ 7],x[ 4], x[15],x[12],x[13],x[14], 7); + + /* Operate on columns. */ + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[12],x[13],x[14],x[15], x[ 4],x[ 5],x[ 6],x[ 7], 16); + QROUND(x[ 8],x[ 9],x[10],x[11], x[ 4],x[ 5],x[ 6],x[ 7], x[12],x[13],x[14],x[15], 12); + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[12],x[13],x[14],x[15], x[ 4],x[ 5],x[ 6],x[ 7], 8); + QROUND(x[ 8],x[ 9],x[10],x[11], x[ 4],x[ 5],x[ 6],x[ 7], x[12],x[13],x[14],x[15], 7); + + /* Operate on diagonals */ + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[15],x[12],x[13],x[14], x[ 5],x[ 6],x[ 7],x[ 4], 16); + QROUND(x[10],x[11],x[ 8],x[ 9], x[ 5],x[ 6],x[ 7],x[ 4], x[15],x[12],x[13],x[14], 12); + QROUND(x[ 0],x[ 1],x[ 2],x[ 3], x[15],x[12],x[13],x[14], x[ 5],x[ 6],x[ 7],x[ 4], 8); + QROUND(x[10],x[11],x[ 8],x[ 9], x[ 5],x[ 6],x[ 7],x[ 4], x[15],x[12],x[13],x[14], 7); + + B[0].x += x[0]; B[0].y += x[1]; B[0].z += x[2]; B[0].w += x[3]; B[1].x += x[4]; B[1].y += x[5]; B[1].z += x[6]; B[1].w += x[7]; + B[2].x += x[8]; B[2].y += x[9]; B[2].z += x[10]; B[2].w += x[11]; B[3].x += x[12]; B[3].y += x[13]; B[3].z += x[14]; B[3].w += x[15]; +} + +#endif + + +#define ROTL7(a0,a1,a2,a3,a00,a10,a20,a30){\ +a0^=ROTL(a00, 7); a1^=ROTL(a10, 7); a2^=ROTL(a20, 7); a3^=ROTL(a30, 7);\ +};\ + +#define ROTL9(a0,a1,a2,a3,a00,a10,a20,a30){\ +a0^=ROTL(a00, 9); a1^=ROTL(a10, 9); a2^=ROTL(a20, 9); a3^=ROTL(a30, 9);\ +};\ + +#define ROTL13(a0,a1,a2,a3,a00,a10,a20,a30){\ +a0^=ROTL(a00, 13); a1^=ROTL(a10, 13); a2^=ROTL(a20, 13); a3^=ROTL(a30, 13);\ +};\ + +#define ROTL18(a0,a1,a2,a3,a00,a10,a20,a30){\ +a0^=ROTL(a00, 18); a1^=ROTL(a10, 18); a2^=ROTL(a20, 18); a3^=ROTL(a30, 18);\ +};\ + +static __device__ void xor_salsa8(uint4 *B, uint4 *C) +{ + uint32_t x[16]; + x[0]=(B[0].x ^= C[0].x); + x[1]=(B[0].y ^= C[0].y); + x[2]=(B[0].z ^= C[0].z); + x[3]=(B[0].w ^= C[0].w); + x[4]=(B[1].x ^= C[1].x); + x[5]=(B[1].y ^= C[1].y); + x[6]=(B[1].z ^= C[1].z); + x[7]=(B[1].w ^= C[1].w); + x[8]=(B[2].x ^= C[2].x); + x[9]=(B[2].y ^= C[2].y); + x[10]=(B[2].z ^= C[2].z); + x[11]=(B[2].w ^= C[2].w); + x[12]=(B[3].x ^= C[3].x); + x[13]=(B[3].y ^= C[3].y); + x[14]=(B[3].z ^= C[3].z); + x[15]=(B[3].w ^= C[3].w); + + /* Operate on columns. */ + ROTL7(x[4],x[9],x[14],x[3],x[0]+x[12],x[1]+x[5],x[6]+x[10],x[11]+x[15]); + ROTL9(x[8],x[13],x[2],x[7],x[0]+x[4],x[5]+x[9],x[10]+x[14],x[3]+x[15]); + ROTL13(x[12],x[1],x[6],x[11],x[4]+x[8],x[9]+x[13],x[2]+x[14],x[3]+x[7]); + ROTL18(x[0],x[5],x[10],x[15],x[8]+x[12],x[1]+x[13],x[2]+x[6],x[7]+x[11]); + + /* Operate on rows. */ + ROTL7(x[1],x[6],x[11],x[12],x[0]+x[3],x[4]+x[5],x[9]+x[10],x[14]+x[15]); + ROTL9(x[2],x[7],x[8],x[13],x[0]+x[1],x[5]+x[6],x[10]+x[11],x[12]+x[15]); + ROTL13(x[3],x[4],x[9],x[14],x[1]+x[2],x[6]+x[7],x[8]+x[11],x[12]+x[13]); + ROTL18(x[0],x[5],x[10],x[15],x[2]+x[3],x[4]+x[7],x[8]+x[9],x[13]+x[14]); + + /* Operate on columns. 
*/ + ROTL7(x[4],x[9],x[14],x[3],x[0]+x[12],x[1]+x[5],x[6]+x[10],x[11]+x[15]); + ROTL9(x[8],x[13],x[2],x[7],x[0]+x[4],x[5]+x[9],x[10]+x[14],x[3]+x[15]); + ROTL13(x[12],x[1],x[6],x[11],x[4]+x[8],x[9]+x[13],x[2]+x[14],x[3]+x[7]); + ROTL18(x[0],x[5],x[10],x[15],x[8]+x[12],x[1]+x[13],x[2]+x[6],x[7]+x[11]); + + /* Operate on rows. */ + ROTL7(x[1],x[6],x[11],x[12],x[0]+x[3],x[4]+x[5],x[9]+x[10],x[14]+x[15]); + ROTL9(x[2],x[7],x[8],x[13],x[0]+x[1],x[5]+x[6],x[10]+x[11],x[12]+x[15]); + ROTL13(x[3],x[4],x[9],x[14],x[1]+x[2],x[6]+x[7],x[8]+x[11],x[12]+x[13]); + ROTL18(x[0],x[5],x[10],x[15],x[2]+x[3],x[4]+x[7],x[8]+x[9],x[13]+x[14]); + + /* Operate on columns. */ + ROTL7(x[4],x[9],x[14],x[3],x[0]+x[12],x[1]+x[5],x[6]+x[10],x[11]+x[15]); + ROTL9(x[8],x[13],x[2],x[7],x[0]+x[4],x[5]+x[9],x[10]+x[14],x[3]+x[15]); + ROTL13(x[12],x[1],x[6],x[11],x[4]+x[8],x[9]+x[13],x[2]+x[14],x[3]+x[7]); + ROTL18(x[0],x[5],x[10],x[15],x[8]+x[12],x[1]+x[13],x[2]+x[6],x[7]+x[11]); + + /* Operate on rows. */ + ROTL7(x[1],x[6],x[11],x[12],x[0]+x[3],x[4]+x[5],x[9]+x[10],x[14]+x[15]); + ROTL9(x[2],x[7],x[8],x[13],x[0]+x[1],x[5]+x[6],x[10]+x[11],x[12]+x[15]); + ROTL13(x[3],x[4],x[9],x[14],x[1]+x[2],x[6]+x[7],x[8]+x[11],x[12]+x[13]); + ROTL18(x[0],x[5],x[10],x[15],x[2]+x[3],x[4]+x[7],x[8]+x[9],x[13]+x[14]); + + /* Operate on columns. */ + ROTL7(x[4],x[9],x[14],x[3],x[0]+x[12],x[1]+x[5],x[6]+x[10],x[11]+x[15]); + ROTL9(x[8],x[13],x[2],x[7],x[0]+x[4],x[5]+x[9],x[10]+x[14],x[3]+x[15]); + ROTL13(x[12],x[1],x[6],x[11],x[4]+x[8],x[9]+x[13],x[2]+x[14],x[3]+x[7]); + ROTL18(x[0],x[5],x[10],x[15],x[8]+x[12],x[1]+x[13],x[2]+x[6],x[7]+x[11]); + + /* Operate on rows. */ + ROTL7(x[1],x[6],x[11],x[12],x[0]+x[3],x[4]+x[5],x[9]+x[10],x[14]+x[15]); + ROTL9(x[2],x[7],x[8],x[13],x[0]+x[1],x[5]+x[6],x[10]+x[11],x[12]+x[15]); + ROTL13(x[3],x[4],x[9],x[14],x[1]+x[2],x[6]+x[7],x[8]+x[11],x[12]+x[13]); + ROTL18(x[0],x[5],x[10],x[15],x[2]+x[3],x[4]+x[7],x[8]+x[9],x[13]+x[14]); + + B[0].x += x[0]; B[0].y += x[1]; B[0].z += x[2]; B[0].w += x[3]; B[1].x += x[4]; B[1].y += x[5]; B[1].z += x[6]; B[1].w += x[7]; + B[2].x += x[8]; B[2].y += x[9]; B[2].z += x[10]; B[2].w += x[11]; B[3].x += x[12]; B[3].y += x[13]; B[3].z += x[14]; B[3].w += x[15]; +} + + +template static __device__ void block_mixer(uint4 *B, uint4 *C) +{ + switch (ALGO) + { + case A_SCRYPT: xor_salsa8(B, C); break; + case A_SCRYPT_JANE: xor_chacha8(B, C); break; + } +} + +//////////////////////////////////////////////////////////////////////////////// +//! Experimental Scrypt core kernel for Titan devices. +//! @param g_idata input data in global memory +//! 
@param g_odata output data in global memory +//////////////////////////////////////////////////////////////////////////////// +template __global__ void nv2_scrypt_core_kernelA(uint32_t *g_idata, int begin, int end) +{ + int offset = blockIdx.x * blockDim.x + threadIdx.x / warpSize * warpSize; + g_idata += 32 * offset; + uint32_t * V = c_V[offset / warpSize]; + uint4 B[4], C[4]; + int i = begin; + + if(i == 0) { + __transposed_read_BC((uint4*)g_idata, B, C, 1, 0); + __transposed_write_BC(B, C, (uint4*)V, c_N); + ++i; + } else + __transposed_read_BC((uint4*)(V + (i-1)*32), B, C, c_N, 0); + + while(i < end) { + block_mixer(B, C); block_mixer(C, B); + __transposed_write_BC(B, C, (uint4*)(V + i*32), c_N); + ++i; + } +} + +template __global__ void nv2_scrypt_core_kernelA_LG(uint32_t *g_idata, int begin, int end, unsigned int LOOKUP_GAP) +{ + int offset = blockIdx.x * blockDim.x + threadIdx.x / warpSize * warpSize; + g_idata += 32 * offset; + uint32_t * V = c_V[offset / warpSize]; + uint4 B[4], C[4]; + int i = begin; + + if(i == 0) { + __transposed_read_BC((uint4*)g_idata, B, C, 1, 0); + __transposed_write_BC(B, C, (uint4*)V, c_spacing); + ++i; + } else { + int pos = (i-1)/LOOKUP_GAP, loop = (i-1)-pos*LOOKUP_GAP; + __transposed_read_BC((uint4*)(V + pos*32), B, C, c_spacing, 0); + while(loop--) { block_mixer(B, C); block_mixer(C, B); } + } + + while(i < end) { + block_mixer(B, C); block_mixer(C, B); + if (i % LOOKUP_GAP == 0) + __transposed_write_BC(B, C, (uint4*)(V + (i/LOOKUP_GAP)*32), c_spacing); + ++i; + } +} + +template __global__ void nv2_scrypt_core_kernelB(uint32_t *g_odata, int begin, int end) +{ + int offset = blockIdx.x * blockDim.x + threadIdx.x / warpSize * warpSize; + g_odata += 32 * offset; + uint32_t * V = c_V[offset / warpSize]; + uint4 B[4], C[4]; + + if(begin == 0) { + __transposed_read_BC((uint4*)V, B, C, c_N, c_N_1); + block_mixer(B, C); block_mixer(C, B); + } else + __transposed_read_BC((uint4*)g_odata, B, C, 1, 0); + + for (int i = begin; i < end; i++) { + int slot = C[0].x & c_N_1; + __transposed_xor_BC((uint4*)(V), B, C, c_N, slot); + block_mixer(B, C); block_mixer(C, B); + } + + __transposed_write_BC(B, C, (uint4*)(g_odata), 1); +} + +template __global__ void nv2_scrypt_core_kernelB_LG(uint32_t *g_odata, int begin, int end, unsigned int LOOKUP_GAP) +{ + int offset = blockIdx.x * blockDim.x + threadIdx.x / warpSize * warpSize; + g_odata += 32 * offset; + uint32_t * V = c_V[offset / warpSize]; + uint4 B[4], C[4]; + + if(begin == 0) { + int pos = c_N_1/LOOKUP_GAP, loop = 1 + (c_N_1-pos*LOOKUP_GAP); + __transposed_read_BC((uint4*)V, B, C, c_spacing, pos); + while(loop--) { block_mixer(B, C); block_mixer(C, B); } + } else { + __transposed_read_BC((uint4*)g_odata, B, C, 1, 0); + } + + for (int i = begin; i < end; i++) + { + int slot = C[0].x & c_N_1; + int pos = slot/LOOKUP_GAP, loop = slot-pos*LOOKUP_GAP; + uint4 b[4], c[4]; __transposed_read_BC((uint4*)(V), b, c, c_spacing, pos); + while(loop--) { block_mixer(b, c); block_mixer(c, b); } +#pragma unroll 4 + for(int n = 0; n < 4; n++) { B[n] ^= b[n]; C[n] ^= c[n]; } + block_mixer(B, C); block_mixer(C, B); + } + + __transposed_write_BC(B, C, (uint4*)(g_odata), 1); +} + + +// +// Maxcoin related Keccak implementation (Keccak256) +// + +// from salsa_kernel.cu +extern std::map context_blocks; +extern std::map context_wpb; +extern std::map context_kernel; +extern std::map context_streams[2]; +extern std::map context_hash[2]; + +__constant__ uint64_t ptarget64[4]; + +// ROL macro replaced with the inline assembly code below to work 
around a performance issue +//#define ROL(a, offset) ((((uint64_t)a) << ((offset) % 64)) ^ (((uint64_t)a) >> (64-((offset) % 64)))) +__inline__ __device__ uint2 ROL(const uint2 a, const int offset) { + uint2 result; + if(offset >= 32) { + asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset)); + asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset)); + } else { + asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset)); + asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset)); + } + return result; +} +#define ROL_mult8(a, offset) ROL(a, offset) + +//__inline__ __device__ uint64_t devectorize(uint2 v) { return __double_as_longlong(__hiloint2double(v.y, v.x)); } +//__inline__ __device__ uint2 vectorize(uint64_t v) { return make_uint2(__double2loint(__longlong_as_double(v)), __double2hiint(__longlong_as_double(v))); } +//__inline__ __device__ uint2 operator^ (uint2 a, uint2 b) { return make_uint2(a.x ^ b.x, a.y ^ b.y); } +//__inline__ __device__ uint2 operator& (uint2 a, uint2 b) { return make_uint2(a.x & b.x, a.y & b.y); } +//__inline__ __device__ uint2 operator| (uint2 a, uint2 b) { return make_uint2(a.x | b.x, a.y | b.y); } +//__inline__ __device__ uint2 operator~ (uint2 a) { return make_uint2(~a.x, ~a.y); } +//__inline__ __device__ void operator^= (uint2 &a, uint2 b) { a = a ^ b; } + +__constant__ uint64_t KeccakF_RoundConstants[24]; + +static uint64_t host_KeccakF_RoundConstants[24] = +{ + (uint64_t)0x0000000000000001ULL, + (uint64_t)0x0000000000008082ULL, + (uint64_t)0x800000000000808aULL, + (uint64_t)0x8000000080008000ULL, + (uint64_t)0x000000000000808bULL, + (uint64_t)0x0000000080000001ULL, + (uint64_t)0x8000000080008081ULL, + (uint64_t)0x8000000000008009ULL, + (uint64_t)0x000000000000008aULL, + (uint64_t)0x0000000000000088ULL, + (uint64_t)0x0000000080008009ULL, + (uint64_t)0x000000008000000aULL, + (uint64_t)0x000000008000808bULL, + (uint64_t)0x800000000000008bULL, + (uint64_t)0x8000000000008089ULL, + (uint64_t)0x8000000000008003ULL, + (uint64_t)0x8000000000008002ULL, + (uint64_t)0x8000000000000080ULL, + (uint64_t)0x000000000000800aULL, + (uint64_t)0x800000008000000aULL, + (uint64_t)0x8000000080008081ULL, + (uint64_t)0x8000000000008080ULL, + (uint64_t)0x0000000080000001ULL, + (uint64_t)0x8000000080008008ULL +}; + +__constant__ uint64_t pdata64[10]; + +//static __device__ __forceinline__ uint32_t cuda_swab32(uint32_t x) +//{ +// return (((x << 24) & 0xff000000u) | ((x << 8) & 0x00ff0000u) +// | ((x >> 8) & 0x0000ff00u) | ((x >> 24) & 0x000000ffu)); +//} + +// in this implementation the first and last iteration of the for() loop were explicitly +// unrolled and redundant operations were removed (e.g. 
operations on zero inputs, and +// computation of unnecessary outputs) +__global__ __launch_bounds__(256, 2) +void titan_crypto_hash( uint64_t *g_out, uint32_t nonce, uint32_t *g_good, bool validate ) +{ + uint2 Aba, Abe, Abi, Abo, Abu; + uint2 Aga, Age, Agi, Ago, Agu; + uint2 Aka, Ake, Aki, Ako, Aku; + uint2 Ama, Ame, Ami, Amo, Amu; + uint2 Asa, Ase, Asi, Aso, Asu; + uint2 BCa, BCe, BCi, BCo, BCu; + uint2 Da, De, Di, Do, Du; + uint2 Eba, Ebe, Ebi, Ebo, Ebu; + uint2 Ega, Ege, Egi, Ego, Egu; + uint2 Eka, Eke, Eki, Eko, Eku; + uint2 Ema, Eme, Emi, Emo, Emu; + uint2 Esa, Ese, Esi, Eso, Esu; + + // embed unique nonce into source data stream in pdata[] + Agu = vectorize((pdata64[9] & 0x00000000FFFFFFFFULL) | (((uint64_t)cuda_swab32(nonce + ((blockIdx.x * blockDim.x) + threadIdx.x))) << 32)); + + // prepareTheta + BCa = vectorize(pdata64[0]^pdata64[5]^0x0000000000000001ULL); + BCe = vectorize(pdata64[1]^pdata64[6]^0x8000000000000000ULL); + BCi = vectorize(pdata64[2]^pdata64[7]); + BCo = vectorize(pdata64[3]^pdata64[8]); + BCu = vectorize(pdata64[4])^Agu; + + //thetaRhoPiChiIotaPrepareTheta(round , A, E) + Da = BCu^ROL(BCe, 1); + De = BCa^ROL(BCi, 1); + Di = BCe^ROL(BCo, 1); + Do = BCi^ROL(BCu, 1); + Du = BCo^ROL(BCa, 1); + + Aba = vectorize(pdata64[0]) ^ Da; + BCa = Aba; + Age = vectorize(pdata64[6]) ^ De; + BCe = ROL(Age, 44); + Aki = Di; + BCi = ROL(Aki, 43); + Amo = Do; + BCo = ROL(Amo, 21); + Asu = Du; + BCu = ROL(Asu, 14); + Eba = BCa ^((~BCe)& BCi ); + Eba ^= vectorize((uint64_t)KeccakF_RoundConstants[0]); + Ebe = BCe ^((~BCi)& BCo ); + Ebi = BCi ^((~BCo)& BCu ); + Ebo = BCo ^((~BCu)& BCa ); + Ebu = BCu ^((~BCa)& BCe ); + + Abo = vectorize(pdata64[3]) ^ Do; + BCa = ROL(Abo, 28); + Agu ^= Du; + BCe = ROL(Agu, 20); + Aka = vectorize(0x0000000000000001ULL) ^ Da; + BCi = ROL(Aka, 3); + Ame = vectorize(0x8000000000000000ULL) ^ De; + BCo = ROL(Ame, 45); + Asi = Di; + BCu = ROL(Asi, 61); + Ega = BCa ^((~BCe)& BCi ); + Ege = BCe ^((~BCi)& BCo ); + Egi = BCi ^((~BCo)& BCu ); + Ego = BCo ^((~BCu)& BCa ); + Egu = BCu ^((~BCa)& BCe ); + + Abe = vectorize(pdata64[1]) ^ De; + BCa = ROL(Abe, 1); + Agi = vectorize(pdata64[7]) ^ Di; + BCe = ROL(Agi, 6); + Ako = Do; + BCi = ROL(Ako, 25); + Amu = Du; + BCo = ROL(Amu, 8); + Asa = Da; + BCu = ROL(Asa, 18); + Eka = BCa ^((~BCe)& BCi ); + Eke = BCe ^((~BCi)& BCo ); + Eki = BCi ^((~BCo)& BCu ); + Eko = BCo ^((~BCu)& BCa ); + Eku = BCu ^((~BCa)& BCe ); + + Abu = vectorize(pdata64[4]) ^ Du; + BCa = ROL(Abu, 27); + Aga = vectorize(pdata64[5]) ^ Da; + BCe = ROL(Aga, 36); + Ake = De; + BCi = ROL(Ake, 10); + Ami = Di; + BCo = ROL(Ami, 15); + Aso = Do; + BCu = ROR8(Aso); + Ema = BCa ^((~BCe)& BCi ); + Eme = BCe ^((~BCi)& BCo ); + Emi = BCi ^((~BCo)& BCu ); + Emo = BCo ^((~BCu)& BCa ); + Emu = BCu ^((~BCa)& BCe ); + + Abi = vectorize(pdata64[2]) ^ Di; + BCa = ROL(Abi, 62); + Ago = vectorize(pdata64[8]) ^ Do; + BCe = ROL(Ago, 55); + Aku = Du; + BCi = ROL(Aku, 39); + Ama = Da; + BCo = ROL(Ama, 41); + Ase = De; + BCu = ROL(Ase, 2); + Esa = BCa ^((~BCe)& BCi ); + Ese = BCe ^((~BCi)& BCo ); + Esi = BCi ^((~BCo)& BCu ); + Eso = BCo ^((~BCu)& BCa ); + Esu = BCu ^((~BCa)& BCe ); + + // prepareTheta + BCa = Eba^Ega^Eka^Ema^Esa; + BCe = Ebe^Ege^Eke^Eme^Ese; + BCi = Ebi^Egi^Eki^Emi^Esi; + BCo = Ebo^Ego^Eko^Emo^Eso; + BCu = Ebu^Egu^Eku^Emu^Esu; + + //thetaRhoPiChiIotaPrepareTheta(round+1, E, A) + Da = BCu^ROL(BCe, 1); + De = BCa^ROL(BCi, 1); + Di = BCe^ROL(BCo, 1); + Do = BCi^ROL(BCu, 1); + Du = BCo^ROL(BCa, 1); + + Eba ^= Da; + BCa = Eba; + Ege ^= De; + BCe = ROL(Ege, 44); + Eki ^= 
Di; + BCi = ROL(Eki, 43); + Emo ^= Do; + BCo = ROL(Emo, 21); + Esu ^= Du; + BCu = ROL(Esu, 14); + Aba = BCa ^((~BCe)& BCi ); + Aba ^= vectorize((uint64_t)KeccakF_RoundConstants[1]); + Abe = BCe ^((~BCi)& BCo ); + Abi = BCi ^((~BCo)& BCu ); + Abo = BCo ^((~BCu)& BCa ); + Abu = BCu ^((~BCa)& BCe ); + + Ebo ^= Do; + BCa = ROL(Ebo, 28); + Egu ^= Du; + BCe = ROL(Egu, 20); + Eka ^= Da; + BCi = ROL(Eka, 3); + Eme ^= De; + BCo = ROL(Eme, 45); + Esi ^= Di; + BCu = ROL(Esi, 61); + Aga = BCa ^((~BCe)& BCi ); + Age = BCe ^((~BCi)& BCo ); + Agi = BCi ^((~BCo)& BCu ); + Ago = BCo ^((~BCu)& BCa ); + Agu = BCu ^((~BCa)& BCe ); + + Ebe ^= De; + BCa = ROL(Ebe, 1); + Egi ^= Di; + BCe = ROL(Egi, 6); + Eko ^= Do; + BCi = ROL(Eko, 25); + Emu ^= Du; + BCo = ROL(Emu, 8); + Esa ^= Da; + BCu = ROL(Esa, 18); + Aka = BCa ^((~BCe)& BCi ); + Ake = BCe ^((~BCi)& BCo ); + Aki = BCi ^((~BCo)& BCu ); + Ako = BCo ^((~BCu)& BCa ); + Aku = BCu ^((~BCa)& BCe ); + + Ebu ^= Du; + BCa = ROL(Ebu, 27); + Ega ^= Da; + BCe = ROL(Ega, 36); + Eke ^= De; + BCi = ROL(Eke, 10); + Emi ^= Di; + BCo = ROL(Emi, 15); + Eso ^= Do; + BCu = ROR8(Eso); + Ama = BCa ^((~BCe)& BCi ); + Ame = BCe ^((~BCi)& BCo ); + Ami = BCi ^((~BCo)& BCu ); + Amo = BCo ^((~BCu)& BCa ); + Amu = BCu ^((~BCa)& BCe ); + + Ebi ^= Di; + BCa = ROL(Ebi, 62); + Ego ^= Do; + BCe = ROL(Ego, 55); + Eku ^= Du; + BCi = ROL(Eku, 39); + Ema ^= Da; + BCo = ROL(Ema, 41); + Ese ^= De; + BCu = ROL(Ese, 2); + Asa = BCa ^((~BCe)& BCi ); + Ase = BCe ^((~BCi)& BCo ); + Asi = BCi ^((~BCo)& BCu ); + Aso = BCo ^((~BCu)& BCa ); + Asu = BCu ^((~BCa)& BCe ); + + #pragma unroll 1 + for( int laneCount = 2; laneCount < 22; laneCount += 2 ) + { + // prepareTheta + BCa = Aba^Aga^Aka^Ama^Asa; + BCe = Abe^Age^Ake^Ame^Ase; + BCi = Abi^Agi^Aki^Ami^Asi; + BCo = Abo^Ago^Ako^Amo^Aso; + BCu = Abu^Agu^Aku^Amu^Asu; + + //thetaRhoPiChiIotaPrepareTheta(round , A, E) + Da = BCu^ROL(BCe, 1); + De = BCa^ROL(BCi, 1); + Di = BCe^ROL(BCo, 1); + Do = BCi^ROL(BCu, 1); + Du = BCo^ROL(BCa, 1); + + Aba ^= Da; + BCa = Aba; + Age ^= De; + BCe = ROL(Age, 44); + Aki ^= Di; + BCi = ROL(Aki, 43); + Amo ^= Do; + BCo = ROL(Amo, 21); + Asu ^= Du; + BCu = ROL(Asu, 14); + Eba = BCa ^((~BCe)& BCi ); + Eba ^= vectorize((uint64_t)KeccakF_RoundConstants[laneCount]); + Ebe = BCe ^((~BCi)& BCo ); + Ebi = BCi ^((~BCo)& BCu ); + Ebo = BCo ^((~BCu)& BCa ); + Ebu = BCu ^((~BCa)& BCe ); + + Abo ^= Do; + BCa = ROL(Abo, 28); + Agu ^= Du; + BCe = ROL(Agu, 20); + Aka ^= Da; + BCi = ROL(Aka, 3); + Ame ^= De; + BCo = ROL(Ame, 45); + Asi ^= Di; + BCu = ROL(Asi, 61); + Ega = BCa ^((~BCe)& BCi ); + Ege = BCe ^((~BCi)& BCo ); + Egi = BCi ^((~BCo)& BCu ); + Ego = BCo ^((~BCu)& BCa ); + Egu = BCu ^((~BCa)& BCe ); + + Abe ^= De; + BCa = ROL(Abe, 1); + Agi ^= Di; + BCe = ROL(Agi, 6); + Ako ^= Do; + BCi = ROL(Ako, 25); + Amu ^= Du; + BCo = ROL(Amu, 8); + Asa ^= Da; + BCu = ROL(Asa, 18); + Eka = BCa ^((~BCe)& BCi ); + Eke = BCe ^((~BCi)& BCo ); + Eki = BCi ^((~BCo)& BCu ); + Eko = BCo ^((~BCu)& BCa ); + Eku = BCu ^((~BCa)& BCe ); + + Abu ^= Du; + BCa = ROL(Abu, 27); + Aga ^= Da; + BCe = ROL(Aga, 36); + Ake ^= De; + BCi = ROL(Ake, 10); + Ami ^= Di; + BCo = ROL(Ami, 15); + Aso ^= Do; + BCu = ROR8(Aso); + Ema = BCa ^((~BCe)& BCi ); + Eme = BCe ^((~BCi)& BCo ); + Emi = BCi ^((~BCo)& BCu ); + Emo = BCo ^((~BCu)& BCa ); + Emu = BCu ^((~BCa)& BCe ); + + Abi ^= Di; + BCa = ROL(Abi, 62); + Ago ^= Do; + BCe = ROL(Ago, 55); + Aku ^= Du; + BCi = ROL(Aku, 39); + Ama ^= Da; + BCo = ROL(Ama, 41); + Ase ^= De; + BCu = ROL(Ase, 2); + Esa = BCa ^((~BCe)& BCi ); + Ese = BCe 
^((~BCi)& BCo ); + Esi = BCi ^((~BCo)& BCu ); + Eso = BCo ^((~BCu)& BCa ); + Esu = BCu ^((~BCa)& BCe ); + + // prepareTheta + BCa = Eba^Ega^Eka^Ema^Esa; + BCe = Ebe^Ege^Eke^Eme^Ese; + BCi = Ebi^Egi^Eki^Emi^Esi; + BCo = Ebo^Ego^Eko^Emo^Eso; + BCu = Ebu^Egu^Eku^Emu^Esu; + + //thetaRhoPiChiIotaPrepareTheta(round+1, E, A) + Da = BCu^ROL(BCe, 1); + De = BCa^ROL(BCi, 1); + Di = BCe^ROL(BCo, 1); + Do = BCi^ROL(BCu, 1); + Du = BCo^ROL(BCa, 1); + + Eba ^= Da; + BCa = Eba; + Ege ^= De; + BCe = ROL(Ege, 44); + Eki ^= Di; + BCi = ROL(Eki, 43); + Emo ^= Do; + BCo = ROL(Emo, 21); + Esu ^= Du; + BCu = ROL(Esu, 14); + Aba = BCa ^((~BCe)& BCi ); + Aba ^= vectorize((uint64_t)KeccakF_RoundConstants[laneCount+1]); + Abe = BCe ^((~BCi)& BCo ); + Abi = BCi ^((~BCo)& BCu ); + Abo = BCo ^((~BCu)& BCa ); + Abu = BCu ^((~BCa)& BCe ); + + Ebo ^= Do; + BCa = ROL(Ebo, 28); + Egu ^= Du; + BCe = ROL(Egu, 20); + Eka ^= Da; + BCi = ROL(Eka, 3); + Eme ^= De; + BCo = ROL(Eme, 45); + Esi ^= Di; + BCu = ROL(Esi, 61); + Aga = BCa ^((~BCe)& BCi ); + Age = BCe ^((~BCi)& BCo ); + Agi = BCi ^((~BCo)& BCu ); + Ago = BCo ^((~BCu)& BCa ); + Agu = BCu ^((~BCa)& BCe ); + + Ebe ^= De; + BCa = ROL(Ebe, 1); + Egi ^= Di; + BCe = ROL(Egi, 6); + Eko ^= Do; + BCi = ROL(Eko, 25); + Emu ^= Du; + BCo = ROL(Emu, 8); + Esa ^= Da; + BCu = ROL(Esa, 18); + Aka = BCa ^((~BCe)& BCi ); + Ake = BCe ^((~BCi)& BCo ); + Aki = BCi ^((~BCo)& BCu ); + Ako = BCo ^((~BCu)& BCa ); + Aku = BCu ^((~BCa)& BCe ); + + Ebu ^= Du; + BCa = ROL(Ebu, 27); + Ega ^= Da; + BCe = ROL(Ega, 36); + Eke ^= De; + BCi = ROL(Eke, 10); + Emi ^= Di; + BCo = ROL(Emi, 15); + Eso ^= Do; + BCu = ROR8(Eso); + Ama = BCa ^((~BCe)& BCi ); + Ame = BCe ^((~BCi)& BCo ); + Ami = BCi ^((~BCo)& BCu ); + Amo = BCo ^((~BCu)& BCa ); + Amu = BCu ^((~BCa)& BCe ); + + Ebi ^= Di; + BCa = ROL(Ebi, 62); + Ego ^= Do; + BCe = ROL(Ego, 55); + Eku ^= Du; + BCi = ROL(Eku, 39); + Ema ^= Da; + BCo = ROL(Ema, 41); + Ese ^= De; + BCu = ROL(Ese, 2); + Asa = BCa ^((~BCe)& BCi ); + Ase = BCe ^((~BCi)& BCo ); + Asi = BCi ^((~BCo)& BCu ); + Aso = BCo ^((~BCu)& BCa ); + Asu = BCu ^((~BCa)& BCe ); + } + + // prepareTheta + BCa = Aba^Aga^Aka^Ama^Asa; + BCe = Abe^Age^Ake^Ame^Ase; + BCi = Abi^Agi^Aki^Ami^Asi; + BCo = Abo^Ago^Ako^Amo^Aso; + BCu = Abu^Agu^Aku^Amu^Asu; + + //thetaRhoPiChiIotaPrepareTheta(round , A, E) + Da = BCu^ROL(BCe, 1); + De = BCa^ROL(BCi, 1); + Di = BCe^ROL(BCo, 1); + Do = BCi^ROL(BCu, 1); + Du = BCo^ROL(BCa, 1); + + Aba ^= Da; + BCa = Aba; + Age ^= De; + BCe = ROL(Age, 44); + Aki ^= Di; + BCi = ROL(Aki, 43); + Amo ^= Do; + BCo = ROL(Amo, 21); + Asu ^= Du; + BCu = ROL(Asu, 14); + Eba = BCa ^((~BCe)& BCi ); + Eba ^= vectorize((uint64_t)KeccakF_RoundConstants[22]); + Ebe = BCe ^((~BCi)& BCo ); + Ebi = BCi ^((~BCo)& BCu ); + Ebo = BCo ^((~BCu)& BCa ); + Ebu = BCu ^((~BCa)& BCe ); + + Abo ^= Do; + BCa = ROL(Abo, 28); + Agu ^= Du; + BCe = ROL(Agu, 20); + Aka ^= Da; + BCi = ROL(Aka, 3); + Ame ^= De; + BCo = ROL(Ame, 45); + Asi ^= Di; + BCu = ROL(Asi, 61); + Ega = BCa ^((~BCe)& BCi ); + Ege = BCe ^((~BCi)& BCo ); + Egi = BCi ^((~BCo)& BCu ); + Ego = BCo ^((~BCu)& BCa ); + Egu = BCu ^((~BCa)& BCe ); + + Abe ^= De; + BCa = ROL(Abe, 1); + Agi ^= Di; + BCe = ROL(Agi, 6); + Ako ^= Do; + BCi = ROL(Ako, 25); + Amu ^= Du; + BCo = ROL(Amu, 8); + Asa ^= Da; + BCu = ROL(Asa, 18); + Eka = BCa ^((~BCe)& BCi ); + Eke = BCe ^((~BCi)& BCo ); + Eki = BCi ^((~BCo)& BCu ); + Eko = BCo ^((~BCu)& BCa ); + Eku = BCu ^((~BCa)& BCe ); + + Abu ^= Du; + BCa = ROL(Abu, 27); + Aga ^= Da; + BCe = ROL(Aga, 36); + Ake ^= De; + BCi = ROL(Ake, 
10); + Ami ^= Di; + BCo = ROL(Ami, 15); + Aso ^= Do; + BCu = ROR8(Aso); + Ema = BCa ^((~BCe)& BCi ); + Eme = BCe ^((~BCi)& BCo ); + Emi = BCi ^((~BCo)& BCu ); + Emo = BCo ^((~BCu)& BCa ); + Emu = BCu ^((~BCa)& BCe ); + + Abi ^= Di; + BCa = ROL(Abi, 62); + Ago ^= Do; + BCe = ROL(Ago, 55); + Aku ^= Du; + BCi = ROL(Aku, 39); + Ama ^= Da; + BCo = ROL(Ama, 41); + Ase ^= De; + BCu = ROL(Ase, 2); + Esa = BCa ^((~BCe)& BCi ); + Ese = BCe ^((~BCi)& BCo ); + Esi = BCi ^((~BCo)& BCu ); + Eso = BCo ^((~BCu)& BCa ); + Esu = BCu ^((~BCa)& BCe ); + + // prepareTheta + BCa = Eba^Ega^Eka^Ema^Esa; + BCe = Ebe^Ege^Eke^Eme^Ese; + BCi = Ebi^Egi^Eki^Emi^Esi; + BCo = Ebo^Ego^Eko^Emo^Eso; + BCu = Ebu^Egu^Eku^Emu^Esu; + + //thetaRhoPiChiIotaPrepareTheta(round+1, E, A) + Da = BCu^ROL(BCe, 1); + De = BCa^ROL(BCi, 1); + Di = BCe^ROL(BCo, 1); + Do = BCi^ROL(BCu, 1); + Du = BCo^ROL(BCa, 1); + + Eba ^= Da; + BCa = Eba; + Ege ^= De; + BCe = ROL(Ege, 44); + Eki ^= Di; + BCi = ROL(Eki, 43); + Emo ^= Do; + BCo = ROL(Emo, 21); + Esu ^= Du; + BCu = ROL(Esu, 14); + Aba = BCa ^((~BCe)& BCi ); + Aba ^= vectorize((uint64_t)KeccakF_RoundConstants[23]); + Abe = BCe ^((~BCi)& BCo ); + Abi = BCi ^((~BCo)& BCu ); + Abo = BCo ^((~BCu)& BCa ); + + if (validate) { + g_out += 4 * ((blockIdx.x * blockDim.x) + threadIdx.x); + g_out[3] = devectorize(Abo); + g_out[2] = devectorize(Abi); + g_out[1] = devectorize(Abe); + g_out[0] = devectorize(Aba); + } + + // the likelyhood of meeting the hashing target is so low, that we're not guarding this + // with atomic writes, locks or similar... + uint64_t *g_good64 = (uint64_t*)g_good; + if (devectorize(Abo) <= ptarget64[3]) { + if (devectorize(Abo) < g_good64[3]) { + g_good64[3] = devectorize(Abo); + g_good64[2] = devectorize(Abi); + g_good64[1] = devectorize(Abe); + g_good64[0] = devectorize(Aba); + g_good[8] = nonce + ((blockIdx.x * blockDim.x) + threadIdx.x); + } + } +} + +static std::map context_good[2]; + +bool NV2Kernel::prepare_keccak256(int thr_id, const uint32_t host_pdata[20], const uint32_t host_ptarget[8]) +{ + static bool init[MAX_DEVICES] = {false}; + if (!init[thr_id]) + { + checkCudaErrors(cudaMemcpyToSymbol(KeccakF_RoundConstants, host_KeccakF_RoundConstants, sizeof(host_KeccakF_RoundConstants), 0, cudaMemcpyHostToDevice)); + + // allocate pinned host memory for good hashes + uint32_t *tmp; + checkCudaErrors(cudaMalloc((void **) &tmp, 9*sizeof(uint32_t))); context_good[0][thr_id] = tmp; + checkCudaErrors(cudaMalloc((void **) &tmp, 9*sizeof(uint32_t))); context_good[1][thr_id] = tmp; + + init[thr_id] = true; + } + checkCudaErrors(cudaMemcpyToSymbol(pdata64, host_pdata, 20*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); + checkCudaErrors(cudaMemcpyToSymbol(ptarget64, host_ptarget, 8*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); + + return context_good[0][thr_id] && context_good[1][thr_id]; +} + +void NV2Kernel::do_keccak256(dim3 grid, dim3 threads, int thr_id, int stream, uint32_t *hash, uint32_t nonce, int throughput, bool do_d2h) +{ + checkCudaErrors(cudaMemsetAsync(context_good[stream][thr_id], 0xff, 9 * sizeof(uint32_t), context_streams[stream][thr_id])); + + titan_crypto_hash<<>>((uint64_t*)context_hash[stream][thr_id], nonce, context_good[stream][thr_id], do_d2h); + + // copy hashes from device memory to host (ALL hashes, lots of data...) 
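+    // Two transfer strategies, both asynchronous on this thread's stream:
+    //  - validation runs (do_d2h): copy every thread's full 256-bit result
+    //    (8 x uint32_t per hash, `throughput` hashes) so the host can verify them;
+    //  - normal mining: copy only word 8 of the "good" buffer, i.e. the 32-bit
+    //    nonce of the best hash found, keeping the device-to-host traffic to 4 bytes.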
+ if (do_d2h && hash != NULL) { + size_t mem_size = throughput * sizeof(uint32_t) * 8; + checkCudaErrors(cudaMemcpyAsync(hash, context_hash[stream][thr_id], mem_size, + cudaMemcpyDeviceToHost, context_streams[stream][thr_id])); + } + else if (hash != NULL) { + // asynchronous copy of winning nonce (just 4 bytes...) + checkCudaErrors(cudaMemcpyAsync(hash, context_good[stream][thr_id]+8, sizeof(uint32_t), + cudaMemcpyDeviceToHost, context_streams[stream][thr_id])); + } +} + + +// +// Blakecoin related Keccak implementation (Keccak256) +// + +typedef uint32_t sph_u32; +//#define SPH_C32(x) ((sph_u32)(x)) +//#define SPH_T32(x) ((x) & SPH_C32(0xFFFFFFFF)) +#if __CUDA_ARCH__ < 350 + // Kepler (Compute 3.0) + #define SPH_ROTL32(a, b) ((a)<<(b))|((a)>>(32-(b))) +#else + // Kepler (Compute 3.5) + #define SPH_ROTL32(a, b) __funnelshift_l( a, a, b ); +#endif +#define SPH_ROTR32(x, n) SPH_ROTL32(x, (32 - (n))) + +__constant__ uint32_t pdata[20]; + +#ifdef _MSC_VER +#pragma warning (disable: 4146) +#endif + +static __device__ __forceinline__ sph_u32 cuda_sph_bswap32(sph_u32 x) +{ + return (((x << 24) & 0xff000000u) | ((x << 8) & 0x00ff0000u) + | ((x >> 8) & 0x0000ff00u) | ((x >> 24) & 0x000000ffu)); +} + +/** + * Encode a 32-bit value into the provided buffer (big endian convention). + * + * @param dst the destination buffer + * @param val the 32-bit value to encode + */ +static __device__ __forceinline__ void +cuda_sph_enc32be(void *dst, sph_u32 val) +{ + *(sph_u32 *)dst = cuda_sph_bswap32(val); +} + +#define Z00 0 +#define Z01 1 +#define Z02 2 +#define Z03 3 +#define Z04 4 +#define Z05 5 +#define Z06 6 +#define Z07 7 +#define Z08 8 +#define Z09 9 +#define Z0A A +#define Z0B B +#define Z0C C +#define Z0D D +#define Z0E E +#define Z0F F + +#define Z10 E +#define Z11 A +#define Z12 4 +#define Z13 8 +#define Z14 9 +#define Z15 F +#define Z16 D +#define Z17 6 +#define Z18 1 +#define Z19 C +#define Z1A 0 +#define Z1B 2 +#define Z1C B +#define Z1D 7 +#define Z1E 5 +#define Z1F 3 + +#define Z20 B +#define Z21 8 +#define Z22 C +#define Z23 0 +#define Z24 5 +#define Z25 2 +#define Z26 F +#define Z27 D +#define Z28 A +#define Z29 E +#define Z2A 3 +#define Z2B 6 +#define Z2C 7 +#define Z2D 1 +#define Z2E 9 +#define Z2F 4 + +#define Z30 7 +#define Z31 9 +#define Z32 3 +#define Z33 1 +#define Z34 D +#define Z35 C +#define Z36 B +#define Z37 E +#define Z38 2 +#define Z39 6 +#define Z3A 5 +#define Z3B A +#define Z3C 4 +#define Z3D 0 +#define Z3E F +#define Z3F 8 + +#define Z40 9 +#define Z41 0 +#define Z42 5 +#define Z43 7 +#define Z44 2 +#define Z45 4 +#define Z46 A +#define Z47 F +#define Z48 E +#define Z49 1 +#define Z4A B +#define Z4B C +#define Z4C 6 +#define Z4D 8 +#define Z4E 3 +#define Z4F D + +#define Z50 2 +#define Z51 C +#define Z52 6 +#define Z53 A +#define Z54 0 +#define Z55 B +#define Z56 8 +#define Z57 3 +#define Z58 4 +#define Z59 D +#define Z5A 7 +#define Z5B 5 +#define Z5C F +#define Z5D E +#define Z5E 1 +#define Z5F 9 + +#define Z60 C +#define Z61 5 +#define Z62 1 +#define Z63 F +#define Z64 E +#define Z65 D +#define Z66 4 +#define Z67 A +#define Z68 0 +#define Z69 7 +#define Z6A 6 +#define Z6B 3 +#define Z6C 9 +#define Z6D 2 +#define Z6E 8 +#define Z6F B + +#define Z70 D +#define Z71 B +#define Z72 7 +#define Z73 E +#define Z74 C +#define Z75 1 +#define Z76 3 +#define Z77 9 +#define Z78 5 +#define Z79 0 +#define Z7A F +#define Z7B 4 +#define Z7C 8 +#define Z7D 6 +#define Z7E 2 +#define Z7F A + +#define Z80 6 +#define Z81 F +#define Z82 E +#define Z83 9 +#define Z84 B +#define Z85 3 +#define Z86 0 
+#define Z87 8 +#define Z88 C +#define Z89 2 +#define Z8A D +#define Z8B 7 +#define Z8C 1 +#define Z8D 4 +#define Z8E A +#define Z8F 5 + +#define Z90 A +#define Z91 2 +#define Z92 8 +#define Z93 4 +#define Z94 7 +#define Z95 6 +#define Z96 1 +#define Z97 5 +#define Z98 F +#define Z99 B +#define Z9A 9 +#define Z9B E +#define Z9C 3 +#define Z9D C +#define Z9E D +#define Z9F 0 + +#define Mx(r, i) Mx_(Z ## r ## i) +#define Mx_(n) Mx__(n) +#define Mx__(n) M ## n + +#define CSx(r, i) CSx_(Z ## r ## i) +#define CSx_(n) CSx__(n) +#define CSx__(n) CS ## n + +#define CS0 SPH_C32(0x243F6A88) +#define CS1 SPH_C32(0x85A308D3) +#define CS2 SPH_C32(0x13198A2E) +#define CS3 SPH_C32(0x03707344) +#define CS4 SPH_C32(0xA4093822) +#define CS5 SPH_C32(0x299F31D0) +#define CS6 SPH_C32(0x082EFA98) +#define CS7 SPH_C32(0xEC4E6C89) +#define CS8 SPH_C32(0x452821E6) +#define CS9 SPH_C32(0x38D01377) +#define CSA SPH_C32(0xBE5466CF) +#define CSB SPH_C32(0x34E90C6C) +#define CSC SPH_C32(0xC0AC29B7) +#define CSD SPH_C32(0xC97C50DD) +#define CSE SPH_C32(0x3F84D5B5) +#define CSF SPH_C32(0xB5470917) + +#define GS(m0, m1, c0, c1, a, b, c, d) do { \ + a = SPH_T32(a + b + (m0 ^ c1)); \ + d = SPH_ROTR32(d ^ a, 16); \ + c = SPH_T32(c + d); \ + b = SPH_ROTR32(b ^ c, 12); \ + a = SPH_T32(a + b + (m1 ^ c0)); \ + d = SPH_ROTR32(d ^ a, 8); \ + c = SPH_T32(c + d); \ + b = SPH_ROTR32(b ^ c, 7); \ + } while (0) + +#define ROUND_S(r) do { \ + GS(Mx(r, 0), Mx(r, 1), CSx(r, 0), CSx(r, 1), V0, V4, V8, VC); \ + GS(Mx(r, 2), Mx(r, 3), CSx(r, 2), CSx(r, 3), V1, V5, V9, VD); \ + GS(Mx(r, 4), Mx(r, 5), CSx(r, 4), CSx(r, 5), V2, V6, VA, VE); \ + GS(Mx(r, 6), Mx(r, 7), CSx(r, 6), CSx(r, 7), V3, V7, VB, VF); \ + GS(Mx(r, 8), Mx(r, 9), CSx(r, 8), CSx(r, 9), V0, V5, VA, VF); \ + GS(Mx(r, A), Mx(r, B), CSx(r, A), CSx(r, B), V1, V6, VB, VC); \ + GS(Mx(r, C), Mx(r, D), CSx(r, C), CSx(r, D), V2, V7, V8, VD); \ + GS(Mx(r, E), Mx(r, F), CSx(r, E), CSx(r, F), V3, V4, V9, VE); \ + } while (0) + +#define COMPRESS32 do { \ + sph_u32 M0, M1, M2, M3, M4, M5, M6, M7; \ + sph_u32 M8, M9, MA, MB, MC, MD, ME, MF; \ + sph_u32 V0, V1, V2, V3, V4, V5, V6, V7; \ + sph_u32 V8, V9, VA, VB, VC, VD, VE, VF; \ + V0 = H0; \ + V1 = H1; \ + V2 = H2; \ + V3 = H3; \ + V4 = H4; \ + V5 = H5; \ + V6 = H6; \ + V7 = H7; \ + V8 = S0 ^ CS0; \ + V9 = S1 ^ CS1; \ + VA = S2 ^ CS2; \ + VB = S3 ^ CS3; \ + VC = T0 ^ CS4; \ + VD = T0 ^ CS5; \ + VE = T1 ^ CS6; \ + VF = T1 ^ CS7; \ + M0 = input[0]; \ + M1 = input[1]; \ + M2 = input[2]; \ + M3 = input[3]; \ + M4 = input[4]; \ + M5 = input[5]; \ + M6 = input[6]; \ + M7 = input[7]; \ + M8 = input[8]; \ + M9 = input[9]; \ + MA = input[10]; \ + MB = input[11]; \ + MC = input[12]; \ + MD = input[13]; \ + ME = input[14]; \ + MF = input[15]; \ + ROUND_S(0); \ + ROUND_S(1); \ + ROUND_S(2); \ + ROUND_S(3); \ + ROUND_S(4); \ + ROUND_S(5); \ + ROUND_S(6); \ + ROUND_S(7); \ + H0 ^= S0 ^ V0 ^ V8; \ + H1 ^= S1 ^ V1 ^ V9; \ + H2 ^= S2 ^ V2 ^ VA; \ + H3 ^= S3 ^ V3 ^ VB; \ + H4 ^= S0 ^ V4 ^ VC; \ + H5 ^= S1 ^ V5 ^ VD; \ + H6 ^= S2 ^ V6 ^ VE; \ + H7 ^= S3 ^ V7 ^ VF; \ + } while (0) + + +__global__ void titan_blake256_hash(uint64_t *g_out, uint32_t nonce, uint32_t *g_good, bool validate) +{ + uint32_t input[16]; + uint64_t output[4]; + +#pragma unroll 16 + for (int i=0; i < 16; ++i) input[i] = pdata[i]; + + sph_u32 H0 = 0x6A09E667; + sph_u32 H1 = 0xBB67AE85; + sph_u32 H2 = 0x3C6EF372; + sph_u32 H3 = 0xA54FF53A; + sph_u32 H4 = 0x510E527F; + sph_u32 H5 = 0x9B05688C; + sph_u32 H6 = 0x1F83D9AB; + sph_u32 H7 = 0x5BE0CD19; + sph_u32 S0 = 0; + sph_u32 S1 = 0; + sph_u32 
S2 = 0; + sph_u32 S3 = 0; + sph_u32 T0 = 0; + sph_u32 T1 = 0; + T0 = SPH_T32(T0 + 512); + COMPRESS32; + +#pragma unroll 3 + for (int i=0; i < 3; ++i) input[i] = pdata[16+i]; + input[3] = nonce + ((blockIdx.x * blockDim.x) + threadIdx.x); + input[4] = 0x80000000; +#pragma unroll 8 + for (int i=5; i < 13; ++i) input[i] = 0; + input[13] = 0x00000001; + input[14] = T1; + input[15] = T0 + 128; + + T0 = SPH_T32(T0 + 128); + COMPRESS32; + + cuda_sph_enc32be((unsigned char*)output + 4*6, H6); + cuda_sph_enc32be((unsigned char*)output + 4*7, H7); + if (validate || output[3] <= ptarget64[3]) + { + // this data is only needed when we actually need to save the hashes + cuda_sph_enc32be((unsigned char*)output + 4*0, H0); + cuda_sph_enc32be((unsigned char*)output + 4*1, H1); + cuda_sph_enc32be((unsigned char*)output + 4*2, H2); + cuda_sph_enc32be((unsigned char*)output + 4*3, H3); + cuda_sph_enc32be((unsigned char*)output + 4*4, H4); + cuda_sph_enc32be((unsigned char*)output + 4*5, H5); + } + + if (validate) + { + g_out += 4 * ((blockIdx.x * blockDim.x) + threadIdx.x); +#pragma unroll 4 + for (int i=0; i < 4; ++i) g_out[i] = output[i]; + } + + if (output[3] <= ptarget64[3]) { + uint64_t *g_good64 = (uint64_t*)g_good; + if (output[3] < g_good64[3]) { + g_good64[3] = output[3]; + g_good64[2] = output[2]; + g_good64[1] = output[1]; + g_good64[0] = output[0]; + g_good[8] = nonce + ((blockIdx.x * blockDim.x) + threadIdx.x); + } + } +} + +bool NV2Kernel::prepare_blake256(int thr_id, const uint32_t host_pdata[20], const uint32_t host_ptarget[8]) +{ + static bool init[MAX_DEVICES] = {false}; + if (!init[thr_id]) + { + // allocate pinned host memory for good hashes + uint32_t *tmp; + checkCudaErrors(cudaMalloc((void **) &tmp, 9*sizeof(uint32_t))); context_good[0][thr_id] = tmp; + checkCudaErrors(cudaMalloc((void **) &tmp, 9*sizeof(uint32_t))); context_good[1][thr_id] = tmp; + + init[thr_id] = true; + } + checkCudaErrors(cudaMemcpyToSymbol(pdata, host_pdata, 20*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); + checkCudaErrors(cudaMemcpyToSymbol(ptarget64, host_ptarget, 8*sizeof(uint32_t), 0, cudaMemcpyHostToDevice)); + + return context_good[0][thr_id] && context_good[1][thr_id]; +} + +void NV2Kernel::do_blake256(dim3 grid, dim3 threads, int thr_id, int stream, uint32_t *hash, uint32_t nonce, int throughput, bool do_d2h) +{ + checkCudaErrors(cudaMemsetAsync(context_good[stream][thr_id], 0xff, 9 * sizeof(uint32_t), context_streams[stream][thr_id])); + + titan_blake256_hash<<>>((uint64_t*)context_hash[stream][thr_id], nonce, context_good[stream][thr_id], do_d2h); + + // copy hashes from device memory to host (ALL hashes, lots of data...) + if (do_d2h && hash != NULL) { + size_t mem_size = throughput * sizeof(uint32_t) * 8; + checkCudaErrors(cudaMemcpyAsync(hash, context_hash[stream][thr_id], mem_size, + cudaMemcpyDeviceToHost, context_streams[stream][thr_id])); + } + else if (hash != NULL) { + // asynchronous copy of winning nonce (just 4 bytes...) 
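+    // Layout of the 9-word context_good buffer written by the kernel:
+    // words 0..7 hold the lowest hash seen so far (stored as four uint64 values),
+    // word 8 holds the nonce that produced it, hence the +8 offset below.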
+ checkCudaErrors(cudaMemcpyAsync(hash, context_good[stream][thr_id]+8, sizeof(uint32_t), + cudaMemcpyDeviceToHost, context_streams[stream][thr_id])); + } +} diff --git a/cuda_code/nvbug_1965743__unnecessary_static_on_get_occ_device_properties.cu b/cuda_code/nvbug_1965743__unnecessary_static_on_get_occ_device_properties.cu new file mode 100644 index 0000000000000000000000000000000000000000..c01c0ad4eda9084f4dcc516301d829a08d8ee7b6 --- /dev/null +++ b/cuda_code/nvbug_1965743__unnecessary_static_on_get_occ_device_properties.cu @@ -0,0 +1,5 @@ +// nvcc -Xcompiler -Wall -Xcompiler -Werror -ccbin=clang + +#include + +int main() {} diff --git a/cuda_code/nvgraph_vector_kernels.cu b/cuda_code/nvgraph_vector_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..a2d8234f9e6469b3abc0756d5c8f18bc6a09333b --- /dev/null +++ b/cuda_code/nvgraph_vector_kernels.cu @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include "include/nvgraph_error.hxx" +#include "include/nvgraph_vector_kernels.hxx" + +#include "include/debug_macros.h" + +namespace nvgraph { + +void check_size(size_t sz) +{ + if (sz > INT_MAX) FatalError("Vector larger than INT_MAX", NVGRAPH_ERR_BAD_PARAMETERS); +} +template +void nrm1_raw_vec(ValueType_* vec, size_t n, ValueType_* res, cudaStream_t stream) +{ + thrust::device_ptr dev_ptr(vec); + *res = thrust::reduce(dev_ptr, dev_ptr + n); + cudaCheckError(); +} + +template +void fill_raw_vec(ValueType_* vec, size_t n, ValueType_ value, cudaStream_t stream) +{ + thrust::device_ptr dev_ptr(vec); + thrust::fill(dev_ptr, dev_ptr + n, value); + cudaCheckError(); +} + +template +void dump_raw_vec(ValueType_* vec, size_t n, int offset, cudaStream_t stream) +{ +#ifdef DEBUG + thrust::device_ptr dev_ptr(vec); + COUT().precision(15); + COUT() << "sample size = " << n << ", offset = " << offset << std::endl; + thrust::copy( + dev_ptr + offset, dev_ptr + offset + n, std::ostream_iterator(COUT(), " ")); + cudaCheckError(); + COUT() << std::endl; +#endif +} + +template +__global__ void flag_zeroes_kernel(int num_vertices, ValueType_* vec, int* flags) +{ + int tidx = blockDim.x * blockIdx.x + threadIdx.x; + for (int r = tidx; r < num_vertices; r += blockDim.x * gridDim.x) { + if (vec[r] != 0.0) + flags[r] = 1; // NOTE 2 : alpha*0 + (1-alpha)*1 = (1-alpha) + else + flags[r] = 0; + } +} +template +__global__ void dmv0_kernel(const ValueType_* __restrict__ D, + const ValueType_* __restrict__ x, + ValueType_* __restrict__ y, + int n) +{ + // y=D*x + int tidx = blockIdx.x * blockDim.x + threadIdx.x; + for (int i = tidx; i < n; i += blockDim.x * gridDim.x) y[i] = D[i] * x[i]; +} +template +__global__ void dmv1_kernel(const ValueType_* __restrict__ D, + const ValueType_* __restrict__ x, + ValueType_* __restrict__ y, + int n) +{ + // y+=D*x + int tidx = blockIdx.x * blockDim.x + threadIdx.x; + for (int i = tidx; i < n; i += blockDim.x * gridDim.x) y[i] += D[i] * x[i]; +} +template 
+void copy_vec(ValueType_* vec1, size_t n, ValueType_* res, cudaStream_t stream) +{ + thrust::device_ptr dev_ptr(vec1); + thrust::device_ptr res_ptr(res); +#ifdef DEBUG + // COUT() << "copy "<< n << " elements" << std::endl; +#endif + thrust::copy_n(dev_ptr, n, res_ptr); + cudaCheckError(); + // dump_raw_vec (res, n, 0); +} + +template +void flag_zeros_raw_vec(size_t num_vertices, ValueType_* vec, int* flags, cudaStream_t stream) +{ + int items_per_thread = 4; + int num_threads = 128; + int max_grid_size = 4096; + check_size(num_vertices); + int n = static_cast(num_vertices); + int num_blocks = std::min(max_grid_size, (n / (items_per_thread * num_threads)) + 1); + flag_zeroes_kernel<<>>(num_vertices, vec, flags); + cudaCheckError(); +} + +template +void dmv(size_t num_vertices, + ValueType_ alpha, + ValueType_* D, + ValueType_* x, + ValueType_ beta, + ValueType_* y, + cudaStream_t stream) +{ + int items_per_thread = 4; + int num_threads = 128; + int max_grid_size = 4096; + check_size(num_vertices); + int n = static_cast(num_vertices); + int num_blocks = std::min(max_grid_size, (n / (items_per_thread * num_threads)) + 1); + if (alpha == 1.0 && beta == 0.0) + dmv0_kernel<<>>(D, x, y, n); + else if (alpha == 1.0 && beta == 1.0) + dmv1_kernel<<>>(D, x, y, n); + else + FatalError("Not implemented case of y = D*x", NVGRAPH_ERR_BAD_PARAMETERS); + + cudaCheckError(); +} + +template +void set_connectivity(size_t n, + IndexType_ root, + ValueType_ self_loop_val, + ValueType_ unreachable_val, + ValueType_* res, + cudaStream_t stream) +{ + fill_raw_vec(res, n, unreachable_val); + cudaMemcpy(&res[root], &self_loop_val, sizeof(self_loop_val), cudaMemcpyHostToDevice); + cudaCheckError(); +} + +template void nrm1_raw_vec(float* vec, size_t n, float* res, cudaStream_t stream); +template void nrm1_raw_vec(double* vec, size_t n, double* res, cudaStream_t stream); + +template void dmv( + size_t num_vertices, float alpha, float* D, float* x, float beta, float* y, cudaStream_t stream); +template void dmv(size_t num_vertices, + double alpha, + double* D, + double* x, + double beta, + double* y, + cudaStream_t stream); + +template void set_connectivity( + size_t n, int root, float self_loop_val, float unreachable_val, float* res, cudaStream_t stream); +template void set_connectivity(size_t n, + int root, + double self_loop_val, + double unreachable_val, + double* res, + cudaStream_t stream); + +template void flag_zeros_raw_vec(size_t num_vertices, + float* vec, + int* flags, + cudaStream_t stream); +template void flag_zeros_raw_vec(size_t num_vertices, + double* vec, + int* flags, + cudaStream_t stream); + +template void fill_raw_vec(float* vec, size_t n, float value, cudaStream_t stream); +template void fill_raw_vec(double* vec, size_t n, double value, cudaStream_t stream); +template void fill_raw_vec(int* vec, size_t n, int value, cudaStream_t stream); +template void fill_raw_vec(char* vec, size_t n, char value, cudaStream_t stream); + +template void copy_vec(float* vec1, size_t n, float* res, cudaStream_t stream); +template void copy_vec(double* vec1, size_t n, double* res, cudaStream_t stream); +template void copy_vec(int* vec1, size_t n, int* res, cudaStream_t stream); +template void copy_vec(char* vec1, size_t n, char* res, cudaStream_t stream); + +template void dump_raw_vec(float* vec, size_t n, int off, cudaStream_t stream); +template void dump_raw_vec(double* vec, size_t n, int off, cudaStream_t stream); +template void dump_raw_vec(int* vec, size_t n, int off, cudaStream_t stream); +template void 
dump_raw_vec(char* vec, size_t n, int off, cudaStream_t stream); +} // end namespace nvgraph diff --git a/cuda_code/ols_12.cu b/cuda_code/ols_12.cu new file mode 100644 index 0000000000000000000000000000000000000000..7dbc411eac04f23db4813058631c217931bb54e4 --- /dev/null +++ b/cuda_code/ols_12.cu @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2019-2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include + +namespace ML { +namespace GLM { + +using namespace MLCommon; + +template +struct OlsInputs { + T tol; + int n_row; + int n_col; + int n_row_2; + int algo; +}; + +template +class OlsTest : public ::testing::TestWithParam> { + protected: + void basicTest() { + params = ::testing::TestWithParam>::GetParam(); + int len = params.n_row * params.n_col; + int len2 = params.n_row_2 * params.n_col; + + raft::allocate(data, len); + raft::allocate(labels, params.n_row); + raft::allocate(coef, params.n_col); + raft::allocate(coef2, params.n_col); + raft::allocate(coef3, params.n_col); + raft::allocate(coef_ref, params.n_col); + raft::allocate(coef2_ref, params.n_col); + raft::allocate(coef3_ref, params.n_col); + raft::allocate(pred_data, len2); + raft::allocate(pred, params.n_row_2); + raft::allocate(pred_ref, params.n_row_2); + raft::allocate(pred2, params.n_row_2); + raft::allocate(pred2_ref, params.n_row_2); + raft::allocate(pred3, params.n_row_2); + raft::allocate(pred3_ref, params.n_row_2); + + std::vector data_h = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0}; + data_h.resize(len); + raft::update_device(data, data_h.data(), len, stream); + + std::vector labels_h = {6.0, 8.0, 9.0, 11.0}; + labels_h.resize(params.n_row); + raft::update_device(labels, labels_h.data(), params.n_row, stream); + + std::vector coef_ref_h = {2.090908, 2.5454557}; + coef_ref_h.resize(params.n_col); + raft::update_device(coef_ref, coef_ref_h.data(), params.n_col, stream); + + std::vector coef2_ref_h = {1.000001, 1.9999998}; + coef2_ref_h.resize(params.n_col); + raft::update_device(coef2_ref, coef2_ref_h.data(), params.n_col, stream); + + std::vector coef3_ref_h = {0.99999, 2.00000}; + coef3_ref_h.resize(params.n_col); + raft::update_device(coef3_ref, coef3_ref_h.data(), params.n_col, stream); + + std::vector pred_data_h = {3.0, 2.0, 5.0, 5.0}; + pred_data_h.resize(len2); + raft::update_device(pred_data, pred_data_h.data(), len2, stream); + + std::vector pred_ref_h = {19.0, 16.9090}; + pred_ref_h.resize(params.n_row_2); + raft::update_device(pred_ref, pred_ref_h.data(), params.n_row_2, stream); + + std::vector pred2_ref_h = {16.0, 15.0}; + pred2_ref_h.resize(params.n_row_2); + raft::update_device(pred2_ref, pred2_ref_h.data(), params.n_row_2, stream); + + std::vector pred3_ref_h = {16.0, 15.0}; + pred3_ref_h.resize(params.n_row_2); + raft::update_device(pred3_ref, pred3_ref_h.data(), params.n_row_2, stream); + + intercept = T(0); + + olsFit(handle, data, params.n_row, params.n_col, labels, coef, &intercept, + false, false, stream, 
params.algo); + + olsPredict(handle, pred_data, params.n_row_2, params.n_col, coef, intercept, + pred, stream); + + raft::update_device(data, data_h.data(), len, stream); + raft::update_device(labels, labels_h.data(), params.n_row, stream); + + intercept2 = T(0); + olsFit(handle, data, params.n_row, params.n_col, labels, coef2, &intercept2, + true, false, stream, params.algo); + + olsPredict(handle, pred_data, params.n_row_2, params.n_col, coef2, + intercept2, pred2, stream); + + raft::update_device(data, data_h.data(), len, stream); + raft::update_device(labels, labels_h.data(), params.n_row, stream); + + intercept3 = T(0); + olsFit(handle, data, params.n_row, params.n_col, labels, coef3, &intercept3, + true, true, stream, params.algo); + + olsPredict(handle, pred_data, params.n_row_2, params.n_col, coef3, + intercept3, pred3, stream); + } + + void basicTest2() { + params = ::testing::TestWithParam>::GetParam(); + int len = params.n_row * params.n_col; + + raft::allocate(data_sc, len); + raft::allocate(labels_sc, len); + raft::allocate(coef_sc, 1); + raft::allocate(coef_sc_ref, 1); + + std::vector data_h = {1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0}; + data_h.resize(len); + raft::update_device(data_sc, data_h.data(), len, stream); + + std::vector labels_h = {6.0, 8.0, 9.0, 11.0, -1.0, 2.0, -3.6, 3.3}; + labels_h.resize(len); + raft::update_device(labels_sc, labels_h.data(), len, stream); + + std::vector coef_sc_ref_h = {-0.29285714}; + coef_sc_ref_h.resize(1); + raft::update_device(coef_sc_ref, coef_sc_ref_h.data(), 1, stream); + + T intercept_sc = T(0); + + olsFit(handle, data_sc, len, 1, labels_sc, coef_sc, &intercept_sc, true, + false, stream, params.algo); + } + + void SetUp() override { + CUDA_CHECK(cudaStreamCreate(&stream)); + handle.set_stream(stream); + basicTest(); + basicTest2(); + } + + void TearDown() override { + CUDA_CHECK(cudaFree(data)); + CUDA_CHECK(cudaFree(labels)); + CUDA_CHECK(cudaFree(coef)); + CUDA_CHECK(cudaFree(coef_ref)); + CUDA_CHECK(cudaFree(coef2)); + CUDA_CHECK(cudaFree(coef2_ref)); + CUDA_CHECK(cudaFree(coef3)); + CUDA_CHECK(cudaFree(coef3_ref)); + CUDA_CHECK(cudaFree(pred_data)); + CUDA_CHECK(cudaFree(pred)); + CUDA_CHECK(cudaFree(pred_ref)); + CUDA_CHECK(cudaFree(pred2)); + CUDA_CHECK(cudaFree(pred2_ref)); + CUDA_CHECK(cudaFree(pred3)); + CUDA_CHECK(cudaFree(pred3_ref)); + + CUDA_CHECK(cudaFree(data_sc)); + CUDA_CHECK(cudaFree(labels_sc)); + CUDA_CHECK(cudaFree(coef_sc)); + CUDA_CHECK(cudaFree(coef_sc_ref)); + CUDA_CHECK(cudaStreamDestroy(stream)); + } + + protected: + OlsInputs params; + T *data, *labels, *coef, *coef_ref, *pred_data, *pred, *pred_ref; + T *coef2, *coef2_ref, *pred2, *pred2_ref; + T *coef3, *coef3_ref, *pred3, *pred3_ref; + T *data_sc, *labels_sc, *coef_sc, *coef_sc_ref; + T intercept, intercept2, intercept3; + raft::handle_t handle; + cudaStream_t stream; +}; + +const std::vector> inputsf2 = { + {0.001f, 4, 2, 2, 0}, {0.001f, 4, 2, 2, 1}, {0.001f, 4, 2, 2, 2}}; + +const std::vector> inputsd2 = { + {0.001, 4, 2, 2, 0}, {0.001, 4, 2, 2, 1}, {0.001, 4, 2, 2, 2}}; + +typedef OlsTest OlsTestF; +TEST_P(OlsTestF, Fit) { + ASSERT_TRUE(devArrMatch(coef_ref, coef, params.n_col, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(devArrMatch(coef2_ref, coef2, params.n_col, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(devArrMatch(coef3_ref, coef3, params.n_col, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(devArrMatch(pred_ref, pred, params.n_row_2, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(devArrMatch(pred2_ref, 
pred2, params.n_row_2, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(devArrMatch(pred3_ref, pred3, params.n_row_2, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(devArrMatch(coef_sc_ref, coef_sc, 1, + raft::CompareApproxAbs(params.tol))); +} + +typedef OlsTest OlsTestD; +TEST_P(OlsTestD, Fit) { + ASSERT_TRUE(raft::devArrMatch(coef_ref, coef, params.n_col, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(raft::devArrMatch(coef2_ref, coef2, params.n_col, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(raft::devArrMatch(coef3_ref, coef3, params.n_col, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(raft::devArrMatch(pred_ref, pred, params.n_row_2, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(devArrMatch(pred2_ref, pred2, params.n_row_2, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(raft::devArrMatch(pred3_ref, pred3, params.n_row_2, + raft::CompareApproxAbs(params.tol))); + + ASSERT_TRUE(devArrMatch(coef_sc_ref, coef_sc, 1, + raft::CompareApproxAbs(params.tol))); +} + +INSTANTIATE_TEST_CASE_P(OlsTests, OlsTestF, ::testing::ValuesIn(inputsf2)); + +INSTANTIATE_TEST_CASE_P(OlsTests, OlsTestD, ::testing::ValuesIn(inputsd2)); + +} // namespace GLM +} // end namespace ML diff --git a/cuda_code/one_hot_10.cu b/cuda_code/one_hot_10.cu new file mode 100644 index 0000000000000000000000000000000000000000..2ebfb92a9a84993475fe0d2e719ded3b0feacde5 --- /dev/null +++ b/cuda_code/one_hot_10.cu @@ -0,0 +1,95 @@ +//////////////////////////////////////////////////////////////////////////////// +// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC. +// Produced at the Lawrence Livermore National Laboratory. +// Written by the LBANN Research Team (B. Van Essen, et al.) listed in +// the CONTRIBUTORS file. +// +// LLNL-CODE-697807. +// All rights reserved. +// +// This file is part of LBANN: Livermore Big Artificial Neural Network +// Toolkit. For details, see http://software.llnl.gov/LBANN or +// https://github.com/LLNL/LBANN. +// +// Licensed under the Apache License, Version 2.0 (the "Licensee"); you +// may not use this file except in compliance with the License. You may +// obtain a copy of the License at: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the license. +//////////////////////////////////////////////////////////////////////////////// + +#define LBANN_ONE_HOT_LAYER_INSTANTIATE +#include "lbann/layers/misc/one_hot.hpp" + +namespace lbann { + +namespace { + +/** + * On input, output is assumed to be filled with zeros. 
+ * + * Block dimensions: bsize x 1 x 1 + * + * Grid dimensions: (width / bsize) x 1 x 1 + */ +template +__global__ void fp_kernel(unsigned long long height, + unsigned long long width, + const TensorDataType* __restrict__ indices, + unsigned long long indices_stride, + TensorDataType* __restrict__ output, + unsigned long long output_ldim) { + const unsigned long long gid = threadIdx.x + blockIdx.x * blockDim.x; + const unsigned long long nthreads = blockDim.x * gridDim.x; + for (unsigned long long col = gid; col < width; col += nthreads) { + const auto& ind = indices[col*indices_stride]; + if (TensorDataType(0.f) <= ind && ind < TensorDataType(height)) { + const unsigned long long row = static_cast(ind); + output[row+col*output_ldim] = TensorDataType(1.f); + } + } +} + +} // namespace + +template +void one_hot_layer::fp_compute() { + + using GPUMatType = El::Matrix; + + // Local matrices + const auto& local_input = + dynamic_cast(this->get_local_prev_activations()); + auto& local_output = dynamic_cast(this->get_local_activations()); + + // Populate one-hot vectors + El::Zero(local_output); + if (!local_output.IsEmpty()) { + const size_t local_height = local_output.Height(); + const size_t local_width = local_output.Width(); + constexpr size_t block_size = 64; + const size_t grid_size = (local_width + block_size - 1) / block_size; + fp_kernel<<>>( + local_height, + local_width, + local_input.LockedBuffer(), + local_input.LDim(), + local_output.Buffer(), + local_output.LDim()); + } + +} + +#define PROTO(T) \ + template class one_hot_layer + +#define LBANN_INSTANTIATE_GPU_HALF +#include "lbann/macros/instantiate.hpp" + +} // namespace lbann diff --git a/cuda_code/one_hot_op_6.cu b/cuda_code/one_hot_op_6.cu new file mode 100644 index 0000000000000000000000000000000000000000..2b021748048c76823ae5f331a22f397c863e7cc1 --- /dev/null +++ b/cuda_code/one_hot_op_6.cu @@ -0,0 +1,99 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
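+
+// FillOutputKernel sets position (idx * depth + indices[idx]) of the
+// zero-initialized output to 1.0; e.g. indices {1, 3} with depth = 4 yield the
+// rows {0,1,0,0} and {0,0,0,1}, while out-of-range indices leave their row all
+// zeros. The depth is read from the optional "depth_tensor" input (copied to
+// the host when it resides on the GPU) or from the "depth" attribute.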
+ +#include "paddle/fluid/operators/one_hot_op.h" +#include "paddle/fluid/platform/device/gpu/gpu_info.h" +#include "paddle/fluid/platform/device/gpu/gpu_primitives.h" + +namespace paddle { +namespace operators { +using platform::PADDLE_CUDA_NUM_THREADS; + +template +__global__ void FillOutputKernel(const InT* p_in_data, OutT* p_out_data, + const int64_t numel, const int depth) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx < numel && p_in_data[idx] >= 0 && p_in_data[idx] < depth) { + *(p_out_data + (idx * depth) + p_in_data[idx]) = 1.0; + } +} + +template +struct OneHotOpCUDAFunctor { + const framework::LoDTensor* in_; + framework::LoDTensor* out_; + const DeviceContext& ctx_; + int depth_; + + OneHotOpCUDAFunctor(const framework::LoDTensor* in, framework::LoDTensor* out, + int depth, const DeviceContext& ctx) + : in_(in), out_(out), depth_(depth), ctx_(ctx) {} + + template + void apply() const { + auto* p_in_data = in_->data(); + auto numel = in_->numel(); + auto* p_out_data = out_->mutable_data(ctx_.GetPlace()); + auto stream = ctx_.stream(); + math::set_constant(ctx_, out_, 0.0); + + FillOutputKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) / + PADDLE_CUDA_NUM_THREADS, + PADDLE_CUDA_NUM_THREADS, 0, stream>>>( + p_in_data, p_out_data, numel, depth_); + } +}; + +using LoDTensor = framework::LoDTensor; +template +class OneHotCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* in = context.Input("X"); + auto* out = context.Output("Out"); + + int depth = -1; + if (context.HasInput("depth_tensor")) { + auto* depth_tensor = context.Input("depth_tensor"); + if (platform::is_gpu_place(depth_tensor->place())) { + framework::Tensor temp; + paddle::framework::TensorCopySync(*depth_tensor, platform::CPUPlace(), + &temp); + depth = *temp.data(); + } else { + depth = *depth_tensor->data(); + } + + auto in_dims = in->dims(); + framework::DDim out_dims(in_dims); + out_dims[out_dims.size() - 1] = depth; + out->Resize(out_dims); + } else { + depth = context.Attr("depth"); + } + framework::VisitDataType( + static_cast( + context.Attr("dtype")), + OneHotOpCUDAFunctor( + in, out, depth, context.template device_context())); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + one_hot, ops::OneHotCUDAKernel, + ops::OneHotCUDAKernel); diff --git a/cuda_code/one_if_max_sub.cu b/cuda_code/one_if_max_sub.cu new file mode 100644 index 0000000000000000000000000000000000000000..5a4185014f73da9aa0fff2ed2143c9977555bb10 --- /dev/null +++ b/cuda_code/one_if_max_sub.cu @@ -0,0 +1,67 @@ +//======================================================================= +// Copyright (c) 2017 Baptiste Wicht +// Distributed under the terms of the MIT License. +// (See accompanying file LICENSE or copy at +// http://opensource.org/licenses/MIT) +//======================================================================= + +#include "egblas/assert.hpp" +#include "egblas/utils.hpp" +#include "egblas/sum.hpp" +#include "egblas/cuda_check.hpp" + +template +__global__ void one_if_max_sub_kernel(size_t B, size_t N, T alpha, const T* x, size_t incx, T* y, size_t incy) { + auto b = threadIdx.x + blockIdx.x * blockDim.x; + + if (b < B) { + T max = x[(b * N + 0) * incx]; + + for (size_t n = 1; n < N; ++n) { + max = x[(b * N + n) * incx] > max ? x[(b * N + n) * incx] : max; + } + + for (size_t n = 0; n < N; ++n) { + y[(b * N + n) * incx] = x[(b * N + n) * incx] == max ? 
alpha : T(0);
+        }
+    }
+}
+
+template <typename T>
+__global__ void one_if_max_sub_kernel_flat(size_t B, size_t N, T alpha, const T* x, T* y) {
+    auto b = threadIdx.x + blockIdx.x * blockDim.x;
+
+    if (b < B) {
+        T max = x[b * N + 0];
+
+        for (size_t n = 1; n < N; ++n) {
+            max = x[b * N + n] > max ? x[b * N + n] : max;
+        }
+
+        for (size_t n = 0; n < N; ++n) {
+            y[b * N + n] = x[b * N + n] == max ? alpha : T(0);
+        }
+    }
+}
+
+void egblas_sone_if_max_sub(size_t b, size_t n, float alpha, const float* x, size_t incx, float* y, size_t incy) {
+    const int blockSize = 64;
+    const int gridSize = (b + blockSize - 1) / blockSize;
+
+    if (incx == 1 && incy == 1) {
+        one_if_max_sub_kernel_flat<<<gridSize, blockSize>>>(b, n, alpha, x, y);
+    } else {
+        one_if_max_sub_kernel<<<gridSize, blockSize>>>(b, n, alpha, x, incx, y, incy);
+    }
+}
+
+void egblas_done_if_max_sub(size_t b, size_t n, double alpha, const double* x, size_t incx, double* y, size_t incy) {
+    const int blockSize = 64;
+    const int gridSize = (b + blockSize - 1) / blockSize;
+
+    if (incx == 1 && incy == 1) {
+        one_if_max_sub_kernel_flat<<<gridSize, blockSize>>>(b, n, alpha, x, y);
+    } else {
+        one_if_max_sub_kernel<<<gridSize, blockSize>>>(b, n, alpha, x, incx, y, incy);
+    }
+}
diff --git a/cuda_code/online_softmax_beamsearch_kernels_2.cu b/cuda_code/online_softmax_beamsearch_kernels_2.cu
new file mode 100644
index 0000000000000000000000000000000000000000..313d3acf31b452a0e40ec2a1e51ebb5a4099b3e9
--- /dev/null
+++ b/cuda_code/online_softmax_beamsearch_kernels_2.cu
@@ -0,0 +1,747 @@
+/*
+* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/ + +#include "fastertransformer/cuda/topk_kernels.cuh" +#include "cub/cub.cuh" + +namespace fastertransformer +{ + +#define TOPK_FP16_STORAGE 0 + +template +__launch_bounds__(THREADBLOCK_SIZE) +__global__ +void beam_topK_kernel(const T* log_probs, + int* topk_tmp_id_buf, + T* topk_tmp_val_buf, + const int vocab_size, + T diversity_rate) +{ + typedef cub::BlockReduce, THREADBLOCK_SIZE> BlockReduce; + __shared__ typename BlockReduce::TempStorage temp_storage; + + int thread_id = threadIdx.x; + int block_id = blockIdx.x; + TopK partial; + + for(int i = 0; i < MAX_K; ++i) + { + partial.p[i] = -1; + partial.u[i] = -FLT_MAX; + } + + for(int elem_id = thread_id; elem_id < vocab_size; elem_id += THREADBLOCK_SIZE) + { + int index = elem_id + block_id * vocab_size; + partial.insert( (T)log_probs[index], index); + } + + TopK total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op); + + if (thread_id == 0) + { + int index = block_id * MAX_K; + + for(int i = 0; i < MAX_K; ++i) + { + topk_tmp_id_buf[index + i] = total.p[i]; + topk_tmp_val_buf[index + i] = total.u[i] + diversity_rate * (T)i; + } + } +} + +template +__launch_bounds__(THREADBLOCK_SIZE) +__global__ +void batch_topK_kernel(int* topk_tmp_id_buf, + T* topk_tmp_val_buf, + int* id_buf) +{ + int thread_id = threadIdx.x; + int block_id = blockIdx.x; + TopK partial; + if (thread_id == 0) + { + for(int i = 0; i < MAX_K; ++i) + { + partial.p[i] = -1; + partial.u[i] = -FLT_MAX; + } + + int index = block_id * MAX_K * MAX_K; + for(int i = 0; i < MAX_K * MAX_K; i++) + { + partial.insert( (T)topk_tmp_val_buf[index + i], topk_tmp_id_buf[index + i]); + } + + index = block_id * MAX_K; + for(int i = 0; i < MAX_K; i++) + { + id_buf[index + i] = partial.p[i]; + } + } +} + +template +__launch_bounds__(THREADBLOCK_SIZE) +__global__ +void batch_topK_kernel(const int* __restrict topk_tmp_id_buf, + const T* __restrict topk_tmp_val_buf, + int* __restrict id_buf, + T* __restrict val_buf) +{ + int thread_id = threadIdx.x; + int block_id = blockIdx.x; + TopK partial; + if (thread_id == 0) + { + for(int i = 0; i < MAX_K; ++i) + { + partial.p[i] = -1; + partial.u[i] = -FLT_MAX; + } + + int index = block_id * MAX_K * MAX_K; + for(int i = 0; i < MAX_K * MAX_K; i++) + { + partial.insert( (T)topk_tmp_val_buf[index + i], topk_tmp_id_buf[index + i]); + } + + index = block_id * MAX_K; + for(int i = 0; i < MAX_K; i++) + { + id_buf[index + i] = partial.p[i]; + val_buf[index + i] = partial.u[i]; + } + } +} + +template +__launch_bounds__(THREADBLOCK_SIZE) +__global__ void batch_topk_kernel( + const int * __restrict x, + const T * __restrict y, + int * __restrict z, + float * __restrict v, + int V, + int K, + T diversity_rate) +{ + int thread_id = threadIdx.x; + int vector_id = blockIdx.x; + + // reposition x, y to data for the current vector + x += vector_id * V; + y += vector_id * V; + + typedef cub::BlockReduce, THREADBLOCK_SIZE> BlockReduce; + + __shared__ typename BlockReduce::TempStorage temp_storage; + + TopK partial; + for(int i = 0; i < MAX_K; ++i) + { + partial.p[i] = -1; + partial.u[i] = -FLT_MAX; + } + for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE) + { + int i = elem_id % K; + T elem = y[elem_id] + diversity_rate * (T) i; + int elem_idx = elem_id; //x[elem_id]; + partial.insert(elem, elem_idx); + } + + TopK total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_op); + + if (thread_id == 0) + { + z += vector_id * K; + v += vector_id * K; + + for(int i = 0; i < MAX_K; ++i) + { + if (i < K) + { + z[i] = x[total.p[i]]; + v[i] = 
(float)y[total.p[i]]; + } + } + } +} + +struct __align__(8) MD +{ + float m; + float d; +}; + +__device__ __forceinline__ MD reduce_md_op(MD a, MD b) +{ + bool a_bigger = (a.m > b.m); + MD bigger_m = a_bigger ? a : b; + MD smaller_m = a_bigger ? b : a; + MD res; + res.d = bigger_m.d + smaller_m.d * __expf(smaller_m.m - bigger_m.m); + res.m = bigger_m.m; + return res; +} + +template +struct TopKMD +{ + MD md; + TopK topk; +}; + +template +__device__ __forceinline__ TopKMD reduce_topk_md_op(const TopKMD& a, const TopKMD& b) +{ + TopKMD res; + res.md = reduce_md_op(a.md, b.md); + res.topk = reduce_topk_op(a.topk, b.topk); + return res; +} + +template +__launch_bounds__(THREADBLOCK_SIZE) +__global__ void beam_online_softmax_topk_kernel( + const T * __restrict x, + const T * __restrict b, + const float * __restrict c, + const bool * __restrict finished, + int * __restrict z, + T * __restrict v, + int V, + int K, + int E) +{ + int thread_id = threadIdx.x; + int vector_id = blockIdx.x; + + const bool IS_FP16 = std::is_same::value; + const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; + + // reposition y to data for the current vector + x += vector_id * V; + + typedef cub::BlockReduce, THREADBLOCK_SIZE> BlockReduce; + __shared__ typename BlockReduce::TempStorage temp_storage; + + TopKMD partial; + bool finish = finished[vector_id]; + for(int i = 0; i < MAX_K; ++i) + { + partial.topk.p[i] = -1; + partial.topk.u[i] = -MAX_T_VAL; + } + partial.md.m = -MAX_T_VAL; + partial.md.d = 0.0F; + + if (finish) + { + for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE) + { + float elem = (elem_id == E) ? MAX_T_VAL : -MAX_T_VAL; + MD new_elem{elem, 1.0F}; + partial.md = reduce_md_op(partial.md, new_elem); + partial.topk.insert(elem, elem_id); + //if (elem_id > THREADBLOCK_SIZE * MAX_K && (elem_id == E)) break; + } + } + else + { + for(int elem_id = thread_id; elem_id < V; elem_id += THREADBLOCK_SIZE) + { + float elem = x[elem_id] + b[elem_id]; + MD new_elem{elem, 1.0F}; + partial.md = reduce_md_op(partial.md, new_elem); + partial.topk.insert(elem, elem_id); + } + } + + TopKMD total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_md_op); + + if (thread_id == 0) + { + z += vector_id * K; + v += vector_id * K; + c += vector_id; + + //float d_total_inverse = __fdividef(1.0F, total.md.d); + float d_total_log = logf(total.md.d); + for(int i = 0; i < MAX_K; ++i) + { + //float val = __expf(total.topk.u[i] - total.md.m) * d_total_inverse; + float val = total.topk.u[i] - total.md.m - d_total_log; + if (i < K) + { + z[i] = total.topk.p[i] + vector_id * V; // faster transformer needs absolute id + v[i] = val + c[0]; + } + } + } +} + +template +__launch_bounds__(THREADBLOCK_SIZE, 1) +__global__ void beam_online_softmax_topk_stage1_kernel( + const T * __restrict x, + const T * __restrict b, + const bool * __restrict finished, + float * __restrict t, + int V, + int K, + int E) +{ + int thread_id = threadIdx.x; + int vector_id = blockIdx.x; + + const int PACKED_TOP_KMD_SIZE = 2 * MAX_K + 2; + + const bool IS_FP16 = std::is_same::value; + const T MAX_T_VAL = (IS_FP16)? HALF_FLT_MAX : FLT_MAX; + + // one will have multiple sections per V + const int v_local = (V + gridDim.y - 1) / gridDim.y; + const int section_start = v_local * blockIdx.y; + int section_end = section_start + v_local; + section_end = (section_end > V)? 
V : section_end; + + // reposition x to data for the current vector + x += vector_id * V; +#if TOPK_FP16_STORAGE == 1 + typedef cub::BlockReduce, THREADBLOCK_SIZE> BlockReduce; +#else + typedef cub::BlockReduce, THREADBLOCK_SIZE> BlockReduce; +#endif + __shared__ typename BlockReduce::TempStorage temp_storage; + __shared__ float buf_s[PACKED_TOP_KMD_SIZE]; // save intermediate result + +#if TOPK_FP16_STORAGE == 1 + TopKMD<__half, MAX_K> partial; +#else + TopKMD partial; +#endif + bool finish = finished[vector_id]; + for(int i = 0; i < MAX_K; ++i) + { + partial.topk.p[i] = -1; + partial.topk.u[i] = -MAX_T_VAL; + } + partial.md.m = -MAX_T_VAL; + partial.md.d = 0.0F; + + if (finish) + { + #pragma unroll 1 + for(int elem_id = section_start + thread_id; elem_id < section_end; elem_id += THREADBLOCK_SIZE) + { + float elem = (elem_id == E) ? MAX_T_VAL : -MAX_T_VAL; + MD new_elem{elem, 1.0F}; + partial.md = reduce_md_op(partial.md, new_elem); + partial.topk.insert(elem, elem_id); + } + } + else + { + #pragma unroll 1 + for(int elem_id = section_start + thread_id; elem_id < section_end; elem_id += THREADBLOCK_SIZE) + { + T bias = b == nullptr ? (T)0.0f : b[elem_id]; // gpt-2 does not use bias + T elem = x[elem_id] + bias; + MD new_elem{elem, 1.0F}; + partial.md = reduce_md_op(partial.md, new_elem); + partial.topk.insert(elem, elem_id); + } + } + +#if TOPK_FP16_STORAGE == 1 + TopKMD<__half, MAX_K> total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_md_op<__half, MAX_K>); +#else + TopKMD total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_md_op); +#endif + + if (thread_id == 0) + { + for (int i = 0; i < K; i++) + { + reinterpret_cast(buf_s)[i] = total.topk.p[i] + vector_id * V; // faster transformer needs absolute id + buf_s[MAX_K + i] = total.topk.u[i]; + } + buf_s[2 * MAX_K] = total.md.d; + buf_s[2 * MAX_K + 1] = total.md.m; + } + __syncthreads(); + if (threadIdx.x < PACKED_TOP_KMD_SIZE) + { + t[blockIdx.x * PACKED_TOP_KMD_SIZE * gridDim.y + blockIdx.y * PACKED_TOP_KMD_SIZE + threadIdx.x] = buf_s[threadIdx.x]; + } +} + +template +__launch_bounds__(THREADBLOCK_SIZE) +__global__ void beam_online_softmax_topk_stage2_kernel( + const float * __restrict x, + const float * __restrict c, + int * __restrict z, + T * __restrict v, + int K, + int parts_per_beam) +{ + const int vector_id = blockIdx.x; + const int thread_id = threadIdx.x; + const int PACKED_TOP_KMD_SIZE = 2 * MAX_K + 2; + + const bool IS_FP16 = std::is_same::value; + const T MAX_T_VAL = (IS_FP16)? 
HALF_FLT_MAX : FLT_MAX; + + extern __shared__ char buf_s_[]; // intermediate result + float * buf_s = reinterpret_cast(buf_s_); + //__shared__ float buf_s[PACKED_TOP_KMD_SIZE * THREADBLOCK_SIZE]; // intermediate result + + typedef cub::BlockReduce, THREADBLOCK_SIZE> BlockReduce; + __shared__ typename BlockReduce::TempStorage temp_storage; + + x += vector_id * PACKED_TOP_KMD_SIZE * parts_per_beam; + + TopKMD partial; + for(int i = 0; i < MAX_K; ++i) + { + partial.topk.p[i] = -1; + partial.topk.u[i] = -MAX_T_VAL; + } + partial.md.m = -MAX_T_VAL; + partial.md.d = 0.0F; + + // load and unpack into registers through smem + for (int idx = thread_id; idx < PACKED_TOP_KMD_SIZE * parts_per_beam; idx += THREADBLOCK_SIZE) + { + buf_s[idx] = x[idx]; + } + __syncthreads(); + + if (threadIdx.x < parts_per_beam) + { + float * b_s = buf_s + thread_id * PACKED_TOP_KMD_SIZE; + for (int i = 0; i < K; i++) + { + partial.topk.p[i] = reinterpret_cast(b_s)[i]; + partial.topk.u[i] = b_s[MAX_K + i]; + } + partial.md.d = b_s[2 * MAX_K]; + partial.md.m = b_s[2 * MAX_K + 1]; + } + __syncthreads(); + + TopKMD total = BlockReduce(temp_storage).Reduce(partial, reduce_topk_md_op); + + if (thread_id == 0) + { + z += vector_id * K; + v += vector_id * K; + c += vector_id; + + float d_total_log = logf(total.md.d); + for(int i = 0; i < MAX_K; ++i) + { + float val = (float)total.topk.u[i] - total.md.m - d_total_log; + if (i < K) + { + z[i] = total.topk.p[i]; + v[i] = (float)val + (float)c[0]; + } + } + } +} + +template +void topK_kernelLauncher(T* log_probs, + int* topk_tmp_id_buf, + T* topk_tmp_val_buf, + int* ids, + DecodingBeamsearchArguments args, + cudaStream_t stream) +{ + const int batch_size = args.batch_size_; + const int beam_width = args.beam_width_; + const int vocab_size = args.vocab_size_padded_; + const int diversity_rate = args.beam_search_diversity_rate_; + const int block_size = SMALL_TOP_K_SOFTMAX_THREADBLOCK_SIZE; + + switch(beam_width) + { + case 1 : + beam_topK_kernel<<>>(log_probs, + topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); + batch_topK_kernel<<>>(topk_tmp_id_buf, topk_tmp_val_buf, ids); + break; + case 2 : + beam_topK_kernel<<>>(log_probs, + topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); + batch_topK_kernel<<>>(topk_tmp_id_buf, topk_tmp_val_buf, ids); + break; + case 3 : + beam_topK_kernel<<>>(log_probs, + topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); + batch_topK_kernel<<>>(topk_tmp_id_buf, topk_tmp_val_buf, ids); + break; + case 4 : + beam_topK_kernel<<>>(log_probs, + topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); + batch_topK_kernel<<>>(topk_tmp_id_buf, topk_tmp_val_buf, ids); + break; + case 6 : + beam_topK_kernel<<>>(log_probs, + topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); + batch_topK_kernel<<>>(topk_tmp_id_buf, topk_tmp_val_buf, ids); + break; + case 8 : + beam_topK_kernel<<>>(log_probs, + topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); + batch_topK_kernel<<>>(topk_tmp_id_buf, topk_tmp_val_buf, ids); + break; + case 32 : + beam_topK_kernel<<>>(log_probs, + topk_tmp_id_buf, topk_tmp_val_buf, vocab_size, diversity_rate); + batch_topK_kernel<<>>(topk_tmp_id_buf, topk_tmp_val_buf, ids); + break; + default: + printf("[ERROR] Topk kernel does not support beamwidth = %d \n", beam_width); + exit(0); + break; + } +} + +template +void beam_online_softmax_topk_stage2_kernelLauncher( + const float * temp_storage, + const float * cum_log_probs, + int * ids, + T * vals, + int batch_size, + int 
beam_width, + int parts_per_beam, + cudaStream_t stream) +{ + // might rewrite beam_online_softmax_topk_stage2_kernel no to depend on constant block size + // in oreder to reduce compilation time + int smem_stage2_size = parts_per_beam * (2 * MAX_K + 2) * sizeof(float); + + if (parts_per_beam <= 32) + { + beam_online_softmax_topk_stage2_kernel + <<>> + (temp_storage, cum_log_probs, ids, vals, + beam_width, parts_per_beam); + return; + } + if (parts_per_beam <= 64) + { + beam_online_softmax_topk_stage2_kernel + <<>> + (temp_storage, cum_log_probs, ids, vals, + beam_width, parts_per_beam); + return; + } + if (parts_per_beam <= 128) + { + beam_online_softmax_topk_stage2_kernel + <<>> + (temp_storage, cum_log_probs, ids, vals, + beam_width, parts_per_beam); + return; + } + assert(0); +} + +template +void topK_softMax_kernelLauncher(const T* log_probs, + const T* bias, + const bool* finished, + float* cum_log_probs, + int* ids, + void* temp_storage, + const int temp_storage_size, + const int batch_size, + const int beam_width, + const int vocab_size, + const int end_id, + T diversity_rate, + cudaStream_t stream) +{ + const int items_per_thread = 1; + const int block_sz = (MAX_K < 16)? (MAX_K < 8)? SMALL_TOP_K_SOFTMAX_THREADBLOCK_SIZE:128:64; + //const int block_sz = SMALL_TOP_K_SOFTMAX_THREADBLOCK_SIZE; + + assert(temp_storage_size % 2 == 0); + assert(temp_storage_size >= 2 * batch_size * beam_width * beam_width); + + const int topk_buf_offset = ceil(batch_size * beam_width * beam_width / 4.) * 4; + int* topk_tmp_id_buf = reinterpret_cast(temp_storage); + T* topk_tmp_val_buf = reinterpret_cast(topk_tmp_id_buf + topk_buf_offset); + float* tmp_buffer = reinterpret_cast(topk_tmp_val_buf + topk_buf_offset); + +#ifdef DO_SPLIT_SMALL_TOP_K_SOFTMAX + int voc_parts = 4; + if (batch_size * beam_width < 256) + { + // Volta has 80 SMs, so we aim for three waves + voc_parts = (240 + batch_size * beam_width - 1) / (batch_size * beam_width); + voc_parts = std::min(128, voc_parts); // we implment up to 128 + } + dim3 grid(batch_size * beam_width, voc_parts); + cudaFuncSetAttribute( + beam_online_softmax_topk_stage1_kernel, + cudaFuncAttributePreferredSharedMemoryCarveout, + cudaSharedmemCarveoutMaxL1); + beam_online_softmax_topk_stage1_kernel + <<>> + (log_probs, bias, finished, tmp_buffer, + vocab_size, beam_width, end_id); +#endif + if (beam_width > 1) + { +#ifdef DO_SPLIT_SMALL_TOP_K_SOFTMAX + beam_online_softmax_topk_stage2_kernelLauncher + (tmp_buffer, cum_log_probs, topk_tmp_id_buf, topk_tmp_val_buf, + batch_size, beam_width, voc_parts, stream); +#else + beam_online_softmax_topk_kernel + <<>> + (log_probs, bias, cum_log_probs, finished, topk_tmp_id_buf, + topk_tmp_val_buf, vocab_size, beam_width, end_id); +#endif +#if 0 + // wrong result with diversity_rate != 0.f + batch_topK_kernel<<>> + (topk_tmp_id_buf, topk_tmp_val_buf, ids, cum_log_probs); +#else + batch_topk_kernel<<>> + (topk_tmp_id_buf, topk_tmp_val_buf, + ids, cum_log_probs, beam_width * beam_width, beam_width, diversity_rate); +#endif + } + else + { +#ifdef DO_SPLIT_SMALL_TOP_K_SOFTMAX + beam_online_softmax_topk_stage2_kernelLauncher + (tmp_buffer, cum_log_probs, ids, cum_log_probs, + batch_size, beam_width, voc_parts, stream); +#else + beam_online_softmax_topk_kernel + <<>> + (log_probs, bias, cum_log_probs, finished, ids, + cum_log_probs, vocab_size, beam_width, end_id); +#endif + } +} + +template +void topK_softMax(const T* log_probs, + const T* bias, + const bool* finished, + float* cum_log_probs, + int* ids, + void* temp_storage, + 
DecodingBeamsearchArguments args, + cudaStream_t stream) +{ + const int temp_storage_size = args.temp_storage_size_; + const int batch_size = args.batch_size_; + const int beam_width = args.beam_width_; + const int vocab_size = args.vocab_size_padded_; + const int end_id = args.end_id_; + const T diversity_rate = args.beam_search_diversity_rate_; + + switch(beam_width) + { + case 1 : + topK_softMax_kernelLauncher + (log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, + batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); + break; + case 2 : + topK_softMax_kernelLauncher + (log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, + batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); + break; + case 3 : + topK_softMax_kernelLauncher + (log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, + batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); + break; + case 4 : + topK_softMax_kernelLauncher + (log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, + batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); + break; + case 8 : + topK_softMax_kernelLauncher + (log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, + batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); + break; + case 16 : + topK_softMax_kernelLauncher + (log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, + batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); + break; + case 32 : + topK_softMax_kernelLauncher + (log_probs, bias, finished, cum_log_probs, ids, temp_storage, temp_storage_size, + batch_size, beam_width, vocab_size, end_id, diversity_rate, stream); + break; + default : + printf("[ERROR] Topk kernel does not support beamwidth = %d \n", beam_width); + exit(0); + break; + } +} + +template void topK_kernelLauncher(float* log_probs, + int* topk_tmp_id_buf, + float* topk_tmp_val_buf, + int* ids, + DecodingBeamsearchArguments args, + cudaStream_t stream); + +template void topK_kernelLauncher(half* log_probs, + int* topk_tmp_id_buf, + half* topk_tmp_val_buf, + int* ids, + DecodingBeamsearchArguments args, + cudaStream_t stream); + +template void topK_softMax(const float* log_probs, + const float* bias, + const bool* finished, + float* cum_log_probs, + int* ids, + void * tmp_storage, + DecodingBeamsearchArguments args, + cudaStream_t stream); + +template void topK_softMax(const half* log_probs, + const half* bias, + const bool* finished, + float* cum_log_probs, + int* ids, + void * tmp_storage, + DecodingBeamsearchArguments args, + cudaStream_t stream); + +} // end of namespace fastertransformer \ No newline at end of file diff --git a/cuda_code/oo-vfunc-complete_3.cu b/cuda_code/oo-vfunc-complete_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..d638683102cc89bdad3e7f91a5c7d567057b38f5 --- /dev/null +++ b/cuda_code/oo-vfunc-complete_3.cu @@ -0,0 +1,374 @@ +/** + * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. + * + * Please refer to the NVIDIA end user license agreement (EULA) associated + * with this source code for terms and conditions that govern your use of + * this software. Any use, reproduction, disclosure, or distribution of + * this software and related documentation outside the terms of the EULA + * is strictly prohibited. + * + */ + +/** + * Vector addition: C = A + B. 
+ * + * This sample is a very basic sample that implements element by element + * vector addition. It is the same as the sample illustrating Chapter 2 + * of the programming guide with some additions like error checking. + */ + + #include + #include "../mem_alloc/mem_alloc_2.h" + // For the CUDA runtime routines (prefixed with "cuda_") + #include + + class BaseClass { + public: + virtual __device__ void doTheMath(float &c, float a, int numCompute) = 0; + }; + + #define Derived(A) \ + class Class##A : public BaseClass { \ + public: \ + virtual __device__ void doTheMath(float &c, float a, int numCompute) { \ + for (int l = 0; l < numCompute; l++) \ + for (int l = 0; l < numCompute; l++) \ + c = c + a; \ + } \ + } + + Derived(0); + Derived(1); + Derived(2); + Derived(3); + Derived(4); + Derived(5); + Derived(6); + Derived(7); + Derived(8); + Derived(9); + Derived(10); + Derived(11); + Derived(12); + Derived(13); + Derived(14); + Derived(15); + Derived(16); + Derived(17); + Derived(18); + Derived(19); + Derived(20); + Derived(21); + Derived(22); + Derived(23); + Derived(24); + Derived(25); + Derived(26); + Derived(27); + Derived(28); + Derived(29); + Derived(30); + Derived(31); + + #define ObjCase_cpu(A) \ + case A: \ + if (numElements > i) { \ + array[i] = (BaseClass *)alloc->my_new(); \ + break; \ + } + + #define ObjCase(A) \ + case A: \ + if (numElements > i) { \ + new (array[i]) Class##A(); \ + break; \ + } + +__managed__ range_tree_node *range_tree; +__managed__ unsigned tree_size_g; + +__managed__ void *temp_ubench; + void initialize_0(BaseClass **pointerArray, int numElements, int numClasses, + int threadsPerBlock, obj_alloc *alloc) { + int i; + int threadIdx; + BaseClass **array = pointerArray; + for (i = 0; i < numElements; i++) { + threadIdx = i % threadsPerBlock; + + switch (threadIdx % numClasses) { + ObjCase_cpu(0); + ObjCase_cpu(1); + ObjCase_cpu(2); + ObjCase_cpu(3); + ObjCase_cpu(4); + ObjCase_cpu(5); + ObjCase_cpu(6); + ObjCase_cpu(7); + ObjCase_cpu(8); + ObjCase_cpu(9); + ObjCase_cpu(10); + ObjCase_cpu(11); + ObjCase_cpu(12); + ObjCase_cpu(13); + ObjCase_cpu(14); + ObjCase_cpu(15); + ObjCase_cpu(16); + ObjCase_cpu(17); + ObjCase_cpu(18); + ObjCase_cpu(19); + ObjCase_cpu(20); + ObjCase_cpu(21); + ObjCase_cpu(22); + ObjCase_cpu(23); + ObjCase_cpu(24); + ObjCase_cpu(25); + ObjCase_cpu(26); + ObjCase_cpu(27); + ObjCase_cpu(28); + ObjCase_cpu(29); + ObjCase_cpu(30); + ObjCase_cpu(31); + } + } + } + __global__ void initialize_1(BaseClass **pointerArray, int numElements, + int numClasses) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + BaseClass **array = pointerArray; + switch (threadIdx.x % numClasses) { + ObjCase(0); + ObjCase(1); + ObjCase(2); + ObjCase(3); + ObjCase(4); + ObjCase(5); + ObjCase(6); + ObjCase(7); + ObjCase(8); + ObjCase(9); + ObjCase(10); + ObjCase(11); + ObjCase(12); + ObjCase(13); + ObjCase(14); + ObjCase(15); + ObjCase(16); + ObjCase(17); + ObjCase(18); + ObjCase(19); + ObjCase(20); + ObjCase(21); + ObjCase(22); + ObjCase(23); + ObjCase(24); + ObjCase(25); + ObjCase(26); + ObjCase(27); + ObjCase(28); + ObjCase(29); + ObjCase(30); + ObjCase(31); + } + } + + /** + * CUDA Kernel Device code + * + * Computes the vector addition of A and B into C. The 3 vectors have the same + * number of elements numElements. 
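+ *
+ * Unlike the plain vector-add sample, each thread here dispatches through a
+ * device-side virtual function: it looks up its BaseClass object's vtable
+ * with get_vfunc() and then calls doTheMath(), which repeatedly accumulates
+ * A[i] into C[i] (numCompute controls how much work each call performs).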
+ */ + __global__ void ooVectorAdd(const float *A, float *C, int numElements, + BaseClass **classes, int numCompute) { + int i = blockDim.x * blockIdx.x + threadIdx.x; + BaseClass *myClass = classes[i]; + unsigned tree_size = tree_size_g; + range_tree_node *table = range_tree; + void **vtable; + if (i < numElements) { + vtable = get_vfunc(myClass, table, tree_size); + temp_ubench=vtable[0]; + myClass->doTheMath(C[i], A[i], numCompute); + } + } + + /** + * Host main routine + */ + int main(int argc, char **argv) { + // Error code to check return values for CUDA calls + cudaError_t err = cudaSuccess; + mem_alloc shared_mem(10ULL * 1024 * 1024 * 1024); + obj_alloc my_obj_alloc(&shared_mem); + // Print the vector length to be used, and compute its size + int numElements = atoi(argv[1]); // size of vector + int numCompute = atoi(argv[3]); // vfunc body size + int numClasses = atoi(argv[4]); // num of types + size_t size = numElements * sizeof(float); + printf("[Vector addition of %d elements]\n", numElements); + + // Allocate the host input vector A + float *h_A = (float *)malloc(size); + + // Allocate the host input vector B + float *h_B = (float *)malloc(size); + + // Allocate the host output vector C + float *h_C = (float *)malloc(size); + + // Verify that allocations succeeded + if (h_A == NULL || h_B == NULL || h_C == NULL) { + fprintf(stderr, "Failed to allocate host vectors!\n"); + exit(EXIT_FAILURE); + } + + // Initialize the host input vectors + for (int i = 0; i < numElements; ++i) { + h_A[i] = rand() / (float)RAND_MAX; + h_B[i] = rand() / (float)RAND_MAX; + } + + cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024); + // Allocate the device input vector A + float *d_A = NULL; + err = cudaMalloc((void **)&d_A, size); + + if (err != cudaSuccess) { + fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + + // Allocate the device input vector B + float *d_B = NULL; + err = cudaMalloc((void **)&d_B, size); + + if (err != cudaSuccess) { + fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + + // Allocate the device output vector C + float *d_C = NULL; + err = cudaMalloc((void **)&d_C, size); + + if (err != cudaSuccess) { + fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + + // Copy the host input vectors A and B in host memory to the device input + // vectors in + // device memory + printf("Copy input data from the host memory to the CUDA device\n"); + err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); + + if (err != cudaSuccess) { + fprintf(stderr, + "Failed to copy vector A from host to device (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + + err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); + + if (err != cudaSuccess) { + fprintf(stderr, + "Failed to copy vector B from host to device (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + + BaseClass **classes = NULL; + // cudaMalloc((void***)&classes, sizeof(BaseClass*)*numElements); + classes = (BaseClass **)my_obj_alloc.calloc(numElements); + // Launch the Vector Add CUDA Kernel + int threadsPerBlock = atoi(argv[2]); // thread per block + int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock; + initialize_0(classes, numElements, numClasses, threadsPerBlock, + &my_obj_alloc); + initialize_1<<>>(classes, 
numElements, + numClasses); + err = cudaGetLastError(); + + if (err != cudaSuccess) { + fprintf(stderr, "Failed to launch initialize kernel (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + my_obj_alloc.create_tree(); + range_tree = my_obj_alloc.get_range_tree(); + tree_size_g = my_obj_alloc.get_tree_size(); + printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, + threadsPerBlock); + ooVectorAdd<<>>(d_A, d_C, numElements, + classes, numCompute); + err = cudaGetLastError(); + + if (err != cudaSuccess) { + fprintf(stderr, "Failed to launch ooVectorAdd kernel (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + + // Copy the device result vector in device memory to the host result vector + // in host memory. + printf("Copy output data from the CUDA device to the host memory\n"); + err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); + + if (err != cudaSuccess) { + fprintf(stderr, + "Failed to copy vector C from device to host (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + + // Verify that the result vector is correct + for (int i = 0; i < numElements; ++i) { + float result = 0; + for (int j = 0; j < numCompute; j++) + result += h_A[i]; + if (fabs(result - h_C[i]) > 1e-3) { + fprintf(stderr, "Result verification failed at element %d!\n", i); + exit(EXIT_FAILURE); + } + } + + printf("Test PASSED\n"); + + // Free device global memory + err = cudaFree(d_A); + + if (err != cudaSuccess) { + fprintf(stderr, "Failed to free device vector A (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + + err = cudaFree(d_B); + + if (err != cudaSuccess) { + fprintf(stderr, "Failed to free device vector B (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + + err = cudaFree(d_C); + + if (err != cudaSuccess) { + fprintf(stderr, "Failed to free device vector C (error code %s)!\n", + cudaGetErrorString(err)); + exit(EXIT_FAILURE); + } + + // Free host memory + free(h_A); + free(h_B); + free(h_C); + + printf("Done\n"); + return 0; + } + \ No newline at end of file diff --git a/cuda_code/ops_1.cu b/cuda_code/ops_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..2f33c20ddb1514125b7007b66b8fc5e198970389 --- /dev/null +++ b/cuda_code/ops_1.cu @@ -0,0 +1,831 @@ +/* Copyright 2017 Stanford, NVIDIA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "ops.h" +#include "cnn_helper.h" + +CnnHandle init_cudnn(const Task *task, + const std::vector ®ions, + Context ctx, HighLevelRuntime *runtime) +{ + assert(regions.size() == 0); + assert(task->arglen == sizeof(size_t)); + size_t workSpaceSize = *(const size_t*) task->args; + CnnHandle handle; + handle.workSpaceSize = workSpaceSize; + printf("workSpaceSize = %zu\n", workSpaceSize); +#ifndef DISABLE_COMPUTATION + checkCUDA(cublasCreate(&handle.blas)); + checkCUDNN(cudnnCreate(&handle.dnn)); +#endif + checkCUDA(cudaMalloc(&handle.workSpace, workSpaceSize)); + return handle; +} + +Op::Op(Tensor input) +: numLocals(0) +{ + inputs[0] = input; +} + +Op::Op(int n, Tensor *_inputs) +: numLocals(0) +{ + for (int i = 0; i < n; i++) { + inputs[i] = _inputs[i]; + } +} + +void Op::dummy_task(const Task *task, + const std::vector ®ions, + Context ctx, Runtime *runtime) +{} + +void Op::prefetch(const CnnModel& model) +{ + ArgumentMap argmap; + Context ctx = model.config.lg_ctx; + Runtime* runtime = model.config.lg_hlr; + if (numLocals == 0) + return; + //FIXME: this is a hack, fix me later + if (numLocals == 3) { + // We must be an Linear operation + Rect<2> rect = runtime->get_index_space_domain(ctx, model.fc_part_is); + IndexLauncher launcher(DUMMY_TASK_ID, model.fc_part_is, + TaskArgument(NULL, 0), argmap); + launcher.add_region_requirement( + RegionRequirement(locals[1].partition, 0/*projection*/, + READ_ONLY, EXCLUSIVE, locals[1].region)); + launcher.add_field(0, FID_DATA); + launcher.add_region_requirement( + RegionRequirement(locals[2].partition, 0/*projection*/, + READ_ONLY, EXCLUSIVE, locals[2].region)); + launcher.add_field(1, FID_DATA); + runtime->execute_index_space(ctx, launcher); + } else { + assert(numLocals == 2); + Rect<3> rect = runtime->get_index_space_domain(ctx, model.part_is); + IndexLauncher launcher(DUMMY_TASK_ID, model.part_is, + TaskArgument(NULL, 0), argmap); + launcher.add_region_requirement( + RegionRequirement(locals[0].region, 0/*projection*/, + READ_ONLY, EXCLUSIVE, locals[0].region)); + launcher.add_field(0, FID_DATA); + launcher.add_region_requirement( + RegionRequirement(locals[1].region, 0/*projection*/, + READ_ONLY, EXCLUSIVE, locals[1].region)); + launcher.add_field(1, FID_DATA); + runtime->execute_index_space(ctx, launcher); + } +} + +CnnModel::CnnModel(int num_images, int height, int width, + int image_par, int height_par, int width_par, + int fc_par_n, int fc_par_c, bool profiling, + float learning_rate, + int num_loaders_per_node, int num_nodes, + Context ctx, Runtime* runtime) +{ + config.lg_ctx = ctx; + config.lg_hlr = runtime; + config.field_space = runtime->create_field_space(ctx); + { + FieldAllocator allocator = + runtime->create_field_allocator(ctx, config.field_space); + allocator.allocate_field(sizeof(float), FID_DATA); + } + //config.num_par_w = width_par; + //config.num_par_h = height_par; + //config.num_par_n = image_par; + //config.num_workers = width_par * height_par * image_par; + //config.fc_num_par_c = fc_par_c; + //config.fc_num_par_n = fc_par_n; + config.sm_num_par = fc_par_c * fc_par_n; + config.profiling = profiling; + config.learning_rate = learning_rate; + config.num_loaders = num_loaders_per_node; + config.num_nodes = num_nodes; + Rect<3, coord_t> part_bounds(Point<3>(0, 0, 0), Point<3>(width_par-1, height_par-1, image_par-1)); + part_is = runtime->create_index_space(ctx, part_bounds); + Rect<2, coord_t> fc_part_bounds(Point<2>(0, 0), Point<2>(fc_par_c-1, fc_par_n-1)); + fc_part_is = runtime->create_index_space(ctx, 
fc_part_bounds); + Rect<1, coord_t> sm_part_bounds(Point<1>(0), Point<1>(config.sm_num_par-1)); + sm_part_is = runtime->create_index_space(ctx, sm_part_bounds); + Rect<1, coord_t> load_part_bounds(Point<1>(0), Point<1>(config.num_loaders-1)); + load_part_is = runtime->create_index_space(ctx, load_part_bounds); + + // input_images + Rect<3, coord_t> image_rect(Point<3>(0, 0, 0), Point<3>(width-1, height-1, num_images*3-1)); + IndexSpaceT<3> image_is = runtime->create_index_space(ctx, image_rect); + LogicalRegion image_lr = runtime->create_logical_region(ctx, image_is, config.field_space); + LogicalRegion image_grad_lr = runtime->create_logical_region(ctx, image_is, config.field_space); + Transform<3, 3, coord_t> transform; + int extent_w = width / width_par; + int extent_h = height / height_par; + int extent_nc = 3 * num_images / image_par; + Rect<3, coord_t> extent(Point<3>(0, 0, 0), Point<3>(extent_w-1, extent_h-1, extent_nc-1)); + transform[0][0] = extent_w; transform[0][1] = 0; transform[0][2] = 0; + transform[1][0] = 0; transform[1][1] = extent_h; transform[1][2] = 0; + transform[2][0] = 0; transform[2][1] = 0; transform[2][2] = extent_nc; + IndexPartition image_ip = + runtime->create_partition_by_restriction(ctx, image_is, part_is, transform, extent); + LogicalPartition image_lp = runtime->get_logical_partition(ctx, image_lr, image_ip); + LogicalPartition image_grad_lp = + runtime->get_logical_partition(ctx, image_grad_lr, image_ip); + input_image.numDim = 4; + input_image.adim[0] = width; + input_image.adim[1] = height; + input_image.adim[2] = 3; + input_image.adim[3] = num_images; + input_image.pdim[0] = extent_w; + input_image.pdim[1] = extent_h; + input_image.pdim[2] = 3; + input_image.pdim[3] = extent_nc / 3; + input_image.region = image_lr; + input_image.region_grad = image_grad_lr; + input_image.partition = image_lp; + input_image.partition_grad = image_grad_lp; + + // rgb_images (has same index space as input_images + rgb_lr = runtime->create_logical_region(ctx, image_is, config.field_space); + rgb_image_lp = runtime->get_logical_partition(ctx, rgb_lr, image_ip); + // Create a partition based on num_loaders and num_nodes + assert(num_images * 3 % (config.num_loaders * config.num_nodes) == 0); + int extent_images = num_images * 3 / (config.num_loaders * config.num_nodes); + Transform<3, 1, coord_t> trans; + trans[0][0] = 0; trans[1][0] = 0; trans[2][0] = extent_images; + Rect<3, coord_t> ext(Point<3>(0, 0, 0), Point<3>(width-1, height-1, extent_images-1)); + IndexPartition rgb_ip = + runtime->create_partition_by_restriction(ctx, image_is, load_part_is, trans, ext); + rgb_load_lp = runtime->get_logical_partition(ctx, rgb_lr, rgb_ip); + + // input_label + Rect<1, coord_t> label_rect(Point<1>(0), Point<1>(num_images-1)); + IndexSpaceT<1> label_is = runtime->create_index_space(ctx, label_rect); + LogicalRegion label_lr = runtime->create_logical_region(ctx, label_is, config.field_space); + Transform<1, 1, coord_t> label_trans; + int extent_n = (num_images + config.sm_num_par - 1) / config.sm_num_par; + Rect<1, coord_t> label_extent(Point<1>(0), Point<1>(extent_n-1)); + label_trans[0][0] = extent_n; + IndexPartition label_ip = runtime->create_partition_by_restriction( + ctx, label_is, sm_part_is, label_trans, label_extent); + LogicalPartition label_lp = runtime->get_logical_partition(ctx, label_lr, label_ip); + input_label.numDim = 1; + input_label.adim[0] = num_images; + input_label.pdim[0] = extent_n; + input_label.region = label_lr; + input_label.partition = label_lp; + + // Build 
DataLoader + dataLoader = new DataLoader("list.txt"); +}; + +__inline__ +int calc_offset(int c, int y, int x, int yscale, int xscale) +{ + return (c * yscale * xscale + y * xscale + x); +} + +// Note: the layout is CHW in both buffer and image +void bilinear_interpolation(unsigned char* buffer, float *image, + int height, int width, + int input_height, int input_width, + float height_scale, float width_scale) +{ + //printf("h_in(%d) w_in(%d) h_out(%d) w_out(%d) h_scale(%.2lf) w_scale(%.2lf)\n", + // input_height, input_width, height, width, height_scale, width_scale); + //float mean[3] = {0.485, 0.456, 0.406}; + //float variance[3] = {0.229, 0.224, 0.225}; + for (int y = 0; y < height; y++) { + float input_y = y * height_scale; + int y0 = static_cast(std::floor(input_y)); + int y1 = std::min(y0 + 1, input_height - 1); + for (int x = 0; x < width; x++) { + float input_x = x * width_scale; + int x0 = static_cast(input_x); + int x1 = std::min(x0 + 1, input_width - 1); + + // Run kernel on the 4 corners of the bilinear resize algorithm + float scale = (1 - (input_y - y0)) * (1 - (input_x - x0)); + for (int c = 0; c < 3; c++) { + int input_offset = calc_offset(c, y0, x0, input_height, input_width); + int offset = calc_offset(c, y, x, height, width); + image[offset] = scale * static_cast(buffer[input_offset]); + } + + scale = (1 - (input_y - y0)) * (input_x - x0); + for (int c = 0; c < 3; c++) { + int input_offset = calc_offset(c, y0, x1, input_height, input_width); + int offset = calc_offset(c, y, x, height, width); + image[offset] += scale * static_cast(buffer[input_offset]); + } + + scale = (input_y - y0) * (1 - (input_x - x0)); + for (int c = 0; c < 3; c++) { + int input_offset = calc_offset(c, y1, x0, input_height, input_width); + int offset = calc_offset(c, y, x, height, width); + image[offset] += scale * static_cast(buffer[input_offset]); + } + + scale = (input_y - y0) * (input_x - x0); + for (int c = 0; c < 3; c++) { + int input_offset = calc_offset(c, y1, x1, input_height, input_width); + int offset = calc_offset(c, y, x, height, width); + image[offset] += scale * static_cast(buffer[input_offset]); + } + + //image[offset] = (image[offset] - mean[c]) / variance[c]; + } + } +} + +// Note: the layout is CHW in both buffer and image +void nearest_neighbor(unsigned char* buffer, unsigned char *image, + int height, int width, + int input_height, int input_width, + float height_scale, float width_scale) +{ + //const float mean[3] = {0.485, 0.456, 0.406}; + //const float variance[3] = {0.229, 0.224, 0.225}; + for (int y = 0; y < height; y++) { + int y0 = std::min(static_cast(roundf(y * height_scale)), input_height - 1); + for (int x = 0; x < width; x++) { + int x0 = std::min(static_cast(roundf(x * width_scale)), input_width - 1); + for (int c = 0; c < 3; c++) { + int input_offset = calc_offset(c, y0, x0, input_height, input_width); + int offset = calc_offset(c, y, x, height, width); + image[offset] = buffer[input_offset]; + //image[offset] = (static_cast(buffer[input_offset]) / 256 - mean[c]) / variance[c]; + } + } + } +} + +/* + regions[0]: image (unsigned char) + regions[1]: label (int) +*/ + +void CnnModel::load_images_task(const Task *task, + const std::vector ®ions, + Context ctx, HighLevelRuntime *runtime) +{ + long long start_time = Realm::Clock::current_time_in_microseconds(); + assert(regions.size() == 1); + assert(task->regions.size() == 1); + const AccessorWO acc_image(regions[0], FID_DATA); + Rect<3> rect_image; + unsigned char *buffer = (unsigned char*) malloc(2000 * 2000 * 3); + 
rect_image = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); + assert(acc_image.accessor.is_dense_arbitrary(rect_image)); + unsigned char *image_ptr = acc_image.ptr(rect_image.lo); + const DataLoadMeta* meta = (DataLoadMeta*) task->local_args; + int height = rect_image.hi[0] - rect_image.lo[0] + 1; + int width = rect_image.hi[1] - rect_image.lo[1] + 1; + int numImages = (rect_image.hi[2] - rect_image.lo[2] + 1) / 3; + assert((rect_image.hi[2] - rect_image.lo[2] + 1) % 3 == 0); + for (int fileIdx = 0; fileIdx < meta->cnt; fileIdx ++) { + //printf("fileIdx = %d filename = %s, start = %d, end = %d\n", fileIdx, meta->datasets[fileIdx].filename, meta->datasets[fileIdx].start, meta->datasets[fileIdx].end); + hid_t fileId = H5Fopen(meta->datasets[fileIdx].filename, H5F_ACC_RDONLY, H5P_DEFAULT); + //hid_t fileId = meta->datasets[fileIdx].fid; + //printf("fileId = %d\n", fileId); + char name[100]; + for (int i = meta->datasets[fileIdx].start; i <= meta->datasets[fileIdx].end; i++) { + H5Gget_objname_by_idx(fileId, i, name, 100); + hid_t datasetId = H5Dopen2(fileId, name, H5P_DEFAULT); + hid_t dataspaceId = H5Dget_space(datasetId); + hsize_t dims[3]; + H5Sget_simple_extent_dims(dataspaceId, dims, NULL); + H5Dread(datasetId, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer); + H5Sclose(dataspaceId); + H5Dclose(datasetId); + hsize_t input_height = dims[1]; + hsize_t input_width = dims[2]; + //char interlace[100]; + //hsize_t input_height, input_width, input_planes; + //hssize_t input_npals; + //H5IMget_image_info(fileId, name, &input_width, &input_height, &input_planes, + // interlace, &input_npals); + //printf("h = %zu, w = %zu, planes = %zu, npals = %zu\n", input_height, input_width, input_planes, input_npals); + //H5IMread_image(fileId, name, buffer); + float height_scale = static_cast(input_height) / height; + float width_scale = static_cast(input_width) / width; + nearest_neighbor(buffer, image_ptr, height, width, + input_height, input_width, height_scale, width_scale); + //bilinear_interpolation(buffer, image_ptr, height, width, + // input_height, input_width, height_scale, width_scale); + image_ptr += 3 * height * width; + } + H5Fclose(fileId); + } + long long end_time = Realm::Clock::current_time_in_microseconds(); + printf("exe time = %lld\n", end_time - start_time); + free(buffer); +} + +__global__ +void apply_normalize(float *tensor_ptr, const unsigned char *rgb_ptr, + size_t size, size_t hxw) +{ + const float mean[3] = {0.485, 0.456, 0.406}; + const float var[3] = {0.229, 0.224, 0.225}; + + CUDA_KERNEL_LOOP(i, size) + { + // decide the color of the current position by assuming NCHW layout + int c = (i / hxw) % 3; + tensor_ptr[i] = (static_cast(rgb_ptr[i]) / 256 - mean[c]) / var[c]; + } +} + +/* + regions[0](O): input_images + regions[1](I): input_rgb +*/ +__host__ +void CnnModel::normalize_images_task(const Task *task, + const std::vector ®ions, + Context ctx, Runtime *runtime) +{ + assert(regions.size() == 2); + assert(task->regions.size() == 2); + const AccessorWO acc_tensor(regions[0], FID_DATA); + const AccessorRO acc_rgb(regions[1], FID_DATA); + Rect<3> rect_tensor, rect_rgb; + rect_tensor = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); + rect_rgb = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); + assert(acc_tensor.accessor.is_dense_arbitrary(rect_tensor)); + assert(acc_rgb.accessor.is_dense_arbitrary(rect_rgb)); + assert(rect_tensor == rect_rgb); + size_t w = rect_tensor.hi[0] 
- rect_tensor.lo[0] + 1; + size_t h = rect_tensor.hi[1] - rect_tensor.lo[1] + 1; + float *tensor_ptr = acc_tensor.ptr(rect_tensor.lo); + const unsigned char *rgb_ptr = acc_rgb.ptr(rect_rgb.lo); + apply_normalize<<>>( + tensor_ptr, rgb_ptr, rect_tensor.volume(), h * w); +} + +void CnnModel::load_images() +{ + ArgumentMap argmap; + Context ctx = config.lg_ctx; + Runtime* runtime = config.lg_hlr; + + Rect<1> rect = runtime->get_index_space_domain(ctx, load_part_is); + int total_loaders = config.num_loaders * config.num_nodes; + assert(input_image.adim[3] % total_loaders == 0); + int image_per_loader = input_image.adim[3] / total_loaders; + for (PointInRectIterator<1> it(rect); it(); it++) { + DataLoadMeta meta; + dataLoader->get_images(image_per_loader, meta); + argmap.set_point(*it, TaskArgument(&meta, sizeof(meta))); + } + + // Load the rgb images + IndexLauncher launcher(LOAD_IMAGES_TASK_ID, load_part_is, + TaskArgument(NULL, 0), argmap); + launcher.add_region_requirement( + RegionRequirement(rgb_load_lp, 0/*projection id*/, + WRITE_DISCARD, EXCLUSIVE, rgb_lr)); + launcher.add_field(0, FID_DATA); + runtime->execute_index_space(ctx, launcher); + + // Conver to float input tensor + ArgumentMap argmap_dummy; + IndexLauncher launcher2(NORMALIZE_IMAGES_TASK_ID, part_is, + TaskArgument(NULL, 0), argmap_dummy); + launcher2.add_region_requirement( + RegionRequirement(input_image.partition, 0/*projection id*/, + WRITE_DISCARD, EXCLUSIVE, input_image.region)); + launcher2.add_field(0, FID_DATA); + launcher2.add_region_requirement( + RegionRequirement(rgb_image_lp, 0/*projection id*/, + READ_ONLY, EXCLUSIVE, rgb_lr)); + launcher2.add_field(1, FID_DATA); + runtime->execute_index_space(ctx, launcher2); +} + +void CnnModel::prefetch() +{ + for (size_t i = 0; i < layers.size(); i++) + layers[i]->prefetch(*this); +} + +void CnnModel::forward() +{ + for (size_t i = 0; i < layers.size(); i++) { + layers[i]->forward(*this); + } +} + +void CnnModel::backward() +{ + for (int i = layers.size() - 1; i >= 0; i--) { + layers[i]->backward(*this); + } +} + +void CnnModel::update() +{ + for (int i = layers.size() - 1; i >= 0; i--) { + layers[i]->update(*this); + } +} + +__global__ +void init_image_kernel(float* ptr, coord_t size) +{ + const coord_t tid = blockIdx.x * blockDim.x + threadIdx.x; + if (tid < size) { + ptr[tid] = 1.0f; + } +} + +__global__ +void init_label_kernel(int* ptr, coord_t size) +{ + const coord_t tid = blockIdx.x * blockDim.x + threadIdx.x; + if (tid < size) { + ptr[tid] = 1; + } +} + +void CnnModel::init_images_task(const Task *task, + const std::vector ®ions, + Context ctx, Runtime *runtime) +{ +#ifndef DISABLE_COMPUTATION + const int BLKSIZE = 512; + const AccessorWO acc_image(regions[0], FID_DATA); + Rect<3> rect_image; + rect_image = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); + assert(acc_image.accessor.is_dense_arbitrary(rect_image)); + float *image_ptr = acc_image.ptr(rect_image.lo); + int num_blocks = (rect_image.volume() + BLKSIZE - 1) / BLKSIZE; + init_image_kernel<<>>(image_ptr, rect_image.volume()); +#endif +} + +void CnnModel::init_images() +{ + ArgumentMap argmap; + Context ctx = config.lg_ctx; + Runtime* runtime = config.lg_hlr; + IndexLauncher launcher(IMAGE_INIT_TASK_ID, part_is, + TaskArgument(NULL, 0), argmap); + launcher.add_region_requirement( + RegionRequirement(input_image.partition, 0/*projection id*/, + WRITE_DISCARD, EXCLUSIVE, input_image.region)); + launcher.add_field(0, FID_DATA); + runtime->execute_index_space(ctx, launcher); +} 
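+
+// A minimal sketch (illustrative only; fill_ones_on_device is a hypothetical
+// helper, not part of the original file) of the ceil-division launch
+// arithmetic used by init_images_task above and init_labels_task below,
+// applied to a raw device buffer.
+static void fill_ones_on_device(float* d_ptr, coord_t size)
+{
+  const int BLKSIZE = 512;
+  // Round up so the last, possibly partial block still covers the tail;
+  // init_image_kernel itself guards against indices beyond `size`.
+  int num_blocks = (size + BLKSIZE - 1) / BLKSIZE;
+  init_image_kernel<<<num_blocks, BLKSIZE>>>(d_ptr, size);
+}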
+ +void CnnModel::init_labels_task(const Task *task, + const std::vector ®ions, + Context ctx, Runtime *runtime) +{ + const int BLKSIZE = 512; + const AccessorWO acc_label(regions[0], FID_DATA); + Rect<1> rect_label; + rect_label = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); + assert(acc_label.accessor.is_dense_arbitrary(rect_label)); + int *label_ptr = acc_label.ptr(rect_label.lo); + int num_blocks = (rect_label.volume() + BLKSIZE - 1) / BLKSIZE; + init_label_kernel<<>>(label_ptr, rect_label.volume()); +} + +void CnnModel::init_labels() +{ + ArgumentMap argmap; + Context ctx = config.lg_ctx; + Runtime* runtime = config.lg_hlr; + IndexLauncher launcher(LABEL_INIT_TASK_ID, sm_part_is, + TaskArgument(NULL, 0), argmap); + launcher.add_region_requirement( + RegionRequirement(input_label.partition, 0/*projection id*/, + WRITE_DISCARD, EXCLUSIVE, input_label.region)); + launcher.add_field(0, FID_DATA); + FutureMap fm = runtime->execute_index_space(ctx, launcher); + //fm.wait_all_results(); +} + +Tensor CnnModel::add_flat_layer(Tensor input) +{ + assert(input.numDim == 4); + Flat *flat = new Flat(config, input, part_is, fc_part_is); + layers.push_back(flat); + return flat->output; +} + +Flat::Flat(CnnConfig config, Tensor input, + IndexSpaceT<3> part_is_3d, + IndexSpaceT<2> part_is_2d) +: Op(input) +{ + Context ctx = config.lg_ctx; + HighLevelRuntime* runtime = config.lg_hlr; + Rect<2> part_rect_2d = runtime->get_index_space_domain(ctx, part_is_2d); + int fc_num_par_c = part_rect_2d.hi[0] - part_rect_2d.lo[0] + 1; + int fc_num_par_n = part_rect_2d.hi[1] - part_rect_2d.lo[1] + 1; + + FieldSpace fs = config.field_space; + + int output_c = input.adim[0] * input.adim[1] * input.adim[2]; + int output_n = input.adim[3]; + Rect<2, coord_t> output_rect(Point<2>(0, 0), Point<2>(output_c-1, output_n-1)); + IndexSpaceT<2> output_is = runtime->create_index_space(ctx, output_rect); + LogicalRegion output_lr = runtime->create_logical_region(ctx, output_is, fs); + LogicalRegion output_grad_lr = + runtime->create_logical_region(ctx, output_is, fs); + Transform<2, 2, coord_t> transform; + //int extent_c = input.pdim[0] * input.pdim[1] * input.pdim[2]; + //int extent_n = input.pdim[3]; + // We assume equal partition for load balancing + assert(output_c % fc_num_par_c == 0); + assert(output_n % fc_num_par_n == 0); + int extent_c = output_c / fc_num_par_c; + int extent_n = output_n / fc_num_par_n; + Rect<2, coord_t> extent(Point<2>(0, 0), Point<2>(extent_c-1,extent_n-1)); + transform[0][0] = extent_c; transform[0][1] = 0; + transform[1][0] = 0; transform[1][1] = extent_n; + IndexPartition output_ip = + runtime->create_partition_by_restriction(ctx, output_is, part_is_2d, transform, extent); + assert(runtime->is_index_partition_disjoint(ctx, output_ip)); + assert(runtime->is_index_partition_complete(ctx, output_ip)); + LogicalPartition output_lp = runtime->get_logical_partition(ctx, output_lr, output_ip); + LogicalPartition output_grad_lp = + runtime->get_logical_partition(ctx, output_grad_lr, output_ip); + output.numDim = 2; + output.adim[0] = output_c; + output.adim[1] = output_n; + output.pdim[0] = extent_c; + output.pdim[1] = extent_n; + output.region = output_lr; + output.region_grad = output_grad_lr; + output.partition = output_lp; + output.partition_grad = output_grad_lp; + printf("Create flat layer: input(N=%d C=%d H=%d W=%d) -> output(N=%d C=%d)\n", + input.adim[3], input.adim[2], input.adim[1], input.adim[0], output.adim[1], output.adim[0]); + + FieldSpace proj_fs = 
runtime->create_field_space(ctx); + { + FieldAllocator allocator = runtime->create_field_allocator(ctx, proj_fs); + allocator.allocate_field(sizeof(Rect<2>), FID_DATA); + } + LogicalRegion proj_lr = runtime->create_logical_region(ctx, part_is_3d, proj_fs); + InlineLauncher launcher(RegionRequirement(proj_lr, WRITE_DISCARD, EXCLUSIVE, proj_lr) + .add_field(FID_DATA)); + PhysicalRegion proj_pr = runtime->map_region(ctx, launcher); + proj_pr.wait_until_valid(); + coord_t subtotal = 0; + { + const FieldAccessor, 3, coord_t, + Realm::AffineAccessor, 3, coord_t> > ra(proj_pr, FID_DATA); + Rect<3> rect = runtime->get_index_space_domain(ctx, part_is_3d); + for(PointInRectIterator<3> pir(rect); pir(); ++pir) { + IndexSpace subspace = runtime->get_index_subspace(input.partition.get_index_partition(), *pir); + Rect<3> subrect = runtime->get_index_space_domain(ctx, subspace); + // Currently we assume the size of each subregion is divisible by output_n (i.e., batch size) + assert(subrect.volume() % output_n == 0); + coord_t subsize = subrect.volume() / output_n; + ra[*pir] = Rect<2>(Point<2>(subtotal, 0), Point<2>(subtotal + subsize - 1, output_n - 1)); + subtotal += subsize; + } + } + runtime->unmap_region(ctx, proj_pr); + Transform<3, 3, coord_t> proj_trans; + proj_trans[0][0] = 1; proj_trans[0][1] = 0; proj_trans[0][2] = 0; + proj_trans[1][0] = 0; proj_trans[1][1] = 1; proj_trans[1][2] = 0; + proj_trans[2][0] = 0; proj_trans[2][1] = 0; proj_trans[2][2] = 1; + Rect<3, coord_t> proj_extent(Point<3>(0, 0, 0), Point<3>(0, 0, 0)); + IndexPartition proj_ip = + runtime->create_partition_by_restriction(ctx, part_is_3d, part_is_3d, proj_trans, proj_extent); + LogicalPartition proj_lp = runtime->get_logical_partition(ctx, proj_lr, proj_ip); + IndexPartition flat_ip = + runtime->create_partition_by_image_range(ctx, output_is, + proj_lp, proj_lr, FID_DATA, part_is_3d); + assert(runtime->is_index_partition_disjoint(ctx, flat_ip)); + assert(runtime->is_index_partition_complete(ctx, flat_ip)); + flat_lp = runtime->get_logical_partition(ctx, output_lr, flat_ip); + flat_grad_lp = runtime->get_logical_partition(ctx, output_grad_lr, flat_ip); + return; +/* + Transform<2, 3, coord_t> flat_trans; + flat_trans[0][0] = input.pdim[0] * input.pdim[1] * input.adim[2]; + flat_trans[0][1] = input.adim[0] * input.pdim[1] * input.adim[2]; + flat_trans[0][2] = 0; + flat_trans[1][0] = 0; + flat_trans[1][1] = 0; + flat_trans[1][2] = input.pdim[3]; + IndexPartition flat_ip = + runtime->create_partition_by_restriction(ctx, output_is, part_is_3d, flat_trans, extent); + flat_lp = runtime->get_logical_partition(ctx, output_lr, flat_ip); +*/ +} + +OpMeta* Flat::init_task(const Task *task, + const std::vector ®ions, + Context ctx, Runtime *runtime) +{ + CnnHandle handle = *((const CnnHandle*) task->local_args); + FlatMeta* m = new FlatMeta(handle); + return m; +} + +void Flat::init(const CnnModel& model) +{ + ArgumentMap argmap; + Context ctx = model.config.lg_ctx; + Runtime* runtime = model.config.lg_hlr; + Rect<3> rect = runtime->get_index_space_domain(ctx, model.part_is); + int idx = 0; + for (PointInRectIterator<3> it(rect); it(); it++) { + CnnHandle handle = model.cnn_handlers[idx++]; + argmap.set_point(*it, TaskArgument(&handle, sizeof(CnnHandle))); + } + + IndexLauncher init_launcher(FLAT_INIT_TASK_ID, model.part_is, + TaskArgument(this, sizeof(Flat)), argmap); + FutureMap fm = runtime->execute_index_space(ctx, init_launcher); + fm.wait_all_results(); + idx = 0; + for (PointInRectIterator<3> it(rect); it(); it++) { + meta[idx++] = 
fm.get_result(*it); + } +} + +/* + regions[0](I): input + regions[1](O): output +*/ +void Flat::forward_task(const Task *task, + const std::vector ®ions, + Context ctx, Runtime *runtime) +{ +#ifndef DISABLE_COMPUTATION + assert(regions.size() == 2); + assert(task->regions.size() == 2); + const AccessorRO acc_input(regions[0], FID_DATA); + const AccessorWO acc_output(regions[1], FID_DATA); + Rect<3> rect_input; + Rect<2> rect_output; + rect_input = runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); + rect_output = runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); + assert(rect_input.volume() == rect_output.volume()); + assert(acc_input.accessor.is_dense_arbitrary(rect_input)); + assert(acc_output.accessor.is_dense_arbitrary(rect_output)); + const float *input_ptr = acc_input.ptr(rect_input.lo); + float *output_ptr = acc_output.ptr(rect_output.lo); + + checkCUDA(cudaMemcpyAsync(output_ptr, input_ptr, + rect_input.volume() * sizeof(float), + cudaMemcpyDeviceToDevice)); +#endif +} + +void Flat::forward(const CnnModel& model) +{ + ArgumentMap argmap; + Context ctx = model.config.lg_ctx; + Runtime* runtime = model.config.lg_hlr; + Rect<3> rect = runtime->get_index_space_domain(ctx, model.part_is); + int idx = 0; + for (PointInRectIterator<3> it(rect); it(); it++) { + OpMeta* mp = meta[idx++]; + argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); + } + IndexLauncher launcher(FLAT_FWD_TASK_ID, model.part_is, + TaskArgument(NULL, 0), argmap); + launcher.add_region_requirement( + RegionRequirement(inputs[0].partition, 0/*projection id*/, + READ_ONLY, EXCLUSIVE, inputs[0].region)); + launcher.add_field(0, FID_DATA); + launcher.add_region_requirement( + RegionRequirement(flat_lp /*3D->2D partitions*/, 0/*projection id*/, + WRITE_DISCARD, EXCLUSIVE, output.region)); + launcher.add_field(1, FID_DATA); + + runtime->execute_index_space(ctx, launcher); +} + +/* + regions[0](O) : input_grad + regions[1](I) : output_grad +*/ +void Flat::backward_task(const Task *task, + const std::vector ®ions, + Context ctx, Runtime *runtime) +{ +#ifndef DISABLE_COMPUTATION + assert(regions.size() == 2); + assert(task->regions.size() == 2); + const AccessorWO acc_input_grad(regions[0], FID_DATA); + const AccessorRO acc_output_grad(regions[1], FID_DATA); + Rect<3> rect_input_grad; + Rect<2> rect_output_grad; + rect_input_grad = + runtime->get_index_space_domain(ctx, task->regions[0].region.get_index_space()); + rect_output_grad = + runtime->get_index_space_domain(ctx, task->regions[1].region.get_index_space()); + assert(rect_input_grad.volume() == rect_output_grad.volume()); + assert(acc_input_grad.accessor.is_dense_arbitrary(rect_input_grad)); + assert(acc_output_grad.accessor.is_dense_arbitrary(rect_output_grad)); + float *input_grad_ptr = acc_input_grad.ptr(rect_input_grad.lo); + const float *output_grad_ptr = acc_output_grad.ptr(rect_output_grad.lo); + + checkCUDA(cudaMemcpyAsync(input_grad_ptr, output_grad_ptr, + rect_input_grad.volume() * sizeof(float), + cudaMemcpyDeviceToDevice)); +#endif +} + +void Flat::backward(const CnnModel& model) +{ + ArgumentMap argmap; + Context ctx = model.config.lg_ctx; + Runtime* runtime = model.config.lg_hlr; + Rect<3> rect = runtime->get_index_space_domain(ctx, model.part_is); + int idx = 0; + for (PointInRectIterator<3> it(rect); it(); it++) { + OpMeta* mp = meta[idx++]; + argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); + } + IndexLauncher launcher(FLAT_BWD_TASK_ID, model.part_is, + TaskArgument(NULL, 0), 
argmap); + launcher.add_region_requirement( + RegionRequirement(inputs[0].partition_grad, 0/*projection id*/, + WRITE_DISCARD, EXCLUSIVE, inputs[0].region_grad)); + launcher.add_field(0, FID_DATA); + launcher.add_region_requirement( + RegionRequirement(flat_grad_lp /*3D->2D partitions*/, 0/*projection id*/, + READ_ONLY, EXCLUSIVE, output.region_grad)); + launcher.add_field(1, FID_DATA); + + runtime->execute_index_space(ctx, launcher); +} + +void Flat::update(const CnnModel& model) +{ +} + +DataLoader::DataLoader(std::string filename) + : fileIdx(0), imageIdx(0) +{ + FILE *file; + file = fopen(filename.c_str(), "r"); + assert(file != NULL); + HDFFile hdf; + while (fgets(hdf.filename, MAX_FILENAME, file) != NULL) { + hdf.filename[strlen(hdf.filename) - 1] = 0; + printf("filename = %s\n", hdf.filename); + hid_t fileId = H5Fopen(hdf.filename, H5F_ACC_RDONLY, H5P_DEFAULT); + H5Gget_num_objs(fileId, &hdf.numImages); + hdf.fid = fileId; + //H5Fclose(fileId); + datasets.push_back(hdf); + } +} + +void DataLoader::get_images(int numImages, DataLoadMeta &meta) +{ + int idx = 0; + if (imageIdx == (int)datasets[fileIdx].numImages) { + imageIdx = 0; + fileIdx = (fileIdx + 1) % datasets.size(); + } + memcpy(meta.datasets[0].filename, datasets[fileIdx].filename, MAX_FILENAME); + meta.datasets[0].fid = datasets[fileIdx].fid; + meta.datasets[0].start = imageIdx; + meta.datasets[0].end = imageIdx; + for (int i = 0; i < numImages; i++) { + if (imageIdx < (int)datasets[fileIdx].numImages) { + meta.datasets[idx].end = imageIdx; + imageIdx ++; + } else { + imageIdx = 0; + fileIdx = (fileIdx + 1) % datasets.size(); + idx++; + memcpy(meta.datasets[idx].filename, datasets[fileIdx].filename, MAX_FILENAME); + meta.datasets[idx].fid = datasets[fileIdx].fid; + meta.datasets[idx].start = imageIdx; + meta.datasets[idx].end = imageIdx; + } + } + meta.cnt = idx + 1; + printf("meta.cnt = %d\n", meta.cnt); + for (int i = 0; i < meta.cnt; i++) + printf("fn = %s, start = %d, end = %d\n", meta.datasets[i].filename, meta.datasets[i].start, meta.datasets[i].end); +} + diff --git a/cuda_code/optical_flow_turing.cu b/cuda_code/optical_flow_turing.cu new file mode 100644 index 0000000000000000000000000000000000000000..08fdc058c44e669a3a5c23899540bdbe6e789770 --- /dev/null +++ b/cuda_code/optical_flow_turing.cu @@ -0,0 +1,182 @@ +// Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
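+
+// Helpers for the Turing hardware optical-flow path: they repack 8-bit
+// RGB/BGR/grayscale frames into the pitch-aligned RGBA layout the optical-flow
+// engine expects, and encode/decode its int16_t flow components to/from float.
+//
+// A minimal usage sketch for the color conversion (buffer names, sizes and the
+// 256-byte row alignment below are illustrative assumptions, not part of the API):
+//
+//   uint8_t *d_rgb, *d_rgba;                         // device buffers, allocated elsewhere
+//   size_t width = 960, height = 540;
+//   size_t pitch = ((4 * width + 255) / 256) * 256;  // must satisfy pitch >= 4 * width
+//   dali::optical_flow::kernel::RgbToRgba(d_rgb, d_rgba, pitch, width, height, stream);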
+
+#include "dali/error_handling.h"
+#include "dali/pipeline/operators/optical_flow/turing_of/optical_flow_turing.h"
+
+namespace dali {
+namespace optical_flow {
+namespace kernel {
+
+namespace {
+
+constexpr size_t kBlockSize = 32;
+
+
+/**
+ * Calculates the number of blocks needed to cover a given length
+ * @param length In bytes
+ * @param block_size
+ * @return
+ */
+inline size_t num_blocks(size_t length, size_t block_size) {
+  // Integer ceiling division
+  return (length + block_size - 1) / block_size;
+}
+
+
+/**
+ * Access a value at given (x, y) coordinates in a strided 2D array
+ * @param buffer
+ * @param x In pixels
+ * @param y In pixels
+ * @param pitch_bytes Offset, in bytes, between consecutive rows of the array
+ * @return Value at given coordinates
+ */
+template <typename T>
+__host__ __device__ constexpr T &
+pitch_xy(T *buffer, ptrdiff_t x, ptrdiff_t y, ptrdiff_t pitch_bytes) {
+  return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(buffer) + pitch_bytes * y)[x];
+}
+
+
+/**
+ * Launches a color-conversion kernel. The kernel does two things:
+ * 1. Converts the color type to RGBA (required by optical flow)
+ * 2. Reshapes the data to match the layout required by optical flow
+ *
+ * @tparam ColorConversionMethod Function that matches the signature:
+ *                               __global__ void (const uint8_t*, uint8_t*, size_t, size_t, size_t)
+ * @param cvtm Kernel to call
+ * @param input
+ * @param output
+ * @param pitch Stride within the output memory layout. In bytes
+ * @param width_px In pixels
+ * @param height
+ * @param out_channels Number of channels in the output data
+ * @param stream Stream within which the kernel is called
+ */
+template <typename ColorConversionMethod>
+void ConvertToOFLayout(ColorConversionMethod cvtm, const uint8_t *input, uint8_t *output,
+                       size_t pitch, size_t width_px, size_t height, int out_channels,
+                       cudaStream_t stream) {
+  DALI_ENFORCE(pitch >= out_channels * width_px);
+  dim3 block_dim(kBlockSize, kBlockSize);
+  dim3 grid_dim(num_blocks(out_channels * width_px, block_dim.x),
+                num_blocks(height, block_dim.y));
+  cvtm<<<grid_dim, block_dim, 0, stream>>>(input, output, pitch, width_px, height);
+}
+
+} // namespace
+
+
+__global__ void
+RgbToRgbaKernel(const uint8_t *__restrict__ input, uint8_t *__restrict__ output, size_t pitch,
+                size_t width_px, size_t height) {
+  constexpr size_t in_channels = 3, out_channels = 4;
+  size_t x = threadIdx.x + blockIdx.x * blockDim.x;
+  size_t y = threadIdx.y + blockIdx.y * blockDim.y;
+  if (x >= width_px || y >= height) return;
+  size_t in_idx = in_channels * x + in_channels * width_px * y;
+  size_t out_idx = out_channels * x + pitch * y;
+  output[out_idx] = input[in_idx];
+  output[out_idx + 1] = input[in_idx + 1];
+  output[out_idx + 2] = input[in_idx + 2];
+  output[out_idx + 3] = 255;
+}
+
+
+__global__ void
+BgrToRgbaKernel(const uint8_t *__restrict__ input, uint8_t *__restrict__ output, size_t pitch,
+                size_t width_px, size_t height) {
+  constexpr size_t in_channels = 3, out_channels = 4;
+  size_t x = threadIdx.x + blockIdx.x * blockDim.x;
+  size_t y = threadIdx.y + blockIdx.y * blockDim.y;
+  if (x >= width_px || y >= height) return;
+  size_t in_idx = in_channels * x + in_channels * width_px * y;
+  size_t out_idx = out_channels * x + pitch * y;
+  output[out_idx] = input[in_idx + 2];
+  output[out_idx + 1] = input[in_idx + 1];
+  output[out_idx + 2] = input[in_idx];
+  output[out_idx + 3] = 255;
+}
+
+
+void RgbToRgba(const uint8_t *input, uint8_t *output, size_t pitch, size_t width_px, size_t height,
+               cudaStream_t stream) {
+  ConvertToOFLayout(RgbToRgbaKernel, input, output, pitch, width_px, height, 4, stream);
+}
+
+
+void BgrToRgba(const uint8_t *input, uint8_t
*output, size_t pitch, size_t width_px, size_t height, + cudaStream_t stream) { + ConvertToOFLayout(BgrToRgbaKernel, input, output, pitch, width_px, height, 4, stream); +} + + +void Gray(const uint8_t *input, uint8_t *output, size_t pitch, size_t width_px, size_t height, + cudaStream_t stream) { + CUDA_CALL(cudaMemcpy2DAsync(output, pitch, input, width_px * sizeof(uint8_t), + width_px * sizeof(uint8_t), height, cudaMemcpyDefault, stream)); +} + + +__global__ void +DecodeFlowComponentKernel(const int16_t *__restrict__ input, float *__restrict__ output, + size_t pitch, size_t width_px, size_t height) { + size_t x = threadIdx.x + blockIdx.x * blockDim.x; + size_t y = threadIdx.y + blockIdx.y * blockDim.y; + if (x >= width_px || y >= height) return; + auto value_in = pitch_xy(input, x, y, pitch); + size_t outidx = x + width_px * y; + output[outidx] = decode_flow_component(value_in); +} + + +__global__ void +EncodeFlowComponentKernel(const float *__restrict__ input, int16_t *__restrict__ output, + size_t pitch, size_t width_px, size_t height) { + size_t x = threadIdx.x + blockIdx.x * blockDim.x; + size_t y = threadIdx.y + blockIdx.y * blockDim.y; + if (x >= width_px || y >= height) return; + size_t in_idx = x + width_px * y; + size_t out_idx = x + pitch * y; + output[out_idx] = encode_flow_component(input[in_idx]); +} + + +void DecodeFlowComponents(const int16_t *input, float *output, size_t pitch, size_t width_px, + size_t height, cudaStream_t stream) { + DALI_ENFORCE(pitch >= 2 * sizeof(int16_t) * width_px); + dim3 block_dim(kBlockSize, kBlockSize); + dim3 grid_dim(num_blocks(sizeof(float) * width_px, block_dim.x), + num_blocks(height, block_dim.y)); + DecodeFlowComponentKernel<<>>(input, output, pitch, + sizeof(int16_t) * width_px, height); +} + + +void EncodeFlowComponents(const float *input, int16_t *output, size_t pitch, size_t width_px, + size_t height, cudaStream_t stream) { + dim3 block_dim(kBlockSize, kBlockSize); + dim3 grid_dim(num_blocks(sizeof(int16_t) * width_px, block_dim.x), + num_blocks(height, block_dim.y)); + EncodeFlowComponentKernel<<>>(input, output, pitch, + width_px, height); +} + +} // namespace kernel +} // namespace optical_flow +} // namespace dali + diff --git a/cuda_code/options_1.cu b/cuda_code/options_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..271decd8a7007925b4ef10cb1021a79b467897f1 --- /dev/null +++ b/cuda_code/options_1.cu @@ -0,0 +1,803 @@ +/*************************************************************************************************** + * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + *modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + *notice, this list of conditions and the following disclaimer in the + *documentation and/or other materials provided with the distribution. + * * Neither the name of the NVIDIA CORPORATION nor the names of its + *contributors may be used to endorse or promote products derived from this + *software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, + *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING + *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, + *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ +/* \file + \brief Command line options for performance test program +*/ + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/version.h" + +#include "cutlass/library/util.h" + +#include "options.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace profiler { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Newline and indent for help strings +static char const* end_of_line = + "\n "; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Device::Device(cutlass::CommandLine const& cmdline) { + cmdline.get_cmd_line_argument("device", device, 0); + + cudaError_t result; + result = cudaGetDeviceProperties(&properties, device); + + if (result != cudaSuccess) { + throw std::runtime_error( + "cudaGetDeviceProperties() failed for given device"); + } + + result = cudaSetDevice(device); + if (result != cudaSuccess) { + throw std::runtime_error("cudaSetDevice() failed for given device."); + } + + // Permit overriding the compute capability + if (cmdline.check_cmd_line_flag("compute-capability")) { + int cc = compute_capability(); + cmdline.get_cmd_line_argument("compute-capability", cc, cc); + properties.major = cc / 10; + properties.minor = cc % 10; + } + + // Permit overriding the L2 cache capacity + if (cmdline.check_cmd_line_flag("llc-capacity")) { + int llc_capacity = 0; + cmdline.get_cmd_line_argument("llc-capacity", llc_capacity, 0); + + if (llc_capacity >= 0) { + properties.l2CacheSize = (llc_capacity << 10); + } + } +} + +void Options::Device::print_usage(std::ostream& out) const { + out << "Device:\n" + << " --device= " + << " CUDA Device ID\n\n"; + + int device_count = 0; + cudaError_t result = cudaGetDeviceCount(&device_count); + + if (result != cudaSuccess) { + out << " \n"; + } else { + for (int idx = 0; idx < device_count; ++idx) { + cudaDeviceProp prop; + result = cudaGetDeviceProperties(&prop, idx); + if (result != cudaSuccess) { + out << " " << std::endl; + break; + } else { + out << " [" << idx << "] - " << prop.name << " - SM " + << prop.major << "." << prop.minor << ", " + << prop.multiProcessorCount << " SMs @ " + << (prop.clockRate / 1000.0) << " MHz, " + << "L2 cache: " << (prop.l2CacheSize >> 20) + << " MB, Global Memory: " << (prop.totalGlobalMem >> 30) + << " GB" << std::endl; + } + } + out << "\n"; + } + + out << " --compute-capability= " + << " Override the compute capability.\n\n" + + << " --llc-capacity= " + << " Capacity of last-level cache in kilobytes. 
If this is non-zero," + << end_of_line + << " profiling phases cycle through different input tensors to " + "induce" + << end_of_line << " capacity misses in the L2.\n\n"; +} + +void Options::Device::print_device_info(std::ostream& out) const { + int num_devices; + cudaDeviceProp props; + + cudaError_t result; + result = cudaGetDeviceCount(&num_devices); + + if (result != cudaSuccess) { + throw std::runtime_error("cudaGetNumDevices() failed"); + } + + out << "Device Name,SM,CUDA Device ID,Phy Device ID" << std::endl; + + for (int device = 0; device < num_devices; device++) { + result = cudaSetDevice(device); + if (result != cudaSuccess) { + throw std::runtime_error("cudaSetDevice() failed for device"); + } + + result = cudaGetDeviceProperties(&props, device); + if (result != cudaSuccess) { + throw std::runtime_error( + "cudaGetDeviceProperties failed for device"); + } + + out << props.name << "," << props.major << props.minor << "," << device + << "," << props.multiGpuBoardGroupID << std::endl; + } +} + +void Options::Device::print_options(std::ostream& out, int indent) const { + out << indent_str(indent) << "device: " << device << "\n" + << indent_str(indent) + << "clock: " << int(double(properties.clockRate) / 1000.0) << "\n" + << indent_str(indent) << "compute-capability: " << compute_capability() + << "\n"; +} + +/// Returns the compute capability of the listed device (e.g. 61, 60, 70, 75) +int Options::Device::compute_capability() const { + return properties.major * 10 + properties.minor; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Initialization::Initialization(cutlass::CommandLine const& cmdline) { + cmdline.get_cmd_line_argument("initialization-enabled", enabled, true); + + if (cmdline.check_cmd_line_flag("initialization-provider")) { + std::string str; + cmdline.get_cmd_line_argument("initialization-provider", str); + provider = library::from_string(str); + if (provider == library::Provider::kInvalid) { + enabled = false; + } else if (provider != library::Provider::kReferenceHost && + provider != library::Provider::kReferenceDevice) { + throw std::runtime_error( + "Unsupported intialization provider specified."); + } + } else { + provider = library::Provider::kReferenceDevice; + } + + cmdline.get_cmd_line_argument("seed", seed, 2019); + + if (cmdline.check_cmd_line_flag("dist")) { + // user has set the data distribution (fix data distribution once set) + fix_data_distribution = true; + // set user provided data distribution + get_distribution(cmdline, "dist", data_distribution); + } else { + // profiler choosen data distribution (allowed to change based on + // numeric types) + fix_data_distribution = false; + // set uniform data distribution with range [-4, 4] + data_distribution.set_uniform(-4, 4, 0); + } +} + +/// Gets the initial distribution +void Options::Initialization::get_distribution(cutlass::CommandLine const& args, + std::string const& arg, + cutlass::Distribution& dist) { + struct { + const char* label; + cutlass::Distribution::Kind kind; + } distribution_kinds[] = {{"uniform", cutlass::Distribution::Uniform}, + {"gaussian", cutlass::Distribution::Gaussian}, + {"identity", cutlass::Distribution::Identity}, + {"sequential", cutlass::Distribution::Sequential}, + {0, cutlass::Distribution::Invalid}}; + + struct { + char const* label; + double* member; + } members[] = {{"min", &dist.uniform.min}, + {"max", &dist.uniform.max}, + {"mean", &dist.gaussian.mean}, + {"stddev", &dist.gaussian.stddev}, + 
{"start", &dist.sequential.start}, + {"delta", &dist.sequential.delta}, + {0, 0}}; + + using KeyValueVector = std::vector >; + + KeyValueVector values; + args.get_cmd_line_argument_pairs(arg.c_str(), values); + + // The parser expects the first token to be a string identifying the + // distribution type. + auto it = values.begin(); + if (it != values.end()) { + for (int i = 0; distribution_kinds[i].label; ++i) { + if (it->first == distribution_kinds[i].label) { + dist.kind = distribution_kinds[i].kind; + break; + } + } + ++it; + } + + // Subsequent key-value pairs update the named field of the distribution + // struct. + for (; it != values.end(); ++it) { + // Integer scaling factor - if < 0, no integer rounding is performed. + if ((it->first.compare("scale") == 0) && !it->second.empty()) { + std::stringstream ss; + ss << it->second; + ss >> dist.int_scale; + continue; // next token + } + + // Casts as integer without scaling + if (it->first.compare("integer") == 0) { + dist.int_scale = 0; + continue; // next token + } + + // initialize other members + for (int m = 0; members[m].label; ++m) { + if (it->first == members[m].label && !it->second.empty()) { + std::stringstream ss; + ss << it->second; + ss >> *(members[m].member); + } + } + } +} + +void Options::Initialization::print_usage(std::ostream& out) const { + out << "Initialization:\n" + + << " --initialization= " + << " Enables initialization (default: true). If false, device " + "memory is" + << end_of_line << " not initialized after allocation.\n\n" + + << " --initialization-provider= " + << " Selects initialization provider {host, device*}. (default: " + "'*')\n\n" + + << " --dist= " + << " Data distribution of input tensors {uniform*, gaussian, " + "identity, sequential}" + << end_of_line + << " --dist=uniform,min:,max:,scale:" + << end_of_line + << " " + "--dist=gaussian,mean:,stddev:,scale:" + << end_of_line + << " " + "--dist=sequential,start:,delta:,scale:" + << end_of_line << " --dist=identity\n\n" + + << " --seed= " + << " Random number generator seed. Used to enforce deterministic" + << end_of_line << " initialization.\n\n"; +} + +void Options::Initialization::print_options(std::ostream& out, + int indent) const {} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Library::Library(cutlass::CommandLine const& cmdline) { + algorithm_mode = AlgorithmMode::kDefault; + + if (cmdline.check_cmd_line_flag("library-algo-mode")) { + std::string mode = "default"; + cmdline.get_cmd_line_argument("library-algo-mode", mode); + algorithm_mode = from_string(mode); + } + + if (cmdline.check_cmd_line_flag("library-algos")) { + // If algorithms are specified, override as kBest. 
+ algorithm_mode = AlgorithmMode::kBest; + + std::vector tokens; + cmdline.get_cmd_line_arguments("library-algos", tokens); + + algorithms.reserve(tokens.size()); + + for (auto const& token : tokens) { + if (token.find(":")) { + // todo - tokenized range + } else { + int algo; + std::stringstream ss; + + ss << token; + ss >> algo; + + algorithms.push_back(algo); + } + } + } +} + +void Options::Library::print_usage(std::ostream& out) const { + out << "Library:\n" + + << " --library-algo-mode= " + << " Indicates algorithm mode used to call libraries such as cuBLAS " + "and cuDNN.\n" + << " " + << " mode={default*,matching,best}\n\n" + + << " --library-algos= " + << " If --algorithm-mode=best, permits specifying a selection of " + "algorithms.\n\n"; +} + +void Options::Library::print_options(std::ostream& out, int indent) const { + out << indent_str(indent) + << "library-algo-mode: " << to_string(algorithm_mode) << "\n" + << indent_str(indent) << "library-algos: "; + + int j = 0; + for (int x : algorithms) { + out << (j++ ? "," : "") << x; + } + + out << "\n\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Profiling::Profiling(cutlass::CommandLine const& cmdline) { + cmdline.get_cmd_line_argument("workspace-count", workspace_count, 0); + cmdline.get_cmd_line_argument("warmup-iterations", warmup_iterations, 10); + cmdline.get_cmd_line_argument("profiling-iterations", iterations, 100); + cmdline.get_cmd_line_argument("sleep-duration", sleep_duration, 50); + cmdline.get_cmd_line_argument("profiling-enabled", enabled, true); + + if (cmdline.check_cmd_line_flag("providers")) { + std::vector tokens; + cmdline.get_cmd_line_arguments("providers", tokens); + + providers.clear(); + + for (auto const& token : tokens) { + providers.push_back(library::from_string(token)); + } + } else { + providers.push_back(library::Provider::kCUTLASS); + providers.push_back(library::Provider::kCUBLAS); + providers.push_back(library::Provider::kCUDNN); + } +} + +void Options::Profiling::print_usage(std::ostream& out) const { + out << "Profiling:\n" + + << " --workspace-count= " + << " Number of discrete workspaces maintained to avoid " + "cache-resident " + << end_of_line + << " If zero (default), the amount is chosen for each workload " + "based on " + << end_of_line << " capacity of the last-level cache.\n\n" + + << " --profiling-iterations= " + << " Number of iterations to profile each kernel. If zero, kernels" + << end_of_line << " are launched up to the profiling duration.\n\n" + + << " --warmup-iterations= " + << " Number of iterations to execute each kernel prior to " + "profiling.\n\n" + + << " --sleep-duration= " + << " Number of ms to sleep between profiling periods (ms).\n\n" + + << " --profiling-enabled= " + << " If true, profiling is actually conducted.\n\n" + + << " --providers= " + << " List of providers to be profiled for performance. (default: " + "'*')" + << end_of_line << " Gemm providers {cutlass*, cublas*}" + << end_of_line << " Conv2d providers {cutlass*, cudnn*}" + << "\n\n"; +} + +void Options::Profiling::print_options(std::ostream& out, int indent) const { + out << indent_str(indent) << "profiling_iterations: " << iterations << "\n" + << indent_str(indent) << "sleep_duration: " << sleep_duration << "\n" + << indent_str(indent) << "profiling_enabled: " << enabled << "\n" + << indent_str(indent) << "providers: ["; + + int j = 0; + for (auto const& provider : providers) { + out << (j++ ? 
", " : "") << library::to_string(provider); + } + out << "]\n"; +} + +/// Returns true if a provider is enabled +bool Options::Profiling::provider_enabled(library::Provider provider) const { + return std::find(providers.begin(), providers.end(), provider) != + providers.end(); +} + +/// Returns the index of a provider if its enabled +size_t Options::Profiling::index(library::Provider provider) const { + size_t idx = 0; + for (auto const& x : providers) { + if (x == provider) { + return idx; + } + ++idx; + } + return idx; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Verification::Verification(cutlass::CommandLine const& cmdline) { + cmdline.get_cmd_line_argument("verification-enabled", enabled, true); + + cmdline.get_cmd_line_argument("epsilon", epsilon, 0.05); + + cmdline.get_cmd_line_argument("nonzero-floor", nonzero_floor, 1.0 / 256.0); + + if (cmdline.check_cmd_line_flag("save-workspace")) { + std::string value; + cmdline.get_cmd_line_argument("save-workspace", value); + save_workspace = from_string(value); + } else { + save_workspace = SaveWorkspace::kNever; + } + + if (cmdline.check_cmd_line_flag("verification-providers")) { + std::vector tokens; + cmdline.get_cmd_line_arguments("verification-providers", tokens); + + providers.clear(); + + for (auto const& token : tokens) { + library::Provider provider = + library::from_string(token); + if (provider != library::Provider::kInvalid) { + providers.push_back(provider); + } + } + } else { + providers.push_back(library::Provider::kCUBLAS); + providers.push_back(library::Provider::kReferenceDevice); + providers.push_back(library::Provider::kCUDNN); + } +} + +void Options::Verification::print_usage(std::ostream& out) const { + out << "Verification:\n" + + << " --verification-enabled= " + << " Whether to perform verification checks.\n\n" + + << " --epsilon= " + << " Error threshold. Setting to zero (default) requires" + << end_of_line << " bit-level equivalence.\n\n" + + << " --nonzero-floor= " + << " Results whose absolute value is less than this quantity" + << end_of_line << " are treated as zero for comparisons.\n\n" + + << " --save-workspace= " + << " Specifies when to save the GEMM inputs and results to the " + "filesystem." + << end_of_line + << " --save-workspace=never never save workspace (default)" + << end_of_line + << " --save-workspace=incorrect save workspace for incorrect " + "results" + << end_of_line + << " --save-workspace=always always save workspace\n\n" + + << " --verification-providers= " + << " List of providers used to verify result. (default: '*')" + << end_of_line << " Gemm verification-providers {cublas*}" + << end_of_line + << " Conv2d verification-providers {cudnn*, device*, host}" + << "\n\n"; +} + +void Options::Verification::print_options(std::ostream& out, int indent) const { + out << indent_str(indent) << "verification_enabled: " << enabled << "\n" + << indent_str(indent) << "epsilon: " << epsilon << "\n" + << indent_str(indent) << "save_workspace: " << to_string(save_workspace) + << "\n" + << indent_str(indent) << "verification_providers: ["; + + int j = 0; + for (auto const& provider : providers) { + out << (j++ ? 
", " : "") << library::to_string(provider); + } + out << "]\n"; +} + +/// Returns true if a provider is enabled +bool Options::Verification::provider_enabled(library::Provider provider) const { + return std::find(providers.begin(), providers.end(), provider) != + providers.end(); +} + +/// Returns the index of a provider if its enabled +size_t Options::Verification::index(library::Provider provider) const { + size_t idx = 0; + for (auto const& x : providers) { + if (x == provider) { + return idx; + } + ++idx; + } + return idx; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Report::Report(cutlass::CommandLine const& cmdline) { + cmdline.get_cmd_line_argument("append", append, false); + cmdline.get_cmd_line_argument("output", output_path); + cmdline.get_cmd_line_argument("junit-output", junit_output_path); + + if (cmdline.check_cmd_line_flag("tags")) { + cmdline.get_cmd_line_argument_pairs("tags", pivot_tags); + } + + cmdline.get_cmd_line_argument("report-not-run", report_not_run, false); + + cmdline.get_cmd_line_argument("verbose", verbose, true); +} + +void Options::Report::print_usage(std::ostream& out) const { + out << "Report:\n" + + << " --append= " + << " If true, result is appended to possibly existing file. " + "Otherwise, " + << end_of_line << " any existing file is overwritten.\n\n" + + << " --output= " + << " Path to output file for machine readable results. Operation " + "kind and '.csv' is appended.\n\n" + + << " --junit-output= " + << " Path to junit output file for result reporting. Operation kind " + "and '.junit.xml' is appended.\n\n" + + << " --report-not-run= " + << " If true, reports the status of all kernels including those that" + << end_of_line << " do not satisfy the given arguments.\n\n" + + << " --tags= " + << " Inserts leading columns in output table and uniform values for " + "each" + << end_of_line + << " column. Useful for generating pivot tables.\n\n" + + << " --verbose= " + << " Prints human-readable text to stdout. 
If false, nothing is " + "written to stdout.\n\n"; +} + +void Options::Report::print_options(std::ostream& out, int indent) const { + out << indent_str(indent) << "append: " << append << "\n" + << indent_str(indent) << "output: " << output_path << "\n" + << indent_str(indent) << "junit-output: " << junit_output_path << "\n" + << indent_str(indent) << "report_not_run: " << report_not_run << "\n" + << indent_str(indent) << "tags:\n"; + + for (auto const& tag : pivot_tags) { + out << indent_str(indent + 1) << tag.first << ": " << tag.second + << "\n"; + } + + out << indent_str(indent) << "verbose: " << verbose << "\n"; +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::About::About(cutlass::CommandLine const& cmdline) { + help = cmdline.check_cmd_line_flag("help"); + version = cmdline.check_cmd_line_flag("version"); + device_info = cmdline.check_cmd_line_flag("device-info"); +} + +void Options::About::print_usage(std::ostream& out) const { + out << "About:\n" + << " --version "; + + print_version(out); + + out << "\n"; +} + +void Options::About::print_version(std::ostream& out) { + out << "CUTLASS " << cutlass::getVersionString() << " built on " << __DATE__ + << " at " << __TIME__; + if (!cutlass::getGitRevision().empty()) + out << " with commit " << cutlass::getGitRevision() << ""; +} + +void Options::About::print_options(std::ostream& out, int indent) const {} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +Options::Options(cutlass::CommandLine const& cmdline) + : cmdline(cmdline), + device(cmdline), + initialization(cmdline), + library(cmdline), + profiling(cmdline), + verification(cmdline), + report(cmdline), + about(cmdline) { + if (cmdline.check_cmd_line_flag("mode")) { + std::string token; + cmdline.get_cmd_line_argument("mode", token); + execution_mode = from_string(token); + } else { + execution_mode = ExecutionMode::kProfile; + } + + // Enumerating kernels is equivalent to a dry run. + if (execution_mode == ExecutionMode::kEnumerate) { + execution_mode = ExecutionMode::kDryRun; + } + + if (cmdline.check_cmd_line_flag("operation")) { + std::string str; + cmdline.get_cmd_line_argument("operation", str); + operation_kind = library::from_string(str); + } else if (cmdline.check_cmd_line_flag("function")) { + std::string str; + cmdline.get_cmd_line_argument("function", str); + operation_kind = library::from_string(str); + } else { + operation_kind = library::OperationKind::kInvalid; + } + + if (cmdline.check_cmd_line_flag("operation_names")) { + cmdline.get_cmd_line_arguments("operation_names", operation_names); + } else if (cmdline.check_cmd_line_flag("kernels")) { + cmdline.get_cmd_line_arguments("kernels", operation_names); + } + + if (cmdline.check_cmd_line_flag("ignore-kernels")) { + cmdline.get_cmd_line_arguments("ignore-kernels", + excluded_operation_names); + } + + // Prevent launches on the device for anything other than CUTLASS operation + if (execution_mode == ExecutionMode::kTrace) { + initialization.provider = library::Provider::kReferenceHost; + verification.enabled = false; + profiling.enabled = false; + } +} + +void Options::print_usage(std::ostream& out) const { + out << "CUTLASS Profiler\n" + << "usage:\n\n" + << " cutlass_profiler [options]\n\n" + << " --help\n\n" + + << " --mode= " + << " Cutlass profiler execution mode." 
<< end_of_line + << " --mode=profile regular verification and profiling " + "(default)" + << end_of_line + << " --mode=dry_run no kernels are launched or workspaces " + "allocated" + << end_of_line + << " --mode=enumerate lists all operation kind and operations" + << end_of_line + << " --mode=trace executes a single device-side computation " + "with" + << end_of_line + << " no other kernel launches\n\n" + + << " --device-info " + << " Prints information on all GPUs present in the system\n\n" + + << " --operation= " + << " CUTLASS operation to profile.\n\n" + + << " --kernels= " + << " Filter operations by kernel names. For example, call all " + "kernels with" + << end_of_line + << " (\"s1688\" and \"nt\") or (\"s844\" and \"tn\" and " + "\"align8\") in their" + << end_of_line + << " operation name using --kernels=\"s1688*nt, " + "s884*tn*align8\"\n\n" + + << " --ignore-kernels= " + << " Excludes kernels whose names match anything in this list.\n\n"; + + // + // Detailed options + // + + device.print_usage(out); + out << "\n"; + + initialization.print_usage(out); + out << "\n"; + + library.print_usage(out); + out << "\n"; + + profiling.print_usage(out); + out << "\n"; + + verification.print_usage(out); + out << "\n"; + + report.print_usage(out); + out << "\n"; + + about.print_usage(out); + out << "\n"; +} + +void Options::print_options(std::ostream& out) const { + out << "options:\n" + << " help: " << about.help << "\n" + << " mode: " << to_string(execution_mode) << "\n"; + + out << " device:\n"; + device.print_options(out, 2); + + out << " initialization:\n"; + initialization.print_options(out, 2); + + out << " profiling:\n"; + profiling.print_options(out, 2); + + out << " verification:\n"; + verification.print_options(out, 2); + + out << " report:\n"; + report.print_options(out, 2); +} + +std::string Options::indent_str(int indent) { + return std::string(indent * 2, ' '); +} + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace profiler +} // namespace cutlass diff --git a/cuda_code/optix_vol_intersect.cu b/cuda_code/optix_vol_intersect.cu new file mode 100644 index 0000000000000000000000000000000000000000..65e7250c1f1d0d6c239910be50a50fac3b20af9f --- /dev/null +++ b/cuda_code/optix_vol_intersect.cu @@ -0,0 +1,167 @@ +//-------------------------------------------------------------------------------- +// NVIDIA(R) GVDB VOXELS +// Copyright 2017, NVIDIA Corporation +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, this +// list of conditions and the following disclaimer in the documentation and/or +// other materials provided with the distribution. +// 3. Neither the name of the copyright holder nor the names of its contributors may +// be used to endorse or promote products derived from this software without specific +// prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT +// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +// OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Version 1.0: Rama Hoetzlein, 5/1/2017 +//---------------------------------------------------------------------------------- + +#include "optix_extra_math.cuh" +#include "texture_fetch_functions.h" + +//------------------- GVDB Structure +#define OPTIX_PATHWAY +#include "cuda_gvdb_scene.cuh" // GVDB Scene +#include "cuda_gvdb_nodes.cuh" // GVDB Node structure +#include "cuda_gvdb_geom.cuh" // GVDB Geom helpers +#include "cuda_gvdb_dda.cuh" // GVDB DDA +#include "cuda_gvdb_raycast.cuh" // GVDB Raycasting +//-------------------- + + +rtBuffer brick_buffer; + +rtDeclareVariable(uint, mat_id, , ); +rtDeclareVariable(float3, light_pos, , ); + +rtDeclareVariable(float3, back_hit_point, attribute back_hit_point, ); +rtDeclareVariable(float3, front_hit_point, attribute front_hit_point, ); +rtDeclareVariable(float3, geometric_normal, attribute geometric_normal, ); +rtDeclareVariable(float3, shading_normal, attribute shading_normal, ); +rtDeclareVariable(float4, deep_color, attribute deep_color, ); + +rtDeclareVariable(optix::Ray, ray, rtCurrentRay, ); + +struct PerRayData_radiance +{ + float3 result; + float length; + float alpha; + int depth; + int rtype; +}; + +rtDeclareVariable(PerRayData_radiance, prd_radiance, rtPayload, ); + +//------ Intersection Program + +RT_PROGRAM void vol_intersect( int primIdx ) +{ + float3 hit = make_float3(NOHIT,NOHIT,NOHIT); + float3 norm = make_float3(0,0,0); + float4 clr = make_float4(0,0,0,0); + float t; + + //-- Ray march + float4 hclr; + rayCast ( SCN_SHADE, gvdb.top_lev, 0, ray.origin, ray.direction, hit, norm, hclr, raySurfaceBrick ); + if ( hit.z == NOHIT) return; + t = length ( hit - ray.origin ); + + // report intersection to optix + if ( rtPotentialIntersection( t ) ) { + + shading_normal = norm; + geometric_normal = norm; + front_hit_point = hit + shading_normal*gvdb.voxelsize; + back_hit_point = hit - shading_normal*gvdb.voxelsize*5; + deep_color = make_float4(1,1,1,1); + if ( prd_radiance.rtype == SHADOW_RAY ) deep_color.w = (hit.x==NOHIT) ? 1 : 0; + + rtReportIntersection( mat_id ); + } +} + +RT_PROGRAM void vol_deep( int primIdx ) +{ + float3 hit = make_float3(NOHIT,NOHIT,NOHIT); + float3 norm = make_float3(0,1,0); + float4 clr = make_float4(0,0,0,1); + if ( prd_radiance.rtype == MESH_RAY ) return; + + // ---- Debugging + // Uncomment this code to demonstrate tracing of the bounding box + // surrounding the volume. 
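+	// (It reports the box entry and exit distances as the front and back hit
+	// points and colors the hit by its position, making the volume bounds visible.)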
+ /*hit = rayBoxIntersect ( ray.origin, ray.direction, gvdb.bmin, gvdb.bmax ); + if ( hit.z == NOHIT ) return; + if ( rtPotentialIntersection ( hit.x ) ) { + shading_normal = norm; + geometric_normal = norm; + front_hit_point = ray.origin + hit.x * ray.direction; + back_hit_point = ray.origin + hit.y * ray.direction; + deep_color = make_float4( front_hit_point/200.0, 0.5); + rtReportIntersection( 0 ); + } + return;*/ + + //-- Raycast + rayCast ( SHADE_VOLUME, gvdb.top_lev, 0, ray.origin, ray.direction, hit, norm, clr, rayDeepBrick ); + if ( hit.z == NOHIT) return; + + if ( rtPotentialIntersection( hit.x ) ) { + + shading_normal = norm; + geometric_normal = norm; + front_hit_point = ray.origin + hit.x * ray.direction; + back_hit_point = ray.origin + hit.y * ray.direction; + deep_color = make_float4 ( fxyz(clr), 1.0-clr.w ); + + rtReportIntersection( 0 ); + } +} + +RT_PROGRAM void vol_levelset ( int primIdx ) +{ + float3 hit = make_float3(NOHIT,1,1); + float3 norm = make_float3(0,0,0); + float4 clr = make_float4(0,0,0,0); + float t; + + //-- Ray march + rayCast ( 0, gvdb.top_lev, 0, ray.origin, ray.direction, hit, norm, clr, rayLevelSetBrick ); + if ( hit.x == NOHIT) return; + t = length ( hit - ray.origin ); + + // report intersection to optix + if ( rtPotentialIntersection( t ) ) { + + shading_normal = norm; + geometric_normal = norm; + front_hit_point = hit + shading_normal*gvdb.voxelsize; + back_hit_point = hit - shading_normal*gvdb.voxelsize*5; + deep_color = make_float4(1,1,1,1); + if ( prd_radiance.rtype == SHADOW_RAY ) deep_color.w = (hit.x==NOHIT) ? 1 : 0; + + rtReportIntersection( mat_id ); + } +} + + +RT_PROGRAM void vol_bounds (int primIdx, float result[6]) +{ + // AABB bounds is just the brick extents + optix::Aabb* aabb = (optix::Aabb*) result; + aabb->m_min = brick_buffer[ primIdx*2 ]; + aabb->m_max = brick_buffer[ primIdx*2+1 ]; +} + diff --git a/cuda_code/or_2.cu b/cuda_code/or_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..846e4c6fd812e1c4f38b3763f5f7c8455f72001f --- /dev/null +++ b/cuda_code/or_2.cu @@ -0,0 +1,66 @@ +#include +#include +#include +#include +#include + +namespace cusp { + +template +__global__ void kernel_or(const T **ins, T *out, int ninputs, int N) { + int i = blockIdx.x * blockDim.x + threadIdx.x; + if (i < N) { + T *in = (T *)(*ins); + out[i] = in[i]; + for (int j = 1; j < ninputs; j++) { + in = (T*)(*(ins+j)); + out[i] |= in[i]; //(*(in + j))[i]; + } + } +} + +template or_bitwise::or_bitwise(int ninputs) : _ninputs(ninputs) { + checkCudaErrors(cudaMalloc(&_dev_ptr_array, sizeof(void *) * _ninputs)); +} + +template +cudaError_t or_bitwise::launch(const std::vector& inputs, T *output, + int ninputs, int grid_size, int block_size, + size_t nitems, cudaStream_t stream) { + + // There is a better way to do this here - just getting the pointers into + // device memory + checkCudaErrors(cudaMemcpy(_dev_ptr_array, inputs.data(), sizeof(void *) * ninputs, + cudaMemcpyHostToDevice)); + + if (stream) { + kernel_or<<>>((const T **)_dev_ptr_array, + output, ninputs, nitems); + } else { + kernel_or<<>>((const T **)_dev_ptr_array, output, + ninputs, nitems); + } + return cudaPeekAtLastError(); +} + +template +cudaError_t or_bitwise::launch(const std::vector& inputs, + const std::vector& outputs, size_t nitems) { + return launch(inputs, (T *)outputs[0], _ninputs, _grid_size, _block_size, + nitems, _stream); +} + +template +cudaError_t or_bitwise::occupancy(int *minBlock, int *minGrid) { + return cudaOccupancyMaxPotentialBlockSize(minGrid, 
minBlock, kernel_or, 0, + 0); +} + +#define IMPLEMENT_KERNEL(T) template class or_bitwise; + +IMPLEMENT_KERNEL(int8_t) +IMPLEMENT_KERNEL(int16_t) +IMPLEMENT_KERNEL(int32_t) +IMPLEMENT_KERNEL(int64_t) + +} // namespace cusp \ No newline at end of file diff --git a/cuda_code/orderby_tests_1.cu b/cuda_code/orderby_tests_1.cu new file mode 100644 index 0000000000000000000000000000000000000000..c3b024f7e9c197a9edba200ed70a95828cacacb8 --- /dev/null +++ b/cuda_code/orderby_tests_1.cu @@ -0,0 +1,660 @@ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "order_by_type_vectors.h" + +#include + +// See this header for all of the handling of valids' vectors +#include + +// See this header for all of the recursive handling of tuples of vectors +#include + +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +// A new instance of this class will be created for each *TEST(OrderbyTest, ...) +// Put all repeated setup and validation stuff here +template +struct OrderByTest : public GdfTest { + const bool nulls_are_smallest = test_parameters::nulls_are_smallest; + + // The sorting order for each column is passed via a member of the template argument class + std::vector sort_order_types; + + // multi_column_t is a tuple of vectors. The number of vectors in the tuple + // determines the number of columns to be ordered by, and the value_type of each + // vector determines the data type of the column + using multi_column_t = typename test_parameters::multi_column_t; + multi_column_t orderby_columns; + + size_t numberOfColumns = std::tuple_size::value; + + // valids for multi_columns + std::vector orderby_valids; + + // Type for a unique_ptr to a gdf_column with a custom deleter + // Custom deleter is defined at construction + using gdf_col_pointer = typename std::unique_ptr>; + + // Containers for unique_ptrs to gdf_columns that will be used in the orderby + // functions. 
unique_ptrs are used to automate freeing device memory + std::vector gdf_orderby_columns; + gdf_col_pointer gdf_sort_order_types; + gdf_col_pointer gdf_output_indices_column; + + // Containers for the raw pointers to the gdf_columns that will be used as + // input to the orderby functions + std::vector gdf_raw_orderby_columns; + gdf_column* gdf_raw_sort_order_types; + gdf_column* gdf_raw_output_indices_column; + + OrderByTest() + { + // Use constant seed so the psuedo-random order is the same each time + // Each time the class is constructed a new constant seed is used + static size_t number_of_instantiations{0}; + std::srand(number_of_instantiations++); + } + + ~OrderByTest() {} + + /* --------------------------------------------------------------------------* + * @brief Creates a unique_ptr that wraps a gdf_column structure + * initialized with a host vector + * + * @param host_vector vector containing data to be transfered to device side column + * @param host_valid vector containing valid masks associated with the supplied vector + * @param n_count null_count to be set for the generated column + * + * @returns A unique_ptr wrapping the new gdf_column + * --------------------------------------------------------------------------*/ + template + gdf_col_pointer create_gdf_column(std::vector const& host_vector, + cudf::valid_type* host_valid, + const cudf::size_type n_count) + { + // Deduce the type and set the gdf_dtype accordingly + gdf_dtype gdf_col_type = GDF_INT8; + if (std::is_same::value) + gdf_col_type = GDF_INT8; + else if (std::is_same::value) + gdf_col_type = GDF_INT8; + else if (std::is_same::value) + gdf_col_type = GDF_INT16; + else if (std::is_same::value) + gdf_col_type = GDF_INT16; + else if (std::is_same::value) + gdf_col_type = GDF_INT32; + else if (std::is_same::value) + gdf_col_type = GDF_INT32; + else if (std::is_same::value) + gdf_col_type = GDF_INT64; + else if (std::is_same::value) + gdf_col_type = GDF_INT64; + else if (std::is_same::value) + gdf_col_type = GDF_FLOAT32; + else if (std::is_same::value) + gdf_col_type = GDF_FLOAT64; + + // Create a new instance of a gdf_column with a custom deleter that will + // free the associated device memory when it eventually goes out of scope + auto deleter = [](gdf_column* col) { + col->size = 0; + RMM_FREE(col->data, 0); + RMM_FREE(col->valid, 0); + }; + gdf_col_pointer the_column{new gdf_column{}, deleter}; + + // Allocate device storage for gdf_column and copy contents from host_vector + EXPECT_EQ(RMM_ALLOC(&(the_column->data), host_vector.size() * sizeof(col_type), 0), + RMM_SUCCESS); + EXPECT_EQ(cudaMemcpy(the_column->data, + host_vector.data(), + host_vector.size() * sizeof(col_type), + cudaMemcpyHostToDevice), + cudaSuccess); + + // Allocate device storage for gdf_column.valid + if (host_valid != nullptr) { + EXPECT_EQ( + RMM_ALLOC((void**)&(the_column->valid), gdf_valid_allocation_size(host_vector.size()), 0), + RMM_SUCCESS); + EXPECT_EQ(cudaMemcpy(the_column->valid, + host_valid, + gdf_num_bitmask_elements(host_vector.size()), + cudaMemcpyHostToDevice), + cudaSuccess); + the_column->null_count = n_count; + } else { + the_column->valid = nullptr; + the_column->null_count = 0; + } + + // Fill the gdf_column members + the_column->size = host_vector.size(); + the_column->dtype = gdf_col_type; + gdf_dtype_extra_info extra_info{TIME_UNIT_NONE}; + the_column->dtype_info = extra_info; + + return the_column; + } + + // Compile time recursion to convert each vector in a tuple of vectors into + // a gdf_column and append it to a 
vector of gdf_columns + template + inline typename std::enable_if::type convert_tuple_to_gdf_columns( + std::vector& gdf_columns, + std::tuple...>& t, + std::vector& valids, + const cudf::size_type n_count) + { + // bottom of compile-time recursion + // purposely empty... + } + template + inline typename std::enable_if < I::type convert_tuple_to_gdf_columns( + std::vector& gdf_columns, + std::tuple...>& t, + std::vector& valids, + const cudf::size_type n_count) + { + // Creates a gdf_column for the current vector and pushes it onto + // the vector of gdf_columns + if (valids.size() != 0) { + gdf_columns.push_back(create_gdf_column(std::get(t), valids[I].get(), n_count)); + } else { + gdf_columns.push_back(create_gdf_column(std::get(t), nullptr, n_count)); + } + + // recurse to next vector in tuple + convert_tuple_to_gdf_columns(gdf_columns, t, valids, n_count); + } + + // Converts a tuple of host vectors into a vector of gdf_columns + std::vector initialize_gdf_columns(multi_column_t host_columns, + std::vector& valids, + const cudf::size_type n_count) + { + std::vector gdf_columns; + convert_tuple_to_gdf_columns(gdf_columns, host_columns, valids, n_count); + return gdf_columns; + } + + /* --------------------------------------------------------------------------* + * @brief Initializes a set of columns with random values for the order by + * operation. + * + * @param orderby_column_length The length of the orderby set of columns + * @param orderby_column_range The upper bound of random values for the orderby + * columns. Values are [0, orderby_column_range) + * @param n_count The null count in the columns + * @param random_order_type_values Randomly initialize the sort type for each + * column. + * @param print Optionally print the set of columns for debug + * -------------------------------------------------------------------------*/ + void create_input(size_t orderby_column_length, + size_t orderby_column_range, + const cudf::size_type n_count = 0, + bool random_order_type_values = true, + bool print = false) + { + initialize_tuple(orderby_columns, orderby_column_length, orderby_column_range); + + auto n_columns = std::tuple_size::value; + initialize_valids(orderby_valids, n_columns, orderby_column_length, n_count); + + gdf_orderby_columns = initialize_gdf_columns(orderby_columns, orderby_valids, n_count); + + // Fill vector of raw pointers to gdf_columns + gdf_raw_orderby_columns.clear(); + for (auto const& c : gdf_orderby_columns) { gdf_raw_orderby_columns.push_back(c.get()); } + + initialize_order_by_types(sort_order_types, n_columns, random_order_type_values); + gdf_sort_order_types = create_gdf_column(sort_order_types, nullptr, 0); + gdf_raw_sort_order_types = gdf_sort_order_types.get(); + + if (print) { + std::cout << "orderby column(s) created. Size: " << std::get<0>(orderby_columns).size() + << std::endl; + print_tuples_valids_and_order_by_types(orderby_columns, orderby_valids, sort_order_types); + } + } + + void create_gdf_output_buffers(const size_t orderby_column_length) + { + std::vector temp(orderby_column_length, 0); + gdf_output_indices_column = create_gdf_column(temp, nullptr, 0); + gdf_raw_output_indices_column = gdf_output_indices_column.get(); + } + + // Compile time recursion to sort an array of indices by each vector in a tuple of vectors + template + inline typename std::enable_if::type sort_multi_column( + std::tuple...>& t, + std::vector& valids, + std::vector& asc_desc, + std::vector& indices) + { + // bottom of compile-time recursion + // purposely empty... 
+ } + template + inline typename std::enable_if < + I::type sort_multi_column(std::tuple...>& t, + std::vector& valids, + std::vector& asc_desc, + std::vector& indices) + { + const size_t col_index = sizeof...(Tp) - I - 1; + + // First column have higher priority so we sort back to front + auto column = std::get(t); + auto column_valids = valids[col_index].get(); + + // Group the invalid rows together at the beginning or the end + bool nulls_at_front = (nulls_are_smallest && asc_desc[col_index] == GDF_ORDER_ASC) || + (!nulls_are_smallest && asc_desc[col_index] == GDF_ORDER_DESC); + size_t invalid_count = 0; + for (size_t i = 0; i < column.size(); ++i) { + size_t j = (nulls_at_front ? i : column.size() - i - 1); + if (!gdf_is_valid(column_valids, indices[j])) { + if (nulls_at_front) { + std::rotate( + indices.begin() + invalid_count, indices.begin() + i, indices.begin() + i + 1); + } else { + std::rotate( + indices.rbegin() + invalid_count, indices.rbegin() + i, indices.rbegin() + i + 1); + } + ++invalid_count; + } + } + + auto cmp = [&](size_t i1, size_t i2) { + return (asc_desc[col_index] == GDF_ORDER_ASC ? column[i1] < column[i2] + : column[i1] > column[i2]); + }; + + if (nulls_at_front) { + std::stable_sort(indices.begin() + invalid_count, indices.end(), cmp); + } else { + std::stable_sort(indices.begin(), indices.end() - invalid_count, cmp); + } + + // recurse to next vector in tuple + sort_multi_column(t, valids, asc_desc, indices); + } + + /* --------------------------------------------------------------------------*/ + /** + * @brief Computes a reference solution + * + * @param print Option to print the solution for debug + * + * @returns A vector of 'size_t' sorted indices + */ + /* ----------------------------------------------------------------------------*/ + std::vector compute_reference_solution(bool print = false) + { + const size_t colums_size = std::get<0>(orderby_columns).size(); + + std::vector reference_result(colums_size); + std::iota(std::begin(reference_result), std::end(reference_result), 0); + + sort_multi_column(orderby_columns, orderby_valids, sort_order_types, reference_result); + + if (print) { + std::cout << "Reference result size: " << reference_result.size() << std::endl; + std::cout << "Indices:" << std::endl; + std::copy(reference_result.begin(), + reference_result.end(), + std::ostream_iterator(std::cout, ", ")); + std::cout << "\n"; + } + + return reference_result; + } + + /* --------------------------------------------------------------------------*/ + /** + * @brief Computes the result of sorting the set of columns with the libgdf functions + * + * @param use_default_sort_order Whether or not to sort using the default ascending order + * @param print Option to print the result computed by the libgdf function + */ + /* ----------------------------------------------------------------------------*/ + std::vector compute_gdf_result(bool use_default_sort_order = false, + bool print = false, + gdf_error expected_result = GDF_SUCCESS) + { + const int num_columns = std::tuple_size::value; + + gdf_error result_error{GDF_SUCCESS}; + + gdf_column** columns_to_sort = gdf_raw_orderby_columns.data(); + gdf_column* sort_order_types = gdf_raw_sort_order_types; + gdf_column* sorted_indices_output = gdf_raw_output_indices_column; + + gdf_context ctxt; + if (nulls_are_smallest) + ctxt.flag_null_sort_behavior = GDF_NULL_AS_SMALLEST; + else + ctxt.flag_null_sort_behavior = GDF_NULL_AS_LARGEST; + + result_error = + gdf_order_by(columns_to_sort, + (use_default_sort_order ? 
nullptr : (int8_t*)(sort_order_types->data)), + num_columns, + sorted_indices_output, + &ctxt); + + EXPECT_EQ(expected_result, result_error) + << "The gdf order by function did not complete successfully"; + + // If the expected result was not GDF_SUCCESS, then this test was testing for a + // specific error condition, in which case we return imediately and do not do + // any further work on the output + if (GDF_SUCCESS != expected_result) { return std::vector(); } + + size_t output_size = sorted_indices_output->size; + int* device_result = static_cast(sorted_indices_output->data); + + // Host vector to hold gdf sort output + std::vector host_result(output_size); + + // Copy result of gdf sorted_indices_output the host + EXPECT_EQ( + cudaMemcpy( + host_result.data(), device_result, output_size * sizeof(int), cudaMemcpyDeviceToHost), + cudaSuccess); + + if (print) { + std::cout << "GDF result size: " << host_result.size() << std::endl; + std::cout << "Indices:" << std::endl; + std::copy( + host_result.begin(), host_result.end(), std::ostream_iterator(std::cout, ", ")); + std::cout << "\n"; + } + + return std::vector(host_result.begin(), host_result.end()); + } +}; + +// This structure is used to nest the number/types of columns and +// the nulls_are_smallest flag for use with Google Test type-parameterized +// tests. +template +struct TestParameters { + // The tuple of vectors that determines the number and types of the columns to sort + using multi_column_t = tuple_of_vectors; + + // nulls are first + const static bool nulls_are_smallest{smaller_nulls}; +}; + +template +using VTuple = std::tuple...>; + +// Using Google Tests "Type Parameterized Tests" +// Every test defined as TYPED_TEST(OrderByTest, *) will be run once for every instance of +// TestParameters defined below +typedef ::testing::Types< + // Single column Order by Tests for some types + TestParameters, false>, + TestParameters, false>, + TestParameters, false>, + TestParameters, true>, + TestParameters, true>, + TestParameters, true>, + // Two Column Order by Tests for some combination of types + TestParameters, false>, + TestParameters, false>, + TestParameters, false>, + TestParameters, true>, + TestParameters, true>, + TestParameters, true>, + // Three Column Order by Tests for some combination of types + TestParameters, false>, + TestParameters, true> + + // TODO: enable and fix sorting tests for GDF_BOOL8 + // TestParameters< VTuple, true >, + // TestParameters< VTuple, false >, + // TestParameters< VTuple, true >, + // TestParameters< VTuple, true > + > + Implementations; + +TYPED_TEST_CASE(OrderByTest, Implementations); + +// This test is used for debugging purposes and is disabled by default. +// The input sizes are small and has a large amount of debug printing enabled. 
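+// (Disabled tests can still be run explicitly by passing
+// --gtest_also_run_disabled_tests on the command line.)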
+TYPED_TEST(OrderByTest, DISABLED_DebugTest) +{ + this->create_input(5, 2, 1, true, true); + this->create_gdf_output_buffers(5); + + std::vector reference_result = this->compute_reference_solution(true); + + std::vector gdf_result = this->compute_gdf_result(false, true); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} + +TYPED_TEST(OrderByTest, EqualValues) +{ + this->create_input(100, 1); + this->create_gdf_output_buffers(100); + + std::vector reference_result = this->compute_reference_solution(); + + std::vector gdf_result = this->compute_gdf_result(); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} + +TYPED_TEST(OrderByTest, EqualValuesNull) +{ + this->create_input(100, 1, 100); + this->create_gdf_output_buffers(100); + + std::vector reference_result = this->compute_reference_solution(); + + std::vector gdf_result = this->compute_gdf_result(); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} + +TYPED_TEST(OrderByTest, MaxRandomValues) +{ + this->create_input(10000, RAND_MAX); + this->create_gdf_output_buffers(10000); + + std::vector reference_result = this->compute_reference_solution(); + + std::vector gdf_result = this->compute_gdf_result(); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} + +TYPED_TEST(OrderByTest, MaxRandomValuesAndNulls) +{ + this->create_input(10000, RAND_MAX, 2000); + this->create_gdf_output_buffers(10000); + + std::vector reference_result = this->compute_reference_solution(); + + std::vector gdf_result = this->compute_gdf_result(); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} + +TYPED_TEST(OrderByTest, EmptyColumns) +{ + this->create_input(0, 100); + this->create_gdf_output_buffers(0); + + std::vector reference_result = this->compute_reference_solution(); + + std::vector gdf_result = this->compute_gdf_result(); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} + +/* + * Below group of test are for testing the gdf_order_by method which always + * sort in ascendig. 
+ **/ + +TYPED_TEST(OrderByTest, EqualValuesDefaultSort) +{ + this->create_input(100, 1, 0, false); + this->create_gdf_output_buffers(100); + + std::vector reference_result = this->compute_reference_solution(); + + std::vector gdf_result = this->compute_gdf_result(true); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} + +TYPED_TEST(OrderByTest, EqualValuesNullDefaultSort) +{ + this->create_input(100, 1, 100, false); + this->create_gdf_output_buffers(100); + + std::vector reference_result = this->compute_reference_solution(); + + std::vector gdf_result = this->compute_gdf_result(true); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} + +TYPED_TEST(OrderByTest, MaxRandomValuesDefaultSort) +{ + this->create_input(10000, RAND_MAX, 0, false); + this->create_gdf_output_buffers(10000); + + std::vector reference_result = this->compute_reference_solution(); + + std::vector gdf_result = this->compute_gdf_result(true); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} + +TYPED_TEST(OrderByTest, MaxRandomValuesAndNullsDefaultSort) +{ + this->create_input(10000, RAND_MAX, 2000, false); + this->create_gdf_output_buffers(10000); + + std::vector reference_result = this->compute_reference_solution(); + + std::vector gdf_result = this->compute_gdf_result(true); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} + +TYPED_TEST(OrderByTest, EmptyColumnsDefaultSort) +{ + this->create_input(0, 100, 0, false); + this->create_gdf_output_buffers(0); + + std::vector reference_result = this->compute_reference_solution(); + + std::vector gdf_result = this->compute_gdf_result(true); + + ASSERT_EQ(reference_result.size(), gdf_result.size()) + << "Size of gdf result does not match reference result\n"; + + // Compare the GDF and reference solutions + for (size_t i = 0; i < reference_result.size(); ++i) { + EXPECT_EQ(reference_result[i], gdf_result[i]); + } +} diff --git a/cuda_code/overlap.cu b/cuda_code/overlap.cu new file mode 100644 index 0000000000000000000000000000000000000000..915b2c8bd52dd503b955b2c1a78451fc754f919d --- /dev/null +++ b/cuda_code/overlap.cu @@ -0,0 +1,428 @@ +/* + * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** ---------------------------------------------------------------------------* + * @brief The cugraph Jaccard core functionality + * + * @file jaccard.cu + * ---------------------------------------------------------------------------**/ + +#include +#include +#include +#include + +namespace cugraph { +namespace detail { + +// Volume of neighboors (*weight_s) +// TODO: Identical kernel to jaccard_row_sum!! +template +__global__ void overlap_row_sum( + vertex_t n, edge_t const *csrPtr, vertex_t const *csrInd, weight_t const *v, weight_t *work) +{ + vertex_t row; + edge_t start, end, length; + weight_t sum; + + for (row = threadIdx.y + blockIdx.y * blockDim.y; row < n; row += gridDim.y * blockDim.y) { + start = csrPtr[row]; + end = csrPtr[row + 1]; + length = end - start; + + // compute row sums + if (weighted) { + sum = parallel_prefix_sum(length, csrInd + start, v); + if (threadIdx.x == 0) work[row] = sum; + } else { + work[row] = static_cast(length); + } + } +} + +// Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s) +// TODO: Identical kernel to jaccard_row_sum!! +template +__global__ void overlap_is(vertex_t n, + edge_t const *csrPtr, + vertex_t const *csrInd, + weight_t const *v, + weight_t *work, + weight_t *weight_i, + weight_t *weight_s) +{ + edge_t i, j, Ni, Nj; + vertex_t row, col; + vertex_t ref, cur, ref_col, cur_col, match; + weight_t ref_val; + + for (row = threadIdx.z + blockIdx.z * blockDim.z; row < n; row += gridDim.z * blockDim.z) { + for (j = csrPtr[row] + threadIdx.y + blockIdx.y * blockDim.y; j < csrPtr[row + 1]; + j += gridDim.y * blockDim.y) { + col = csrInd[j]; + // find which row has least elements (and call it reference row) + Ni = csrPtr[row + 1] - csrPtr[row]; + Nj = csrPtr[col + 1] - csrPtr[col]; + ref = (Ni < Nj) ? row : col; + cur = (Ni < Nj) ? 
col : row; + + // compute new sum weights + weight_s[j] = min(work[row], work[col]); + + // compute new intersection weights + // search for the element with the same column index in the reference row + for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1]; + i += gridDim.x * blockDim.x) { + match = -1; + ref_col = csrInd[i]; + if (weighted) { + ref_val = v[ref_col]; + } else { + ref_val = 1.0; + } + + // binary search (column indices are sorted within each row) + edge_t left = csrPtr[cur]; + edge_t right = csrPtr[cur + 1] - 1; + while (left <= right) { + edge_t middle = (left + right) >> 1; + cur_col = csrInd[middle]; + if (cur_col > ref_col) { + right = middle - 1; + } else if (cur_col < ref_col) { + left = middle + 1; + } else { + match = middle; + break; + } + } + + // if the element with the same column index in the reference row has been found + if (match != -1) { atomicAdd(&weight_i[j], ref_val); } + } + } + } +} + +// Volume of intersections (*weight_i) and cumulated volume of neighboors (*weight_s) +// Using list of node pairs +// NOTE: NOT the same as jaccard +template +__global__ void overlap_is_pairs(edge_t num_pairs, + edge_t const *csrPtr, + vertex_t const *csrInd, + vertex_t const *first_pair, + vertex_t const *second_pair, + weight_t const *v, + weight_t *work, + weight_t *weight_i, + weight_t *weight_s) +{ + edge_t i, idx, Ni, Nj, match; + vertex_t row, col, ref, cur, ref_col, cur_col; + weight_t ref_val; + + for (idx = threadIdx.z + blockIdx.z * blockDim.z; idx < num_pairs; + idx += gridDim.z * blockDim.z) { + row = first_pair[idx]; + col = second_pair[idx]; + + // find which row has least elements (and call it reference row) + Ni = csrPtr[row + 1] - csrPtr[row]; + Nj = csrPtr[col + 1] - csrPtr[col]; + ref = (Ni < Nj) ? row : col; + cur = (Ni < Nj) ? 
col : row; + + // compute new sum weights + weight_s[idx] = min(work[row], work[col]); + + // compute new intersection weights + // search for the element with the same column index in the reference row + for (i = csrPtr[ref] + threadIdx.x + blockIdx.x * blockDim.x; i < csrPtr[ref + 1]; + i += gridDim.x * blockDim.x) { + match = -1; + ref_col = csrInd[i]; + if (weighted) { + ref_val = v[ref_col]; + } else { + ref_val = 1.0; + } + + // binary search (column indices are sorted within each row) + edge_t left = csrPtr[cur]; + edge_t right = csrPtr[cur + 1] - 1; + while (left <= right) { + edge_t middle = (left + right) >> 1; + cur_col = csrInd[middle]; + if (cur_col > ref_col) { + right = middle - 1; + } else if (cur_col < ref_col) { + left = middle + 1; + } else { + match = middle; + break; + } + } + + // if the element with the same column index in the reference row has been found + if (match != -1) { atomicAdd(&weight_i[idx], ref_val); } + } + } +} + +// Overlap weights (*weight) +template +__global__ void overlap_jw(edge_t e, + edge_t const *csrPtr, + vertex_t const *csrInd, + weight_t *weight_i, + weight_t *weight_s, + weight_t *weight_j) +{ + edge_t j; + weight_t Wi, Wu; + + for (j = threadIdx.x + blockIdx.x * blockDim.x; j < e; j += gridDim.x * blockDim.x) { + Wi = weight_i[j]; + Wu = weight_s[j]; + weight_j[j] = (Wi / Wu); + } +} + +template +int overlap(vertex_t n, + edge_t e, + edge_t const *csrPtr, + vertex_t const *csrInd, + weight_t const *weight_in, + weight_t *work, + weight_t *weight_i, + weight_t *weight_s, + weight_t *weight_j) +{ + dim3 nthreads, nblocks; + int y = 4; + + // setup launch configuration + nthreads.x = 32; + nthreads.y = y; + nthreads.z = 1; + nblocks.x = 1; + nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS}); + nblocks.z = 1; + + // launch kernel + overlap_row_sum + <<>>(n, csrPtr, csrInd, weight_in, work); + cudaDeviceSynchronize(); + fill(e, weight_i, weight_t{0.0}); + + // setup launch configuration + nthreads.x = 32 / y; + nthreads.y = y; + nthreads.z = 8; + nblocks.x = 1; + nblocks.y = 1; + nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1; + + // launch kernel + overlap_is + <<>>(n, csrPtr, csrInd, weight_in, work, weight_i, weight_s); + + // setup launch configuration + nthreads.x = min(e, edge_t{CUDA_MAX_KERNEL_THREADS}); + nthreads.y = 1; + nthreads.z = 1; + nblocks.x = min((e + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS}); + nblocks.y = 1; + nblocks.z = 1; + + // launch kernel + overlap_jw + <<>>(e, csrPtr, csrInd, weight_i, weight_s, weight_j); + + return 0; +} + +template +int overlap_pairs(vertex_t n, + edge_t num_pairs, + edge_t const *csrPtr, + vertex_t const *csrInd, + vertex_t const *first_pair, + vertex_t const *second_pair, + weight_t const *weight_in, + weight_t *work, + weight_t *weight_i, + weight_t *weight_s, + weight_t *weight_j) +{ + dim3 nthreads, nblocks; + int y = 4; + + // setup launch configuration + nthreads.x = 32; + nthreads.y = y; + nthreads.z = 1; + nblocks.x = 1; + nblocks.y = min((n + nthreads.y - 1) / nthreads.y, vertex_t{CUDA_MAX_BLOCKS}); + nblocks.z = 1; + // launch kernel + + overlap_row_sum + <<>>(n, csrPtr, csrInd, weight_in, work); + cudaDeviceSynchronize(); + fill(num_pairs, weight_i, weight_t{0.0}); + // setup launch configuration + nthreads.x = 32; + nthreads.y = 1; + nthreads.z = 8; + nblocks.x = 1; + nblocks.y = 1; + nblocks.z = min((n + nthreads.z - 1) / nthreads.z, vertex_t{CUDA_MAX_BLOCKS}); // 1; + + // launch kernel + 
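+  // nblocks.z is capped at CUDA_MAX_BLOCKS; this is safe because the kernel
+  // below grid-strides over the pair indices, so every pair is still visited.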
overlap_is_pairs<<>>( + num_pairs, csrPtr, csrInd, first_pair, second_pair, weight_in, work, weight_i, weight_s); + + // setup launch configuration + nthreads.x = min(num_pairs, edge_t{CUDA_MAX_KERNEL_THREADS}); + nthreads.y = 1; + nthreads.z = 1; + nblocks.x = min((num_pairs + nthreads.x - 1) / nthreads.x, edge_t{CUDA_MAX_BLOCKS}); + nblocks.y = 1; + nblocks.z = 1; + // launch kernel + + overlap_jw + <<>>(num_pairs, csrPtr, csrInd, weight_i, weight_s, weight_j); + + return 0; +} +} // namespace detail + +template +void overlap(GraphCSRView const &graph, WT const *weights, WT *result) +{ + CUGRAPH_EXPECTS(result != nullptr, "Invalid input argument: result pointer is NULL"); + + rmm::device_vector weight_i(graph.number_of_edges); + rmm::device_vector weight_s(graph.number_of_edges); + rmm::device_vector work(graph.number_of_vertices); + + if (weights == nullptr) { + cugraph::detail::overlap(graph.number_of_vertices, + graph.number_of_edges, + graph.offsets, + graph.indices, + weights, + work.data().get(), + weight_i.data().get(), + weight_s.data().get(), + result); + } else { + cugraph::detail::overlap(graph.number_of_vertices, + graph.number_of_edges, + graph.offsets, + graph.indices, + weights, + work.data().get(), + weight_i.data().get(), + weight_s.data().get(), + result); + } +} + +template +void overlap_list(GraphCSRView const &graph, + WT const *weights, + ET num_pairs, + VT const *first, + VT const *second, + WT *result) +{ + CUGRAPH_EXPECTS(result != nullptr, "Invalid input argument: result pointer is NULL"); + CUGRAPH_EXPECTS(first != nullptr, "Invalid input argument: first column is NULL"); + CUGRAPH_EXPECTS(second != nullptr, "Invalid input argument: second column is NULL"); + + rmm::device_vector weight_i(num_pairs); + rmm::device_vector weight_s(num_pairs); + rmm::device_vector work(graph.number_of_vertices); + + if (weights == nullptr) { + cugraph::detail::overlap_pairs(graph.number_of_vertices, + num_pairs, + graph.offsets, + graph.indices, + first, + second, + weights, + work.data().get(), + weight_i.data().get(), + weight_s.data().get(), + result); + } else { + cugraph::detail::overlap_pairs(graph.number_of_vertices, + num_pairs, + graph.offsets, + graph.indices, + first, + second, + weights, + work.data().get(), + weight_i.data().get(), + weight_s.data().get(), + result); + } +} + +template void overlap(GraphCSRView const &, + float const *, + float *); +template void overlap(GraphCSRView const &, + double const *, + double *); +template void overlap(GraphCSRView const &, + float const *, + float *); +template void overlap(GraphCSRView const &, + double const *, + double *); +template void overlap_list(GraphCSRView const &, + float const *, + int32_t, + int32_t const *, + int32_t const *, + float *); +template void overlap_list(GraphCSRView const &, + double const *, + int32_t, + int32_t const *, + int32_t const *, + double *); +template void overlap_list(GraphCSRView const &, + float const *, + int64_t, + int64_t const *, + int64_t const *, + float *); +template void overlap_list(GraphCSRView const &, + double const *, + int64_t, + int64_t const *, + int64_t const *, + double *); + +} // namespace cugraph diff --git a/cuda_code/p2pbwcheck.cu b/cuda_code/p2pbwcheck.cu new file mode 100644 index 0000000000000000000000000000000000000000..98f71e4c3865d3d02e588c0220981b74e9c936f4 --- /dev/null +++ b/cuda_code/p2pbwcheck.cu @@ -0,0 +1,431 @@ +/* + * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. + * + * Modifications Copyright 2017 H2O.ai, Inc. 
+ */ +#if 1 + +#include +#include + +#include + +using namespace std; + +const char *sSampleName = "P2P (Peer-to-Peer) GPU Bandwidth Latency Test"; + +//Macro for checking cuda errors following a cuda launch or api call +#define cudaCheckError() { \ + cudaError_t e=cudaGetLastError(); \ + if(e!=cudaSuccess) { \ + printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ + exit(EXIT_FAILURE); \ + } \ + } +__global__ void delay(int * null) { + float j=threadIdx.x; + for(int i=1;i<10000;i++) + j=(j+1)/j; + + if(threadIdx.x == j) null[0] = j; +} + +void checkP2Paccess(int numGPUs) +{ + for (int i=0; i buffers(numGPUs); + vector start(numGPUs); + vector stop(numGPUs); + + for (int d=0; d bandwidthMatrix(numGPUs*numGPUs); + + for (int i=0; i>>(NULL); + cudaEventRecord(start[i]); + + for (int r=0; r buffers(numGPUs); + vector start(numGPUs); + vector stop(numGPUs); + vector stream0(numGPUs); + vector stream1(numGPUs); + + for (int d=0; d bandwidthMatrix(numGPUs*numGPUs); + + for (int i=0; i>>(NULL); + cudaEventRecord(start[i]); + + for (int r=0; r buffers(numGPUs); + vector start(numGPUs); + vector stop(numGPUs); + + for (int d=0; d latencyMatrix(numGPUs*numGPUs); + + for (int i=0; i>>(NULL); + cudaEventRecord(start[i]); + + for (int r=0; r +#ifdef __NVCC__ +#include "cub/cub.cuh" +#endif +#ifdef __HIPCC__ +#include +namespace cub = hipcub; +#endif +#include "paddle/fluid/operators/amp/fp16_type_traits.h" +#include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h" +#include "paddle/fluid/operators/fc_op.h" +#include "paddle/fluid/operators/p_norm_op.h" +#include "paddle/fluid/operators/reduce_ops/reduce_op.cu.h" +#include "paddle/fluid/operators/reduce_ops/reduce_op.h" +#include "paddle/fluid/platform/float16.h" + +namespace paddle { +namespace operators { + +template +__device__ __forceinline__ int sgn(T val) { + return (T(0) < val) - (val < T(0)); +} + +__device__ __forceinline__ platform::float16 inline_abs(platform::float16 x) { + return static_cast(abs(static_cast(x))); +} + +__device__ __forceinline__ platform::bfloat16 inline_abs(platform::bfloat16 x) { + return static_cast(abs(static_cast(x))); +} + +__device__ __forceinline__ float inline_abs(float x) { return abs(x); } +__device__ __forceinline__ double inline_abs(double x) { return abs(x); } + +__device__ __forceinline__ int inline_sign(platform::float16 x) { + return sgn(x); +} +__device__ __forceinline__ int inline_sign(float x) { return sgn(x); } +__device__ __forceinline__ int inline_sign(double x) { return sgn(x); } + +__device__ __forceinline__ platform::float16 inline_pow( + platform::float16 base, platform::float16 exponent) { + return static_cast( + pow(static_cast(base), static_cast(exponent))); +} +__device__ __forceinline__ platform::bfloat16 inline_pow( + platform::bfloat16 base, platform::bfloat16 exponent) { + return static_cast( + pow(static_cast(base), static_cast(exponent))); +} +__device__ __forceinline__ float inline_pow(float base, float exponent) { + return pow(base, exponent); +} +__device__ __forceinline__ double inline_pow(double base, double exponent) { + return pow(base, exponent); +} + +template +struct NonzeroFunctor { + HOSTDEVICE explicit inline NonzeroFunctor() {} + HOSTDEVICE inline T operator()(const T x) const { + return static_cast(static_cast(x) != 0); + } +}; + +template +struct AbsFunctor { + HOSTDEVICE explicit inline AbsFunctor() {} + HOSTDEVICE inline T operator()(const T x) const { + return static_cast(inline_abs(x)); + } +}; + +template +struct 
UnsignedPowFunctor { + HOSTDEVICE explicit inline UnsignedPowFunctor(float porder) { + this->porder = porder; + } + HOSTDEVICE inline T operator()(const T x) const { + return static_cast(inline_pow(inline_abs(x), static_cast(porder))); + } + float porder; +}; + +template +class PnormCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* in_x = ctx.Input("X"); + auto* out_norm = ctx.Output("Out"); + const T* x = in_x->data(); + T* norm = out_norm->mutable_data(ctx.GetPlace()); + auto xdim = in_x->dims(); + float porder = ctx.Attr("porder"); + bool asvector = ctx.Attr("asvector"); + int axis = ctx.Attr("axis"); + std::vector reduce_axis = {axis}; + reduce_axis = GetReduceDim(reduce_axis, xdim.size(), asvector); + auto stream = ctx.cuda_device_context().stream(); + + using MT = typename details::MPTypeTrait::Type; + if (porder == 0) { + TensorReduceImpl>( + ctx.cuda_device_context(), *in_x, out_norm, NonzeroFunctor(), + reduce_axis, stream); + } else if (porder == INFINITY) { + TensorReduceImpl>( + ctx.cuda_device_context(), *in_x, out_norm, AbsFunctor(), + reduce_axis, stream); + } else if (porder == -INFINITY) { + TensorReduceImpl>( + ctx.cuda_device_context(), *in_x, out_norm, AbsFunctor(), + reduce_axis, stream); + } else { + TensorReduceImpl>( + ctx.cuda_device_context(), *in_x, out_norm, + UnsignedPowFunctor(porder), reduce_axis, stream); + + const framework::Tensor* tmp_norm = out_norm; + std::vector ins = {tmp_norm}; + std::vector outs = {out_norm}; + const auto& cuda_ctx = + ctx.template device_context(); + paddle::operators::LaunchSameDimsElementwiseCudaKernel( + cuda_ctx, ins, &outs, UnsignedPowFunctor(1. / porder)); + } + } +}; + +template +struct AbsMaxAndMinGradFunctor { + template + void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy, + const Dim& dim, int size) { + dx->device(place) = dy->broadcast(dim) * (*x).sign() * + ((*x).abs() == y->broadcast(dim)).template cast(); + } +}; + +template +struct PNormGradFunctor { + HOSTDEVICE explicit inline PNormGradFunctor(float porder) { + this->porder = static_cast(porder - 1.); + } + template + void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy, + const Dim& dim, int size) { + dx->device(place) = (*x).abs().pow(this->porder) * (*x).sign() * + dy->broadcast(dim) * + (*y).pow(-this->porder).broadcast(dim); + } + T porder; +}; + +template +class PnormGradCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* in_x = ctx.Input("X"); + auto* in_norm = ctx.Input("Out"); + auto* in_norm_dy = + ctx.Input(framework::GradVarName("Out")); + auto* out_dx = ctx.Output(framework::GradVarName("X")); + T* dx = out_dx->mutable_data(ctx.GetPlace()); + + auto xdim = in_x->dims(); + float porder = ctx.Attr("porder"); + int axis = ctx.Attr("axis"); + bool reduce_all = (in_norm->numel() == 1); + if (axis < 0) axis = xdim.size() + axis; + const std::vector dims = {axis}; + + auto& cuda_ctx = ctx.template device_context(); + + if (porder == 0) { + phi::funcs::SetConstant set_zero; + set_zero(cuda_ctx, out_dx, static_cast(0)); + } else if (porder == INFINITY || porder == -INFINITY) { + AbsMaxAndMinGradFunctor functor; + LaunchReduceGradKernel>( + ctx, in_x, in_norm, in_norm_dy, out_dx, functor, dims, reduce_all); + } else { + auto functor = PNormGradFunctor(porder); + LaunchReduceGradKernel>( + ctx, in_x, in_norm, in_norm_dy, out_dx, functor, dims, reduce_all); + } + } 
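+
+  // Gradient dispatch above: for porder == 0 the "norm" only counts nonzero
+  // entries, so the gradient is defined as zero; for porder == +/-INFINITY the
+  // incoming gradient flows only to the elements whose absolute value attains
+  // the max/min (AbsMaxAndMinGradFunctor); otherwise PNormGradFunctor applies
+  // d||x||_p / dx_i = sign(x_i) * |x_i|^(p-1) * ||x||_p^(1-p), scaled by dy.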
+}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +using CUDA = paddle::platform::CUDADeviceContext; + +REGISTER_OP_CUDA_KERNEL(p_norm, + ops::PnormCUDAKernel, + ops::PnormCUDAKernel, + ops::PnormCUDAKernel, + ops::PnormCUDAKernel); +REGISTER_OP_CUDA_KERNEL( + p_norm_grad, ops::PnormGradCUDAKernel, + ops::PnormGradCUDAKernel, + ops::PnormGradCUDAKernel, + ops::PnormGradCUDAKernel); diff --git a/cuda_code/pack_unpack_z_columns.cu b/cuda_code/pack_unpack_z_columns.cu new file mode 100644 index 0000000000000000000000000000000000000000..9ddbbbbaa6e25130f2e08dfb1a91ae1222ad1917 --- /dev/null +++ b/cuda_code/pack_unpack_z_columns.cu @@ -0,0 +1,218 @@ +#include "kernels_common.h" + +template +__global__ void pack_unpack_z_cols_gpu_kernel +( + cuDoubleComplex* z_cols_packed__, + cuDoubleComplex* fft_buf__, + int size_x__, + int size_y__, + int size_z__, + int num_z_cols__, + int const* z_columns_pos__ +) +{ + int icol = blockIdx.x * blockDim.x + threadIdx.x; + int iz = blockIdx.y; + if (icol < num_z_cols__) + { + int x, y; + + if (conjugate) + { + x = (-z_columns_pos__[array2D_offset(0, icol, 2)] + size_x__) % size_x__; + y = (-z_columns_pos__[array2D_offset(1, icol, 2)] + size_y__) % size_y__; + } + else + { + x = (z_columns_pos__[array2D_offset(0, icol, 2)] + size_x__) % size_x__; + y = (z_columns_pos__[array2D_offset(1, icol, 2)] + size_y__) % size_y__; + } + + /* load into buffer */ + if (direction == 1) + { + if (conjugate) + { + fft_buf__[array3D_offset(x, y, iz, size_x__, size_y__)] = cuConj(z_cols_packed__[array2D_offset(iz, icol, size_z__)]); + } + else + { + fft_buf__[array3D_offset(x, y, iz, size_x__, size_y__)] = z_cols_packed__[array2D_offset(iz, icol, size_z__)]; + } + } + if (direction == -1) + { + z_cols_packed__[array2D_offset(iz, icol, size_z__)] = fft_buf__[array3D_offset(x, y, iz, size_x__, size_y__)]; + } + } +} + +extern "C" void unpack_z_cols_gpu(cuDoubleComplex* z_cols_packed__, + cuDoubleComplex* fft_buf__, + int size_x__, + int size_y__, + int size_z__, + int num_z_cols__, + int const* z_columns_pos__, + bool use_reduction__, + int stream_id__) +{ + cudaStream_t stream = cuda_stream_by_id(stream_id__); + + dim3 grid_t(64); + dim3 grid_b(num_blocks(num_z_cols__, grid_t.x), size_z__); + + cudaMemsetAsync(fft_buf__, 0, size_x__ * size_y__ * size_z__ * sizeof(cuDoubleComplex), stream); + + pack_unpack_z_cols_gpu_kernel<1, false> <<>> + ( + z_cols_packed__, + fft_buf__, + size_x__, + size_y__, + size_z__, + num_z_cols__, + z_columns_pos__ + ); + if (use_reduction__) + { + pack_unpack_z_cols_gpu_kernel<1, true> <<>> + ( + &z_cols_packed__[size_z__], + fft_buf__, + size_x__, + size_y__, + size_z__, + num_z_cols__ - 1, + &z_columns_pos__[2] // * num_z_cols__] + ); + } +} + +extern "C" void pack_z_cols_gpu(cuDoubleComplex* z_cols_packed__, + cuDoubleComplex* fft_buf__, + int size_x__, + int size_y__, + int size_z__, + int num_z_cols__, + int const* z_columns_pos__, + int stream_id__) +{ + cudaStream_t stream = cuda_stream_by_id(stream_id__); + + dim3 grid_t(64); + dim3 grid_b(num_blocks(num_z_cols__, grid_t.x), size_z__); + + pack_unpack_z_cols_gpu_kernel<-1, false> <<>> + ( + z_cols_packed__, + fft_buf__, + size_x__, + size_y__, + size_z__, + num_z_cols__, + z_columns_pos__ + ); +} + +template +__global__ void pack_unpack_z_cols_2_gpu_kernel +( + cuDoubleComplex* z_cols_packed1__, + cuDoubleComplex* z_cols_packed2__, + cuDoubleComplex* fft_buf__, + int size_x__, + int size_y__, + int size_z__, + int num_z_cols__, + int const* 
z_columns_pos__ +) +{ + int icol = blockIdx.x * blockDim.x + threadIdx.x; + int iz = blockIdx.y; + if (icol < num_z_cols__) + { + int x = (z_columns_pos__[array2D_offset(0, icol, 2)] + size_x__) % size_x__; + int y = (z_columns_pos__[array2D_offset(1, icol, 2)] + size_y__) % size_y__; + int mx = (-z_columns_pos__[array2D_offset(0, icol, 2)] + size_x__) % size_x__; + int my = (-z_columns_pos__[array2D_offset(1, icol, 2)] + size_y__) % size_y__; + + /* load into buffer */ + if (direction == 1) + { + fft_buf__[array3D_offset(x, y, iz, size_x__, size_y__)] = cuCadd(z_cols_packed1__[array2D_offset(iz, icol, size_z__)], + cuCmul(make_cuDoubleComplex(0, 1), z_cols_packed2__[array2D_offset(iz, icol, size_z__)])); + + fft_buf__[array3D_offset(mx, my, iz, size_x__, size_y__)] = cuCadd(cuConj(z_cols_packed1__[array2D_offset(iz, icol, size_z__)]), + cuCmul(make_cuDoubleComplex(0, 1), cuConj(z_cols_packed2__[array2D_offset(iz, icol, size_z__)]))); + } + if (direction == -1) + { + z_cols_packed1__[array2D_offset(iz, icol, size_z__)] = cuCmul(make_cuDoubleComplex(0.5, 0), + cuCadd(fft_buf__[array3D_offset(x, y, iz, size_x__, size_y__)], cuConj(fft_buf__[array3D_offset(mx, my, iz, size_x__, size_y__)]))); + + z_cols_packed2__[array2D_offset(iz, icol, size_z__)] = cuCmul(make_cuDoubleComplex(0, -0.5), + cuCsub(fft_buf__[array3D_offset(x, y, iz, size_x__, size_y__)], cuConj(fft_buf__[array3D_offset(mx, my, iz, size_x__, size_y__)]))); + } + } +} + +extern "C" void unpack_z_cols_2_gpu(cuDoubleComplex* z_cols_packed1__, + cuDoubleComplex* z_cols_packed2__, + cuDoubleComplex* fft_buf__, + int size_x__, + int size_y__, + int size_z__, + int num_z_cols__, + int const* z_columns_pos__, + int stream_id__) +{ + cudaStream_t stream = cuda_stream_by_id(stream_id__); + + dim3 grid_t(64); + dim3 grid_b(num_blocks(num_z_cols__, grid_t.x), size_z__); + + cudaMemsetAsync(fft_buf__, 0, size_x__ * size_y__ * size_z__ * sizeof(cuDoubleComplex), stream); + + pack_unpack_z_cols_2_gpu_kernel<1> <<>> + ( + z_cols_packed1__, + z_cols_packed2__, + fft_buf__, + size_x__, + size_y__, + size_z__, + num_z_cols__, + z_columns_pos__ + ); +} + +extern "C" void pack_z_cols_2_gpu(cuDoubleComplex* z_cols_packed1__, + cuDoubleComplex* z_cols_packed2__, + cuDoubleComplex* fft_buf__, + int size_x__, + int size_y__, + int size_z__, + int num_z_cols__, + int const* z_columns_pos__, + int stream_id__) +{ + cudaStream_t stream = cuda_stream_by_id(stream_id__); + + dim3 grid_t(64); + dim3 grid_b(num_blocks(num_z_cols__, grid_t.x), size_z__); + + pack_unpack_z_cols_2_gpu_kernel<-1> <<>> + ( + z_cols_packed1__, + z_cols_packed2__, + fft_buf__, + size_x__, + size_y__, + size_z__, + num_z_cols__, + z_columns_pos__ + ); +} + diff --git a/cuda_code/page_enc_19.cu b/cuda_code/page_enc_19.cu new file mode 100644 index 0000000000000000000000000000000000000000..4728a8001f281d69e8a6e624e854296e67285126 --- /dev/null +++ b/cuda_code/page_enc_19.cu @@ -0,0 +1,2085 @@ +/* + * Copyright (c) 2019-2021, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include "parquet_gpu.hpp" + +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include + +namespace cudf { +namespace io { +namespace parquet { +namespace gpu { +// Spark doesn't support RLE encoding for BOOLEANs +#ifdef ENABLE_BOOL_RLE +constexpr bool enable_bool_rle = true; +#else +constexpr bool enable_bool_rle = false; +#endif + +using ::cudf::detail::device_2dspan; + +constexpr int init_hash_bits = 12; +constexpr uint32_t rle_buffer_size = (1 << 9); + +struct frag_init_state_s { + parquet_column_device_view col; + PageFragment frag; + size_type start_value_idx; +}; + +struct page_enc_state_s { + uint8_t* cur; //!< current output ptr + uint8_t* rle_out; //!< current RLE write ptr + uint32_t rle_run; //!< current RLE run + uint32_t run_val; //!< current RLE run value + uint32_t rle_pos; //!< RLE encoder positions + uint32_t rle_numvals; //!< RLE input value count + uint32_t rle_lit_count; + uint32_t rle_rpt_count; + uint32_t page_start_val; + uint32_t chunk_start_val; + volatile uint32_t rpt_map[4]; + volatile uint32_t scratch_red[32]; + EncPage page; + EncColumnChunk ck; + parquet_column_device_view col; + gpu_inflate_input_s comp_in; + gpu_inflate_status_s comp_stat; + uint16_t vals[rle_buffer_size]; +}; + +/** + * @brief Returns the size of the type in the Parquet file. + */ +uint32_t __device__ physical_type_len(Type physical_type, type_id id) +{ + if (physical_type == FIXED_LEN_BYTE_ARRAY and id == type_id::DECIMAL128) { + return sizeof(__int128_t); + } + switch (physical_type) { + case INT96: return 12u; + case INT64: + case DOUBLE: return sizeof(int64_t); + case BOOLEAN: return 1u; + default: return sizeof(int32_t); + } +} + +/** + * @brief Return a 12-bit hash from a byte sequence + */ +inline __device__ uint32_t hash_string(const string_view& val) +{ + char const* ptr = val.data(); + uint32_t len = val.size_bytes(); + if (len != 0) { + return (ptr[0] + (ptr[len - 1] << 5) + (len << 10)) & ((1 << init_hash_bits) - 1); + } else { + return 0; + } +} + +inline __device__ uint32_t uint32_init_hash(uint32_t v) +{ + return (v + (v >> 11) + (v >> 22)) & ((1 << init_hash_bits) - 1); +} + +inline __device__ uint32_t uint64_init_hash(uint64_t v) +{ + return uint32_init_hash(static_cast(v + (v >> 32))); +} + +/** + * @brief Initializes encoder page fragments + * + * Based on the number of rows in each fragment, populates the value count, the size of data in the + * fragment, the number of unique values, and the data size of unique values. + * + * @param[in] frag Fragment array [fragment_id][column_id] + * @param[in] col_desc Column description array [column_id] + * @param[in] num_fragments Number of fragments per column + * @param[in] num_columns Number of columns + */ +// blockDim {512,1,1} +template +__global__ void __launch_bounds__(block_size) + gpuInitPageFragments(device_2dspan frag, + device_span col_desc, + uint32_t fragment_size, + uint32_t max_num_rows) +{ + __shared__ __align__(16) frag_init_state_s state_g; + + using block_reduce = cub::BlockReduce; + __shared__ typename block_reduce::TempStorage reduce_storage; + + frag_init_state_s* const s = &state_g; + uint32_t t = threadIdx.x; + + if (t == 0) s->col = col_desc[blockIdx.x]; + __syncthreads(); + uint32_t const start_row = blockIdx.y * fragment_size; + if (!t) { + // frag.num_rows = fragment_size except for the last page fragment which can be smaller. 
+ // num_rows is fixed but fragment size could be larger if the data is strings or nested. + s->frag.num_rows = min(fragment_size, max_num_rows - min(start_row, max_num_rows)); + s->frag.num_dict_vals = 0; + s->frag.fragment_data_size = 0; + s->frag.dict_data_size = 0; + + // To use num_vals instead of num_rows, we need to calculate num_vals on the fly. + // For list>, values between i and i+50 can be calculated by + // off_11 = off[i], off_12 = off[i+50] + // off_21 = child.off[off_11], off_22 = child.off[off_12] + // etc... + size_type end_value_idx = start_row + s->frag.num_rows; + if (s->col.parent_column == nullptr) { + s->start_value_idx = start_row; + } else { + auto col = *(s->col.parent_column); + auto current_start_value_idx = start_row; + while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) { + if (col.type().id() == type_id::STRUCT) { + current_start_value_idx += col.offset(); + end_value_idx += col.offset(); + col = col.child(0); + } else { + auto offset_col = col.child(lists_column_view::offsets_column_index); + current_start_value_idx = + offset_col.element(current_start_value_idx + col.offset()); + end_value_idx = offset_col.element(end_value_idx + col.offset()); + col = col.child(lists_column_view::child_column_index); + } + } + s->start_value_idx = current_start_value_idx; + } + s->frag.start_value_idx = s->start_value_idx; + s->frag.num_leaf_values = end_value_idx - s->start_value_idx; + + if (s->col.level_offsets != nullptr) { + // For nested schemas, the number of values in a fragment is not directly related to the + // number of encoded data elements or the number of rows. It is simply the number of + // repetition/definition values which together encode validity and nesting information. + size_type first_level_val_idx = s->col.level_offsets[start_row]; + size_type last_level_val_idx = s->col.level_offsets[start_row + s->frag.num_rows]; + s->frag.num_values = last_level_val_idx - first_level_val_idx; + } else { + s->frag.num_values = s->frag.num_rows; + } + } + auto const physical_type = s->col.physical_type; + auto const dtype_len = physical_type_len(physical_type, s->col.leaf_column->type().id()); + __syncthreads(); + + size_type nvals = s->frag.num_leaf_values; + size_type start_value_idx = s->start_value_idx; + + for (uint32_t i = 0; i < nvals; i += block_size) { + uint32_t val_idx = start_value_idx + i + t; + uint32_t is_valid = (i + t < nvals && val_idx < s->col.leaf_column->size()) + ? s->col.leaf_column->is_valid(val_idx) + : 0; + uint32_t len; + if (is_valid) { + len = dtype_len; + if (physical_type != BOOLEAN) { + if (physical_type == BYTE_ARRAY) { + auto str = s->col.leaf_column->element(val_idx); + len += str.size_bytes(); + } + } + } else { + len = 0; + } + + len = block_reduce(reduce_storage).Sum(len); + if (!t) { s->frag.fragment_data_size += len; } + __syncthreads(); + } + __syncthreads(); + if (t == 0) frag[blockIdx.x][blockIdx.y] = s->frag; +} + +// blockDim {128,1,1} +__global__ void __launch_bounds__(128) + gpuInitFragmentStats(device_2dspan groups, + device_2dspan fragments, + device_span col_desc) +{ + // TODO: why not 1 block per warp? 
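+  // Each 128-thread block is treated as 4 warps: warp w of block (x, y) fills
+  // the statistics_group for fragment y*4 + w of column x, and only lane 0 of
+  // each warp actually writes the group out.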
+ __shared__ __align__(8) statistics_group group_g[4]; + + uint32_t lane_id = threadIdx.x & 0x1f; + uint32_t frag_id = blockIdx.y * 4 + (threadIdx.x >> 5); + uint32_t column_id = blockIdx.x; + auto num_fragments_per_column = fragments.size().second; + statistics_group* const g = &group_g[threadIdx.x >> 5]; + if (!lane_id && frag_id < num_fragments_per_column) { + g->col = &col_desc[column_id]; + g->start_row = fragments[column_id][frag_id].start_value_idx; + g->num_rows = fragments[column_id][frag_id].num_leaf_values; + } + __syncthreads(); + if (frag_id < num_fragments_per_column and lane_id == 0) groups[column_id][frag_id] = *g; +} + +// blockDim {128,1,1} +__global__ void __launch_bounds__(128) + gpuInitPages(device_2dspan chunks, + device_span pages, + device_span col_desc, + statistics_merge_group* page_grstats, + statistics_merge_group* chunk_grstats, + size_t max_page_comp_data_size, + int32_t num_columns) +{ + // TODO: All writing seems to be done by thread 0. Could be replaced by thrust foreach + __shared__ __align__(8) parquet_column_device_view col_g; + __shared__ __align__(8) EncColumnChunk ck_g; + __shared__ __align__(8) PageFragment frag_g; + __shared__ __align__(8) EncPage page_g; + __shared__ __align__(8) statistics_merge_group pagestats_g; + + uint32_t t = threadIdx.x; + + if (t == 0) { + col_g = col_desc[blockIdx.x]; + ck_g = chunks[blockIdx.y][blockIdx.x]; + page_g = {}; + } + __syncthreads(); + if (t < 32) { + uint32_t fragments_in_chunk = 0; + uint32_t rows_in_page = 0; + uint32_t values_in_page = 0; + uint32_t leaf_values_in_page = 0; + uint32_t page_size = 0; + uint32_t num_pages = 0; + uint32_t num_rows = 0; + uint32_t page_start = 0; + uint32_t page_offset = ck_g.ck_stat_size; + uint32_t num_dict_entries = 0; + uint32_t comp_page_offset = ck_g.ck_stat_size; + uint32_t page_headers_size = 0; + uint32_t max_page_data_size = 0; + uint32_t cur_row = ck_g.start_row; + uint32_t ck_max_stats_len = 0; + uint32_t max_stats_len = 0; + + if (!t) { + pagestats_g.col = &col_desc[blockIdx.x]; + pagestats_g.start_chunk = ck_g.first_fragment; + pagestats_g.num_chunks = 0; + } + if (ck_g.use_dictionary) { + if (!t) { + page_g.page_data = ck_g.uncompressed_bfr + page_offset; + page_g.compressed_data = ck_g.compressed_bfr + comp_page_offset; + page_g.num_fragments = 0; + page_g.page_type = PageType::DICTIONARY_PAGE; + page_g.chunk = &chunks[blockIdx.y][blockIdx.x]; + page_g.chunk_id = blockIdx.y * num_columns + blockIdx.x; + page_g.hdr_size = 0; + page_g.max_hdr_size = 32; + page_g.max_data_size = ck_g.uniq_data_size; + page_g.start_row = cur_row; + page_g.num_rows = ck_g.num_dict_entries; + page_g.num_leaf_values = ck_g.num_dict_entries; + page_g.num_values = ck_g.num_dict_entries; // TODO: shouldn't matter for dict page + page_offset += page_g.max_hdr_size + page_g.max_data_size; + comp_page_offset += page_g.max_hdr_size + max_page_comp_data_size; + page_headers_size += page_g.max_hdr_size; + max_page_data_size = max(max_page_data_size, page_g.max_data_size); + } + __syncwarp(); + if (t == 0) { + if (not pages.empty()) pages[ck_g.first_page] = page_g; + if (page_grstats) page_grstats[ck_g.first_page] = pagestats_g; + } + num_pages = 1; + } + __syncwarp(); + // This loop goes over one page fragment at a time and adds it to page. + // When page size crosses a particular limit, then it moves on to the next page and then next + // page fragment gets added to that one. + + // This doesn't actually deal with data. It's agnostic. It only cares about number of rows and + // page size. 
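+    // A page is closed out either when all of the chunk's rows have been
+    // consumed or when adding the next fragment would exceed the current size
+    // cap. The cap shrinks as the page fills relative to the chunk: 512KB
+    // while the page holds less than a third of the chunk's values, 384KB
+    // from a third up to a half, and 256KB once it holds at least half
+    // (see max_page_size below).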
+ do { + uint32_t minmax_len = 0; + __syncwarp(); + if (num_rows < ck_g.num_rows) { + if (t == 0) { frag_g = ck_g.fragments[fragments_in_chunk]; } + if (!t && ck_g.stats && col_g.stats_dtype == dtype_string) { + minmax_len = max(ck_g.stats[fragments_in_chunk].min_value.str_val.length, + ck_g.stats[fragments_in_chunk].max_value.str_val.length); + } + } else if (!t) { + frag_g.fragment_data_size = 0; + frag_g.num_rows = 0; + } + __syncwarp(); + uint32_t fragment_data_size = + (ck_g.use_dictionary) + ? frag_g.num_leaf_values * 2 // Assume worst-case of 2-bytes per dictionary index + : frag_g.fragment_data_size; + // TODO (dm): this convoluted logic to limit page size needs refactoring + uint32_t max_page_size = (values_in_page * 2 >= ck_g.num_values) ? 256 * 1024 + : (values_in_page * 3 >= ck_g.num_values) ? 384 * 1024 + : 512 * 1024; + if (num_rows >= ck_g.num_rows || + (values_in_page > 0 && (page_size + fragment_data_size > max_page_size))) { + if (ck_g.use_dictionary) { + page_size = + 1 + 5 + ((values_in_page * ck_g.dict_rle_bits + 7) >> 3) + (values_in_page >> 8); + } + if (!t) { + page_g.num_fragments = fragments_in_chunk - page_start; + page_g.chunk = &chunks[blockIdx.y][blockIdx.x]; + page_g.chunk_id = blockIdx.y * num_columns + blockIdx.x; + page_g.page_type = PageType::DATA_PAGE; + page_g.hdr_size = 0; + page_g.max_hdr_size = 32; // Max size excluding statistics + if (ck_g.stats) { + uint32_t stats_hdr_len = 16; + if (col_g.stats_dtype == dtype_string) { + stats_hdr_len += 5 * 3 + 2 * max_stats_len; + } else { + stats_hdr_len += ((col_g.stats_dtype >= dtype_int64) ? 10 : 5) * 3; + } + page_g.max_hdr_size += stats_hdr_len; + } + page_g.page_data = ck_g.uncompressed_bfr + page_offset; + page_g.compressed_data = ck_g.compressed_bfr + comp_page_offset; + page_g.start_row = cur_row; + page_g.num_rows = rows_in_page; + page_g.num_leaf_values = leaf_values_in_page; + page_g.num_values = values_in_page; + uint32_t def_level_bits = col_g.num_def_level_bits(); + uint32_t rep_level_bits = col_g.num_rep_level_bits(); + // Run length = 4, max(rle/bitpack header) = 5, add one byte per 256 values for overhead + // TODO (dm): Improve readability of these calculations. + uint32_t def_level_size = + (def_level_bits != 0) + ? 4 + 5 + ((def_level_bits * page_g.num_values + 7) >> 3) + (page_g.num_values >> 8) + : 0; + uint32_t rep_level_size = + (rep_level_bits != 0) + ? 
4 + 5 + ((rep_level_bits * page_g.num_values + 7) >> 3) + (page_g.num_values >> 8) + : 0; + page_g.max_data_size = page_size + def_level_size + rep_level_size; + + pagestats_g.start_chunk = ck_g.first_fragment + page_start; + pagestats_g.num_chunks = page_g.num_fragments; + page_offset += page_g.max_hdr_size + page_g.max_data_size; + comp_page_offset += page_g.max_hdr_size + max_page_comp_data_size; + page_headers_size += page_g.max_hdr_size; + max_page_data_size = max(max_page_data_size, page_g.max_data_size); + cur_row += rows_in_page; + ck_max_stats_len = max(ck_max_stats_len, max_stats_len); + } + __syncwarp(); + if (t == 0) { + if (not pages.empty()) { pages[ck_g.first_page + num_pages] = page_g; } + + if (page_grstats) { page_grstats[ck_g.first_page + num_pages] = pagestats_g; } + } + + num_pages++; + page_size = 0; + rows_in_page = 0; + values_in_page = 0; + leaf_values_in_page = 0; + page_start = fragments_in_chunk; + max_stats_len = 0; + } + max_stats_len = max(max_stats_len, minmax_len); + num_dict_entries += frag_g.num_dict_vals; + page_size += fragment_data_size; + rows_in_page += frag_g.num_rows; + values_in_page += frag_g.num_values; + leaf_values_in_page += frag_g.num_leaf_values; + num_rows += frag_g.num_rows; + fragments_in_chunk++; + } while (frag_g.num_rows != 0); + __syncwarp(); + if (!t) { + if (ck_g.ck_stat_size == 0 && ck_g.stats) { + uint32_t ck_stat_size = 48 + 2 * ck_max_stats_len; + page_offset += ck_stat_size; + comp_page_offset += ck_stat_size; + ck_g.ck_stat_size = ck_stat_size; + } + ck_g.num_pages = num_pages; + ck_g.bfr_size = page_offset; + ck_g.page_headers_size = page_headers_size; + ck_g.max_page_data_size = max_page_data_size; + pagestats_g.start_chunk = ck_g.first_page + ck_g.use_dictionary; // Exclude dictionary + pagestats_g.num_chunks = num_pages - ck_g.use_dictionary; + } + } + __syncthreads(); + if (t == 0) { + if (not pages.empty()) ck_g.pages = &pages[ck_g.first_page]; + chunks[blockIdx.y][blockIdx.x] = ck_g; + if (chunk_grstats) chunk_grstats[blockIdx.y * num_columns + blockIdx.x] = pagestats_g; + } +} + +/** + * @brief Mask table representing how many consecutive repeats are needed to code a repeat run + *[nbits-1] + */ +static __device__ __constant__ uint32_t kRleRunMask[16] = { + 0x00ffffff, 0x0fff, 0x00ff, 0x3f, 0x0f, 0x0f, 0x7, 0x7, 0x3, 0x3, 0x3, 0x3, 0x1, 0x1, 0x1, 0x1}; + +/** + * @brief Variable-length encode an integer + */ +inline __device__ uint8_t* VlqEncode(uint8_t* p, uint32_t v) +{ + while (v > 0x7f) { + *p++ = (v | 0x80); + v >>= 7; + } + *p++ = v; + return p; +} + +/** + * @brief Pack literal values in output bitstream (1,2,4,8,12 or 16 bits per value) + */ +inline __device__ void PackLiterals( + uint8_t* dst, uint32_t v, uint32_t count, uint32_t w, uint32_t t) +{ + if (w == 1 || w == 2 || w == 4 || w == 8 || w == 12 || w == 16) { + if (t <= (count | 0x1f)) { + if (w == 1 || w == 2 || w == 4) { + uint32_t mask = 0; + if (w == 1) { + v |= shuffle_xor(v, 1) << 1; + v |= shuffle_xor(v, 2) << 2; + v |= shuffle_xor(v, 4) << 4; + mask = 0x7; + } else if (w == 2) { + v |= shuffle_xor(v, 1) << 2; + v |= shuffle_xor(v, 2) << 4; + mask = 0x3; + } else if (w == 4) { + v |= shuffle_xor(v, 1) << 4; + mask = 0x1; + } + if (t < count && mask && !(t & mask)) { dst[(t * w) >> 3] = v; } + return; + } else if (w == 8) { + if (t < count) { dst[t] = v; } + return; + } else if (w == 12) { + v |= shuffle_xor(v, 1) << 12; + if (t < count && !(t & 1)) { + dst[(t >> 1) * 3 + 0] = v; + dst[(t >> 1) * 3 + 1] = v >> 8; + dst[(t >> 1) * 3 + 2] = v >> 16; 
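+        // (Each even lane wrote 3 bytes holding its own 12-bit value in the
+        // low bits and its odd neighbour's value, merged in by the shuffle
+        // above, in the high bits.)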
+ } + return; + } else if (w == 16) { + if (t < count) { + dst[t * 2 + 0] = v; + dst[t * 2 + 1] = v >> 8; + } + return; + } + } else { + return; + } + } else { + // Scratch space to temporarily write to. Needed because we will use atomics to write 32 bit + // words but the destination mem may not be a multiple of 4 bytes. + // TODO (dm): This assumes blockdim = 128 and max bits per value = 16. Reduce magic numbers. + __shared__ uint32_t scratch[64]; + if (t < 64) { scratch[t] = 0; } + __syncthreads(); + + if (t <= count) { + uint64_t v64 = v; + v64 <<= (t * w) & 0x1f; + + // Copy 64 bit word into two 32 bit words while following C++ strict aliasing rules. + uint32_t v32[2]; + memcpy(&v32, &v64, sizeof(uint64_t)); + + // Atomically write result to scratch + if (v32[0]) { atomicOr(scratch + ((t * w) >> 5), v32[0]); } + if (v32[1]) { atomicOr(scratch + ((t * w) >> 5) + 1, v32[1]); } + } + __syncthreads(); + + // Copy scratch data to final destination + auto available_bytes = (count * w + 7) / 8; + + auto scratch_bytes = reinterpret_cast(&scratch[0]); + if (t < available_bytes) { dst[t] = scratch_bytes[t]; } + if (t + 128 < available_bytes) { dst[t + 128] = scratch_bytes[t + 128]; } + __syncthreads(); + } +} + +/** + * @brief RLE encoder + * + * @param[in,out] s Page encode state + * @param[in] numvals Total count of input values + * @param[in] nbits number of bits per symbol (1..16) + * @param[in] flush nonzero if last batch in block + * @param[in] t thread id (0..127) + */ +static __device__ void RleEncode( + page_enc_state_s* s, uint32_t numvals, uint32_t nbits, uint32_t flush, uint32_t t) +{ + uint32_t rle_pos = s->rle_pos; + uint32_t rle_run = s->rle_run; + + while (rle_pos < numvals || (flush && rle_run)) { + uint32_t pos = rle_pos + t; + if (rle_run > 0 && !(rle_run & 1)) { + // Currently in a long repeat run + uint32_t mask = ballot(pos < numvals && s->vals[pos & (rle_buffer_size - 1)] == s->run_val); + uint32_t rle_rpt_count, max_rpt_count; + if (!(t & 0x1f)) { s->rpt_map[t >> 5] = mask; } + __syncthreads(); + if (t < 32) { + uint32_t c32 = ballot(t >= 4 || s->rpt_map[t] != 0xffffffffu); + if (!t) { + uint32_t last_idx = __ffs(c32) - 1; + s->rle_rpt_count = + last_idx * 32 + ((last_idx < 4) ? __ffs(~s->rpt_map[last_idx]) - 1 : 0); + } + } + __syncthreads(); + max_rpt_count = min(numvals - rle_pos, 128); + rle_rpt_count = s->rle_rpt_count; + rle_run += rle_rpt_count << 1; + rle_pos += rle_rpt_count; + if (rle_rpt_count < max_rpt_count || (flush && rle_pos == numvals)) { + if (t == 0) { + uint32_t const run_val = s->run_val; + uint8_t* dst = VlqEncode(s->rle_out, rle_run); + *dst++ = run_val; + if (nbits > 8) { *dst++ = run_val >> 8; } + s->rle_out = dst; + } + rle_run = 0; + } + } else { + // New run or in a literal run + uint32_t v0 = s->vals[pos & (rle_buffer_size - 1)]; + uint32_t v1 = s->vals[(pos + 1) & (rle_buffer_size - 1)]; + uint32_t mask = ballot(pos + 1 < numvals && v0 == v1); + uint32_t maxvals = min(numvals - rle_pos, 128); + uint32_t rle_lit_count, rle_rpt_count; + if (!(t & 0x1f)) { s->rpt_map[t >> 5] = mask; } + __syncthreads(); + if (t < 32) { + // Repeat run can only start on a multiple of 8 values + uint32_t idx8 = (t * 8) >> 5; + uint32_t pos8 = (t * 8) & 0x1f; + uint32_t m0 = (idx8 < 4) ? s->rpt_map[idx8] : 0; + uint32_t m1 = (idx8 < 3) ? s->rpt_map[idx8 + 1] : 0; + uint32_t needed_mask = kRleRunMask[nbits - 1]; + mask = ballot((__funnelshift_r(m0, m1, pos8) & needed_mask) == needed_mask); + if (!t) { + uint32_t rle_run_start = (mask != 0) ? 
min((__ffs(mask) - 1) * 8, maxvals) : maxvals; + uint32_t rpt_len = 0; + if (rle_run_start < maxvals) { + uint32_t idx_cur = rle_run_start >> 5; + uint32_t idx_ofs = rle_run_start & 0x1f; + while (idx_cur < 4) { + m0 = (idx_cur < 4) ? s->rpt_map[idx_cur] : 0; + m1 = (idx_cur < 3) ? s->rpt_map[idx_cur + 1] : 0; + mask = ~__funnelshift_r(m0, m1, idx_ofs); + if (mask != 0) { + rpt_len += __ffs(mask) - 1; + break; + } + rpt_len += 32; + idx_cur++; + } + } + s->rle_lit_count = rle_run_start; + s->rle_rpt_count = min(rpt_len, maxvals - rle_run_start); + } + } + __syncthreads(); + rle_lit_count = s->rle_lit_count; + rle_rpt_count = s->rle_rpt_count; + if (rle_lit_count != 0 || (rle_run != 0 && rle_rpt_count != 0)) { + uint32_t lit_div8; + bool need_more_data = false; + if (!flush && rle_pos + rle_lit_count == numvals) { + // Wait for more data + rle_lit_count -= min(rle_lit_count, 24); + need_more_data = true; + } + if (rle_lit_count != 0) { + lit_div8 = (rle_lit_count + ((flush && rle_pos + rle_lit_count == numvals) ? 7 : 0)) >> 3; + if (rle_run + lit_div8 * 2 > 0x7f) { + lit_div8 = 0x3f - (rle_run >> 1); // Limit to fixed 1-byte header (504 literals) + rle_rpt_count = 0; // Defer repeat run + } + if (lit_div8 != 0) { + uint8_t* dst = s->rle_out + 1 + (rle_run >> 1) * nbits; + PackLiterals(dst, (rle_pos + t < numvals) ? v0 : 0, lit_div8 * 8, nbits, t); + rle_run = (rle_run + lit_div8 * 2) | 1; + rle_pos = min(rle_pos + lit_div8 * 8, numvals); + } + } + if (rle_run >= ((rle_rpt_count != 0 || (flush && rle_pos == numvals)) ? 0x03 : 0x7f)) { + __syncthreads(); + // Complete literal run + if (!t) { + uint8_t* dst = s->rle_out; + dst[0] = rle_run; // At most 0x7f + dst += 1 + nbits * (rle_run >> 1); + s->rle_out = dst; + } + rle_run = 0; + } + if (need_more_data) { break; } + } + // Start a repeat run + if (rle_rpt_count != 0) { + if (t == s->rle_lit_count) { s->run_val = v0; } + rle_run = rle_rpt_count * 2; + rle_pos += rle_rpt_count; + if (rle_pos + 1 == numvals && !flush) { break; } + } + } + __syncthreads(); + } + __syncthreads(); + if (!t) { + s->rle_run = rle_run; + s->rle_pos = rle_pos; + s->rle_numvals = numvals; + } +} + +/** + * @brief PLAIN bool encoder + * + * @param[in,out] s Page encode state + * @param[in] numvals Total count of input values + * @param[in] flush nonzero if last batch in block + * @param[in] t thread id (0..127) + */ +static __device__ void PlainBoolEncode(page_enc_state_s* s, + uint32_t numvals, + uint32_t flush, + uint32_t t) +{ + uint32_t rle_pos = s->rle_pos; + uint8_t* dst = s->rle_out; + + while (rle_pos < numvals) { + uint32_t pos = rle_pos + t; + uint32_t v = (pos < numvals) ? s->vals[pos & (rle_buffer_size - 1)] : 0; + uint32_t n = min(numvals - rle_pos, 128); + uint32_t nbytes = (n + ((flush) ? 7 : 0)) >> 3; + if (!nbytes) { break; } + v |= shuffle_xor(v, 1) << 1; + v |= shuffle_xor(v, 2) << 2; + v |= shuffle_xor(v, 4) << 4; + if (t < n && !(t & 7)) { dst[t >> 3] = v; } + rle_pos = min(rle_pos + nbytes * 8, numvals); + dst += nbytes; + } + __syncthreads(); + if (!t) { + s->rle_pos = rle_pos; + s->rle_numvals = numvals; + s->rle_out = dst; + } +} + +/** + * @brief Determines the difference between the Proleptic Gregorian Calendar epoch (1970-01-01 + * 00:00:00 UTC) and the Julian date epoch (-4713-11-24 12:00:00 UTC). + * + * @return The difference between two epochs in `cuda::std::chrono::duration` format with a period + * of hours. 
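+ * (Numerically this difference is 2440587.5 days, i.e. 58574100 hours, since
+ * the Unix epoch corresponds to Julian Date 2440587.5.)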
+ */ +constexpr auto julian_calendar_epoch_diff() +{ + using namespace cuda::std::chrono; + using namespace cuda::std::chrono_literals; + return sys_days{January / 1 / 1970} - (sys_days{November / 24 / -4713} + 12h); +} + +/** + * @brief Converts a timestamp_ns into a pair with nanoseconds since midnight and number of Julian + * days. Does not deal with time zones. Used by INT96 code. + * + * @param ns number of nanoseconds since epoch + * @return std::pair where nanoseconds is the number of nanoseconds + * elapsed in the day and days is the number of days from Julian epoch. + */ +static __device__ std::pair convert_nanoseconds(timestamp_ns const ns) +{ + using namespace cuda::std::chrono; + auto const nanosecond_ticks = ns.time_since_epoch(); + auto const gregorian_days = floor(nanosecond_ticks); + auto const julian_days = gregorian_days + ceil(julian_calendar_epoch_diff()); + + auto const last_day_ticks = nanosecond_ticks - gregorian_days; + return {last_day_ticks, julian_days}; +} + +// blockDim(128, 1, 1) +template +__global__ void __launch_bounds__(128, 8) + gpuEncodePages(device_span pages, + device_span comp_in, + device_span comp_stat) +{ + __shared__ __align__(8) page_enc_state_s state_g; + using block_scan = cub::BlockScan; + __shared__ typename block_scan::TempStorage temp_storage; + + page_enc_state_s* const s = &state_g; + uint32_t t = threadIdx.x; + + if (t == 0) { + s->page = pages[blockIdx.x]; + s->ck = *s->page.chunk; + s->col = *s->ck.col_desc; + s->cur = s->page.page_data + s->page.max_hdr_size; + } + __syncthreads(); + + // Encode Repetition and Definition levels + if (s->page.page_type != PageType::DICTIONARY_PAGE && + (s->col.num_def_level_bits()) != 0 && // This means max definition level is not 0 (nullable) + (s->col.num_rep_level_bits()) == 0 // This means there are no repetition levels (non-list) + ) { + // Calculate definition levels from validity + uint32_t def_lvl_bits = s->col.num_def_level_bits(); + if (def_lvl_bits != 0) { + if (!t) { + s->rle_run = 0; + s->rle_pos = 0; + s->rle_numvals = 0; + s->rle_out = s->cur + 4; + } + __syncthreads(); + while (s->rle_numvals < s->page.num_rows) { + uint32_t rle_numvals = s->rle_numvals; + uint32_t nrows = min(s->page.num_rows - rle_numvals, 128); + uint32_t row = s->page.start_row + rle_numvals + t; + // Definition level encodes validity. 
Checks the valid map and if it is valid, then sets the + // def_lvl accordingly and sets it in s->vals which is then given to RleEncode to encode + uint32_t def_lvl = [&]() { + bool within_bounds = rle_numvals + t < s->page.num_rows && row < s->col.num_rows; + if (not within_bounds) { return 0u; } + uint32_t def = 0; + size_type l = 0; + bool is_col_struct = false; + auto col = *s->col.parent_column; + do { + // If col not nullable then it does not contribute to def levels + if (s->col.nullability[l]) { + if (col.is_valid(row)) { + ++def; + } else { + // We have found the shallowest level at which this row is null + break; + } + } + is_col_struct = (col.type().id() == type_id::STRUCT); + if (is_col_struct) { + row += col.offset(); + col = col.child(0); + ++l; + } + } while (is_col_struct); + return def; + }(); + s->vals[(rle_numvals + t) & (rle_buffer_size - 1)] = def_lvl; + __syncthreads(); + rle_numvals += nrows; + RleEncode(s, rle_numvals, def_lvl_bits, (rle_numvals == s->page.num_rows), t); + __syncthreads(); + } + if (t < 32) { + uint8_t* cur = s->cur; + uint8_t* rle_out = s->rle_out; + if (t < 4) { + uint32_t rle_bytes = (uint32_t)(rle_out - cur) - 4; + cur[t] = rle_bytes >> (t * 8); + } + __syncwarp(); + if (t == 0) { s->cur = rle_out; } + } + } + } else if (s->page.page_type != PageType::DICTIONARY_PAGE && + s->col.num_rep_level_bits() != 0 // This means there ARE repetition levels (has list) + ) { + auto encode_levels = [&](uint8_t const* lvl_val_data, uint32_t nbits) { + // For list types, the repetition and definition levels are pre-calculated. We just need to + // encode and write them now. + if (!t) { + s->rle_run = 0; + s->rle_pos = 0; + s->rle_numvals = 0; + s->rle_out = s->cur + 4; + } + __syncthreads(); + size_type page_first_val_idx = s->col.level_offsets[s->page.start_row]; + size_type col_last_val_idx = s->col.level_offsets[s->col.num_rows]; + while (s->rle_numvals < s->page.num_values) { + uint32_t rle_numvals = s->rle_numvals; + uint32_t nvals = min(s->page.num_values - rle_numvals, 128); + uint32_t idx = page_first_val_idx + rle_numvals + t; + uint32_t lvl_val = + (rle_numvals + t < s->page.num_values && idx < col_last_val_idx) ? lvl_val_data[idx] : 0; + s->vals[(rle_numvals + t) & (rle_buffer_size - 1)] = lvl_val; + __syncthreads(); + rle_numvals += nvals; + RleEncode(s, rle_numvals, nbits, (rle_numvals == s->page.num_values), t); + __syncthreads(); + } + if (t < 32) { + uint8_t* cur = s->cur; + uint8_t* rle_out = s->rle_out; + if (t < 4) { + uint32_t rle_bytes = (uint32_t)(rle_out - cur) - 4; + cur[t] = rle_bytes >> (t * 8); + } + __syncwarp(); + if (t == 0) { s->cur = rle_out; } + } + }; + encode_levels(s->col.rep_values, s->col.num_rep_level_bits()); + __syncthreads(); + encode_levels(s->col.def_values, s->col.num_def_level_bits()); + } + // Encode data values + __syncthreads(); + auto const physical_type = s->col.physical_type; + auto const type_id = s->col.leaf_column->type().id(); + auto const dtype_len_out = physical_type_len(physical_type, type_id); + auto const dtype_len_in = [&]() -> uint32_t { + if (physical_type == INT32) { return int32_logical_len(type_id); } + if (physical_type == INT96) { return sizeof(int64_t); } + return dtype_len_out; + }(); + + auto const dict_bits = (physical_type == BOOLEAN) ? 1 + : (s->ck.use_dictionary and s->page.page_type != PageType::DICTIONARY_PAGE) + ? 
s->ck.dict_rle_bits + : -1; + if (t == 0) { + uint8_t* dst = s->cur; + s->rle_run = 0; + s->rle_pos = 0; + s->rle_numvals = 0; + s->rle_out = dst; + if (dict_bits >= 0 && physical_type != BOOLEAN) { + dst[0] = dict_bits; + s->rle_out = dst + 1; + } + s->page_start_val = s->page.start_row; // Dictionary page's start row is chunk's start row + auto chunk_start_val = s->ck.start_row; + if (s->col.parent_column != nullptr) { // TODO: remove this check. parent is now never nullptr + auto col = *(s->col.parent_column); + auto current_page_start_val = s->page_start_val; + // TODO: We do this so much. Add a global function that converts row idx to val idx + while (col.type().id() == type_id::LIST or col.type().id() == type_id::STRUCT) { + if (col.type().id() == type_id::STRUCT) { + current_page_start_val += col.offset(); + chunk_start_val += col.offset(); + col = col.child(0); + } else { + auto offset_col = col.child(lists_column_view::offsets_column_index); + current_page_start_val = + offset_col.element(current_page_start_val + col.offset()); + chunk_start_val = offset_col.element(chunk_start_val + col.offset()); + col = col.child(lists_column_view::child_column_index); + } + } + s->page_start_val = current_page_start_val; + s->chunk_start_val = chunk_start_val; + } + } + __syncthreads(); + for (uint32_t cur_val_idx = 0; cur_val_idx < s->page.num_leaf_values;) { + uint32_t nvals = min(s->page.num_leaf_values - cur_val_idx, 128); + uint32_t len, pos; + + auto [is_valid, val_idx] = [&]() { + uint32_t val_idx; + uint32_t is_valid; + + size_type val_idx_in_block = cur_val_idx + t; + if (s->page.page_type == PageType::DICTIONARY_PAGE) { + val_idx = val_idx_in_block; + is_valid = (val_idx < s->page.num_leaf_values); + if (is_valid) { val_idx = s->ck.dict_data[val_idx]; } + } else { + size_type val_idx_in_leaf_col = s->page_start_val + val_idx_in_block; + + is_valid = (val_idx_in_leaf_col < s->col.leaf_column->size() && + val_idx_in_block < s->page.num_leaf_values) + ? s->col.leaf_column->is_valid(val_idx_in_leaf_col) + : 0; + val_idx = + (s->ck.use_dictionary) ? 
val_idx_in_leaf_col - s->chunk_start_val : val_idx_in_leaf_col; + } + return std::make_tuple(is_valid, val_idx); + }(); + + cur_val_idx += nvals; + if (dict_bits >= 0) { + // Dictionary encoding + if (dict_bits > 0) { + uint32_t rle_numvals; + uint32_t rle_numvals_in_block; + block_scan(temp_storage).ExclusiveSum(is_valid, pos, rle_numvals_in_block); + rle_numvals = s->rle_numvals; + if (is_valid) { + uint32_t v; + if (physical_type == BOOLEAN) { + v = s->col.leaf_column->element(val_idx); + } else { + v = s->ck.dict_index[val_idx]; + } + s->vals[(rle_numvals + pos) & (rle_buffer_size - 1)] = v; + } + rle_numvals += rle_numvals_in_block; + __syncthreads(); + if ((!enable_bool_rle) && (physical_type == BOOLEAN)) { + PlainBoolEncode(s, rle_numvals, (cur_val_idx == s->page.num_leaf_values), t); + } else { + RleEncode(s, rle_numvals, dict_bits, (cur_val_idx == s->page.num_leaf_values), t); + } + __syncthreads(); + } + if (t == 0) { s->cur = s->rle_out; } + __syncthreads(); + } else { + // Non-dictionary encoding + uint8_t* dst = s->cur; + + if (is_valid) { + len = dtype_len_out; + if (physical_type == BYTE_ARRAY) { + len += s->col.leaf_column->element(val_idx).size_bytes(); + } + } else { + len = 0; + } + uint32_t total_len = 0; + block_scan(temp_storage).ExclusiveSum(len, pos, total_len); + __syncthreads(); + if (t == 0) { s->cur = dst + total_len; } + if (is_valid) { + switch (physical_type) { + case INT32: + case FLOAT: { + int32_t v; + if (dtype_len_in == 4) + v = s->col.leaf_column->element(val_idx); + else if (dtype_len_in == 2) + v = s->col.leaf_column->element(val_idx); + else + v = s->col.leaf_column->element(val_idx); + dst[pos + 0] = v; + dst[pos + 1] = v >> 8; + dst[pos + 2] = v >> 16; + dst[pos + 3] = v >> 24; + } break; + case INT64: { + int64_t v = s->col.leaf_column->element(val_idx); + int32_t ts_scale = s->col.ts_scale; + if (ts_scale != 0) { + if (ts_scale < 0) { + v /= -ts_scale; + } else { + v *= ts_scale; + } + } + dst[pos + 0] = v; + dst[pos + 1] = v >> 8; + dst[pos + 2] = v >> 16; + dst[pos + 3] = v >> 24; + dst[pos + 4] = v >> 32; + dst[pos + 5] = v >> 40; + dst[pos + 6] = v >> 48; + dst[pos + 7] = v >> 56; + } break; + case INT96: { + int64_t v = s->col.leaf_column->element(val_idx); + int32_t ts_scale = s->col.ts_scale; + if (ts_scale != 0) { + if (ts_scale < 0) { + v /= -ts_scale; + } else { + v *= ts_scale; + } + } + + auto const ret = convert_nanoseconds([&]() { + switch (s->col.leaf_column->type().id()) { + case type_id::TIMESTAMP_SECONDS: + case type_id::TIMESTAMP_MILLISECONDS: { + return timestamp_ns{duration_ms{v}}; + } break; + case type_id::TIMESTAMP_MICROSECONDS: + case type_id::TIMESTAMP_NANOSECONDS: { + return timestamp_ns{duration_us{v}}; + } break; + } + return timestamp_ns{duration_ns{0}}; + }()); + + // the 12 bytes of fixed length data. 
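+ // Byte layout written below: bytes 0-7 hold the nanoseconds elapsed in the day as a
+ // little-endian int64, bytes 8-11 hold the Julian day number as a little-endian int32.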
+ v = ret.first.count(); + dst[pos + 0] = v; + dst[pos + 1] = v >> 8; + dst[pos + 2] = v >> 16; + dst[pos + 3] = v >> 24; + dst[pos + 4] = v >> 32; + dst[pos + 5] = v >> 40; + dst[pos + 6] = v >> 48; + dst[pos + 7] = v >> 56; + uint32_t w = ret.second.count(); + dst[pos + 8] = w; + dst[pos + 9] = w >> 8; + dst[pos + 10] = w >> 16; + dst[pos + 11] = w >> 24; + } break; + + case DOUBLE: { + auto v = s->col.leaf_column->element(val_idx); + memcpy(dst + pos, &v, 8); + } break; + case BYTE_ARRAY: { + auto str = s->col.leaf_column->element(val_idx); + uint32_t v = len - 4; // string length + dst[pos + 0] = v; + dst[pos + 1] = v >> 8; + dst[pos + 2] = v >> 16; + dst[pos + 3] = v >> 24; + if (v != 0) memcpy(dst + pos + 4, str.data(), v); + } break; + case FIXED_LEN_BYTE_ARRAY: { + if (type_id == type_id::DECIMAL128) { + // When using FIXED_LEN_BYTE_ARRAY for decimals, the rep is encoded in big-endian + auto const v = s->col.leaf_column->element(val_idx).value(); + auto const v_char_ptr = reinterpret_cast(&v); + thrust::copy(thrust::seq, + thrust::make_reverse_iterator(v_char_ptr + sizeof(v)), + thrust::make_reverse_iterator(v_char_ptr), + dst + pos); + } + } break; + } + } + __syncthreads(); + } + } + if (t == 0) { + uint8_t* base = s->page.page_data + s->page.max_hdr_size; + uint32_t actual_data_size = static_cast(s->cur - base); + uint32_t compressed_bfr_size = GetMaxCompressedBfrSize(actual_data_size); + s->page.max_data_size = actual_data_size; + s->comp_in.srcDevice = base; + s->comp_in.srcSize = actual_data_size; + s->comp_in.dstDevice = s->page.compressed_data + s->page.max_hdr_size; + s->comp_in.dstSize = compressed_bfr_size; + s->comp_stat.bytes_written = 0; + s->comp_stat.status = ~0; + s->comp_stat.reserved = 0; + } + __syncthreads(); + if (t == 0) { + pages[blockIdx.x] = s->page; + if (not comp_in.empty()) comp_in[blockIdx.x] = s->comp_in; + if (not comp_stat.empty()) { + comp_stat[blockIdx.x] = s->comp_stat; + pages[blockIdx.x].comp_stat = &comp_stat[blockIdx.x]; + } + } +} + +// blockDim(128, 1, 1) +__global__ void __launch_bounds__(128) gpuDecideCompression(device_span chunks) +{ + // After changing the way structs are loaded from coop to normal, this kernel has no business + // being launched with 128 thread block. It can easily be a single warp. 
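+ // One thread block per chunk: the first warp accumulates the uncompressed and compressed sizes
+ // of all pages in the chunk and counts compression errors, then thread 0 keeps the compressed
+ // buffer only if every page compressed successfully and the total size actually shrank.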
+ __shared__ __align__(8) EncColumnChunk ck_g; + __shared__ __align__(4) unsigned int error_count; + using warp_reduce = cub::WarpReduce; + __shared__ typename warp_reduce::TempStorage temp_storage[2]; + __shared__ volatile bool has_compression; + + uint32_t t = threadIdx.x; + uint32_t uncompressed_data_size = 0; + uint32_t compressed_data_size = 0; + uint32_t num_pages; + + if (t == 0) { + ck_g = chunks[blockIdx.x]; + atomicAnd(&error_count, 0); + has_compression = false; + } + __syncthreads(); + if (t < 32) { + num_pages = ck_g.num_pages; + for (uint32_t page = t; page < num_pages; page += 32) { + auto& curr_page = ck_g.pages[page]; + uint32_t page_data_size = curr_page.max_data_size; + uncompressed_data_size += page_data_size; + if (auto comp_status = curr_page.comp_stat; comp_status != nullptr) { + has_compression = true; + compressed_data_size += comp_status->bytes_written; + if (comp_status->status != 0) { atomicAdd(&error_count, 1); } + } + } + uncompressed_data_size = warp_reduce(temp_storage[0]).Sum(uncompressed_data_size); + compressed_data_size = warp_reduce(temp_storage[1]).Sum(compressed_data_size); + } + __syncthreads(); + if (t == 0) { + bool is_compressed; + if (has_compression) { + uint32_t compression_error = atomicAdd(&error_count, 0); + is_compressed = (!compression_error && compressed_data_size < uncompressed_data_size); + } else { + is_compressed = false; + } + chunks[blockIdx.x].is_compressed = is_compressed; + chunks[blockIdx.x].bfr_size = uncompressed_data_size; + chunks[blockIdx.x].compressed_size = + (is_compressed) ? compressed_data_size : uncompressed_data_size; + } +} + +/** + * Minimal thrift compact protocol support + */ +inline __device__ uint8_t* cpw_put_uint32(uint8_t* p, uint32_t v) +{ + while (v > 0x7f) { + *p++ = v | 0x80; + v >>= 7; + } + *p++ = v; + return p; +} + +inline __device__ uint8_t* cpw_put_uint64(uint8_t* p, uint64_t v) +{ + while (v > 0x7f) { + *p++ = v | 0x80; + v >>= 7; + } + *p++ = v; + return p; +} + +inline __device__ uint8_t* cpw_put_int32(uint8_t* p, int32_t v) +{ + int32_t s = (v < 0); + return cpw_put_uint32(p, (v ^ -s) * 2 + s); +} + +inline __device__ uint8_t* cpw_put_int64(uint8_t* p, int64_t v) +{ + int64_t s = (v < 0); + return cpw_put_uint64(p, (v ^ -s) * 2 + s); +} + +inline __device__ uint8_t* cpw_put_fldh(uint8_t* p, int f, int cur, int t) +{ + if (f > cur && f <= cur + 15) { + *p++ = ((f - cur) << 4) | t; + return p; + } else { + *p++ = t; + return cpw_put_int32(p, f); + } +} + +class header_encoder { + uint8_t* current_header_ptr; + int current_field_index; + + public: + inline __device__ header_encoder(uint8_t* header_start) + : current_header_ptr(header_start), current_field_index(0) + { + } + + inline __device__ void field_struct_begin(int field) + { + current_header_ptr = + cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_STRUCT); + current_field_index = 0; + } + + inline __device__ void field_struct_end(int field) + { + *current_header_ptr++ = 0; + current_field_index = field; + } + + template + inline __device__ void field_int32(int field, T value) + { + current_header_ptr = cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_I32); + current_header_ptr = cpw_put_int32(current_header_ptr, static_cast(value)); + current_field_index = field; + } + + template + inline __device__ void field_int64(int field, T value) + { + current_header_ptr = cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_I64); + current_header_ptr = cpw_put_int64(current_header_ptr, 
static_cast(value)); + current_field_index = field; + } + + inline __device__ void field_binary(int field, const void* value, uint32_t length) + { + current_header_ptr = + cpw_put_fldh(current_header_ptr, field, current_field_index, ST_FLD_BINARY); + current_header_ptr = cpw_put_uint32(current_header_ptr, length); + memcpy(current_header_ptr, value, length); + current_header_ptr += length; + current_field_index = field; + } + + inline __device__ void end(uint8_t** header_end, bool termination_flag = true) + { + if (termination_flag == false) { *current_header_ptr++ = 0; } + *header_end = current_header_ptr; + } + + inline __device__ uint8_t* get_ptr(void) { return current_header_ptr; } + + inline __device__ void set_ptr(uint8_t* ptr) { current_header_ptr = ptr; } +}; + +__device__ uint8_t* EncodeStatistics(uint8_t* start, + const statistics_chunk* s, + uint8_t dtype, + float* fp_scratch) +{ + uint8_t *end, dtype_len; + switch (dtype) { + case dtype_bool: dtype_len = 1; break; + case dtype_int8: + case dtype_int16: + case dtype_int32: + case dtype_date32: + case dtype_float32: dtype_len = 4; break; + case dtype_int64: + case dtype_timestamp64: + case dtype_float64: + case dtype_decimal64: dtype_len = 8; break; + case dtype_decimal128: dtype_len = 16; break; + case dtype_string: + default: dtype_len = 0; break; + } + header_encoder encoder(start); + encoder.field_int64(3, s->null_count); + if (s->has_minmax) { + const void *vmin, *vmax; + uint32_t lmin, lmax; + + if (dtype == dtype_string) { + lmin = s->min_value.str_val.length; + vmin = s->min_value.str_val.ptr; + lmax = s->max_value.str_val.length; + vmax = s->max_value.str_val.ptr; + } else { + lmin = lmax = dtype_len; + if (dtype == dtype_float32) { // Convert from double to float32 + fp_scratch[0] = s->min_value.fp_val; + fp_scratch[1] = s->max_value.fp_val; + vmin = &fp_scratch[0]; + vmax = &fp_scratch[1]; + } else { + vmin = &s->min_value; + vmax = &s->max_value; + } + } + encoder.field_binary(5, vmax, lmax); + encoder.field_binary(6, vmin, lmin); + } + encoder.end(&end); + return end; +} + +// blockDim(128, 1, 1) +__global__ void __launch_bounds__(128) + gpuEncodePageHeaders(device_span pages, + device_span comp_stat, + device_span page_stats, + const statistics_chunk* chunk_stats) +{ + // When this whole kernel becomes single thread, the following variables need not be __shared__ + __shared__ __align__(8) parquet_column_device_view col_g; + __shared__ __align__(8) EncColumnChunk ck_g; + __shared__ __align__(8) EncPage page_g; + __shared__ __align__(8) float fp_scratch[2]; + + uint32_t t = threadIdx.x; + + if (t == 0) { + uint8_t *hdr_start, *hdr_end; + uint32_t compressed_page_size, uncompressed_page_size; + + page_g = pages[blockIdx.x]; + ck_g = *page_g.chunk; + col_g = *ck_g.col_desc; + + if (chunk_stats && &pages[blockIdx.x] == ck_g.pages) { // Is this the first page in a chunk? + hdr_start = (ck_g.is_compressed) ? 
ck_g.compressed_bfr : ck_g.uncompressed_bfr; + hdr_end = + EncodeStatistics(hdr_start, &chunk_stats[page_g.chunk_id], col_g.stats_dtype, fp_scratch); + page_g.chunk->ck_stat_size = static_cast(hdr_end - hdr_start); + } + uncompressed_page_size = page_g.max_data_size; + if (ck_g.is_compressed) { + hdr_start = page_g.compressed_data; + compressed_page_size = (uint32_t)comp_stat[blockIdx.x].bytes_written; + page_g.max_data_size = compressed_page_size; + } else { + hdr_start = page_g.page_data; + compressed_page_size = uncompressed_page_size; + } + header_encoder encoder(hdr_start); + PageType page_type = page_g.page_type; + // NOTE: For dictionary encoding, parquet v2 recommends using PLAIN in dictionary page and + // RLE_DICTIONARY in data page, but parquet v1 uses PLAIN_DICTIONARY in both dictionary and + // data pages (actual encoding is identical). + Encoding encoding; + if (enable_bool_rle) { + encoding = (col_g.physical_type == BOOLEAN) ? Encoding::RLE + : (page_type == PageType::DICTIONARY_PAGE || page_g.chunk->use_dictionary) + ? Encoding::PLAIN_DICTIONARY + : Encoding::PLAIN; + } else { + encoding = (page_type == PageType::DICTIONARY_PAGE || page_g.chunk->use_dictionary) + ? Encoding::PLAIN_DICTIONARY + : Encoding::PLAIN; + } + encoder.field_int32(1, page_type); + encoder.field_int32(2, uncompressed_page_size); + encoder.field_int32(3, compressed_page_size); + if (page_type == PageType::DATA_PAGE) { + // DataPageHeader + encoder.field_struct_begin(5); + encoder.field_int32(1, page_g.num_values); // NOTE: num_values != num_rows for list types + encoder.field_int32(2, encoding); // encoding + encoder.field_int32(3, Encoding::RLE); // definition_level_encoding + encoder.field_int32(4, Encoding::RLE); // repetition_level_encoding + // Optionally encode page-level statistics + if (not page_stats.empty()) { + encoder.field_struct_begin(5); + encoder.set_ptr(EncodeStatistics( + encoder.get_ptr(), &page_stats[blockIdx.x], col_g.stats_dtype, fp_scratch)); + encoder.field_struct_end(5); + } + encoder.field_struct_end(5); + } else { + // DictionaryPageHeader + encoder.field_struct_begin(7); + encoder.field_int32(1, ck_g.num_dict_entries); // number of values in dictionary + encoder.field_int32(2, encoding); + encoder.field_struct_end(7); + } + encoder.end(&hdr_end, false); + page_g.hdr_size = (uint32_t)(hdr_end - hdr_start); + } + __syncthreads(); + if (t == 0) pages[blockIdx.x] = page_g; +} + +// blockDim(1024, 1, 1) +__global__ void __launch_bounds__(1024) + gpuGatherPages(device_span chunks, device_span pages) +{ + __shared__ __align__(8) EncColumnChunk ck_g; + __shared__ __align__(8) EncPage page_g; + + uint32_t t = threadIdx.x; + uint8_t *dst, *dst_base; + const EncPage* first_page; + uint32_t num_pages, uncompressed_size; + + if (t == 0) ck_g = chunks[blockIdx.x]; + __syncthreads(); + + first_page = ck_g.pages; + num_pages = ck_g.num_pages; + dst = (ck_g.is_compressed) ? ck_g.compressed_bfr : ck_g.uncompressed_bfr; + dst += ck_g.ck_stat_size; // Skip over chunk statistics + dst_base = dst; + uncompressed_size = ck_g.bfr_size; + for (uint32_t page = 0; page < num_pages; page++) { + const uint8_t* src; + uint32_t hdr_len, data_len; + + if (t == 0) { page_g = first_page[page]; } + __syncthreads(); + + src = (ck_g.is_compressed) ? 
page_g.compressed_data : page_g.page_data; + // Copy page header + hdr_len = page_g.hdr_size; + memcpy_block<1024, true>(dst, src, hdr_len, t); + src += page_g.max_hdr_size; + dst += hdr_len; + // Copy page data + uncompressed_size += hdr_len; + data_len = page_g.max_data_size; + memcpy_block<1024, true>(dst, src, data_len, t); + dst += data_len; + __syncthreads(); + if (!t && page == 0 && ck_g.use_dictionary) { ck_g.dictionary_size = hdr_len + data_len; } + } + if (t == 0) { + chunks[blockIdx.x].bfr_size = uncompressed_size; + chunks[blockIdx.x].compressed_size = (dst - dst_base); + if (ck_g.use_dictionary) { chunks[blockIdx.x].dictionary_size = ck_g.dictionary_size; } + } +} + +/** + * @brief Functor to get definition level value for a nested struct column until the leaf level or + * the first list level. + * + */ +struct def_level_fn { + column_device_view const* parent_col; + uint8_t const* d_nullability; + uint8_t sub_level_start; + uint8_t curr_def_level; + + __device__ uint32_t operator()(size_type i) + { + uint32_t def = curr_def_level; + uint8_t l = sub_level_start; + bool is_col_struct = false; + auto col = *parent_col; + do { + // If col not nullable then it does not contribute to def levels + if (d_nullability[l]) { + if (not col.nullable() or bit_is_set(col.null_mask(), i)) { + ++def; + } else { // We have found the shallowest level at which this row is null + break; + } + } + is_col_struct = (col.type().id() == type_id::STRUCT); + if (is_col_struct) { + col = col.child(0); + ++l; + } + } while (is_col_struct); + return def; + } +}; + +/** + * @brief Get the dremel offsets and repetition and definition levels for a LIST column + * + * The repetition and definition level values are ideally computed using a recursive call over a + * nested structure but in order to better utilize GPU resources, this function calculates them + * with a bottom up merge method. + * + * Given a LIST column of type `List>` like so: + * ``` + * col = { + * [], + * [[], [1, 2, 3], [4, 5]], + * [[]] + * } + * ``` + * We can represent it in cudf format with two level of offsets like this: + * ``` + * Level 0 offsets = {0, 0, 3, 5, 6} + * Level 1 offsets = {0, 0, 3, 5, 5} + * Values = {1, 2, 3, 4, 5} + * ``` + * The desired result of this function is the repetition and definition level values that + * correspond to the data values: + * ``` + * col = {[], [[], [1, 2, 3], [4, 5]], [[]]} + * def = { 0 1, 2, 2, 2, 2, 2, 1 } + * rep = { 0, 0, 0, 2, 2, 1, 2, 0 } + * ``` + * + * Since repetition and definition levels arrays contain a value for each empty list, the size of + * the rep/def level array can be given by + * ``` + * rep_level.size() = size of leaf column + number of empty lists in level 0 + * + number of empty lists in level 1 ... + * ``` + * + * We start with finding the empty lists in the penultimate level and merging it with the indices + * of the leaf level. The values for the merge are the definition and repetition levels + * ``` + * empties at level 1 = {0, 5} + * def values at 1 = {1, 1} + * rep values at 1 = {1, 1} + * indices at leaf = {0, 1, 2, 3, 4} + * def values at leaf = {2, 2, 2, 2, 2} + * rep values at leaf = {2, 2, 2, 2, 2} + * ``` + * + * merged def values = {1, 2, 2, 2, 2, 2, 1} + * merged rep values = {1, 2, 2, 2, 2, 2, 1} + * + * The size of the rep/def values is now larger than the leaf values and the offsets need to be + * adjusted in order to point to the correct start indices. 
We do this with an exclusive scan over + * the indices of offsets of empty lists and adding to existing offsets. + * ``` + * Level 1 new offsets = {0, 1, 4, 6, 7} + * ``` + * Repetition values at the beginning of a list need to be decremented. We use the new offsets to + * scatter the rep value. + * ``` + * merged rep values = {1, 2, 2, 2, 2, 2, 1} + * scatter (1, new offsets) + * new offsets = {0, 1, 4, 6, 7} + * new rep values = {1, 1, 2, 2, 1, 2, 1} + * ``` + * + * Similarly we merge up all the way till level 0 offsets + * + * STRUCT COLUMNS : + * In case of struct columns, we don't have to merge struct levels with their children because a + * struct is the same size as its children. e.g. for a column `struct`, if the row `i` + * is null, then the children columns `int` and `float` are also null at `i`. They also have the + * null entry represented in their respective null masks. So for any case of strictly struct based + * nesting, we can get the definition levels merely by iterating over the nesting for the same row. + * + * In case struct and lists are intermixed, the definition levels of all the contiguous struct + * levels can be constructed using the aforementioned iterative method. Only when we reach a list + * level, we need to do a merge with the subsequent level. + * + * So, for a column like `struct>`, we are going to merge between the levels `struct>`, we are going to merge between `list` and `struct`. + * + * In general, one nesting level is the list level and any struct level that precedes it. + * + * A few more examples to visualize the partitioning of column hierarchy into nesting levels: + * (L is list, S is struct, i is integer(leaf data level), angle brackets omitted) + * ``` + * 1. LSi = L Si + * - | -- + * + * 2. LLSi = L L Si + * - | - | -- + * + * 3. SSLi = SSL i + * --- | - + * + * 4. 
LLSLSSi = L L SL SSi + * - | - | -- | --- + * ``` + */ +dremel_data get_dremel_data(column_view h_col, + // TODO(cp): use device_span once it is converted to a single hd_vec + rmm::device_uvector const& d_nullability, + std::vector const& nullability, + rmm::cuda_stream_view stream) +{ + auto get_list_level = [](column_view col) { + while (col.type().id() == type_id::STRUCT) { + col = col.child(0); + } + return col; + }; + + auto get_empties = [&](column_view col, size_type start, size_type end) { + auto lcv = lists_column_view(get_list_level(col)); + rmm::device_uvector empties_idx(lcv.size(), stream); + rmm::device_uvector empties(lcv.size(), stream); + auto d_off = lcv.offsets().data(); + + auto empties_idx_end = + thrust::copy_if(rmm::exec_policy(stream), + thrust::make_counting_iterator(start), + thrust::make_counting_iterator(end), + empties_idx.begin(), + [d_off] __device__(auto i) { return d_off[i] == d_off[i + 1]; }); + auto empties_end = thrust::gather(rmm::exec_policy(stream), + empties_idx.begin(), + empties_idx_end, + lcv.offsets().begin(), + empties.begin()); + + auto empties_size = empties_end - empties.begin(); + return std::make_tuple(std::move(empties), std::move(empties_idx), empties_size); + }; + + auto curr_col = h_col; + std::vector nesting_levels; + std::vector def_at_level; + std::vector start_at_sub_level; + uint8_t curr_nesting_level_idx = 0; + + auto add_def_at_level = [&](column_view col) { + // Add up all def level contributions in this column all the way till the first list column + // appears in the hierarchy or until we get to leaf + uint32_t def = 0; + start_at_sub_level.push_back(curr_nesting_level_idx); + while (col.type().id() == type_id::STRUCT) { + def += (nullability[curr_nesting_level_idx]) ? 1 : 0; + col = col.child(0); + ++curr_nesting_level_idx; + } + // At the end of all those structs is either a list column or the leaf. Leaf column contributes + // at least one def level. It doesn't matter what the leaf contributes because it'll be at the + // end of the exclusive scan. + def += (nullability[curr_nesting_level_idx]) ? 2 : 1; + def_at_level.push_back(def); + ++curr_nesting_level_idx; + }; + while (cudf::is_nested(curr_col.type())) { + nesting_levels.push_back(curr_col); + add_def_at_level(curr_col); + while (curr_col.type().id() == type_id::STRUCT) { + // Go down the hierarchy until we get to the LIST or the leaf level + curr_col = curr_col.child(0); + } + if (curr_col.type().id() == type_id::LIST) { + curr_col = curr_col.child(lists_column_view::child_column_index); + if (not is_nested(curr_col.type())) { + // Special case: when the leaf data column is the immediate child of the list col then we + // want it to be included right away. Otherwise the struct containing it will be included in + // the next iteration of this loop. + nesting_levels.push_back(curr_col); + add_def_at_level(curr_col); + break; + } + } + } + + std::unique_ptr device_view_owners; + column_device_view* d_nesting_levels; + std::tie(device_view_owners, d_nesting_levels) = + contiguous_copy_column_device_views(nesting_levels, stream); + + thrust::exclusive_scan( + thrust::host, def_at_level.begin(), def_at_level.end(), def_at_level.begin()); + + // Sliced list column views only have offsets applied to top level. Get offsets for each level. 
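+ // For each nesting level l, d_column_offsets[l] / d_column_ends[l] will hold the first and
+ // one-past-last element index at that level, obtained by pushing the top-level slice offset
+ // down through the list offsets in the single-thread device lambda below.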
+ rmm::device_uvector d_column_offsets(nesting_levels.size(), stream); + rmm::device_uvector d_column_ends(nesting_levels.size(), stream); + + auto d_col = column_device_view::create(h_col, stream); + cudf::detail::device_single_thread( + [offset_at_level = d_column_offsets.data(), + end_idx_at_level = d_column_ends.data(), + col = *d_col] __device__() { + auto curr_col = col; + size_type off = curr_col.offset(); + size_type end = off + curr_col.size(); + size_type level = 0; + offset_at_level[level] = off; + end_idx_at_level[level] = end; + ++level; + // Apply offset recursively until we get to leaf data + // Skip doing the following for any structs we encounter in between. + while (curr_col.type().id() == type_id::LIST or curr_col.type().id() == type_id::STRUCT) { + if (curr_col.type().id() == type_id::LIST) { + off = curr_col.child(lists_column_view::offsets_column_index).element(off); + end = curr_col.child(lists_column_view::offsets_column_index).element(end); + offset_at_level[level] = off; + end_idx_at_level[level] = end; + ++level; + curr_col = curr_col.child(lists_column_view::child_column_index); + } else { + curr_col = curr_col.child(0); + } + } + }, + stream); + + thrust::host_vector column_offsets = + cudf::detail::make_host_vector_async(d_column_offsets, stream); + thrust::host_vector column_ends = + cudf::detail::make_host_vector_async(d_column_ends, stream); + stream.synchronize(); + + size_t max_vals_size = 0; + for (size_t l = 0; l < column_offsets.size(); ++l) { + max_vals_size += column_ends[l] - column_offsets[l]; + } + + rmm::device_uvector rep_level(max_vals_size, stream); + rmm::device_uvector def_level(max_vals_size, stream); + + rmm::device_uvector temp_rep_vals(max_vals_size, stream); + rmm::device_uvector temp_def_vals(max_vals_size, stream); + rmm::device_uvector new_offsets(0, stream); + size_type curr_rep_values_size = 0; + { + // At this point, curr_col contains the leaf column. Max nesting level is + // nesting_levels.size(). + + // We are going to start by merging the last column in nesting_levels (the leaf, which is at the + // index `nesting_levels.size() - 1`) with the second-to-last (which is at + // `nesting_levels.size() - 2`). + size_t level = nesting_levels.size() - 2; + curr_col = nesting_levels[level]; + auto lcv = lists_column_view(get_list_level(curr_col)); + auto offset_size_at_level = column_ends[level] - column_offsets[level] + 1; + + // Get empties at this level + rmm::device_uvector empties(0, stream); + rmm::device_uvector empties_idx(0, stream); + size_t empties_size; + std::tie(empties, empties_idx, empties_size) = + get_empties(nesting_levels[level], column_offsets[level], column_ends[level]); + + // Merge empty at deepest parent level with the rep, def level vals at leaf level + + auto input_parent_rep_it = thrust::make_constant_iterator(level); + auto input_parent_def_it = + thrust::make_transform_iterator(empties_idx.begin(), + def_level_fn{d_nesting_levels + level, + d_nullability.data(), + start_at_sub_level[level], + def_at_level[level]}); + + // `nesting_levels.size()` == no of list levels + leaf. 
Max repetition level = no of list levels + auto input_child_rep_it = thrust::make_constant_iterator(nesting_levels.size() - 1); + auto input_child_def_it = + thrust::make_transform_iterator(thrust::make_counting_iterator(column_offsets[level + 1]), + def_level_fn{d_nesting_levels + level + 1, + d_nullability.data(), + start_at_sub_level[level + 1], + def_at_level[level + 1]}); + + // Zip the input and output value iterators so that merge operation is done only once + auto input_parent_zip_it = + thrust::make_zip_iterator(thrust::make_tuple(input_parent_rep_it, input_parent_def_it)); + + auto input_child_zip_it = + thrust::make_zip_iterator(thrust::make_tuple(input_child_rep_it, input_child_def_it)); + + auto output_zip_it = + thrust::make_zip_iterator(thrust::make_tuple(rep_level.begin(), def_level.begin())); + + auto ends = thrust::merge_by_key(rmm::exec_policy(stream), + empties.begin(), + empties.begin() + empties_size, + thrust::make_counting_iterator(column_offsets[level + 1]), + thrust::make_counting_iterator(column_ends[level + 1]), + input_parent_zip_it, + input_child_zip_it, + thrust::make_discard_iterator(), + output_zip_it); + + curr_rep_values_size = ends.second - output_zip_it; + + // Scan to get distance by which each offset value is shifted due to the insertion of empties + auto scan_it = cudf::detail::make_counting_transform_iterator( + column_offsets[level], + [off = lcv.offsets().data(), size = lcv.offsets().size()] __device__( + auto i) -> int { return (i + 1 < size) && (off[i] == off[i + 1]); }); + rmm::device_uvector scan_out(offset_size_at_level, stream); + thrust::exclusive_scan( + rmm::exec_policy(stream), scan_it, scan_it + offset_size_at_level, scan_out.begin()); + + // Add scan output to existing offsets to get new offsets into merged rep level values + new_offsets = rmm::device_uvector(offset_size_at_level, stream); + thrust::for_each_n(rmm::exec_policy(stream), + thrust::make_counting_iterator(0), + offset_size_at_level, + [off = lcv.offsets().data() + column_offsets[level], + scan_out = scan_out.data(), + new_off = new_offsets.data()] __device__(auto i) { + new_off[i] = off[i] - off[0] + scan_out[i]; + }); + + // Set rep level values at level starts to appropriate rep level + auto scatter_it = thrust::make_constant_iterator(level); + thrust::scatter(rmm::exec_policy(stream), + scatter_it, + scatter_it + new_offsets.size() - 1, + new_offsets.begin(), + rep_level.begin()); + } + + // Having already merged the last two levels, we are now going to merge the result with the + // third-last level which is at index `nesting_levels.size() - 3`. + for (int level = nesting_levels.size() - 3; level >= 0; level--) { + curr_col = nesting_levels[level]; + auto lcv = lists_column_view(get_list_level(curr_col)); + auto offset_size_at_level = column_ends[level] - column_offsets[level] + 1; + + // Get empties at this level + rmm::device_uvector empties(0, stream); + rmm::device_uvector empties_idx(0, stream); + size_t empties_size; + std::tie(empties, empties_idx, empties_size) = + get_empties(nesting_levels[level], column_offsets[level], column_ends[level]); + + auto offset_transformer = [new_child_offsets = new_offsets.data(), + child_start = column_offsets[level + 1]] __device__(auto x) { + return new_child_offsets[x - child_start]; // (x - child's offset) + }; + + // We will be reading from old rep_levels and writing again to rep_levels. Swap the current + // rep values into temp_rep_vals so it can become the input and rep_levels can again be output. 
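+ // Ping-pong between the two buffer pairs rather than allocating per level: temp_rep_vals /
+ // temp_def_vals hold the previous iteration's merged levels (the merge input), while
+ // rep_level / def_level receive this iteration's output.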
+ std::swap(temp_rep_vals, rep_level); + std::swap(temp_def_vals, def_level); + + // Merge empty at parent level with the rep, def level vals at current level + auto transformed_empties = thrust::make_transform_iterator(empties.begin(), offset_transformer); + + auto input_parent_rep_it = thrust::make_constant_iterator(level); + auto input_parent_def_it = + thrust::make_transform_iterator(empties_idx.begin(), + def_level_fn{d_nesting_levels + level, + d_nullability.data(), + start_at_sub_level[level], + def_at_level[level]}); + + // Zip the input and output value iterators so that merge operation is done only once + auto input_parent_zip_it = + thrust::make_zip_iterator(thrust::make_tuple(input_parent_rep_it, input_parent_def_it)); + + auto input_child_zip_it = + thrust::make_zip_iterator(thrust::make_tuple(temp_rep_vals.begin(), temp_def_vals.begin())); + + auto output_zip_it = + thrust::make_zip_iterator(thrust::make_tuple(rep_level.begin(), def_level.begin())); + + auto ends = thrust::merge_by_key(rmm::exec_policy(stream), + transformed_empties, + transformed_empties + empties_size, + thrust::make_counting_iterator(0), + thrust::make_counting_iterator(curr_rep_values_size), + input_parent_zip_it, + input_child_zip_it, + thrust::make_discard_iterator(), + output_zip_it); + + curr_rep_values_size = ends.second - output_zip_it; + + // Scan to get distance by which each offset value is shifted due to the insertion of dremel + // level value fof an empty list + auto scan_it = cudf::detail::make_counting_transform_iterator( + column_offsets[level], + [off = lcv.offsets().data(), size = lcv.offsets().size()] __device__( + auto i) -> int { return (i + 1 < size) && (off[i] == off[i + 1]); }); + rmm::device_uvector scan_out(offset_size_at_level, stream); + thrust::exclusive_scan( + rmm::exec_policy(stream), scan_it, scan_it + offset_size_at_level, scan_out.begin()); + + // Add scan output to existing offsets to get new offsets into merged rep level values + rmm::device_uvector temp_new_offsets(offset_size_at_level, stream); + thrust::for_each_n(rmm::exec_policy(stream), + thrust::make_counting_iterator(0), + offset_size_at_level, + [off = lcv.offsets().data() + column_offsets[level], + scan_out = scan_out.data(), + new_off = temp_new_offsets.data(), + offset_transformer] __device__(auto i) { + new_off[i] = offset_transformer(off[i]) + scan_out[i]; + }); + new_offsets = std::move(temp_new_offsets); + + // Set rep level values at level starts to appropriate rep level + auto scatter_it = thrust::make_constant_iterator(level); + thrust::scatter(rmm::exec_policy(stream), + scatter_it, + scatter_it + new_offsets.size() - 1, + new_offsets.begin(), + rep_level.begin()); + } + + size_t level_vals_size = new_offsets.back_element(stream); + rep_level.resize(level_vals_size, stream); + def_level.resize(level_vals_size, stream); + + stream.synchronize(); + + size_type leaf_data_size = column_ends.back() - column_offsets.back(); + + return dremel_data{ + std::move(new_offsets), std::move(rep_level), std::move(def_level), leaf_data_size}; +} + +/** + * @brief Launches kernel for initializing encoder page fragments + * + * @param[in,out] frag Fragment array [column_id][fragment_id] + * @param[in] col_desc Column description array [column_id] + * @param[in] num_fragments Number of fragments per column + * @param[in] num_columns Number of columns + * @param[in] stream CUDA stream to use, default 0 + */ +void InitPageFragments(device_2dspan frag, + device_span col_desc, + uint32_t fragment_size, + uint32_t num_rows, 
+ rmm::cuda_stream_view stream) +{ + auto num_columns = frag.size().first; + auto num_fragments_per_column = frag.size().second; + dim3 dim_grid(num_columns, num_fragments_per_column); // 1 threadblock per fragment + gpuInitPageFragments<512> + <<>>(frag, col_desc, fragment_size, num_rows); +} + +/** + * @brief Launches kernel for initializing fragment statistics groups + * + * @param[out] groups Statistics groups [num_columns x num_fragments] + * @param[in] fragments Page fragments [num_columns x num_fragments] + * @param[in] col_desc Column description [num_columns] + * @param[in] stream CUDA stream to use, default 0 + */ +void InitFragmentStatistics(device_2dspan groups, + device_2dspan fragments, + device_span col_desc, + rmm::cuda_stream_view stream) +{ + int const num_columns = col_desc.size(); + int const num_fragments_per_column = fragments.size().second; + auto grid_y = util::div_rounding_up_safe(num_fragments_per_column, 128 / cudf::detail::warp_size); + dim3 dim_grid(num_columns, grid_y); // 1 warp per fragment + gpuInitFragmentStats<<>>(groups, fragments, col_desc); +} + +/** + * @brief Launches kernel for initializing encoder data pages + * + * @param[in,out] chunks Column chunks [rowgroup][column] + * @param[out] pages Encode page array (null if just counting pages) + * @param[in] col_desc Column description array [column_id] + * @param[in] num_rowgroups Number of fragments per column + * @param[in] num_columns Number of columns + * @param[out] page_grstats Setup for page-level stats + * @param[out] chunk_grstats Setup for chunk-level stats + * @param[in] max_page_comp_data_size Calculated maximum compressed data size of pages + * @param[in] stream CUDA stream to use, default 0 + */ +void InitEncoderPages(device_2dspan chunks, + device_span pages, + device_span col_desc, + int32_t num_columns, + statistics_merge_group* page_grstats, + statistics_merge_group* chunk_grstats, + size_t max_page_comp_data_size, + rmm::cuda_stream_view stream) +{ + auto num_rowgroups = chunks.size().first; + dim3 dim_grid(num_columns, num_rowgroups); // 1 threadblock per rowgroup + gpuInitPages<<>>( + chunks, pages, col_desc, page_grstats, chunk_grstats, max_page_comp_data_size, num_columns); +} + +/** + * @brief Launches kernel for packing column data into parquet pages + * + * @param[in,out] pages Device array of EncPages (unordered) + * @param[out] comp_in Optionally initializes compressor input params + * @param[out] comp_stat Optionally initializes compressor status + * @param[in] stream CUDA stream to use, default 0 + */ +void EncodePages(device_span pages, + device_span comp_in, + device_span comp_stat, + rmm::cuda_stream_view stream) +{ + auto num_pages = pages.size(); + // A page is part of one column. This is launching 1 block per page. 1 block will exclusively + // deal with one datatype. 
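+ // The grid is therefore one 128-thread block per page; each block writes only its own page's
+ // output buffer, so pages can be encoded independently of one another.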
+ gpuEncodePages<128><<>>(pages, comp_in, comp_stat); +} + +/** + * @brief Launches kernel to make the compressed vs uncompressed chunk-level decision + * + * @param[in,out] chunks Column chunks + * @param[in] stream CUDA stream to use, default 0 + */ +void DecideCompression(device_span chunks, rmm::cuda_stream_view stream) +{ + gpuDecideCompression<<>>(chunks); +} + +/** + * @brief Launches kernel to encode page headers + * + * @param[in,out] pages Device array of EncPages + * @param[in] comp_stat Compressor status or nullptr if no compression + * @param[in] page_stats Optional page-level statistics to be included in page header + * @param[in] chunk_stats Optional chunk-level statistics to be encoded + * @param[in] stream CUDA stream to use, default 0 + */ +void EncodePageHeaders(device_span pages, + device_span comp_stat, + device_span page_stats, + const statistics_chunk* chunk_stats, + rmm::cuda_stream_view stream) +{ + // TODO: single thread task. No need for 128 threads/block. Earlier it used to employ rest of the + // threads to coop load structs + gpuEncodePageHeaders<<>>( + pages, comp_stat, page_stats, chunk_stats); +} + +/** + * @brief Launches kernel to gather pages to a single contiguous block per chunk + * + * @param[in,out] chunks Column chunks + * @param[in] pages Device array of EncPages + * @param[in] stream CUDA stream to use, default 0 + */ +void GatherPages(device_span chunks, + device_span pages, + rmm::cuda_stream_view stream) +{ + gpuGatherPages<<>>(chunks, pages); +} + +} // namespace gpu +} // namespace parquet +} // namespace io +} // namespace cudf diff --git a/cuda_code/page_hdr_15.cu b/cuda_code/page_hdr_15.cu new file mode 100644 index 0000000000000000000000000000000000000000..ef496e71d964325a06252cb801e85a5d1348f67c --- /dev/null +++ b/cuda_code/page_hdr_15.cu @@ -0,0 +1,494 @@ +/* + * Copyright (c) 2018-2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include + +namespace cudf { +namespace io { +namespace parquet { +namespace gpu { +// Minimal thrift implementation for parsing page headers +// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md + +static const __device__ __constant__ uint8_t g_list2struct[16] = {0, + 1, + 2, + ST_FLD_BYTE, + ST_FLD_DOUBLE, + 5, + ST_FLD_I16, + 7, + ST_FLD_I32, + 9, + ST_FLD_I64, + ST_FLD_BINARY, + ST_FLD_STRUCT, + ST_FLD_MAP, + ST_FLD_SET, + ST_FLD_LIST}; + +struct byte_stream_s { + const uint8_t *cur; + const uint8_t *end; + const uint8_t *base; + // Parsed symbols + PageType page_type; + PageInfo page; + ColumnChunkDesc ck; +}; + +/** + * @brief Get current byte from the byte stream + * + * @param[in] bs Byte stream + * + * @return Current byte pointed to by the byte stream + */ +inline __device__ unsigned int getb(byte_stream_s *bs) +{ + return (bs->cur < bs->end) ? 
*bs->cur++ : 0; +} + +inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt) +{ + bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur)); + bs->cur += bytecnt; +} + +/** + * @brief Decode unsigned integer from a byte stream using VarInt encoding + * + * Concatenate least significant 7 bits of each byte to form a 32 bit + * integer. Most significant bit of each byte indicates if more bytes + * are to be used to form the number. + * + * @param[in] bs Byte stream + * + * @return Decoded 32 bit integer + */ +__device__ uint32_t get_u32(byte_stream_s *bs) +{ + uint32_t v = 0, l = 0, c; + do { + c = getb(bs); + v |= (c & 0x7f) << l; + l += 7; + } while (c & 0x80); + return v; +} + +/** + * @brief Decode signed integer from a byte stream using zigzag encoding + * + * The number n encountered in a byte stream translates to + * -1^(n%2) * ceil(n/2), with the exception of 0 which remains the same. + * i.e. 0, 1, 2, 3, 4, 5 etc convert to 0, -1, 1, -2, 2 respectively. + * + * @param[in] bs Byte stream + * + * @return Decoded 32 bit integer + */ +inline __device__ int32_t get_i32(byte_stream_s *bs) +{ + uint32_t u = get_u32(bs); + return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); +} + +__device__ void skip_struct_field(byte_stream_s *bs, int field_type) +{ + int struct_depth = 0; + int rep_cnt = 0; + + do { + if (rep_cnt != 0) { + rep_cnt--; + } else if (struct_depth != 0) { + unsigned int c; + do { + c = getb(bs); + if (!c) --struct_depth; + } while (!c && struct_depth); + if (!struct_depth) break; + field_type = c & 0xf; + if (!(c & 0xf0)) get_i32(bs); + } + switch (field_type) { + case ST_FLD_TRUE: + case ST_FLD_FALSE: break; + case ST_FLD_I16: + case ST_FLD_I32: + case ST_FLD_I64: get_u32(bs); break; + case ST_FLD_BYTE: skip_bytes(bs, 1); break; + case ST_FLD_DOUBLE: skip_bytes(bs, 8); break; + case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break; + case ST_FLD_LIST: + case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled + auto const c = getb(bs); + int n = c >> 4; + if (n == 0xf) n = get_u32(bs); + field_type = g_list2struct[c & 0xf]; + if (field_type == ST_FLD_STRUCT) + struct_depth += n; + else + rep_cnt = n; + } break; + case ST_FLD_STRUCT: struct_depth++; break; + } + } while (rep_cnt || struct_depth); +} + +/** + * @brief Functor to set value to 32 bit integer read from byte stream + * + * @return True if field type is not int32 + */ +struct ParquetFieldInt32 { + int field; + int32_t &val; + + __device__ ParquetFieldInt32(int f, int32_t &v) : field(f), val(v) {} + + inline __device__ bool operator()(byte_stream_s *bs, int field_type) + { + val = get_i32(bs); + return (field_type != ST_FLD_I32); + } +}; + +/** + * @brief Functor to set value to enum read from byte stream + * + * @return True if field type is not int32 + */ +template +struct ParquetFieldEnum { + int field; + Enum &val; + + __device__ ParquetFieldEnum(int f, Enum &v) : field(f), val(v) {} + + inline __device__ bool operator()(byte_stream_s *bs, int field_type) + { + val = static_cast(get_i32(bs)); + return (field_type != ST_FLD_I32); + } +}; + +/** + * @brief Functor to run operator on byte stream + * + * @return True if field type is not struct type or if the calling operator + * fails + */ +template +struct ParquetFieldStruct { + int field; + Operator op; + + __device__ ParquetFieldStruct(int f) : field(f) {} + + inline __device__ bool operator()(byte_stream_s *bs, int field_type) + { + return ((field_type != ST_FLD_STRUCT) || !op(bs)); + } +}; + +/** + * @brief Functor to run an operator + * + * 
The purpose of this functor is to replace a switch case. If the field in + * the argument is equal to the field specified in any element of the tuple + * of operators then it is run with the byte stream and field type arguments. + * + * If the field does not match any of the functors then skip_struct_field is + * called over the byte stream. + * + * @return Return value of the selected operator or false if no operator + * matched the field value + */ +template +struct FunctionSwitchImpl { + template + static inline __device__ bool run(byte_stream_s *bs, + int field_type, + const int &field, + thrust::tuple &ops) + { + if (field == thrust::get(ops).field) { + return thrust::get(ops)(bs, field_type); + } else { + return FunctionSwitchImpl::run(bs, field_type, field, ops); + } + } +}; + +template <> +struct FunctionSwitchImpl<0> { + template + static inline __device__ bool run(byte_stream_s *bs, + int field_type, + const int &field, + thrust::tuple &ops) + { + if (field == thrust::get<0>(ops).field) { + return thrust::get<0>(ops)(bs, field_type); + } else { + skip_struct_field(bs, field_type); + return false; + } + } +}; + +/** + * @brief Function to parse page header based on the tuple of functors provided + * + * Bytes are read from the byte stream and the field delta and field type are + * matched up against user supplied reading functors. If they match then the + * corresponding values are written to references pointed to by the functors. + * + * @return Returns false if an unexpected field is encountered while reading + * byte stream. Otherwise true is returned. + */ +template +inline __device__ bool parse_header(thrust::tuple &op, byte_stream_s *bs) +{ + constexpr int index = thrust::tuple_size>::value - 1; + int field = 0; + while (true) { + auto const current_byte = getb(bs); + if (!current_byte) break; + int const field_delta = current_byte >> 4; + int const field_type = current_byte & 0xf; + field = field_delta ? 
field + field_delta : get_i32(bs); + bool exit_function = FunctionSwitchImpl::run(bs, field_type, field, op); + if (exit_function) { return false; } + } + return true; +} + +struct gpuParseDataPageHeader { + __device__ bool operator()(byte_stream_s *bs) + { + auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values), + ParquetFieldEnum(2, bs->page.encoding), + ParquetFieldEnum(3, bs->page.definition_level_encoding), + ParquetFieldEnum(4, bs->page.repetition_level_encoding)); + return parse_header(op, bs); + } +}; + +struct gpuParseDictionaryPageHeader { + __device__ bool operator()(byte_stream_s *bs) + { + auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values), + ParquetFieldEnum(2, bs->page.encoding)); + return parse_header(op, bs); + } +}; + +struct gpuParseDataPageHeaderV2 { + __device__ bool operator()(byte_stream_s *bs) + { + auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values), + ParquetFieldInt32(3, bs->page.num_rows), + ParquetFieldEnum(4, bs->page.encoding), + ParquetFieldEnum(5, bs->page.definition_level_encoding), + ParquetFieldEnum(6, bs->page.repetition_level_encoding)); + return parse_header(op, bs); + } +}; + +struct gpuParsePageHeader { + __device__ bool operator()(byte_stream_s *bs) + { + auto op = thrust::make_tuple(ParquetFieldEnum(1, bs->page_type), + ParquetFieldInt32(2, bs->page.uncompressed_page_size), + ParquetFieldInt32(3, bs->page.compressed_page_size), + ParquetFieldStruct(5), + ParquetFieldStruct(7), + ParquetFieldStruct(8)); + return parse_header(op, bs); + } +}; + +/** + * @brief Kernel for outputting page headers from the specified column chunks + * + * @param[in] chunks List of column chunks + * @param[in] num_chunks Number of column chunks + */ +// blockDim {128,1,1} +extern "C" __global__ void __launch_bounds__(128) + gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks) +{ + gpuParsePageHeader parse_page_header; + __shared__ byte_stream_s bs_g[4]; + + int lane_id = threadIdx.x % 32; + int chunk = (blockIdx.x * 4) + (threadIdx.x / 32); + byte_stream_s *const bs = &bs_g[threadIdx.x / 32]; + + if (chunk < num_chunks and lane_id == 0) bs->ck = chunks[chunk]; + __syncthreads(); + + if (chunk < num_chunks) { + size_t num_values, values_found; + uint32_t data_page_count = 0; + uint32_t dictionary_page_count = 0; + int32_t max_num_pages; + int32_t num_dict_pages = bs->ck.num_dict_pages; + PageInfo *page_info; + + if (!lane_id) { + bs->base = bs->cur = bs->ck.compressed_data; + bs->end = bs->base + bs->ck.compressed_size; + bs->page.chunk_idx = chunk; + bs->page.src_col_schema = bs->ck.src_col_schema; + // this computation is only valid for flat schemas. for nested schemas, + // they will be recomputed in the preprocess step by examining repetition and + // definition levels + bs->page.chunk_row = 0; + bs->page.num_rows = 0; + } + num_values = bs->ck.num_values; + page_info = bs->ck.page_info; + num_dict_pages = bs->ck.num_dict_pages; + max_num_pages = (page_info) ? bs->ck.max_num_pages : 0; + values_found = 0; + SYNCWARP(); + while (values_found < num_values && bs->cur < bs->end) { + int index_out = -1; + + if (lane_id == 0) { + // this computation is only valid for flat schemas. 
for nested schemas, + // they will be recomputed in the preprocess step by examining repetition and + // definition levels + bs->page.chunk_row += bs->page.num_rows; + bs->page.num_rows = 0; + if (parse_page_header(bs) && bs->page.compressed_page_size >= 0) { + switch (bs->page_type) { + case PageType::DATA_PAGE: + // this computation is only valid for flat schemas. for nested schemas, + // they will be recomputed in the preprocess step by examining repetition and + // definition levels + bs->page.num_rows = bs->page.num_input_values; + case PageType::DATA_PAGE_V2: + index_out = num_dict_pages + data_page_count; + data_page_count++; + bs->page.flags = 0; + values_found += bs->page.num_input_values; + break; + case PageType::DICTIONARY_PAGE: + index_out = dictionary_page_count; + dictionary_page_count++; + bs->page.flags = PAGEINFO_FLAGS_DICTIONARY; + break; + default: index_out = -1; break; + } + bs->page.page_data = const_cast(bs->cur); + bs->cur += bs->page.compressed_page_size; + } else { + bs->cur = bs->end; + } + } + index_out = SHFL0(index_out); + if (index_out >= 0 && index_out < max_num_pages && lane_id == 0) + page_info[index_out] = bs->page; + num_values = SHFL0(num_values); + SYNCWARP(); + } + if (lane_id == 0) { + chunks[chunk].num_data_pages = data_page_count; + chunks[chunk].num_dict_pages = dictionary_page_count; + } + } +} + +/** + * @brief Kernel for building dictionary index for the specified column chunks + * + * This function builds an index to point to each dictionary entry + * (string format is 4-byte little-endian string length followed by character + * data). The index is a 32-bit integer which contains the offset of each string + * relative to the beginning of the dictionary page data. + * + * @param[in] chunks List of column chunks + * @param[in] num_chunks Number of column chunks + */ +// blockDim {128,1,1} +extern "C" __global__ void __launch_bounds__(128) + gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks) +{ + __shared__ ColumnChunkDesc chunk_g[4]; + + int lane_id = threadIdx.x % 32; + int chunk = (blockIdx.x * 4) + (threadIdx.x / 32); + ColumnChunkDesc *const ck = &chunk_g[threadIdx.x / 32]; + if (chunk < num_chunks and lane_id == 0) *ck = chunks[chunk]; + __syncthreads(); + + if (chunk >= num_chunks) { return; } + if (!lane_id && ck->num_dict_pages > 0 && ck->str_dict_index) { + // Data type to describe a string + nvstrdesc_s *dict_index = ck->str_dict_index; + const uint8_t *dict = ck->page_info[0].page_data; + int dict_size = ck->page_info[0].uncompressed_page_size; + int num_entries = ck->page_info[0].num_input_values; + int pos = 0, cur = 0; + for (int i = 0; i < num_entries; i++) { + int len = 0; + if (cur + 4 <= dict_size) { + len = dict[cur + 0] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24); + if (len >= 0 && cur + 4 + len <= dict_size) { + pos = cur; + cur = cur + 4 + len; + } else { + cur = dict_size; + } + } + // TODO: Could store 8 entries in shared mem, then do a single warp-wide store + dict_index[i].ptr = reinterpret_cast(dict + pos + 4); + dict_index[i].count = len; + } + } +} + +void __host__ DecodePageHeaders(ColumnChunkDesc *chunks, + int32_t num_chunks, + rmm::cuda_stream_view stream) +{ + dim3 dim_block(128, 1); + dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block + gpuDecodePageHeaders<<>>(chunks, num_chunks); +} + +void __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks, + int32_t num_chunks, + rmm::cuda_stream_view stream) +{ + dim3 dim_block(128, 
1); + dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block + gpuBuildStringDictionaryIndex<<>>(chunks, num_chunks); +} + +} // namespace gpu +} // namespace parquet +} // namespace io +} // namespace cudf diff --git a/cuda_code/pairwise_hist_one_byte_5bit.cu b/cuda_code/pairwise_hist_one_byte_5bit.cu new file mode 100644 index 0000000000000000000000000000000000000000..77241c2c551eb88c50cdd2ae9ddc02bf2805aaec --- /dev/null +++ b/cuda_code/pairwise_hist_one_byte_5bit.cu @@ -0,0 +1,399 @@ +#include "pairwise_hist.cuh" +#include "split_properties_helpers.cuh" +#include "compute_pair_hist_loop.cuh" +#include +#include +#include +#include +#include + + +using namespace cooperative_groups; + +namespace NKernel { + + template + struct TFiveBitPairwiseHistUnrollTrait { + + static constexpr int InnerUnroll() { + #if __CUDA_ARCH__ <= 350 + return 4; + #elif __CUDA_ARCH__ < 700 + return 2; + #else + return IsFullPass ? 4 : 8; + #endif + } + + static constexpr int OuterUnroll() { + #if __CUDA_ARCH__ <= 350 + return 2; + #elif __CUDA_ARCH__ < 700 + return 2; + #else + return 1; + #endif + } + }; + + template + struct TFiveBitHistogram { + float* Histogram; + + __forceinline__ __device__ int SliceOffset() { + const int warpOffset = 1024 * (threadIdx.x / 32); + //2 blocks if INNER_HIST_BITS_COUNT = 0, else 1 + // x4 feature and x4 histograms, though histStart = blockIdx * 16 + return warpOffset + (threadIdx.x & 16); + } + + + __forceinline__ __device__ TFiveBitHistogram(float* buff) { + Histogram = buff; + for (int i = threadIdx.x; i < BlockSize * 32; i += BlockSize) { + Histogram[i] = 0; + } + Histogram += SliceOffset(); + __syncthreads(); + } + + __forceinline__ __device__ void AddPair(const ui32 ci1, + const ui32 ci2, + const float w) { + thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block()); + + const bool flag = threadIdx.x & 1; + + const int shift = 4 * (threadIdx.x & 6); + const ui32 bins1 = RotateRight(flag ? ci2 : ci1, shift); + const ui32 bins2 = RotateRight(flag ? ci1 : ci2, shift); + + #pragma unroll + for (int i = 0; i < 4; i++) { + const int f = (threadIdx.x + 2 * i) & 6; + int bin1 = (bins1 >> (24 - 8 * i)) & 255; + int bin2 = (bins2 >> (24 - 8 * i)) & 255; + + + const float w1 = (!NeedLastBinMask || bin1 < 32) ? w : 0; + const float w2 = (!NeedLastBinMask || bin2 < 32) ? w : 0; + + const int tmp = ((bin1 >= bin2) == flag ? 
0 : 8) + f; + + int offset1 = tmp + ((bin1 & 31) << 5) + flag; + int offset2 = tmp + ((bin2 & 31) << 5) + !flag; + + + groupTile.sync(); + + if (groupTile.thread_rank() < 8) { + Histogram[offset1] += w1; + } + + groupTile.sync(); + + if (groupTile.thread_rank() >= 8) { + Histogram[offset1] += w1; + } + + groupTile.sync(); + + if (groupTile.thread_rank() < 8) { + Histogram[offset2] += w2; + } + + groupTile.sync(); + + if (groupTile.thread_rank() >= 8) { + Histogram[offset2] += w2; + } + } + } + + + #if __CUDA_ARCH__ < 700 + template + __forceinline__ __device__ void AddPairs(const ui32* ci1, + const ui32* ci2, + const float* w) { + #pragma unroll + for (int k = 0; k < N; ++k) { + AddPair(ci1[k], ci2[k], w[k]); + } + } + #else + template + __forceinline__ __device__ void AddPairs(const ui32* ci1, + const ui32* ci2, + const float* w) { + thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block()); + + const bool flag = threadIdx.x & 1; + const int shift = 4 * (threadIdx.x & 6); + + ui32 bins1[N]; + ui32 bins2[N]; + + #pragma unroll + for (int i = 0; i < 4; i++) { + const int f = (threadIdx.x + 2 * i) & 6; + + int bin1[N]; + int bin2[N]; + + float w1[N]; + float w2[N]; + + int offset1[N]; + int offset2[N]; + + #pragma unroll + for (int k = 0; k < N;++k) { + if (i == 0) { + bins1[k] = RotateRight(flag ? ci2[k] : ci1[k], shift); + bins2[k] = RotateRight(flag ? ci1[k] : ci2[k], shift); + } + bin1[k] = (bins1[k] >> (24 - 8 * i)) & 255; + bin2[k] = (bins2[k] >> (24 - 8 * i)) & 255; + + w1[k] = (!NeedLastBinMask || bin1[k] < 32) ? w[k] : 0; + w2[k] = (!NeedLastBinMask || bin2[k] < 32) ? w[k] : 0; + + const int tmp = ((bin1[k] >= bin2[k]) == flag ? 0 : 8) + f; + offset1[k] = tmp + ((bin1[k] & 31) * 32) + flag; + offset2[k] = tmp + ((bin2[k] & 31) * 32) + !flag; + } + + + groupTile.sync(); + + if (groupTile.thread_rank() < 8) { + #pragma unroll + for (int k = 0; k < N; ++k) { + Histogram[offset1[k]] += w1[k]; + } + } + + groupTile.sync(); + + if (groupTile.thread_rank() >= 8) { + #pragma unroll + for (int k = 0; k < N; ++k) { + Histogram[offset1[k]] += w1[k]; + } + } + + groupTile.sync(); + + if (groupTile.thread_rank() < 8) { + #pragma unroll + for (int k = 0; k < N; ++k) { + Histogram[offset2[k]] += w2[k]; + } + } + + groupTile.sync(); + + if (groupTile.thread_rank() >= 8) { + #pragma unroll + for (int k = 0; k < N; ++k) { + Histogram[offset2[k]] += w2[k]; + } + } + } + } + #endif + + __forceinline__ __device__ void Reduce() { + Histogram -= SliceOffset(); + __syncthreads(); + + { + const int warpHistSize = 1024; + + for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) { + float sum = 0; + + #pragma unroll 12 + for (int i = start; i < 32 * BlockSize; i += warpHistSize) { + sum += Histogram[i]; + } + + Histogram[warpHistSize + start] = sum; + } + } + __syncthreads(); + + const int maxFoldCount = 32; + const int fold = (threadIdx.x >> 1) & 31; + const int f = threadIdx.x / 64; + + + if (threadIdx.x < 256) { + float weightLeq = 0; + float weightGe = 0; + const bool isSecondBin = (threadIdx.x & 1); + + if (fold < maxFoldCount) { + const volatile float* __restrict__ src = Histogram + + 1024 //warpHistSize + + 32 * fold + + 2 * f + + isSecondBin; + + weightLeq = src[0] + src[16]; + weightGe = src[8] + src[24]; + + Histogram[4 * (maxFoldCount * f + fold) + isSecondBin] = weightLeq; + Histogram[4 * (maxFoldCount * f + fold) + 2 + isSecondBin] = weightGe; + } + } + + __syncthreads(); + } + }; + + + template + #if __CUDA_ARCH__ <= 350 + __launch_bounds__(BlockSize, 1) + #elif 
__CUDA_ARCH__ < 700 + __launch_bounds__(BlockSize, 2) + #endif + __global__ void ComputeSplitPropertiesNonBinaryPairs(const TCFeature* feature, int fCount, const ui32* cindex, + const uint2* pairs, const float* weight, + const TDataPartition* partition, + int histLineSize, + float* histogram) { + + const int featureOffset = (blockIdx.x / M) * 4; + feature += featureOffset; + cindex += feature->Offset; + fCount = min(fCount - featureOffset, 4); + + + __shared__ float localHist[32 * BlockSize]; + + const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &localHist[0]); + + if (maxBinCount > 32) { + return; + } + __syncthreads(); + + + if (IsFullPass) { + partition += blockIdx.y; + histogram += blockIdx.y * histLineSize * 4ULL; + } else { + const int depth = (int)log2((float)gridDim.y); + int partId = GetPairwisePartIdToCalculate(partition); + partition += partId; + histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4ULL; + } + + if (partition->Size == 0) { + return; + } + + + constexpr int histBlockCount = 1; + constexpr int innerUnroll = TFiveBitPairwiseHistUnrollTrait::InnerUnroll(); + constexpr int outerUnroll = TFiveBitPairwiseHistUnrollTrait::OuterUnroll(); + + #define DECLARE_PASS(NEED_MASK) \ + { \ + using THist = TFiveBitHistogram;\ + ComputePairHistogram< BlockSize, histBlockCount, innerUnroll, outerUnroll, M, THist>(partition->Offset, cindex, partition->Size, pairs, weight, &localHist[0]);\ + } + if (maxBinCount < 32) { + DECLARE_PASS(false); + } else { + DECLARE_PASS(true); + } + #undef DECLARE_PASS + + if (threadIdx.x < 256) { + const int histId = threadIdx.x & 3; + const int binId = (threadIdx.x >> 2) & 15; + const int fid = (threadIdx.x >> 6) & 3; + const int maxFoldCount = 1 << 5; + + if (fid < fCount) { + const ui32 bfStart = feature[fid].FirstFoldIndex; + histogram += 4 * bfStart; + + for (int fold = binId; fold < feature[fid].Folds; fold += 16) { + const int readOffset = 4 * (maxFoldCount * fid + fold) + histId; + if (M > 1) { + atomicAdd(histogram + 4 * fold + histId, localHist[readOffset]); + } else { + histogram[4 * fold + histId] += localHist[readOffset]; + } + } + } + } + } + + + + void ComputePairwiseHistogramOneByte5Bits(const TCFeature* features, + const ui32 featureCount, + const ui32 fiveBitsFeatureCount, + const ui32* compressedIndex, + const uint2* pairs, ui32 pairCount, + const float* weight, + const TDataPartition* partition, + ui32 partCount, + ui32 histLineSize, + bool fullPass, + float* histogram, + TCudaStream stream) { + + if (fiveBitsFeatureCount > 0) { + const int blockSize = 384; + dim3 numBlocks; + numBlocks.x = (fiveBitsFeatureCount+ 3) / 4; + numBlocks.y = fullPass ? partCount : partCount / 4; + numBlocks.z = fullPass ? 
1 : 3; + const ui32 blockPerFeatureMultiplier = EstimateBlockPerFeatureMultiplier(numBlocks, pairCount, 64); + numBlocks.x = (featureCount + 3) / 4; + numBlocks.x *= blockPerFeatureMultiplier; + + + + #define NB_HIST(IS_FULL, BLOCKS_PER_FEATURE) \ + ComputeSplitPropertiesNonBinaryPairs < blockSize, IS_FULL, BLOCKS_PER_FEATURE > << >>(\ + features, featureCount, compressedIndex, pairs,\ + weight, partition, histLineSize, histogram); + + #define DISPATCH(BLOCKS_PER_FEATURE) \ + if (fullPass) { \ + NB_HIST(true, BLOCKS_PER_FEATURE) \ + } else { \ + NB_HIST(false, BLOCKS_PER_FEATURE)\ + } + + + if (blockPerFeatureMultiplier == 1) { + DISPATCH(1); + } else if (blockPerFeatureMultiplier == 2) { + DISPATCH(2); + } else if (blockPerFeatureMultiplier == 4) { + DISPATCH(4); + } else if (blockPerFeatureMultiplier == 8) { + DISPATCH(8); + } else if (blockPerFeatureMultiplier == 16) { + DISPATCH(16); + } else if (blockPerFeatureMultiplier == 32) { + DISPATCH(32); + } else if (blockPerFeatureMultiplier == 64) { + DISPATCH(64); + } else { + exit(0); + } + #undef NB_HIST + #undef DISPATCH + } + } +} diff --git a/cuda_code/par_ilu_kernels_5.cu b/cuda_code/par_ilu_kernels_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..6f212b9d75cb1b5bc1a98e7896dde971a5330490 --- /dev/null +++ b/cuda_code/par_ilu_kernels_5.cu @@ -0,0 +1,277 @@ +/************************************************************* +Copyright (c) 2017-2019, the Ginkgo authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*************************************************************/ + +#include "core/factorization/par_ilu_kernels.hpp" + + +#include +#include +#include + + +#include "cuda/base/math.hpp" +#include "cuda/base/types.hpp" +#include "cuda/components/prefix_sum.cuh" + + +namespace gko { +namespace kernels { +namespace cuda { +/** + * @brief The parallel ilu factorization namespace. 
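+ *
+ * Illustrative call sequence for the kernels below (a sketch only: allocation
+ * of csr_l/csr_u and the COO copy of the system matrix are assumed to be done
+ * by the caller, and `system_matrix_coo` is a placeholder name for that copy):
+ * @code
+ * initialize_row_ptrs_l_u(exec, system_matrix, l_row_ptrs, u_row_ptrs);
+ * initialize_l_u(exec, system_matrix, csr_l, csr_u);
+ * compute_l_u_factors(exec, iterations, system_matrix_coo, csr_l, csr_u);
+ * @endcode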
+ * + * @ingroup factor + */ +namespace par_ilu_factorization { + + +constexpr int default_block_size{512}; + + +namespace kernel { + + +template +__global__ __launch_bounds__(default_block_size) void count_nnz_per_l_u_row( + size_type num_rows, const IndexType *__restrict__ row_ptrs, + const IndexType *__restrict__ col_idxs, + const ValueType *__restrict__ values, IndexType *__restrict__ l_nnz_row, + IndexType *__restrict__ u_nnz_row) +{ + const auto row = blockDim.x * blockIdx.x + threadIdx.x; + if (row < num_rows) { + IndexType l_row_nnz{}; + IndexType u_row_nnz{}; + for (auto idx = row_ptrs[row]; idx < row_ptrs[row + 1]; ++idx) { + auto col = col_idxs[idx]; + l_row_nnz += (col <= row); + u_row_nnz += (row <= col); + } + l_nnz_row[row] = l_row_nnz; + u_nnz_row[row] = u_row_nnz; + } +} + + +} // namespace kernel + + +template +void initialize_row_ptrs_l_u( + std::shared_ptr exec, + const matrix::Csr *system_matrix, + IndexType *l_row_ptrs, IndexType *u_row_ptrs) +{ + const size_type num_rows{system_matrix->get_size()[0]}; + const size_type num_row_ptrs{num_rows + 1}; + + const dim3 block_size{default_block_size, 1, 1}; + const uint32 number_blocks = + ceildiv(num_rows, static_cast(block_size.x)); + const dim3 grid_dim{number_blocks, 1, 1}; + + kernel::count_nnz_per_l_u_row<<>>( + num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()), + as_cuda_type(system_matrix->get_const_col_idxs()), + as_cuda_type(system_matrix->get_const_values()), + as_cuda_type(l_row_ptrs), as_cuda_type(u_row_ptrs)); + + Array block_sum(exec, grid_dim.x); + auto block_sum_ptr = block_sum.get_data(); + + start_prefix_sum<<>>( + num_row_ptrs, as_cuda_type(l_row_ptrs), as_cuda_type(block_sum_ptr)); + finalize_prefix_sum<<>>( + num_row_ptrs, as_cuda_type(l_row_ptrs), as_cuda_type(block_sum_ptr)); + + start_prefix_sum<<>>( + num_row_ptrs, as_cuda_type(u_row_ptrs), as_cuda_type(block_sum_ptr)); + finalize_prefix_sum<<>>( + num_row_ptrs, as_cuda_type(u_row_ptrs), as_cuda_type(block_sum_ptr)); +} + +GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( + GKO_DECLARE_PAR_ILU_INITIALIZE_ROW_PTRS_L_U_KERNEL); + + +namespace kernel { + + +template +__global__ __launch_bounds__(default_block_size) void initialize_l_u( + size_type num_rows, const IndexType *__restrict__ row_ptrs, + const IndexType *__restrict__ col_idxs, + const ValueType *__restrict__ values, + const IndexType *__restrict__ l_row_ptrs, + IndexType *__restrict__ l_col_idxs, ValueType *__restrict__ l_values, + const IndexType *__restrict__ u_row_ptrs, + IndexType *__restrict__ u_col_idxs, ValueType *__restrict__ u_values) +{ + const auto row = blockDim.x * blockIdx.x + threadIdx.x; + if (row < num_rows) { + auto l_idx = l_row_ptrs[row]; + auto u_idx = u_row_ptrs[row]; + for (size_type i = row_ptrs[row]; i < row_ptrs[row + 1]; ++i) { + const auto col = col_idxs[i]; + const auto val = values[i]; + if (col <= row) { + l_col_idxs[l_idx] = col; + l_values[l_idx] = (col == row ? 
one() : val); + ++l_idx; + } + if (row <= col) { + u_col_idxs[u_idx] = col; + u_values[u_idx] = val; + ++u_idx; + } + } + } +} + + +} // namespace kernel + + +template +void initialize_l_u(std::shared_ptr exec, + const matrix::Csr *system_matrix, + matrix::Csr *csr_l, + matrix::Csr *csr_u) +{ + const size_type num_rows{system_matrix->get_size()[0]}; + const dim3 block_size{default_block_size, 1, 1}; + const dim3 grid_dim{static_cast(ceildiv( + num_rows, static_cast(block_size.x))), + 1, 1}; + + kernel::initialize_l_u<<>>( + num_rows, as_cuda_type(system_matrix->get_const_row_ptrs()), + as_cuda_type(system_matrix->get_const_col_idxs()), + as_cuda_type(system_matrix->get_const_values()), + as_cuda_type(csr_l->get_const_row_ptrs()), + as_cuda_type(csr_l->get_col_idxs()), as_cuda_type(csr_l->get_values()), + as_cuda_type(csr_u->get_const_row_ptrs()), + as_cuda_type(csr_u->get_col_idxs()), as_cuda_type(csr_u->get_values())); +} + +GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( + GKO_DECLARE_PAR_ILU_INITIALIZE_L_U_KERNEL); + + +namespace kernel { + + +template +__global__ __launch_bounds__(default_block_size) void compute_l_u_factors( + size_type num_elements, const IndexType *__restrict__ row_idxs, + const IndexType *__restrict__ col_idxs, + const ValueType *__restrict__ values, + const IndexType *__restrict__ l_row_ptrs, + const IndexType *__restrict__ l_col_idxs, ValueType *__restrict__ l_values, + const IndexType *__restrict__ u_row_ptrs, + const IndexType *__restrict__ u_col_idxs, ValueType *__restrict__ u_values) +{ + const auto elem_id = blockDim.x * blockIdx.x + threadIdx.x; + if (elem_id < num_elements) { + const auto row = row_idxs[elem_id]; + const auto col = col_idxs[elem_id]; + const auto val = values[elem_id]; + auto l_idx = l_row_ptrs[row]; + auto u_idx = u_row_ptrs[col]; + ValueType sum{val}; + ValueType last_operation{}; + while (l_idx < l_row_ptrs[row + 1] && u_idx < u_row_ptrs[col + 1]) { + const auto l_col = l_col_idxs[l_idx]; + const auto u_col = u_col_idxs[u_idx]; + last_operation = zero(); + if (l_col == u_col) { + last_operation = l_values[l_idx] * u_values[u_idx]; + sum -= last_operation; + } + l_idx += (l_col <= u_col); + u_idx += (u_col <= l_col); + } + sum += last_operation; // undo the last operation + if (row > col) { + auto to_write = sum / u_values[u_row_ptrs[col + 1] - 1]; + if (::gko::isfinite(to_write)) { + l_values[l_idx - 1] = to_write; + } + } else { + auto to_write = sum; + if (::gko::isfinite(to_write)) { + u_values[u_idx - 1] = to_write; + } + } + } +} + + +} // namespace kernel + + +template +void compute_l_u_factors(std::shared_ptr exec, + size_type iterations, + const matrix::Coo *system_matrix, + matrix::Csr *l_factor, + matrix::Csr *u_factor) +{ + iterations = (iterations == 0) ? 
10 : iterations; + const auto num_elements = system_matrix->get_num_stored_elements(); + const dim3 block_size{default_block_size, 1, 1}; + const dim3 grid_dim{ + static_cast( + ceildiv(num_elements, static_cast(block_size.x))), + 1, 1}; + for (size_type i = 0; i < iterations; ++i) { + kernel::compute_l_u_factors<<>>( + num_elements, as_cuda_type(system_matrix->get_const_row_idxs()), + as_cuda_type(system_matrix->get_const_col_idxs()), + as_cuda_type(system_matrix->get_const_values()), + as_cuda_type(l_factor->get_const_row_ptrs()), + as_cuda_type(l_factor->get_const_col_idxs()), + as_cuda_type(l_factor->get_values()), + as_cuda_type(u_factor->get_const_row_ptrs()), + as_cuda_type(u_factor->get_const_col_idxs()), + as_cuda_type(u_factor->get_values())); + } +} + +GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( + GKO_DECLARE_PAR_ILU_COMPUTE_L_U_FACTORS_KERNEL); + + +} // namespace par_ilu_factorization +} // namespace cuda +} // namespace kernels +} // namespace gko diff --git a/cuda_code/parallel_4.cu b/cuda_code/parallel_4.cu new file mode 100644 index 0000000000000000000000000000000000000000..5cc207fdaa369161f252ad48d8d320c4c2058ffc --- /dev/null +++ b/cuda_code/parallel_4.cu @@ -0,0 +1,82 @@ +#include +#include +#include + +#define TILE_WIDTH 16 + +__global__ void _gpu_m_add(int *a, int *b, int *c, int rows, int columns) +{ + int i = TILE_WIDTH * blockIdx.y + threadIdx.y; + int j = TILE_WIDTH * blockIdx.x + threadIdx.x; + + if (i < rows && j < columns) + c[i * columns + j] = a[i * columns + j] + b[i * columns + j]; +} + + +int m_add(int *a, int *b, int *c, int rows, int columns) +{ + int *da, + *db, + *dc, + size = rows * columns * sizeof(int); + + cudaMalloc((void **)&da, size); + cudaMalloc((void **)&db, size); + cudaMalloc((void **)&dc, size); + + cudaMemcpy(da, a, size, cudaMemcpyHostToDevice); + cudaMemcpy(db, b, size, cudaMemcpyHostToDevice); + + dim3 dimGrid(ceil((float)columns / TILE_WIDTH), + ceil((float)rows / TILE_WIDTH), + 1); + dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); + _gpu_m_add<<>>(da, db, dc, rows, columns); + + cudaMemcpy(c, dc, size, cudaMemcpyDeviceToHost); + cudaFree(da); cudaFree(db); cudaFree(dc); + + return 0; +} + + +int main() +{ + int *A, *B, *C; + int i, j; + + //Input + int linhas, colunas; + + scanf("%d", &linhas); + scanf("%d", &colunas); + + //Alocando memória na CPU + A = (int *)malloc(sizeof(int)*linhas*colunas); + B = (int *)malloc(sizeof(int)*linhas*colunas); + C = (int *)malloc(sizeof(int)*linhas*colunas); + + //Inicializar + for(i = 0; i < linhas; i++){ + for(j = 0; j < colunas; j++){ + A[i*colunas+j] = B[i*colunas+j] = i+j; + } + } + + m_add(A, B, C, linhas, colunas); + + long long int somador=0; + //Manter esta computação na CPU + for(i = 0; i < linhas; i++){ + for(j = 0; j < colunas; j++){ + somador+=C[i*colunas+j]; + } + } + + printf("%lli\n", somador); + + free(A); + free(B); + free(C); +} diff --git a/cuda_code/parsing_utils_5.cu b/cuda_code/parsing_utils_5.cu new file mode 100644 index 0000000000000000000000000000000000000000..f14f8093a26650942c460326ea2e68878f5ef27e --- /dev/null +++ b/cuda_code/parsing_utils_5.cu @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2019, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file parsing_utils.cu Utility functions for parsing plain-text files + * + */ + + +#include "parsing_utils.cuh" + +#include + +#include +#include + +#include "rmm/rmm.h" +#include "rmm/thrust_rmm_allocator.h" +#include "utilities/error_utils.hpp" + +// When processing the input in chunks, this is the maximum size of each chunk. +// Only one chunk is loaded on the GPU at a time, so this value is chosen to +// be small enough to fit on the GPU in most cases. +constexpr size_t max_chunk_bytes = 256*1024*1024; // 256MB + +constexpr int bytes_per_find_thread = 64; + +template +struct rmm_deleter { + void operator()(T *ptr) { RMM_FREE(ptr, 0); } +}; +template +using device_ptr = std::unique_ptr>; + +/**---------------------------------------------------------------------------* + * @brief Sets the specified element of the array to the passed value + *---------------------------------------------------------------------------**/ +template +__device__ __forceinline__ +void setElement(T* array, gdf_size_type idx, const T& t, const V& v){ + array[idx] = t; +} + +/**---------------------------------------------------------------------------* + * @brief Sets the specified element of the array of pairs using the two passed + * parameters. + *---------------------------------------------------------------------------**/ +template +__device__ __forceinline__ +void setElement(thrust::pair* array, gdf_size_type idx, const T& t, const V& v) { + array[idx] = {t, v}; +} + +/**---------------------------------------------------------------------------* + * @brief Overloads the setElement() functions for void* arrays. + * Does not do anything, indexing is not allowed with void* arrays. + *---------------------------------------------------------------------------**/ +template +__device__ __forceinline__ +void setElement(void* array, gdf_size_type idx, const T& t, const V& v) { +} + +/**---------------------------------------------------------------------------* + * @brief CUDA kernel that finds all occurrences of a character in the given + * character array. If the 'positions' parameter is not void*, + * positions of all occurrences are stored in the output array. + * + * @param[in] data Pointer to the input character array + * @param[in] size Number of bytes in the input array + * @param[in] offset Offset to add to the output positions + * @param[in] key Character to find in the array + * @param[in,out] count Pointer to the number of found occurrences + * @param[out] positions Array containing the output positions + * + * @return void + *---------------------------------------------------------------------------**/ +template + __global__ + void countAndSetPositions(char *data, uint64_t size, uint64_t offset, const char key, gdf_size_type* count, + T* positions) { + + // thread IDs range per block, so also need the block id + const uint64_t tid = threadIdx.x + (blockDim.x * blockIdx.x); + const uint64_t did = tid * bytes_per_find_thread; + + const char *raw = (data + did); + + const long byteToProcess = ((did + bytes_per_find_thread) < size) ? 
+ bytes_per_find_thread : + (size - did); + + // Process the data + for (long i = 0; i < byteToProcess; i++) { + if (raw[i] == key) { + const auto idx = atomicAdd(count, (gdf_size_type)1); + setElement(positions, idx, did + offset + i, key); + } + } +} + +/**---------------------------------------------------------------------------* + * @brief Searches the input character array for each of characters in a set. + * Sums up the number of occurrences. If the 'positions' parameter is not void*, + * positions of all occurrences are stored in the output device array. + * + * Does not load the entire file into the GPU memory at any time, so it can + * be used to parse large files. Output array needs to be preallocated. + * + * @param[in] h_data Pointer to the input character array + * @param[in] h_size Number of bytes in the input array + * @param[in] keys Vector containing the keys to count in the buffer + * @param[in] result_offset Offset to add to the output positions + * @param[out] positions Array containing the output positions + * + * @return gdf_size_type total number of occurrences + *---------------------------------------------------------------------------**/ +template +gdf_size_type findAllFromSet(const char *h_data, size_t h_size, const std::vector& keys, uint64_t result_offset, + T *positions) { + + char* d_chunk = nullptr; + RMM_TRY(RMM_ALLOC (&d_chunk, min(max_chunk_bytes, h_size), 0)); + device_ptr chunk_deleter(d_chunk); + + gdf_size_type* d_count; + RMM_TRY(RMM_ALLOC((void**)&d_count, sizeof(gdf_size_type), 0) ); + device_ptr count_deleter(d_count); + CUDA_TRY(cudaMemsetAsync(d_count, 0ull, sizeof(gdf_size_type))); + + int blockSize; // suggested thread count to use + int minGridSize; // minimum block count required + CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, countAndSetPositions) ); + + const size_t chunk_count = (h_size + max_chunk_bytes - 1) / max_chunk_bytes; + for (size_t ci = 0; ci < chunk_count; ++ci) { + const auto chunk_offset = ci * max_chunk_bytes; + const auto h_chunk = h_data + chunk_offset; + const auto chunk_bytes = std::min((size_t)(h_size - ci * max_chunk_bytes), max_chunk_bytes); + const auto chunk_bits = (chunk_bytes + bytes_per_find_thread - 1) / bytes_per_find_thread; + const int gridSize = (chunk_bits + blockSize - 1) / blockSize; + + // Copy chunk to device + CUDA_TRY(cudaMemcpyAsync(d_chunk, h_chunk, chunk_bytes, cudaMemcpyDefault)); + + for (char key: keys) { + countAndSetPositions <<< gridSize, blockSize >>> ( + d_chunk, chunk_bytes, chunk_offset + result_offset, key, + d_count, positions); + } + } + + gdf_size_type h_count = 0; + CUDA_TRY(cudaMemcpy(&h_count, d_count, sizeof(gdf_size_type), cudaMemcpyDefault)); + return h_count; +} + +/**---------------------------------------------------------------------------* + * @brief Searches the input character array for each of characters in a set + * and sums up the number of occurrences. + * + * Does not load the entire buffer into the GPU memory at any time, so it can + * be used with buffers of any size. 
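+ *
+ * Illustrative usage (a sketch: `csv_data` and `csv_size` are placeholder
+ * names, and the key vector element type is assumed to be char, matching the
+ * per-byte comparison in the kernel):
+ * @code
+ * std::vector<char> keys{'\n'};
+ * gdf_size_type line_count = countAllFromSet(csv_data, csv_size, keys);
+ * @endcode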
+ * + * @param[in] h_data Pointer to the data in host memory + * @param[in] h_size Size of the input data, in bytes + * @param[in] keys Vector containing the keys to count in the buffer + * + * @return gdf_size_type total number of occurrences + *---------------------------------------------------------------------------**/ +gdf_size_type countAllFromSet(const char *h_data, size_t h_size, const std::vector& keys) { + return findAllFromSet(h_data, h_size, keys, 0, nullptr); + } + +template gdf_size_type findAllFromSet(const char *h_data, size_t h_size, const std::vector& keys, uint64_t result_offset, + uint64_t *positions); + +template gdf_size_type findAllFromSet>(const char *h_data, size_t h_size, const std::vector& keys, uint64_t result_offset, + thrust::pair *positions); diff --git a/cuda_code/partitioning_2.cu b/cuda_code/partitioning_2.cu new file mode 100644 index 0000000000000000000000000000000000000000..69c727556877034634ea897582c56e76f8fb26fa --- /dev/null +++ b/cuda_code/partitioning_2.cu @@ -0,0 +1,795 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace cudf { +namespace experimental { +namespace { +// Launch configuration for optimized hash partition +constexpr size_type OPTIMIZED_BLOCK_SIZE = 512; +constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8; +constexpr size_type ELEMENTS_PER_THREAD = 2; +constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024; + +// Launch configuration for fallback hash partition +constexpr size_type FALLBACK_BLOCK_SIZE = 256; +constexpr size_type FALLBACK_ROWS_PER_THREAD = 1; + +/** + * @brief Functor to map a hash value to a particular 'bin' or partition number + * that uses the modulo operation. + */ +template +class modulo_partitioner { + public: + modulo_partitioner(size_type num_partitions) : divisor{num_partitions} {} + + __device__ size_type operator()(hash_value_t hash_value) const { return hash_value % divisor; } + + private: + const size_type divisor; +}; + +template +bool is_power_two(T number) +{ + return (0 == (number & (number - 1))); +} + +/** + * @brief Functor to map a hash value to a particular 'bin' or partition number + * that uses a bitwise mask. Only works when num_partitions is a power of 2. 
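+ * For example, a hash value of 37 with num_partitions = 8 lands in
+ * partition 5, since 37 % 8 == 5 and, equivalently, 37 & 7 == 5.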
+ * + * For n % d, if d is a power of two, then it can be computed more efficiently + * via a single bitwise AND as: n & (d - 1) + */ +template +class bitwise_partitioner { + public: + bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)} + { + assert(is_power_two(num_partitions)); + } + + __device__ size_type operator()(hash_value_t hash_value) const + { + return hash_value & mask; // hash_value & (num_partitions - 1) + } + + private: + const size_type mask; +}; + +/* --------------------------------------------------------------------------*/ +/** + * @brief Computes which partition each row of a device_table will belong to + based on hashing each row, and applying a partition function to the hash value. + Records the size of each partition for each thread block as well as the + global size of each partition across all thread blocks. + * + * @param[in] the_table The table whose rows will be partitioned + * @param[in] num_rows The number of rows in the table + * @param[in] num_partitions The number of partitions to divide the rows into + * @param[in] the_partitioner The functor that maps a rows hash value to a + partition number + * @param[out] row_partition_numbers Array that holds which partition each row + belongs to + * @param[out] row_partition_offset Array that holds the offset of each row in + its partition of + * the thread block + * @param[out] block_partition_sizes Array that holds the size of each partition + for each block, + * i.e., { {block0 partition0 size, block1 partition0 size, ...}, + {block0 partition1 size, block1 partition1 size, ...}, + ... + {block0 partition(num_partitions-1) size, block1 + partition(num_partitions -1) size, ...} } + * @param[out] global_partition_sizes The number of rows in each partition. 
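+ *
+ * The layout of block_partition_sizes is partition-major: the size of
+ * partition p computed by block b is written at index p * gridDim.x + b.
+ * For example, with 3 partitions and 2 blocks, the size of partition 1
+ * computed by block 0 is stored at index 2.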
+ */ +/* ----------------------------------------------------------------------------*/ +template +__global__ void compute_row_partition_numbers(row_hasher_t the_hasher, + const size_type num_rows, + const size_type num_partitions, + const partitioner_type the_partitioner, + size_type* __restrict__ row_partition_numbers, + size_type* __restrict__ row_partition_offset, + size_type* __restrict__ block_partition_sizes, + size_type* __restrict__ global_partition_sizes) +{ + // Accumulate histogram of the size of each partition in shared memory + extern __shared__ size_type shared_partition_sizes[]; + + size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; + + // Initialize local histogram + size_type partition_number = threadIdx.x; + while (partition_number < num_partitions) { + shared_partition_sizes[partition_number] = 0; + partition_number += blockDim.x; + } + + __syncthreads(); + + // Compute the hash value for each row, store it to the array of hash values + // and compute the partition to which the hash value belongs and increment + // the shared memory counter for that partition + while (row_number < num_rows) { + const hash_value_type row_hash_value = the_hasher(row_number); + + const size_type partition_number = the_partitioner(row_hash_value); + + row_partition_numbers[row_number] = partition_number; + + row_partition_offset[row_number] = + atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1)); + + row_number += blockDim.x * gridDim.x; + } + + __syncthreads(); + + // Flush shared memory histogram to global memory + partition_number = threadIdx.x; + while (partition_number < num_partitions) { + const size_type block_partition_size = shared_partition_sizes[partition_number]; + + // Update global size of each partition + atomicAdd(&global_partition_sizes[partition_number], block_partition_size); + + // Record the size of this partition in this block + const size_type write_location = partition_number * gridDim.x + blockIdx.x; + block_partition_sizes[write_location] = block_partition_size; + partition_number += blockDim.x; + } +} + +/* --------------------------------------------------------------------------*/ +/** + * @brief Given an array of partition numbers, computes the final output + location for each element in the output such that all rows with the same + partition are contiguous in memory. + * + * @param row_partition_numbers The array that records the partition number for + each row + * @param num_rows The number of rows + * @param num_partitions THe number of partitions + * @param[out] block_partition_offsets Array that holds the offset of each + partition for each thread block, + * i.e., { {block0 partition0 offset, block1 partition0 offset, ...}, + {block0 partition1 offset, block1 partition1 offset, ...}, + ... 
+ {block0 partition(num_partitions-1) offset, block1 + partition(num_partitions -1) offset, ...} } + */ +/* ----------------------------------------------------------------------------*/ +__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers, + const size_type num_rows, + const size_type num_partitions, + size_type* __restrict__ block_partition_offsets) +{ + // Shared array that holds the offset of this blocks partitions in + // global memory + extern __shared__ size_type shared_partition_offsets[]; + + // Initialize array of this blocks offsets from global array + size_type partition_number = threadIdx.x; + while (partition_number < num_partitions) { + shared_partition_offsets[partition_number] = + block_partition_offsets[partition_number * gridDim.x + blockIdx.x]; + partition_number += blockDim.x; + } + __syncthreads(); + + size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; + + // Get each row's partition number, and get it's output location by + // incrementing block's offset counter for that partition number + // and store the row's output location in-place + while (row_number < num_rows) { + // Get partition number of this row + const size_type partition_number = row_partition_numbers[row_number]; + + // Get output location based on partition number by incrementing the + // corresponding partition offset for this block + const size_type row_output_location = + atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1)); + + // Store the row's output location in-place + row_partition_numbers[row_number] = row_output_location; + + row_number += blockDim.x * gridDim.x; + } +} + +/* --------------------------------------------------------------------------*/ +/** + * @brief Move one column from the input table to the hashed table. + * + * @param[in] input_buf Data buffer of the column in the input table + * @param[out] output_buf Preallocated data buffer of the column in the output + * table + * @param[in] num_rows The number of rows in each column + * @param[in] num_partitions The number of partitions to divide the rows into + * @param[in] row_partition_numbers Array that holds which partition each row + * belongs to + * @param[in] row_partition_offset Array that holds the offset of each row in + * its partition of the thread block. 
+ * @param[in] block_partition_sizes Array that holds the size of each partition + * for each block + * @param[in] scanned_block_partition_sizes The scan of block_partition_sizes + */ +/* ----------------------------------------------------------------------------*/ +template +__global__ void copy_block_partitions(InputIter input_iter, + DataType* __restrict__ output_buf, + const size_type num_rows, + const size_type num_partitions, + size_type const* __restrict__ row_partition_numbers, + size_type const* __restrict__ row_partition_offset, + size_type const* __restrict__ block_partition_sizes, + size_type const* __restrict__ scanned_block_partition_sizes) +{ + extern __shared__ char shared_memory[]; + auto block_output = reinterpret_cast(shared_memory); + auto partition_offset_shared = + reinterpret_cast(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD); + auto partition_offset_global = + reinterpret_cast(partition_offset_shared + num_partitions + 1); + + typedef cub::BlockScan BlockScan; + __shared__ typename BlockScan::TempStorage temp_storage; + + // use ELEMENTS_PER_THREAD=2 to support upto 1024 partitions + size_type temp_histo[ELEMENTS_PER_THREAD]; + + for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) { + if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) { + temp_histo[i] = + block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x]; + } else { + temp_histo[i] = 0; + } + } + + __syncthreads(); + + BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo); + + __syncthreads(); + + if (threadIdx.x == 0) { partition_offset_shared[0] = 0; } + + // Calculate the offset in shared memory of each partition in this thread + // block + for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) { + if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) { + partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i]; + } + } + + // Fetch the offset in the output buffer of each partition in this thread + // block + for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) { + partition_offset_global[ipartition] = + scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x]; + } + + __syncthreads(); + + // Fetch the input data to shared memory + for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows; + row_number += blockDim.x * gridDim.x) { + size_type const ipartition = row_partition_numbers[row_number]; + + block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] = + input_iter[row_number]; + } + + __syncthreads(); + + // Copy data from shared memory to output using 32 threads for each partition + constexpr int nthreads_partition = 32; + static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0, + "BLOCK_SIZE must be divisible by number of threads"); + + for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions; + ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) { + size_type const nelements_partition = + partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition]; + + for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition; + row_offset += nthreads_partition) { + output_buf[partition_offset_global[ipartition] + row_offset] = + block_output[partition_offset_shared[ipartition] + row_offset]; + } + } +} + +template +void copy_block_partitions_impl(InputIter const input, + OutputIter output, + size_type num_rows, + size_type 
num_partitions, + size_type const* row_partition_numbers, + size_type const* row_partition_offset, + size_type const* block_partition_sizes, + size_type const* scanned_block_partition_sizes, + size_type grid_size, + cudaStream_t stream) +{ + // We need 3 chunks of shared memory: + // 1. BLOCK_SIZE * ROWS_PER_THREAD elements of size_type for copying to output + // 2. num_partitions + 1 elements of size_type for per-block partition offsets + // 3. num_partitions + 1 elements of size_type for global partition offsets + int const smem = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output) + + (num_partitions + 1) * sizeof(size_type) * 2; + + copy_block_partitions<<>>( + input, + output, + num_rows, + num_partitions, + row_partition_numbers, + row_partition_offset, + block_partition_sizes, + scanned_block_partition_sizes); +} + +rmm::device_vector compute_gather_map(size_type num_rows, + size_type num_partitions, + size_type const* row_partition_numbers, + size_type const* row_partition_offset, + size_type const* block_partition_sizes, + size_type const* scanned_block_partition_sizes, + size_type grid_size, + cudaStream_t stream) +{ + auto sequence = thrust::make_counting_iterator(0); + rmm::device_vector gather_map(num_rows); + + copy_block_partitions_impl(sequence, + gather_map.data().get(), + num_rows, + num_partitions, + row_partition_numbers, + row_partition_offset, + block_partition_sizes, + scanned_block_partition_sizes, + grid_size, + stream); + + return gather_map; +} + +struct copy_block_partitions_dispatcher { + template ()>* = nullptr> + std::unique_ptr operator()(column_view const& input, + const size_type num_partitions, + size_type const* row_partition_numbers, + size_type const* row_partition_offset, + size_type const* block_partition_sizes, + size_type const* scanned_block_partition_sizes, + size_type grid_size, + rmm::mr::device_memory_resource* mr, + cudaStream_t stream) + { + rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr); + + copy_block_partitions_impl(input.data(), + static_cast(output.data()), + input.size(), + num_partitions, + row_partition_numbers, + row_partition_offset, + block_partition_sizes, + scanned_block_partition_sizes, + grid_size, + stream); + + return std::make_unique(input.type(), input.size(), std::move(output)); + } + + template ()>* = nullptr> + std::unique_ptr operator()(column_view const& input, + const size_type num_partitions, + size_type const* row_partition_numbers, + size_type const* row_partition_offset, + size_type const* block_partition_sizes, + size_type const* scanned_block_partition_sizes, + size_type grid_size, + rmm::mr::device_memory_resource* mr, + cudaStream_t stream) + { + // Use move_to_output_buffer to create an equivalent gather map + auto gather_map = compute_gather_map(input.size(), + num_partitions, + row_partition_numbers, + row_partition_offset, + block_partition_sizes, + scanned_block_partition_sizes, + grid_size, + stream); + + // Use gather instead for non-fixed width types + return experimental::type_dispatcher(input.type(), + experimental::detail::column_gatherer{}, + input, + gather_map.begin(), + gather_map.end(), + false, + mr, + stream); + } +}; + +// NOTE hash_has_nulls must be true if table_to_hash has nulls +template +std::pair, std::vector> hash_partition_table( + table_view const& input, + table_view const& table_to_hash, + size_type num_partitions, + rmm::mr::device_memory_resource* mr, + cudaStream_t stream) +{ + auto const num_rows = table_to_hash.num_rows(); + + bool 
const use_optimization{num_partitions <= THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL}; + auto const block_size = use_optimization ? OPTIMIZED_BLOCK_SIZE : FALLBACK_BLOCK_SIZE; + auto const rows_per_thread = + use_optimization ? OPTIMIZED_ROWS_PER_THREAD : FALLBACK_ROWS_PER_THREAD; + auto const rows_per_block = block_size * rows_per_thread; + + // NOTE grid_size is non-const to workaround lambda capture bug in gcc 5.4 + auto grid_size = util::div_rounding_up_safe(num_rows, rows_per_block); + + // Allocate array to hold which partition each row belongs to + auto row_partition_numbers = rmm::device_vector(num_rows); + + // Array to hold the size of each partition computed by each block + // i.e., { {block0 partition0 size, block1 partition0 size, ...}, + // {block0 partition1 size, block1 partition1 size, ...}, + // ... + // {block0 partition(num_partitions-1) size, block1 + // partition(num_partitions -1) size, ...} } + auto block_partition_sizes = rmm::device_vector(grid_size * num_partitions); + + auto scanned_block_partition_sizes = rmm::device_vector(grid_size * num_partitions); + + // Holds the total number of rows in each partition + auto global_partition_sizes = rmm::device_vector(num_partitions, size_type{0}); + + auto row_partition_offset = rmm::device_vector(num_rows); + + auto const device_input = table_device_view::create(table_to_hash, stream); + auto const hasher = experimental::row_hasher(*device_input); + + // If the number of partitions is a power of two, we can compute the partition + // number of each row more efficiently with bitwise operations + if (is_power_two(num_partitions)) { + // Determines how the mapping between hash value and partition number is + // computed + using partitioner_type = bitwise_partitioner; + + // Computes which partition each row belongs to by hashing the row and + // performing a partitioning operator on the hash value. Also computes the + // number of rows in each partition both for each thread block as well as + // across all blocks + compute_row_partition_numbers<<>>(hasher, + num_rows, + num_partitions, + partitioner_type(num_partitions), + row_partition_numbers.data().get(), + row_partition_offset.data().get(), + block_partition_sizes.data().get(), + global_partition_sizes.data().get()); + } else { + // Determines how the mapping between hash value and partition number is + // computed + using partitioner_type = modulo_partitioner; + + // Computes which partition each row belongs to by hashing the row and + // performing a partitioning operator on the hash value. Also computes the + // number of rows in each partition both for each thread block as well as + // across all blocks + compute_row_partition_numbers<<>>(hasher, + num_rows, + num_partitions, + partitioner_type(num_partitions), + row_partition_numbers.data().get(), + row_partition_offset.data().get(), + block_partition_sizes.data().get(), + global_partition_sizes.data().get()); + } + + // Compute exclusive scan of all blocks' partition sizes in-place to determine + // the starting point for each blocks portion of each partition in the output + thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream), + block_partition_sizes.begin(), + block_partition_sizes.end(), + scanned_block_partition_sizes.data().get()); + + // Compute exclusive scan of size of each partition to determine offset + // location of each partition in final output. 
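+ // For example, if global_partition_sizes is {3, 1, 2}, the exclusive scan
+ // produces partition offsets {0, 3, 4} in the final output.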
+ // TODO This can be done independently on a separate stream + size_type* scanned_global_partition_sizes{global_partition_sizes.data().get()}; + thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream), + global_partition_sizes.begin(), + global_partition_sizes.end(), + scanned_global_partition_sizes); + + // Copy the result of the exlusive scan to the output offsets array + // to indicate the starting point for each partition in the output + std::vector partition_offsets(num_partitions); + CUDA_TRY(cudaMemcpyAsync(partition_offsets.data(), + scanned_global_partition_sizes, + num_partitions * sizeof(size_type), + cudaMemcpyDeviceToHost, + stream)); + + // When the number of partitions is less than a threshold, we can apply an + // optimization using shared memory to copy values to the output buffer. + // Otherwise, fallback to using scatter. + if (use_optimization) { + std::vector> output_cols(input.num_columns()); + + // NOTE these pointers are non-const to workaround lambda capture bug in + // gcc 5.4 + auto row_partition_numbers_ptr{row_partition_numbers.data().get()}; + auto row_partition_offset_ptr{row_partition_offset.data().get()}; + auto block_partition_sizes_ptr{block_partition_sizes.data().get()}; + auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data().get()}; + + // Copy input to output by partition per column + std::transform(input.begin(), input.end(), output_cols.begin(), [=](auto const& col) { + return cudf::experimental::type_dispatcher(col.type(), + copy_block_partitions_dispatcher{}, + col, + num_partitions, + row_partition_numbers_ptr, + row_partition_offset_ptr, + block_partition_sizes_ptr, + scanned_block_partition_sizes_ptr, + grid_size, + mr, + stream); + }); + + if (has_nulls(input)) { + // Use copy_block_partitions to compute a gather map + auto gather_map = compute_gather_map(num_rows, + num_partitions, + row_partition_numbers_ptr, + row_partition_offset_ptr, + block_partition_sizes_ptr, + scanned_block_partition_sizes_ptr, + grid_size, + stream); + + // Handle bitmask using gather to take advantage of ballot_sync + experimental::detail::gather_bitmask(input, + gather_map.begin(), + output_cols, + experimental::detail::gather_bitmask_op::DONT_CHECK, + mr, + stream); + } + + auto output{std::make_unique(std::move(output_cols))}; + return std::make_pair(std::move(output), std::move(partition_offsets)); + } else { + // Compute a scatter map from input to output such that the output rows are + // sorted by partition number + auto row_output_locations{row_partition_numbers.data().get()}; + auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data().get()}; + compute_row_output_locations<<>>( + row_output_locations, num_rows, num_partitions, scanned_block_partition_sizes_ptr); + + // Use the resulting scatter map to materialize the output + auto output = experimental::detail::scatter( + input, row_partition_numbers.begin(), row_partition_numbers.end(), input, false, mr, stream); + + return std::make_pair(std::move(output), std::move(partition_offsets)); + } +} + +struct dispatch_map_type { + /** + * @brief Partitions the table `t` according to the `partition_map`. + * + * Algorithm: + * - Compute the histogram of the size each partition + * - Compute the exclusive scan of the histogram to get the offset for each + * partition in the final partitioned output + * - Use a transform iterator to materialize the scatter map of the rows from + * `t` into the final output. 
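+ *
+ * Illustrative example: for partition_map = {1, 0, 1, 2} and num_partitions = 3,
+ * the histogram (with its extra trailing entry) is {1, 2, 1, 0}, the exclusive
+ * scan yields offsets {0, 1, 3, 4}, and the materialized scatter map is
+ * {1, 0, 2, 3}, which places the rows of partition 0, then 1, then 2
+ * contiguously in the scattered output.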
+ * + * @note JH: It would likely be more efficient to avoid the atomic increments + * in the transform iterator. It would probably be faster to compute a + * per-thread block histogram and compute an exclusive scan of all of the + * per-block histograms (like in hash partition). But I'm purposefully trying + * to reduce memory pressure by avoiding intermediate materializations. Plus, + * atomics resolve in L2 and should be pretty fast since all the offsets will + * fit in L2. + * + */ + template + std::enable_if_t::value and not is_boolean(), + std::pair, std::vector>> + operator()(table_view const& t, + column_view const& partition_map, + size_type num_partitions, + rmm::mr::device_memory_resource* mr, + cudaStream_t stream) const + { + // Build a histogram of the number of rows in each partition + rmm::device_vector histogram(num_partitions + 1); + std::size_t temp_storage_bytes{}; + std::size_t const num_levels = num_partitions + 1; + size_type const lower_level = 0; + size_type const upper_level = num_partitions; + cub::DeviceHistogram::HistogramEven(nullptr, + temp_storage_bytes, + partition_map.begin(), + histogram.data().get(), + num_levels, + lower_level, + upper_level, + partition_map.size(), + stream); + + rmm::device_buffer temp_storage(temp_storage_bytes, stream); + + cub::DeviceHistogram::HistogramEven(temp_storage.data(), + temp_storage_bytes, + partition_map.begin(), + histogram.data().get(), + num_levels, + lower_level, + upper_level, + partition_map.size(), + stream); + + // `histogram` was created with an extra entry at the end such that an + // exclusive scan will put the total number of rows at the end + thrust::exclusive_scan( + rmm::exec_policy()->on(stream), histogram.begin(), histogram.end(), histogram.begin()); + + // Copy offsets to host + std::vector partition_offsets(histogram.size()); + thrust::copy(histogram.begin(), histogram.end(), partition_offsets.begin()); + + // Unfortunately need to materialize the scatter map because + // `detail::scatter` requires multiple passes through the iterator + rmm::device_vector scatter_map(partition_map.size()); + + // For each `partition_map[i]`, atomically increment the corresponding + // partition offset to determine `i`s location in the output + thrust::transform(rmm::exec_policy(stream)->on(stream), + partition_map.begin(), + partition_map.end(), + scatter_map.begin(), + [offsets = histogram.data().get()] __device__(auto partition_number) { + return atomicAdd(&offsets[partition_number], 1); + }); + + // Scatter the rows into their partitions + auto scattered = + cudf::experimental::detail::scatter(t, scatter_map.begin(), scatter_map.end(), t); + + return std::make_pair(std::move(scattered), std::move(partition_offsets)); + } + + template + std::enable_if_t::value or is_boolean(), + std::pair, std::vector>> + operator()(table_view const& t, + column_view const& partition_map, + size_type num_partitions, + rmm::mr::device_memory_resource* mr, + cudaStream_t stream) const + { + CUDF_FAIL("Unexpected, non-integral partition map."); + } +}; +} // namespace + +namespace detail { +std::pair, std::vector> hash_partition( + table_view const& input, + std::vector const& columns_to_hash, + int num_partitions, + rmm::mr::device_memory_resource* mr, + cudaStream_t stream = 0) +{ + auto table_to_hash = input.select(columns_to_hash); + + // Return empty result if there are no partitions or nothing to hash + if (num_partitions <= 0 || input.num_rows() == 0 || table_to_hash.num_columns() == 0) { + return 
std::make_pair(experimental::empty_like(input), std::vector{}); + } + + if (has_nulls(table_to_hash)) { + return hash_partition_table(input, table_to_hash, num_partitions, mr, stream); + } else { + return hash_partition_table(input, table_to_hash, num_partitions, mr, stream); + } +} + +std::pair, std::vector> partition( + table_view const& t, + column_view const& partition_map, + size_type num_partitions, + rmm::mr::device_memory_resource* mr, + cudaStream_t stream = 0) +{ + CUDF_EXPECTS(t.num_rows() == partition_map.size(), + "Size mismatch between table and partition map."); + CUDF_EXPECTS(not partition_map.has_nulls(), "Unexpected null values in partition_map."); + + if (num_partitions == 0 or t.num_rows() == 0) { + return std::make_pair(empty_like(t), std::vector{}); + } + + return cudf::experimental::type_dispatcher( + partition_map.type(), dispatch_map_type{}, t, partition_map, num_partitions, mr, stream); +} +} // namespace detail + +// Partition based on hash values +std::pair, std::vector> hash_partition( + table_view const& input, + std::vector const& columns_to_hash, + int num_partitions, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::hash_partition(input, columns_to_hash, num_partitions, mr); +} + +// Partition based on an explicit partition map +std::pair, std::vector> partition( + table_view const& t, + column_view const& partition_map, + size_type num_partitions, + rmm::mr::device_memory_resource* mr) +{ + CUDF_FUNC_RANGE(); + return detail::partition(t, partition_map, num_partitions, mr); +} + +} // namespace experimental +} // namespace cudf diff --git a/cuda_code/partitioning_3.cu b/cuda_code/partitioning_3.cu new file mode 100644 index 0000000000000000000000000000000000000000..e2cf53408350ebe0a3ecdfecd3595c657684f278 --- /dev/null +++ b/cuda_code/partitioning_3.cu @@ -0,0 +1,812 @@ +/* + * Copyright (c) 2020, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace cudf { +namespace { +// Launch configuration for optimized hash partition +constexpr size_type OPTIMIZED_BLOCK_SIZE = 512; +constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8; +constexpr size_type ELEMENTS_PER_THREAD = 2; +constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024; + +// Launch configuration for fallback hash partition +constexpr size_type FALLBACK_BLOCK_SIZE = 256; +constexpr size_type FALLBACK_ROWS_PER_THREAD = 1; + +/** + * @brief Functor to map a hash value to a particular 'bin' or partition number + * that uses the modulo operation. 
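+ * For example, with num_partitions = 3 a hash value of 10 maps to
+ * partition 10 % 3 == 1.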
+ */
+template <typename hash_value_t>
+class modulo_partitioner {
+ public:
+  modulo_partitioner(size_type num_partitions) : divisor{num_partitions} {}
+
+  __device__ size_type operator()(hash_value_t hash_value) const { return hash_value % divisor; }
+
+ private:
+  const size_type divisor;
+};
+
+template <typename T>
+bool is_power_two(T number)
+{
+  return (0 == (number & (number - 1)));
+}
+
+/**
+ * @brief Functor to map a hash value to a particular 'bin' or partition number
+ * that uses a bitwise mask. Only works when num_partitions is a power of 2.
+ *
+ * For n % d, if d is a power of two, then it can be computed more efficiently
+ * via a single bitwise AND as: n & (d - 1)
+ */
+template <typename hash_value_t>
+class bitwise_partitioner {
+ public:
+  bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)}
+  {
+    assert(is_power_two(num_partitions));
+  }
+
+  __device__ size_type operator()(hash_value_t hash_value) const
+  {
+    return hash_value & mask;  // hash_value & (num_partitions - 1)
+  }
+
+ private:
+  const size_type mask;
+};
+
+/* --------------------------------------------------------------------------*/
+/**
+ * @brief Computes which partition each row of a device_table will belong to
+ * based on hashing each row, and applying a partition function to the hash
+ * value. Records the size of each partition for each thread block as well as
+ * the global size of each partition across all thread blocks.
+ *
+ * @param[in] the_table The table whose rows will be partitioned
+ * @param[in] num_rows The number of rows in the table
+ * @param[in] num_partitions The number of partitions to divide the rows into
+ * @param[in] the_partitioner The functor that maps a row's hash value to a
+ * partition number
+ * @param[out] row_partition_numbers Array that holds which partition each row
+ * belongs to
+ * @param[out] row_partition_offset Array that holds the offset of each row in
+ * its partition of the thread block
+ * @param[out] block_partition_sizes Array that holds the size of each partition
+ * for each block, i.e.,
+ *   { {block0 partition0 size, block1 partition0 size, ...},
+ *     {block0 partition1 size, block1 partition1 size, ...},
+ *     ...
+ *     {block0 partition(num_partitions-1) size, block1 partition(num_partitions-1) size, ...} }
+ * @param[out] global_partition_sizes The number of rows in each partition.
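+ *
+ * @note The launch is expected to supply num_partitions * sizeof(size_type)
+ * bytes of dynamic shared memory for the per-block histogram, and
+ * global_partition_sizes is assumed to be zero-initialized beforehand, since
+ * this kernel only ever updates it with atomicAdd. Illustrative launch sketch
+ * (grid/block names are placeholders, not the configuration used elsewhere in
+ * this file):
+ *
+ * @code
+ * compute_row_partition_numbers<<<grid_size, block_size,
+ *                                 num_partitions * sizeof(size_type), stream>>>(
+ *   hasher, num_rows, num_partitions, partitioner,
+ *   row_partition_numbers, row_partition_offset,
+ *   block_partition_sizes, global_partition_sizes);
+ * @endcode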
+ */
+/* ----------------------------------------------------------------------------*/
+template <class row_hasher_t, typename partitioner_type>
+__global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
+                                              const size_type num_rows,
+                                              const size_type num_partitions,
+                                              const partitioner_type the_partitioner,
+                                              size_type* __restrict__ row_partition_numbers,
+                                              size_type* __restrict__ row_partition_offset,
+                                              size_type* __restrict__ block_partition_sizes,
+                                              size_type* __restrict__ global_partition_sizes)
+{
+  // Accumulate histogram of the size of each partition in shared memory
+  extern __shared__ size_type shared_partition_sizes[];
+
+  size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
+
+  // Initialize local histogram
+  size_type partition_number = threadIdx.x;
+  while (partition_number < num_partitions) {
+    shared_partition_sizes[partition_number] = 0;
+    partition_number += blockDim.x;
+  }
+
+  __syncthreads();
+
+  // Compute the hash value for each row, store it to the array of hash values
+  // and compute the partition to which the hash value belongs and increment
+  // the shared memory counter for that partition
+  while (row_number < num_rows) {
+    const hash_value_type row_hash_value = the_hasher(row_number);
+
+    const size_type partition_number = the_partitioner(row_hash_value);
+
+    row_partition_numbers[row_number] = partition_number;
+
+    row_partition_offset[row_number] =
+      atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
+
+    row_number += blockDim.x * gridDim.x;
+  }
+
+  __syncthreads();
+
+  // Flush shared memory histogram to global memory
+  partition_number = threadIdx.x;
+  while (partition_number < num_partitions) {
+    const size_type block_partition_size = shared_partition_sizes[partition_number];
+
+    // Update global size of each partition
+    atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
+
+    // Record the size of this partition in this block
+    const size_type write_location = partition_number * gridDim.x + blockIdx.x;
+    block_partition_sizes[write_location] = block_partition_size;
+    partition_number += blockDim.x;
+  }
+}
+
+/* --------------------------------------------------------------------------*/
+/**
+ * @brief Given an array of partition numbers, computes the final output
+ * location for each element in the output such that all rows with the same
+ * partition are contiguous in memory.
+ *
+ * @param row_partition_numbers The array that records the partition number for
+ * each row
+ * @param num_rows The number of rows
+ * @param num_partitions The number of partitions
+ * @param[out] block_partition_offsets Array that holds the offset of each
+ * partition for each thread block, i.e.,
+ *   { {block0 partition0 offset, block1 partition0 offset, ...},
+ *     {block0 partition1 offset, block1 partition1 offset, ...},
+ *     ...
+ *     {block0 partition(num_partitions-1) offset, block1 partition(num_partitions-1) offset, ...} }
+ */
+/* ----------------------------------------------------------------------------*/
+__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers,
+                                             const size_type num_rows,
+                                             const size_type num_partitions,
+                                             size_type* __restrict__ block_partition_offsets)
+{
+  // Shared array that holds the offset of this block's partitions in
+  // global memory
+  extern __shared__ size_type shared_partition_offsets[];
+
+  // Initialize the array of this block's offsets from the global array
+  size_type partition_number = threadIdx.x;
+  while (partition_number < num_partitions) {
+    shared_partition_offsets[partition_number] =
+      block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
+    partition_number += blockDim.x;
+  }
+  __syncthreads();
+
+  size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
+
+  // Get each row's partition number, compute its output location by
+  // incrementing the block's offset counter for that partition number,
+  // and store the row's output location in-place
+  while (row_number < num_rows) {
+    // Get partition number of this row
+    const size_type partition_number = row_partition_numbers[row_number];
+
+    // Get output location based on partition number by incrementing the
+    // corresponding partition offset for this block
+    const size_type row_output_location =
+      atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
+
+    // Store the row's output location in-place
+    row_partition_numbers[row_number] = row_output_location;
+
+    row_number += blockDim.x * gridDim.x;
+  }
+}
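+
+// Taken together, the two kernels above form the fallback partitioning path:
+// compute_row_partition_numbers builds per-block and global partition
+// histograms, the caller scans the per-block sizes into block_partition_offsets,
+// and compute_row_output_locations then turns each row's partition number into
+// its final output position. A rough sketch of the host-side sequencing
+// (names and launch parameters are placeholders for illustration, not the
+// exact configuration used later in this file):
+//
+//   compute_row_partition_numbers<<<grid, block, smem, stream>>>(
+//     hasher, num_rows, num_partitions, partitioner,
+//     row_partition_numbers, row_partition_offset,
+//     block_partition_sizes, global_partition_sizes);
+//   // exclusive scan of block_partition_sizes (partition-major layout)
+//   // yields the per-block output offsets consumed below
+//   compute_row_output_locations<<<grid, block, smem, stream>>>(
+//     row_partition_numbers, num_rows, num_partitions, block_partition_offsets);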
+
+/* --------------------------------------------------------------------------*/
+/**
+ * @brief Move one column from the input table to the hashed table.
+ *
+ * @param[in] input_buf Data buffer of the column in the input table
+ * @param[out] output_buf Preallocated data buffer of the column in the output
+ * table
+ * @param[in] num_rows The number of rows in each column
+ * @param[in] num_partitions The number of partitions to divide the rows into
+ * @param[in] row_partition_numbers Array that holds which partition each row
+ * belongs to
+ * @param[in] row_partition_offset Array that holds the offset of each row in
+ * its partition of the thread block.
+ * @param[in] block_partition_sizes Array that holds the size of each partition
+ * for each block
+ * @param[in] scanned_block_partition_sizes The scan of block_partition_sizes
+ */
+/* ----------------------------------------------------------------------------*/
+template <typename InputIter, typename DataType>
+__global__ void copy_block_partitions(InputIter input_iter,
+                                      DataType* __restrict__ output_buf,
+                                      const size_type num_rows,
+                                      const size_type num_partitions,
+                                      size_type const* __restrict__ row_partition_numbers,
+                                      size_type const* __restrict__ row_partition_offset,
+                                      size_type const* __restrict__ block_partition_sizes,
+                                      size_type const* __restrict__ scanned_block_partition_sizes)
+{
+  extern __shared__ char shared_memory[];
+  auto block_output = reinterpret_cast<DataType*>(shared_memory);
+  auto partition_offset_shared =
+    reinterpret_cast<size_type*>(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD);
+  auto partition_offset_global = partition_offset_shared + num_partitions + 1;
+
+  typedef cub::BlockScan<size_type, OPTIMIZED_BLOCK_SIZE> BlockScan;
+  __shared__ typename BlockScan::TempStorage temp_storage;
+
+  // use ELEMENTS_PER_THREAD=2 to support up to 1024 partitions
+  size_type temp_histo[ELEMENTS_PER_THREAD];
+
+  for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
+    if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
+      temp_histo[i] =
+        block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x];
+    } else {
+      temp_histo[i] = 0;
+    }
+  }
+
+  __syncthreads();
+
+  BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo);
+
+  __syncthreads();
+
+  if (threadIdx.x == 0) { partition_offset_shared[0] = 0; }
+
+  // Calculate the offset in shared memory of each partition in this thread
+  // block
+  for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
+    if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
+      partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i];
+    }
+  }
+
+  // Fetch the offset in the output buffer of each partition in this thread
+  // block
+  for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) {
+    partition_offset_global[ipartition] =
+      scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x];
+  }
+
+  __syncthreads();
+
+  // Fetch the input data to shared memory
+  for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows;
+       row_number += blockDim.x * gridDim.x) {
+    size_type const ipartition = row_partition_numbers[row_number];
+
+    block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] =
+      input_iter[row_number];
+  }
+
+  __syncthreads();
+
+  // Copy data from shared memory to output using 32 threads for each partition
+  constexpr int nthreads_partition = 32;
+  static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0,
+                "BLOCK_SIZE must be divisible by number of threads");
+
+  for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions;
+       ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) {
+    size_type const nelements_partition =
+      partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition];
+
+    for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition;
+         row_offset += nthreads_partition) {
+      output_buf[partition_offset_global[ipartition] + row_offset] =
+        block_output[partition_offset_shared[ipartition] + row_offset];
+    }
+  }
+}
+
+template <typename InputIter, typename OutputIter>
+void copy_block_partitions_impl(InputIter const input,
+                                OutputIter output,
+                                size_type num_rows,
+                                size_type num_partitions,
+
size_type const* row_partition_numbers, + size_type const* row_partition_offset, + size_type const* block_partition_sizes, + size_type const* scanned_block_partition_sizes, + size_type grid_size, + rmm::cuda_stream_view stream) +{ + // We need 3 chunks of shared memory: + // 1. BLOCK_SIZE * ROWS_PER_THREAD elements of size_type for copying to output + // 2. num_partitions + 1 elements of size_type for per-block partition offsets + // 3. num_partitions + 1 elements of size_type for global partition offsets + int const smem = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output) + + (num_partitions + 1) * sizeof(size_type) * 2; + + copy_block_partitions<<>>( + input, + output, + num_rows, + num_partitions, + row_partition_numbers, + row_partition_offset, + block_partition_sizes, + scanned_block_partition_sizes); +} + +rmm::device_vector compute_gather_map(size_type num_rows, + size_type num_partitions, + size_type const* row_partition_numbers, + size_type const* row_partition_offset, + size_type const* block_partition_sizes, + size_type const* scanned_block_partition_sizes, + size_type grid_size, + rmm::cuda_stream_view stream) +{ + auto sequence = thrust::make_counting_iterator(0); + rmm::device_vector gather_map(num_rows); + + copy_block_partitions_impl(sequence, + gather_map.data().get(), + num_rows, + num_partitions, + row_partition_numbers, + row_partition_offset, + block_partition_sizes, + scanned_block_partition_sizes, + grid_size, + stream); + + return gather_map; +} + +struct copy_block_partitions_dispatcher { + template ()>* = nullptr> + std::unique_ptr operator()(column_view const& input, + const size_type num_partitions, + size_type const* row_partition_numbers, + size_type const* row_partition_offset, + size_type const* block_partition_sizes, + size_type const* scanned_block_partition_sizes, + size_type grid_size, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr); + + copy_block_partitions_impl(input.data(), + static_cast(output.data()), + input.size(), + num_partitions, + row_partition_numbers, + row_partition_offset, + block_partition_sizes, + scanned_block_partition_sizes, + grid_size, + stream); + + return std::make_unique(input.type(), input.size(), std::move(output)); + } + + template ()>* = nullptr> + std::unique_ptr operator()(column_view const& input, + const size_type num_partitions, + size_type const* row_partition_numbers, + size_type const* row_partition_offset, + size_type const* block_partition_sizes, + size_type const* scanned_block_partition_sizes, + size_type grid_size, + rmm::cuda_stream_view stream, + rmm::mr::device_memory_resource* mr) + { + // Use move_to_output_buffer to create an equivalent gather map + auto gather_map = compute_gather_map(input.size(), + num_partitions, + row_partition_numbers, + row_partition_offset, + block_partition_sizes, + scanned_block_partition_sizes, + grid_size, + stream); + + // Use gather instead for non-fixed width types + return type_dispatcher(input.type(), + detail::column_gatherer{}, + input, + gather_map.begin(), + gather_map.end(), + false, + stream, + mr); + } +}; + +// NOTE hash_has_nulls must be true if table_to_hash has nulls +template